Commit ff4b3a9b authored by Ondrej Famera

Initial commit, extract of role from OndrejHome/ansible-cluster repository

with added comments, documentation and nicer names
ha-cluster-pacemaker
=========
Role for configuring a basic Pacemaker cluster on CentOS/RHEL 6/7 systems.
Requirements
------------
RHEL: It is expected that machines are already registered and subscribed for access to the 'High Availability' or 'Resilient Storage' channels.
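The role does not enable these repositories itself. A minimal sketch of enabling the repository from the playbook, assuming the machine is already registered with subscription-manager; the repository id below is only an example for RHEL 7 and will differ on other releases:

- hosts: servers
  pre_tasks:
    - name: Enable the High Availability repository (example repository id)
      command: subscription-manager repos --enable=rhel-ha-for-rhel-7-server-rpms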
Role Variables
--------------
# user used for authorizing cluster nodes
cluster_user: hacluster
cluster_user_pass: testtest
# group to which cluster user belongs (should be 'haclient')
cluster_group: haclient
# name of the cluster
cluster_name: pacemaker
# configuration of firewall for clustering, NOTE: on RHEL/CentOS 6 this replaces the iptables configuration file!
cluster_firewall: true
# enable cluster on boot
cluster_enable_service: true
# configure cluster with the fence_xvm fencing device?
# this will copy /etc/cluster/fence_xvm.key to the nodes and add fencing devices to the cluster
# NOTE: you need to define 'hypervisor_hostname' in the inventory for each cluster node (see the sketch below)
cluster_configure_fence_xvm: true
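When 'cluster_configure_fence_xvm' is enabled, every host in the play must define a 'hypervisor_hostname' variable holding the node's domain name as known to the hypervisor (it is used to build the 'pcmk_host_map' of the stonith resource). A minimal sketch using per-host variable files; the paths and values below are placeholders:

# host_vars/node1.example.com.yml
hypervisor_hostname: node1-vm

# host_vars/node2.example.com.yml
hypervisor_hostname: node2-vm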
Example Playbook
----------------
Example playbook for creating a cluster named 'test1', enabled on boot, with fence_xvm and firewall configured:

- hosts: servers
  roles:
    - { role: ondrejhome.ha-cluster-pacemaker, cluster_name: 'test1' }
Example playbook for creating a cluster named 'test2' without configuring the firewall and without fence_xvm.
For the cluster nodes to authorize properly, the firewall is expected to be already configured or disabled.

- hosts: servers
  roles:
    - { role: ondrejhome.ha-cluster-pacemaker, cluster_name: 'test2', cluster_firewall: false, cluster_configure_fence_xvm: false }
License
-------
GPLv3
Author Information
------------------
WARNING: this is an alpha-quality, proof-of-concept role that still needs some polishing and uses custom Python modules
to interact with pacemaker. It is suitable for testing purposes only.
To get in touch with the author, use the email ondrej-xa2iel8u@famera.cz or create an issue on GitHub to request a feature.
---
# user used for authorizing cluster nodes
cluster_user: hacluster
cluster_user_pass: testtest
# group to which cluster user belongs (should be 'haclient')
cluster_group: haclient
# name of the cluster
cluster_name: pacemaker
# configuration of firewall for clustering, NOTE: on RHEL/CentOS 6 this replaces the iptables configuration file!
cluster_firewall: true
# enable cluster on boot
cluster_enable_service: true
# configure cluster with the fence_xvm fencing device?
# this will copy /etc/cluster/fence_xvm.key to the nodes and add fencing devices to the cluster
# NOTE: you need to define 'hypervisor_hostname' in the inventory for each cluster node
cluster_configure_fence_xvm: true
# Firewall configuration written by system-config-firewall
# Manual customization of this file is not recommended.
*filter
:INPUT ACCEPT [0:0]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
-A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
-A INPUT -p icmp -j ACCEPT
-A INPUT -i lo -j ACCEPT
# 22/tcp - ssh
-A INPUT -m state --state NEW -m tcp -p tcp --dport 22 -j ACCEPT
# 2224/tcp - pcsd
-A INPUT -m state --state NEW -p tcp -m tcp --dport 2224 -j ACCEPT
# 3121/tcp - pacemaker_remote
-A INPUT -m state --state NEW -p tcp -m tcp --dport 3121 -j ACCEPT
# 21064/tcp - dlm
-A INPUT -m state --state NEW -p tcp -m tcp --dport 21064 -j ACCEPT
# 5405/udp - corosync
-A INPUT -m state --state NEW -p udp -m udp --dport 5405 -j ACCEPT
# 1229/tcp - fence_xvm
-A INPUT -m state --state NEW -p tcp -m tcp --dport 1229 -j ACCEPT
-A INPUT -j REJECT --reject-with icmp-host-prohibited
-A FORWARD -j REJECT --reject-with icmp-host-prohibited
COMMIT
---
- name: restart iptables
  service: name=iptables state=restarted
#!/usr/bin/python
DOCUMENTATION = '''
---
module: pcs_auth
short_description: PCS AUTH module for pacemaker clusters
description:
  - module for handling authorizations in pacemaker clusters based on 'pcs auth' logic
version_added: "0.1"
options:
  fqdn:
    description:
      - FQDN of cluster node
    required: true
    default: null
  username:
    description:
      - cluster username
    required: true
    default: null
  password:
    description:
      - cluster password
    required: true
    default: null
notes:
  - ALPHA QUALITY: Not tested extensively
  - provides only creation and detection of presence, no advanced features
  - tested on RHEL+CentOS 6.8 and 7.2
requirements: [ ]
author: "Ondrej Famera <ondrej-xa2iel8u@famera.cz>"
'''
EXAMPLES = '''
# Authorize all nodes in the ansible play, using the first part of each node's hostname as FQDN (this part must be resolvable or present in /etc/hosts)
- pcs_auth: fqdn={{ hostvars[item]['ansible_fqdn'].split('.')[0] }} username={{ cluster_user }} password={{ cluster_user_pass }}
  run_once: true
  with_items: play_hosts
'''
import os
import sys
import json

# try to import the pcs python library from the default python path,
# then retry with the EL6 location appended to the path
try:
    import pcs.utils
    HAVE_PCS = True
except ImportError:
    HAVE_PCS = False
try:
    sys.path.append("/usr/lib/python2.6/site-packages/pcs/")
    import pcs.utils
    HAVE_PCS = True
except ImportError:
    HAVE_PCS = False


def main():
    module = AnsibleModule(
        argument_spec=dict(
            fqdn=dict(required=True),
            username=dict(required=True),
            password=dict(required=True),
        ),
        supports_check_mode=True
    )
    if not HAVE_PCS:
        module.fail_json(msg="This module requires pcs")

    fqdn = module.params['fqdn']
    username = module.params['username']
    password = module.params['password']

    result = {}
    result['fqdn'] = fqdn

    # hacks for centos/rhel 7
    sys.argv[0] = "/usr/lib/pcsd/"  # FIXME pcs.utils.run_pcsdcli function doesn't work well without this
    os.environ['CIB_user'] = username

    status = pcs.utils.checkAuthorization(fqdn)
    if status[0] == 0:
        # node already reports a valid authorization
        try:
            auth_status = json.loads(status[1])
            if auth_status["success"]:
                # FIXME - this needs to be checked with cluster.py to be complete
                result['state'] = status[0]
                result['changed'] = False
        except:
            pass
    elif status[0] == 3:
        # node is not authorized yet
        if module.check_mode:
            module.exit_json(changed=True)
        else:
            output = {}
            output['status'] = ''
            try:
                # try the older updateToken interface first
                retval = pcs.utils.updateToken(fqdn, fqdn, username, password)
            except:
                # fall back to authorizing through pcsd
                pcsd_data = {
                    'nodes': [fqdn],
                    'username': username,
                    'password': password
                }
                output, retval = pcs.utils.run_pcsdcli('auth', pcsd_data)
            if (retval and output['status'] == 'ok') or output['status'] == 'access_denied':
                module.fail_json(msg="Failed authorizing node - %s , %s" % (retval, output['status']))
            else:
                result['changed'] = True
    else:
        module.fail_json(msg="Failed authorizing node %s - %s" % (fqdn, status))

    module.exit_json(**result)

# import module snippets
from ansible.module_utils.basic import *
main()
#!/usr/bin/python
DOCUMENTATION = '''
---
module: pcs_resource
short_description: PCS RESOURCE module
description:
  - module for handling resources (currently only stonith resources) in a pacemaker cluster
version_added: "0.1"
options:
  name:
    description:
      - resource name
    required: true
    default: null
  stonith:
    description:
      - is this a stonith resource?
    required: false
    default: no
  resource_type:
    description:
      - resource type name
    required: true
    default: null
  options:
    description:
      - pcs resource create options
    required: false
    default: null
notes:
  - ALPHA QUALITY: Not tested extensively
  - provides only creation and detection of presence, no advanced features
  - tested on RHEL+CentOS 6.8 and 7.2
requirements: [ ]
author: "Ondrej Famera <ondrej-xa2iel8u@famera.cz>"
'''
EXAMPLES = '''
# create stonith devices - one fence_xvm device per node, with the node's name on the hypervisor taken from the 'hypervisor_hostname' inventory variable
- pcs_resource: name="fence-{{ hostvars[item]['ansible_fqdn'].split('.')[0] }}" stonith=true resource_type=fence_xvm options="pcmk_host_map={{ hostvars[item]['ansible_fqdn'].split('.')[0] }}:{{ hostvars[item]['hypervisor_hostname'] }}; op monitor interval=30s"
  with_items: play_hosts
  run_once: true
'''
import sys

# try to import the pcs python library from the default python path,
# then retry with the EL6 location appended to the path
try:
    import pcs.utils
    import pcs.resource
    HAVE_PCS = True
except ImportError:
    HAVE_PCS = False
try:
    sys.path.append("/usr/lib/python2.6/site-packages/pcs/")
    import pcs.utils
    import pcs.resource
    HAVE_PCS = True
except ImportError:
    HAVE_PCS = False


def main():
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            stonith=dict(default='no', type='bool'),
            resource_type=dict(required=True),
            options=dict(required=False),
        ),
        supports_check_mode=True
    )
    if not HAVE_PCS:
        module.fail_json(msg="This module requires pcs")

    name = module.params['name']
    stonith = module.params['stonith']
    resource_type = module.params['resource_type']
    options = module.params['options']

    result = {}
    result['name'] = name

    # look for a resource (or stonith device) with the requested name in the CIB
    root = pcs.utils.get_cib_etree()
    resources = root.find(".//resources")
    resource_found = False
    for child in resources.findall(".//*"):
        if "id" in child.attrib and child.attrib["id"] == name and ((stonith and pcs.utils.is_stonith_resource(name)) or (not stonith and not pcs.utils.is_stonith_resource(name))):
            resource_found = True
            break

    if resource_found:
        result['state'] = resource_found
        result['changed'] = False
    else:
        if module.check_mode:
            module.exit_json(changed=True)
        else:
            # create resource
            st_values, op_values, meta_values = pcs.resource.parse_resource_options(
                options.split(), with_clone=False
            )
            try:
                resource_type_name = resource_type
                if stonith:
                    resource_type_name = "stonith:" + resource_type
                pcs.resource.resource_create(name, resource_type_name, st_values, op_values, meta_values)
                result['changed'] = True
            except:
                module.fail_json(msg="failed to create stonith device")

    module.exit_json(**result)

# import module snippets
from ansible.module_utils.basic import *
main()
---
galaxy_info:
  author: Ondrej Faměra
  description: pacemaker basic cluster role with fence_xvm
  license: GPLv3
  min_ansible_version: 1.9
  platforms:
    - name: EL
      versions:
        - 6
        - 7
  categories:
    - clustering
    - clustering:rgmanager
dependencies: []
---
- name: Copy iptables configuration
  copy: src=iptables dest=/etc/sysconfig/iptables owner=root group=root
  notify:
    - restart iptables

- name: Enable and start iptables service
  service: name=iptables enabled=yes state=started

- meta: flush_handlers
---
- name: Enable 'high-availability' firewalld service
  firewalld: service=high-availability permanent=true state=enabled immediate=true

- name: Enable fence_xvm port (1229/tcp) in firewalld
  firewalld: port=1229/tcp permanent=true state=enabled immediate=true
---
- name: Check if cluster consists of at least 2 nodes
  fail: msg="Cluster must have at least 2 members"
  when: play_hosts|count() < 2

- name: Include distribution version specific variables
  include_vars: "el{{ ansible_distribution_major_version }}.yml"

- name: Install libselinux-python to work with SELinux setting
  yum: name=libselinux-python state=installed

- name: Generate /etc/hosts
  template:
    src=hosts.j2
    dest=/etc/hosts

- name: Create /etc/cluster directory
  file: path=/etc/cluster state=directory

- name: Install Pacemaker cluster packages to all nodes
  yum: name={{ item }} state=installed
  with_items:
    - "{{ pacemaker_packages }}"

- name: Create cluster system group
  group: name={{ cluster_group }} state=present

- name: Create cluster system user
  user:
    name={{ cluster_user }} state=present
    password={{ cluster_user_pass | password_hash('sha512', ansible_hostname) }}

- name: Enable and start PCSD service
  service: name=pcsd enabled=yes state=started

- name: Setup firewall for RHEL/CentOS 6 systems (iptables)
  include: firewall-el6.yml
  when: ansible_distribution_major_version == "6" and cluster_firewall == true

- name: Setup firewall for RHEL/CentOS 7 systems (firewalld)
  include: firewall-el7.yml
  when: ansible_distribution_major_version == "7" and cluster_firewall == true

- name: Authorize cluster nodes
  pcs_auth: fqdn={{ hostvars[item]['ansible_fqdn'].split('.')[0] }} username={{ cluster_user }} password={{ cluster_user_pass }}
  run_once: true
  with_items: play_hosts

- name: Setup cluster
  command: >
    pcs cluster setup --name {{ cluster_name }} --start
    {% for host in play_hosts %}
    {% set short_name = hostvars[host]['ansible_fqdn'].split('.') %}
    {{ short_name[0] }}
    {% endfor %}
  run_once: true
  args:
    creates: /var/lib/pacemaker/cib/cib.xml

- name: Start cluster services on all nodes
  service: name={{ item }} state=started
  with_items:
    - pacemaker
    - corosync

- name: Enable cluster services on boot
  service: name={{ item }} enabled=yes
  when: cluster_enable_service == true
  with_items:
    - pacemaker
    - corosync

- name: Copy fence_xvm key to all cluster nodes
  copy: src=/etc/cluster/fence_xvm.key dest=/etc/cluster/fence_xvm.key owner=root group=root mode=0640
  when: cluster_configure_fence_xvm == true

- name: Create fence_xvm STONITH devices
  pcs_resource: name="fence-{{ hostvars[item]['ansible_fqdn'].split('.')[0] }}" stonith=true resource_type=fence_xvm options="pcmk_host_map={{ hostvars[item]['ansible_fqdn'].split('.')[0] }}:{{ hostvars[item]['hypervisor_hostname'] }}; op monitor interval=30s"
  with_items: play_hosts
  run_once: true
  when: cluster_configure_fence_xvm == true
# {{ ansible_managed }}
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
{% for item in play_hosts %}
{% set short_name = hostvars[item]['ansible_fqdn'].split('.') %}
{{ hostvars[item]['ansible_default_ipv4']['address'] }} {{ short_name[0] }}
{% endfor %}
---
pacemaker_packages:
- fence-agents
- pcs
- pacemaker
- cman
---
pacemaker_packages:
- fence-agents
- pcs
- pacemaker