diff --git a/docs/index.html b/docs/index.html
index f66ee683..1cdf7104 100644
--- a/docs/index.html
+++ b/docs/index.html
@@ -225,6 +225,8 @@
ha_cluster_constraints_ticket
ha_cluster_acls
ha_cluster_alerts
ha_cluster_qnetd
ha_cluster_acls
You may take a look at an example.
+ha_cluster_alerts
structure, default: no alerts
+ha_cluster_alerts:
+ - id: alert1
+ path: /alert1/path
+ description: Alert1 description
+ instance_attrs:
+ - attrs:
+ - name: alert_attr1_name
+ value: alert_attr1_value
+ meta_attrs:
+ - attrs:
+ - name: alert_meta_attr1_name
+ value: alert_meta_attr1_value
+ recipients:
+ - value: recipient_value
+ id: recipient1
+ description: Recipient1 description
+ instance_attrs:
+ - attrs:
+ - name: recipient_attr1_name
+ value: recipient_attr1_value
+ meta_attrs:
+ - attrs:
+ - name: recipient_meta_attr1_name
+ value: recipient_meta_attr1_value
+This variable defines Pacemaker alerts. The items of alerts are as follows:
+id (mandatory) - ID of an alert.
+path (mandatory) - Path to the alert agent executable.
+description (optional) - Description of the alert.
+instance_attrs (optional) - List of sets of the alert's instance attributes. Currently, only one set is supported, so the first set is used and the rest are ignored.
+meta_attrs (optional) - List of sets of the alert's meta attributes. Currently, only one set is supported, so the first set is used and the rest are ignored.
+recipients (optional) - List of the alert's recipients.
+The items of recipients are as follows:
+value (mandatory) - Value of a recipient.
+id (optional) - ID of the recipient.
+description (optional) - Description of the recipient.
+instance_attrs (optional) - List of sets of the recipient's instance attributes. Currently, only one set is supported, so the first set is used and the rest are ignored.
+meta_attrs (optional) - List of sets of the recipient's meta attributes. Currently, only one set is supported, so the first set is used and the rest are ignored.
+Note: The role configures the cluster to call external programs to handle alerts. It is your responsibility to provide the programs and distribute them to cluster nodes.
+You may take a look at an example.
ha_cluster_qnetd
structure and default value:
-ha_cluster_qnetd:
- present: boolean
- start_on_boot: boolean
- regenerate_keys: boolean
ha_cluster_qnetd:
+ present: boolean
+ start_on_boot: boolean
+ regenerate_keys: boolean
This configures a qnetd host which can then serve as an external quorum device for clusters. The items are as follows:
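As a minimal illustrative sketch (the values below are examples rather than documented defaults, and the comments paraphrase the option names rather than quote the role documentation):

ha_cluster_qnetd:
  present: true          # set the host up as a qnetd provider
  start_on_boot: true    # start the qnetd service on boot
  regenerate_keys: false # keep any existing qnetd TLS certificates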
Example inventory with targets node1 and node2:
all:
- hosts:
- node1:
- ha_cluster:
- node_name: node-A
- pcs_address: node1-address
- corosync_addresses:
- - 192.168.1.11
- - 192.168.2.11
- node2:
- ha_cluster:
- node_name: node-B
- pcs_address: node2-address:2224
- corosync_addresses:
- - 192.168.1.12
- - 192.168.2.12
all:
+ hosts:
+ node1:
+ ha_cluster:
+ node_name: node-A
+ pcs_address: node1-address
+ corosync_addresses:
+ - 192.168.1.11
+ - 192.168.2.11
+ node2:
+ ha_cluster:
+ node_name: node-B
+ pcs_address: node2-address:2224
+ corosync_addresses:
+ - 192.168.1.12
+ - 192.168.2.12
node_name - the name of a node in a cluster
pcs_address - an address used by pcs to communicate
@@ -1743,28 +1809,28 @@ Example inventory with targets node1
and node2:
all:
- hosts:
- node1:
- ha_cluster:
- sbd_watchdog_modules:
- - module1
- - module2
- sbd_watchdog: /dev/watchdog2
- sbd_devices:
- - /dev/vdx
- - /dev/vdy
- node2:
- ha_cluster:
- sbd_watchdog_modules:
- - module1
- sbd_watchdog_modules_blocklist:
- - module2
- sbd_watchdog: /dev/watchdog1
- sbd_devices:
- - /dev/vdw
- - /dev/vdz
all:
+ hosts:
+ node1:
+ ha_cluster:
+ sbd_watchdog_modules:
+ - module1
+ - module2
+ sbd_watchdog: /dev/watchdog2
+ sbd_devices:
+ - /dev/vdx
+ - /dev/vdy
+ node2:
+ ha_cluster:
+ sbd_watchdog_modules:
+ - module1
+ sbd_watchdog_modules_blocklist:
+ - module2
+ sbd_watchdog: /dev/watchdog1
+ sbd_devices:
+ - /dev/vdw
+ - /dev/vdz
sbd_watchdog_modules (optional) - Watchdog kernel modules to be loaded (creates /dev/watchdog* devices).
@@ -1789,583 +1855,583 @@ true
in your playbooks using the ha_cluster
role.
-- name: Manage HA cluster and firewall and selinux
- hosts: node1 node2
- vars:
- ha_cluster_manage_firewall: true
- ha_cluster_manage_selinux: true
-
- roles:
- - linux-system-roles.ha_cluster
- name: Manage HA cluster and firewall and selinux
+ hosts: node1 node2
+ vars:
+ ha_cluster_manage_firewall: true
+ ha_cluster_manage_selinux: true
+
+ roles:
+ - linux-system-roles.ha_cluster
certificate role. This example creates self-signed pcsd certificate and private key files in /var/lib/pcsd with the file names FILENAME.crt and FILENAME.key, respectively.
-- name: Manage HA cluster with certificates
- hosts: node1 node2
- vars:
- ha_cluster_pcsd_certificates:
- - name: FILENAME
- common_name: "{{ ansible_hostname }}"
- ca: self-sign
- roles:
- - linux-system-roles.ha_cluster
- name: Manage HA cluster with no resources
+- name: Manage HA cluster with certificates
hosts: node1 node2
vars:
- ha_cluster_cluster_name: my-new-cluster
- ha_cluster_hacluster_password: password
-
- roles:
- - linux-system-roles.ha_cluster
- name: Manage HA cluster with Corosync options
+- name: Manage HA cluster with no resources
hosts: node1 node2
vars:
ha_cluster_cluster_name: my-new-cluster
ha_cluster_hacluster_password: password
- ha_cluster_transport:
- type: knet
- options:
- - name: ip_version
- value: ipv4-6
- - name: link_mode
- value: active
- links:
- -
- - name: linknumber
- value: 1
- - name: link_priority
- value: 5
- -
- - name: linknumber
- value: 0
- - name: link_priority
- value: 10
- compression:
- - name: level
- value: 5
- - name: model
- value: zlib
- crypto:
- - name: cipher
- value: none
- - name: hash
- value: none
- ha_cluster_totem:
- options:
- - name: block_unlisted_ips
- value: 'yes'
- - name: send_join
- value: 0
- ha_cluster_quorum:
- options:
- - name: auto_tie_breaker
- value: 1
- - name: wait_for_all
- value: 1
-
- roles:
- - linux-system-roles.ha_cluster
- name: Manage HA cluster with Corosync options
+ hosts: node1 node2
+ vars:
+ ha_cluster_cluster_name: my-new-cluster
+ ha_cluster_hacluster_password: password
+ ha_cluster_transport:
+ type: knet
+ options:
+ - name: ip_version
+ value: ipv4-6
+ - name: link_mode
+ value: active
+ links:
+ -
+ - name: linknumber
+ value: 1
+ - name: link_priority
+ value: 5
+ -
+ - name: linknumber
+ value: 0
+ - name: link_priority
+ value: 10
+ compression:
+ - name: level
+ value: 5
+ - name: model
+ value: zlib
+ crypto:
+ - name: cipher
+ value: none
+ - name: hash
+ value: none
+ ha_cluster_totem:
+ options:
+ - name: block_unlisted_ips
+ value: 'yes'
+ - name: send_join
+ value: 0
+ ha_cluster_quorum:
+ options:
+ - name: auto_tie_breaker
+ value: 1
+ - name: wait_for_all
+ value: 1
+
+ roles:
+ - linux-system-roles.ha_cluster
ha_cluster_node_options
variable
- hosts: node1 node2
- vars:
- ha_cluster_cluster_name: my-new-cluster
- ha_cluster_hacluster_password: password
- ha_cluster_sbd_enabled: true
- ha_cluster_sbd_options:
- - name: delay-start
- value: 'no'
- - name: startmode
- value: always
- - name: timeout-action
- value: 'flush,reboot'
- - name: watchdog-timeout
- value: 30
- ha_cluster_node_options:
- - node_name: node1
- sbd_watchdog_modules:
- - iTCO_wdt
- sbd_watchdog_modules_blocklist:
- - ipmi_watchdog
- sbd_watchdog: /dev/watchdog1
- sbd_devices:
- - /dev/vdx
- - /dev/vdy
- - /dev/vdz
- - node_name: node2
- sbd_watchdog_modules:
- - iTCO_wdt
- sbd_watchdog_modules_blocklist:
- - ipmi_watchdog
- sbd_watchdog: /dev/watchdog1
- sbd_devices:
- - /dev/vdx
- - /dev/vdy
- - /dev/vdz
- # Best practice for setting SBD timeouts:
- # watchdog-timeout * 2 = msgwait-timeout (set automatically)
- # msgwait-timeout * 1.2 = stonith-timeout
- ha_cluster_cluster_properties:
- - attrs:
- - name: stonith-timeout
- value: 72
- ha_cluster_resource_primitives:
- - id: fence_sbd
- agent: 'stonith:fence_sbd'
- instance_attrs:
- - attrs:
- # taken from host_vars
- - name: devices
- value: "{{ ha_cluster.sbd_devices | join(',') }}"
- - name: pcmk_delay_base
- value: 30
-
- roles:
- - linux-system-roles.ha_cluster
- hosts: node1 node2
+ vars:
+ ha_cluster_cluster_name: my-new-cluster
+ ha_cluster_hacluster_password: password
+ ha_cluster_sbd_enabled: true
+ ha_cluster_sbd_options:
+ - name: delay-start
+ value: 'no'
+ - name: startmode
+ value: always
+ - name: timeout-action
+ value: 'flush,reboot'
+ - name: watchdog-timeout
+ value: 30
+ ha_cluster_node_options:
+ - node_name: node1
+ sbd_watchdog_modules:
+ - iTCO_wdt
+ sbd_watchdog_modules_blocklist:
+ - ipmi_watchdog
+ sbd_watchdog: /dev/watchdog1
+ sbd_devices:
+ - /dev/vdx
+ - /dev/vdy
+ - /dev/vdz
+ - node_name: node2
+ sbd_watchdog_modules:
+ - iTCO_wdt
+ sbd_watchdog_modules_blocklist:
+ - ipmi_watchdog
+ sbd_watchdog: /dev/watchdog1
+ sbd_devices:
+ - /dev/vdx
+ - /dev/vdy
+ - /dev/vdz
+ # Best practice for setting SBD timeouts:
+ # watchdog-timeout * 2 = msgwait-timeout (set automatically)
+ # msgwait-timeout * 1.2 = stonith-timeout
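+      # e.g. watchdog-timeout 30 => msgwait-timeout 60 => 60 * 1.2 = 72 for stonith-timeout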
+ ha_cluster_cluster_properties:
+ - attrs:
+ - name: stonith-timeout
+ value: 72
+ ha_cluster_resource_primitives:
+ - id: fence_sbd
+ agent: 'stonith:fence_sbd'
+ instance_attrs:
+ - attrs:
+ # taken from host_vars
+ - name: devices
+ value: "{{ ha_cluster.sbd_devices | join(',') }}"
+ - name: pcmk_delay_base
+ value: 30
+
+ roles:
+ - linux-system-roles.ha_cluster
ha_cluster
variable
The same result can be achieved by specifying node-specific options in inventory like this:
-all:
- hosts:
- node1:
- ha_cluster:
- sbd_watchdog_modules:
- - iTCO_wdt
- sbd_watchdog_modules_blocklist:
- - ipmi_watchdog
- sbd_watchdog: /dev/watchdog1
- sbd_devices:
- - /dev/vdx
- - /dev/vdy
- - /dev/vdz
- node2:
- ha_cluster:
- sbd_watchdog_modules:
- - iTCO_wdt
- sbd_watchdog_modules_blocklist:
- - ipmi_watchdog
- sbd_watchdog: /dev/watchdog1
- sbd_devices:
- - /dev/vdx
- - /dev/vdy
- - /dev/vdz
all:
+ hosts:
+ node1:
+ ha_cluster:
+ sbd_watchdog_modules:
+ - iTCO_wdt
+ sbd_watchdog_modules_blocklist:
+ - ipmi_watchdog
+ sbd_watchdog: /dev/watchdog1
+ sbd_devices:
+ - /dev/vdx
+ - /dev/vdy
+ - /dev/vdz
+ node2:
+ ha_cluster:
+ sbd_watchdog_modules:
+ - iTCO_wdt
+ sbd_watchdog_modules_blocklist:
+ - ipmi_watchdog
+ sbd_watchdog: /dev/watchdog1
+ sbd_devices:
+ - /dev/vdx
+ - /dev/vdy
+ - /dev/vdz
Variables specified in inventory can be omitted when writing the playbook:
-- hosts: node1 node2
- vars:
- ha_cluster_cluster_name: my-new-cluster
- ha_cluster_hacluster_password: password
- ha_cluster_sbd_enabled: true
- ha_cluster_sbd_options:
- - name: delay-start
- value: 'no'
- - name: startmode
- value: always
- - name: timeout-action
- value: 'flush,reboot'
- - name: watchdog-timeout
- value: 30
- # Best practice for setting SBD timeouts:
- # watchdog-timeout * 2 = msgwait-timeout (set automatically)
- # msgwait-timeout * 1.2 = stonith-timeout
- ha_cluster_cluster_properties:
- - attrs:
- - name: stonith-timeout
- value: 72
- ha_cluster_resource_primitives:
- - id: fence_sbd
- agent: 'stonith:fence_sbd'
- instance_attrs:
- - attrs:
- # taken from host_vars
- - name: devices
- value: "{{ ha_cluster.sbd_devices | join(',') }}"
- - name: pcmk_delay_base
- value: 30
-
- roles:
- - linux-system-roles.ha_cluster
If both the ha_cluster_node_options
and
-ha_cluster
variables contain SBD options, those in
-ha_cluster_node_options
have precedence.
- hosts: node1 node2
vars:
ha_cluster_cluster_name: my-new-cluster
ha_cluster_hacluster_password: password
- ha_cluster_cluster_properties:
- - attrs:
- - name: stonith-enabled
- value: 'true'
- - name: no-quorum-policy
- value: stop
-
- roles:
- - linux-system-roles.ha_cluster
If both the ha_cluster_node_options
and
+ha_cluster
variables contain SBD options, those in
+ha_cluster_node_options
have precedence.
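For instance, as a minimal sketch with hypothetical values: if inventory host_vars select one watchdog through the ha_cluster variable while the playbook selects another through ha_cluster_node_options, the node ends up with the latter:

# inventory host_vars of node1 (the ha_cluster variable)
ha_cluster:
  sbd_watchdog: /dev/watchdog0

# playbook vars (the ha_cluster_node_options variable)
ha_cluster_node_options:
  - node_name: node1
    sbd_watchdog: /dev/watchdog1  # wins, as ha_cluster_node_options has precedence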
- hosts: node1 node2
vars:
ha_cluster_cluster_name: my-new-cluster
ha_cluster_hacluster_password: password
- ha_cluster_resource_primitives:
- - id: xvm-fencing
- agent: 'stonith:fence_xvm'
- instance_attrs:
- - attrs:
- - name: pcmk_host_list
- value: node1 node2
- - id: simple-resource
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- - id: resource-with-options
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- instance_attrs:
- - attrs:
- - name: fake
- value: fake-value
- - name: passwd
- value: passwd-value
- meta_attrs:
- - attrs:
- - name: target-role
- value: Started
- - name: is-managed
- value: 'true'
- operations:
- - action: start
- attrs:
- - name: timeout
- value: '30s'
- - action: monitor
- attrs:
- - name: timeout
- value: '5'
- - name: interval
- value: '1min'
- - id: example-1
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- - id: example-2
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- - id: example-3
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- - id: simple-clone
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- - id: clone-with-options
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- - id: bundled-resource
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- ha_cluster_resource_groups:
- - id: simple-group
- resource_ids:
- - example-1
- - example-2
- meta_attrs:
- - attrs:
- - name: target-role
- value: Started
- - name: is-managed
- value: 'true'
- - id: cloned-group
- resource_ids:
- - example-3
- ha_cluster_resource_clones:
- - resource_id: simple-clone
- - resource_id: clone-with-options
- promotable: true
- id: custom-clone-id
- meta_attrs:
- - attrs:
- - name: clone-max
- value: '2'
- - name: clone-node-max
- value: '1'
- - resource_id: cloned-group
- promotable: true
- ha_cluster_resource_bundles:
- - id: bundle-with-resource
- resource-id: bundled-resource
- container:
- type: podman
- options:
- - name: image
- value: my:image
- network_options:
- - name: control-port
- value: 3121
- port_map:
- -
- - name: port
- value: 10001
- -
- - name: port
- value: 10002
- - name: internal-port
- value: 10003
- storage_map:
- -
- - name: source-dir
- value: /srv/daemon-data
- - name: target-dir
- value: /var/daemon/data
- -
- - name: source-dir-root
- value: /var/log/pacemaker/bundles
- - name: target-dir
- value: /var/log/daemon
- meta_attrs:
- - attrs:
- - name: target-role
- value: Started
- - name: is-managed
- value: 'true'
-
- roles:
- - linux-system-roles.ha_cluster
- hosts: node1 node2
vars:
ha_cluster_cluster_name: my-new-cluster
ha_cluster_hacluster_password: password
- # Set a different `resource-stickiness` value during and outside work
- # hours. This allows resources to automatically move back to their most
- # preferred hosts, but at a time that (in theory) does not interfere with
- # business activities.
- ha_cluster_resource_defaults:
- meta_attrs:
- - id: core-hours
- rule: date-spec hours=9-16 weekdays=1-5
- score: 2
- attrs:
- - name: resource-stickiness
- value: INFINITY
- - id: after-hours
- score: 1
- attrs:
- - name: resource-stickiness
- value: 0
- # Default the timeout on all 10-second-interval monitor actions on IPaddr2
- # resources to 8 seconds.
- ha_cluster_resource_operation_defaults:
- meta_attrs:
- - rule: resource ::IPaddr2 and op monitor interval=10s
- score: INFINITY
- attrs:
- - name: timeout
- value: 8s
-
- roles:
- - linux-system-roles.ha_cluster
- hosts: node1 node2
vars:
ha_cluster_cluster_name: my-new-cluster
ha_cluster_hacluster_password: password
- ha_cluster_resource_primitives:
- - id: apc1
- agent: 'stonith:fence_apc_snmp'
- instance_attrs:
- - attrs:
- - name: ip
- value: apc1.example.com
- - name: username
- value: user
- - name: password
- value: secret
- - name: pcmk_host_map
- value: node1:1;node2:2
- - id: apc2
- agent: 'stonith:fence_apc_snmp'
- instance_attrs:
- - attrs:
- - name: ip
- value: apc2.example.com
- - name: username
- value: user
- - name: password
- value: secret
- - name: pcmk_host_map
- value: node1:1;node2:2
- # Nodes have redundant power supplies, apc1 and apc2. Cluster must ensure
- # that when attempting to reboot a node, both power supplies are turned off
- # before either power supply is turned back on.
- ha_cluster_stonith_levels:
- - level: 1
- target: node1
- resource_ids:
- - apc1
- - apc2
- - level: 1
- target: node2
- resource_ids:
- - apc1
- - apc2
-
- roles:
- - linux-system-roles.ha_cluster
- hosts: node1 node2
vars:
ha_cluster_cluster_name: my-new-cluster
ha_cluster_hacluster_password: password
- # In order to use constraints, we need resources the constraints will apply
- # to.
- ha_cluster_resource_primitives:
- - id: xvm-fencing
- agent: 'stonith:fence_xvm'
- instance_attrs:
- - attrs:
- - name: pcmk_host_list
- value: node1 node2
- - id: example-1
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- - id: example-2
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- - id: example-3
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- - id: example-4
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- - id: example-5
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- - id: example-6
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- # location constraints
- ha_cluster_constraints_location:
- # resource ID and node name
- - resource:
- id: example-1
- node: node1
- options:
- - name: score
- value: 20
- # resource pattern and node name
- - resource:
- pattern: example-\d+
- node: node1
- options:
- - name: score
- value: 10
- # resource ID and rule
- - resource:
- id: example-2
- rule: '#uname eq node2 and date in_range 2022-01-01 to 2022-02-28'
- # resource pattern and rule
- - resource:
- pattern: example-\d+
- rule: node-type eq weekend and date-spec weekdays=6-7
- # colocation constraints
- ha_cluster_constraints_colocation:
- # simple constraint
- - resource_leader:
- id: example-3
- resource_follower:
- id: example-4
- options:
- - name: score
- value: -5
- # set constraint
- - resource_sets:
- - resource_ids:
- - example-1
- - example-2
- - resource_ids:
- - example-5
- - example-6
- options:
- - name: sequential
- value: "false"
- options:
- - name: score
- value: 20
- # order constraints
- ha_cluster_constraints_order:
- # simple constraint
- - resource_first:
- id: example-1
- resource_then:
- id: example-6
- options:
- - name: symmetrical
- value: "false"
- # set constraint
- - resource_sets:
- - resource_ids:
- - example-1
- - example-2
- options:
- - name: require-all
- value: "false"
- - name: sequential
- value: "false"
- - resource_ids:
- - example-3
- - resource_ids:
- - example-4
- - example-5
- options:
- - name: sequential
- value: "false"
- # ticket constraints
- ha_cluster_constraints_ticket:
- # simple constraint
- - resource:
- id: example-1
- ticket: ticket1
- options:
- - name: loss-policy
- value: stop
- # set constraint
- - resource_sets:
- - resource_ids:
- - example-3
- - example-4
- - example-5
- ticket: ticket2
- options:
- - name: loss-policy
- value: fence
-
- roles:
- - linux-system-roles.ha_cluster
- hosts: node1 node2
+ vars:
+ ha_cluster_cluster_name: my-new-cluster
+ ha_cluster_hacluster_password: password
+ # In order to use constraints, we need resources the constraints will apply
+ # to.
+ ha_cluster_resource_primitives:
+ - id: xvm-fencing
+ agent: 'stonith:fence_xvm'
+ instance_attrs:
+ - attrs:
+ - name: pcmk_host_list
+ value: node1 node2
+ - id: example-1
+ # wokeignore:rule=dummy
+ agent: 'ocf:pacemaker:Dummy'
+ - id: example-2
+ # wokeignore:rule=dummy
+ agent: 'ocf:pacemaker:Dummy'
+ - id: example-3
+ # wokeignore:rule=dummy
+ agent: 'ocf:pacemaker:Dummy'
+ - id: example-4
+ # wokeignore:rule=dummy
+ agent: 'ocf:pacemaker:Dummy'
+ - id: example-5
+ # wokeignore:rule=dummy
+ agent: 'ocf:pacemaker:Dummy'
+ - id: example-6
+ # wokeignore:rule=dummy
+ agent: 'ocf:pacemaker:Dummy'
+ # location constraints
+ ha_cluster_constraints_location:
+ # resource ID and node name
+ - resource:
+ id: example-1
+ node: node1
+ options:
+ - name: score
+ value: 20
+ # resource pattern and node name
+ - resource:
+ pattern: example-\d+
+ node: node1
+ options:
+ - name: score
+ value: 10
+ # resource ID and rule
+ - resource:
+ id: example-2
+ rule: '#uname eq node2 and date in_range 2022-01-01 to 2022-02-28'
+ # resource pattern and rule
+ - resource:
+ pattern: example-\d+
+ rule: node-type eq weekend and date-spec weekdays=6-7
+ # colocation constraints
+ ha_cluster_constraints_colocation:
+ # simple constraint
+ - resource_leader:
+ id: example-3
+ resource_follower:
+ id: example-4
+ options:
+ - name: score
+ value: -5
+ # set constraint
+ - resource_sets:
+ - resource_ids:
+ - example-1
+ - example-2
+ - resource_ids:
+ - example-5
+ - example-6
+ options:
+ - name: sequential
+ value: "false"
+ options:
+ - name: score
+ value: 20
+ # order constraints
+ ha_cluster_constraints_order:
+ # simple constraint
+ - resource_first:
+ id: example-1
+ resource_then:
+ id: example-6
+ options:
+ - name: symmetrical
+ value: "false"
+ # set constraint
+ - resource_sets:
+ - resource_ids:
+ - example-1
+ - example-2
+ options:
+ - name: require-all
+ value: "false"
+ - name: sequential
+ value: "false"
+ - resource_ids:
+ - example-3
+ - resource_ids:
+ - example-4
+ - example-5
+ options:
+ - name: sequential
+ value: "false"
+ # ticket constraints
+ ha_cluster_constraints_ticket:
+ # simple constraint
+ - resource:
+ id: example-1
+ ticket: ticket1
+ options:
+ - name: loss-policy
+ value: stop
+ # set constraint
+ - resource_sets:
+ - resource_ids:
+ - example-3
+ - example-4
+ - example-5
+ ticket: ticket2
+ options:
+ - name: loss-policy
+ value: fence
+
+ roles:
+ - linux-system-roles.ha_cluster
Note that you cannot run a quorum device on a cluster node.
-- hosts: nodeQ
- vars:
- ha_cluster_cluster_present: false
- ha_cluster_hacluster_password: password
- ha_cluster_qnetd:
- present: true
-
- roles:
- - linux-system-roles.ha_cluster
- hosts: node1 node2
+- hosts: nodeQ
vars:
- ha_cluster_cluster_name: my-new-cluster
+ ha_cluster_cluster_present: false
ha_cluster_hacluster_password: password
- ha_cluster_quorum:
- device:
- model: net
- model_options:
- - name: host
- value: nodeQ
- - name: algorithm
- value: lms
-
- roles:
- - linux-system-roles.ha_cluster
- hosts: node1 node2
vars:
ha_cluster_cluster_name: my-new-cluster
ha_cluster_hacluster_password: password
- ha_cluster_node_options:
- - node_name: node1
- attributes:
- - attrs:
- - name: attribute1
- value: value1A
- - name: attribute2
- value: value2A
- - node_name: node2
- attributes:
- - attrs:
- - name: attribute1
- value: value1B
- - name: attribute2
- value: value2B
-
- roles:
- - linux-system-roles.ha_cluster
- hosts: node1 node2
vars:
ha_cluster_cluster_name: my-new-cluster
ha_cluster_hacluster_password: password
- # To use an ACL role permission reference, the reference must exist in CIB.
- ha_cluster_resource_primitives:
- - id: not-for-operator
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- # ACLs must be enabled (using the enable-acl cluster property) in order to
- # be effective.
- ha_cluster_cluster_properties:
- - attrs:
- - name: enable-acl
- value: 'true'
- ha_cluster_acls:
- acl_roles:
- - id: operator
- description: HA cluster operator
- permissions:
- - kind: write
- xpath: //crm_config//nvpair[@name='maintenance-mode']
- - kind: deny
- reference: not-for-operator
- - id: administrator
- permissions:
- - kind: write
- xpath: /cib
- acl_users:
- - id: alice
- roles:
- - operator
- - administrator
- - id: bob
- roles:
- - administrator
- acl_groups:
- - id: admins
- roles:
- - administrator
-
- roles:
- - linux-system-roles.ha_cluster
- hosts: node1 node2
vars:
ha_cluster_cluster_name: my-new-cluster
ha_cluster_hacluster_password: password
- # For utilization to have an effect, the `placement-strategy` property
- # must be set and its value must be different from the value `default`.
- ha_cluster_cluster_properties:
- - attrs:
- - name: placement-strategy
- value: utilization
- ha_cluster_node_options:
- - node_name: node1
- utilization:
- - attrs:
- - name: utilization1
- value: 1
- - name: utilization2
- value: 2
- - node_name: node2
- utilization:
- - attrs:
- - name: utilization1
- value: 3
- - name: utilization2
- value: 4
- ha_cluster_resource_primitives:
- - id: resource1
- # wokeignore:rule=dummy
- agent: 'ocf:pacemaker:Dummy'
- utilization:
- - attrs:
- - name: utilization1
- value: 2
- - name: utilization2
- value: 3
-
- roles:
- - linux-system-roles.ha_cluster
- hosts: node1 node2
vars:
- ha_cluster_cluster_present: false
-
- roles:
- - linux-system-roles.ha_cluster
- hosts: node1 node2
+ vars:
+ ha_cluster_cluster_name: my-new-cluster
+ ha_cluster_hacluster_password: password
+ ha_cluster_alerts:
+ - id: alert1
+ path: /alert1/path
+ description: Alert1 description
+ instance_attrs:
+ - attrs:
+ - name: alert_attr1_name
+ value: alert_attr1_value
+ meta_attrs:
+ - attrs:
+ - name: alert_meta_attr1_name
+ value: alert_meta_attr1_value
+ recipients:
+ - value: recipient_value
+ id: recipient1
+ description: Recipient1 description
+ instance_attrs:
+ - attrs:
+ - name: recipient_attr1_name
+ value: recipient_attr1_value
+ meta_attrs:
+ - attrs:
+ - name: recipient_meta_attr1_name
+ value: recipient_meta_attr1_value
+
+ roles:
+ - linux-system-roles.ha_cluster
- hosts: node1 node2
+ vars:
+ ha_cluster_cluster_present: false
+
+ roles:
+ - linux-system-roles.ha_cluster
See README-ostree.md