From 1e9b67feccd2015dab0dce9fbaab1f346dcb44c9 Mon Sep 17 00:00:00 2001 From: Tobias Wolter Date: Tue, 29 Dec 2020 14:53:46 +0100 Subject: [PATCH 1/2] Support for pcs 0.10.0 and newer Debian systems * Removed the `promotable` attribute from `cs_primitive`. It was only a convenience feature, and it caused serious headaches in parsing the configuration properly; promotability is *not* a property of the managed resource, so it arguably should not be managed as if it belonged to the resource. * Rename `version_corosync` et al. to `ensure_corosync` etc. to reduce confusion and clarify their meaning. * Add support for Debian 10, 11 * Add support for Ubuntu 18.04, 20.04 * Implicitly add support for `pcs` version 0.10.0+ commands; the CLI interface changed in that release. * Fix SLES references in `metadata.json` --- REFERENCE.md | 13 -- data/common.yaml | 3 + data/os/Debian.yaml | 3 + data/os/Debian/9.yaml | 3 + data/os/RedHat.yaml | 4 + data/os/RedHat/6.yaml | 2 + data/os/RedHat/7.yaml | 2 + data/os/SLES.yaml | 3 + data/os/Ubuntu.yaml | 3 + data/os/Ubuntu/18.04.yaml | 2 + hiera.yaml | 18 ++ lib/puppet/provider/cs_location/pcs.rb | 2 +- lib/puppet/provider/cs_primitive/crm.rb | 50 ------ lib/puppet/provider/cs_primitive/pcs.rb | 74 +------- lib/puppet/type/cs_primitive.rb | 56 ------ .../voxpupuli/corosync/provider/pcs.rb | 13 ++ manifests/init.pp | 119 ++++++------- manifests/params.pp | 45 +---- manifests/qdevice.pp | 111 ++++++------ metadata.json | 20 ++- spec/acceptance/cs_colocation_spec.rb | 16 +- spec/acceptance/cs_commit_spec.rb | 38 +++-- spec/acceptance/cs_primitive_spec.rb | 144 ++++++++++------ spec/acceptance/cs_shadow_spec.rb | 38 +++-- spec/classes/corosync_qdevice_spec.rb | 120 +++++++------ spec/classes/corosync_spec.rb | 161 +++++++++++------- spec/spec_helper.rb | 28 +++ spec/spec_helper_acceptance.rb | 39 +++++ spec/spec_helper_corosync.rb | 41 +++++ .../puppet/provider/cs_primitive_crm_spec.rb | 8 - .../puppet/provider/cs_primitive_pcs_spec.rb | 36 ++-- spec/unit/puppet/type/cs_primitive_spec.rb | 24 +-- 32 files changed, 665 insertions(+), 574 deletions(-) create mode 100644 data/common.yaml create mode 100644 data/os/Debian.yaml create mode 100644 data/os/Debian/9.yaml create mode 100644 data/os/RedHat.yaml create mode 100644 data/os/RedHat/6.yaml create mode 100644 data/os/RedHat/7.yaml create mode 100644 data/os/SLES.yaml create mode 100644 data/os/Ubuntu.yaml create mode 100644 data/os/Ubuntu/18.04.yaml create mode 100644 hiera.yaml diff --git a/REFERENCE.md b/REFERENCE.md index 5386a035..1260fdb2 100644 --- a/REFERENCE.md +++ b/REFERENCE.md @@ -1387,19 +1387,6 @@ A hash of metadata for the master/slave primitive state. Default value: Hash.new -##### `promotable` - -Valid values: `true`, `false` - -Designates if the primitive is capable of being managed in a master/slave -state. This will create a new ms resource in your Corosync config and add -this primitive to it. Concequently Corosync will be helpful and update all -your colocation and order resources too but Puppet won't. Currenlty we unmunge -configuraiton entries that start with ms_ so that you don't have to account for -name change in all our manifests. - -Default value: false - #### Parameters The following parameters are available in the `cs_primitive` type.
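A minimal usage sketch of the renamed parameters and the new provider selection; the values shown are illustrative, not module defaults, and on the platforms covered by the new Hiera data both `provider` and `pcs_version` resolve automatically, so the explicit arguments below are optional overrides:

    class { 'corosync':
      ensure_corosync  => 'present',
      ensure_pacemaker => 'present',
      ensure_pcs       => 'present',
      provider         => 'pcs', # 'crm' remains the provider on SLES and Debian 9
    }

The `pcs_version` value drives the `versioncmp()` selectors in `manifests/init.pp`: on pcs >= 0.10.0 the module switches from `pcs cluster auth` to `pcs host auth` and drops the `--name` argument from `pcs cluster setup`.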
diff --git a/data/common.yaml b/data/common.yaml new file mode 100644 index 00000000..974b9d26 --- /dev/null +++ b/data/common.yaml @@ -0,0 +1,3 @@ +--- +corosync::package_fence_agents: false +corosync::qdevice::provider: "%{alias('corosync::provider')}" diff --git a/data/os/Debian.yaml b/data/os/Debian.yaml new file mode 100644 index 00000000..092666ef --- /dev/null +++ b/data/os/Debian.yaml @@ -0,0 +1,3 @@ +--- +corosync::provider: 'pcs' +corosync::pcs_version: '0.10.0' diff --git a/data/os/Debian/9.yaml b/data/os/Debian/9.yaml new file mode 100644 index 00000000..397da8ee --- /dev/null +++ b/data/os/Debian/9.yaml @@ -0,0 +1,3 @@ +--- +corosync::provider: 'crm' +corosync::pcs_version: NULL diff --git a/data/os/RedHat.yaml b/data/os/RedHat.yaml new file mode 100644 index 00000000..f80fce80 --- /dev/null +++ b/data/os/RedHat.yaml @@ -0,0 +1,4 @@ +--- +corosync::provider: 'pcs' +corosync::pcs_version: '0.10.0' +corosync::package_fence_agents: true diff --git a/data/os/RedHat/6.yaml b/data/os/RedHat/6.yaml new file mode 100644 index 00000000..baf9491f --- /dev/null +++ b/data/os/RedHat/6.yaml @@ -0,0 +1,2 @@ +--- +corosync::pcs_version: '0.9.0' diff --git a/data/os/RedHat/7.yaml b/data/os/RedHat/7.yaml new file mode 100644 index 00000000..baf9491f --- /dev/null +++ b/data/os/RedHat/7.yaml @@ -0,0 +1,2 @@ +--- +corosync::pcs_version: '0.9.0' diff --git a/data/os/SLES.yaml b/data/os/SLES.yaml new file mode 100644 index 00000000..397da8ee --- /dev/null +++ b/data/os/SLES.yaml @@ -0,0 +1,3 @@ +--- +corosync::provider: 'crm' +corosync::pcs_version: NULL diff --git a/data/os/Ubuntu.yaml b/data/os/Ubuntu.yaml new file mode 100644 index 00000000..a5a6c1cb --- /dev/null +++ b/data/os/Ubuntu.yaml @@ -0,0 +1,3 @@ +--- +corosync::pcs_version: '0.10.0' +corosync::provider: 'pcs' diff --git a/data/os/Ubuntu/18.04.yaml b/data/os/Ubuntu/18.04.yaml new file mode 100644 index 00000000..baf9491f --- /dev/null +++ b/data/os/Ubuntu/18.04.yaml @@ -0,0 +1,2 @@ +--- +corosync::pcs_version: '0.9.0' diff --git a/hiera.yaml b/hiera.yaml new file mode 100644 index 00000000..fe07438b --- /dev/null +++ b/hiera.yaml @@ -0,0 +1,18 @@ +version: 5 + +defaults: # Used for any hierarchy level that omits these keys. + datadir: data # This path is relative to hiera.yaml's directory. + data_hash: yaml_data # Use the built-in YAML backend. + +hierarchy: + - name: "osfamily/major release" + paths: + # Used to distinguish between Debian and Ubuntu + - "os/%{facts.os.name}/%{facts.os.release.major}.yaml" + - "os/%{facts.os.family}/%{facts.os.release.major}.yaml" + - name: "osfamily" + paths: + - "os/%{facts.os.name}.yaml" + - "os/%{facts.os.family}.yaml" + - name: 'common' + path: 'common.yaml' diff --git a/lib/puppet/provider/cs_location/pcs.rb b/lib/puppet/provider/cs_location/pcs.rb index 2867f76c..05db0380 100644 --- a/lib/puppet/provider/cs_location/pcs.rb +++ b/lib/puppet/provider/cs_location/pcs.rb @@ -87,7 +87,7 @@ def flush cmd = [command(:pcs), 'constraint', 'remove', @resource[:name]] self.class.run_command_in_cib(cmd, @resource[:cib], false) unless @property_hash[:node_name].nil? - cmd = [command(:pcs), 'constraint', 'location', 'add', @property_hash[:name], @property_hash[:primitive], @property_hash[:node_name], @property_hash[:score]] + cmd = [command(:pcs), 'constraint', 'location', 'add', '--force', @property_hash[:name], @property_hash[:primitive], @property_hash[:node_name], @property_hash[:score]] cmd << "resource-discovery=#{@property_hash[:resource_discovery]}" unless @property_hash[:resource_discovery].nil? 
self.class.run_command_in_cib(cmd, @resource[:cib]) end diff --git a/lib/puppet/provider/cs_primitive/crm.rb b/lib/puppet/provider/cs_primitive/crm.rb index 004a7fec..911cb32c 100644 --- a/lib/puppet/provider/cs_primitive/crm.rb +++ b/lib/puppet/provider/cs_primitive/crm.rb @@ -35,7 +35,6 @@ def self.element_to_hash(e) metadata: nvpairs_to_hash(e.elements['meta_attributes']), existing_metadata: nvpairs_to_hash(e.elements['meta_attributes']), ms_metadata: {}, - promotable: :false } operations = e.elements['operations'] @@ -56,15 +55,6 @@ def self.element_to_hash(e) hash[:operations] << operation end end - if e.parent.name == 'master' - hash[:promotable] = :true - unless e.parent.elements['meta_attributes'].nil? - e.parent.elements['meta_attributes'].each_element do |m| - hash[:ms_metadata][m.attributes['name']] = m.attributes['value'] - end - end - hash[:existing_ms_metadata] = hash[:ms_metadata].dup - end hash end @@ -93,13 +83,11 @@ def create primitive_class: @resource[:primitive_class], provided_by: @resource[:provided_by], primitive_type: @resource[:primitive_type], - promotable: @resource[:promotable] } @property_hash[:parameters] = @resource[:parameters] unless @resource[:parameters].nil? @property_hash[:operations] = @resource[:operations] unless @resource[:operations].nil? @property_hash[:utilization] = @resource[:utilization] unless @resource[:utilization].nil? @property_hash[:metadata] = @resource[:metadata] unless @resource[:metadata].nil? - @property_hash[:ms_metadata] = @resource[:ms_metadata] unless @resource[:ms_metadata].nil? @property_hash[:cib] = @resource[:cib] unless @resource[:cib].nil? end @@ -134,14 +122,6 @@ def metadata @property_hash[:metadata] end - def ms_metadata - @property_hash[:ms_metadata] - end - - def promotable - @property_hash[:promotable] - end - # Our setters for parameters and operations. Setters are used when the # resource already exists so we just update the current value in the # property_hash and doing this marks it to be flushed. @@ -161,23 +141,6 @@ def metadata=(should) @property_hash[:metadata] = should end - def ms_metadata=(should) - @property_hash[:ms_metadata] = should - end - - def promotable=(should) - case should - when :true - @property_hash[:promotable] = should - when :false - @property_hash[:promotable] = should - cmd = [command(:crm), 'resource', 'stop', "ms_#{@resource[:name]}"] - self.class.run_command_in_cib(cmd, @resource[:cib]) - cmd = [command(:crm), 'configure', 'delete', "ms_#{@resource[:name]}"] - self.class.run_command_in_cib(cmd, @resource[:cib]) - end - end - # Flush is triggered on anything that has been detected as being # modified in the property_hash. It generates a temporary file with # the updates that need to be made. The temporary file is then used @@ -202,9 +165,6 @@ def flush if @property_hash[:existing_metadata] && @property_hash[:existing_metadata][parameter_name] @property_hash[:metadata][parameter_name] = @property_hash[:existing_metadata]['target-role'] end - if @property_hash[:existing_ms_metadata] && @property_hash[:existing_ms_metadata][parameter_name] - @property_hash[:ms_metadata][parameter_name] = @property_hash[:existing_ms_metadata]['target-role'] - end end end unless @property_hash[:parameters].empty? @@ -233,16 +193,6 @@ def flush updated << "#{parameters} " unless parameters.nil? updated << "#{utilization} " unless utilization.nil? updated << "#{metadatas} " unless metadatas.nil? 
- if @property_hash[:promotable] == :true - updated << "\n" - updated << "ms ms_#{@property_hash[:name]} #{@property_hash[:name]} " - unless @property_hash[:ms_metadata].empty? - updated << 'meta ' - @property_hash[:ms_metadata].each_pair do |k, v| - updated << "#{k}=#{v} " - end - end - end debug("Loading update: #{updated}") Tempfile.open('puppet_crm_update') do |tmpfile| tmpfile.write(updated) diff --git a/lib/puppet/provider/cs_primitive/pcs.rb b/lib/puppet/provider/cs_primitive/pcs.rb index ce182876..cd40d552 100644 --- a/lib/puppet/provider/cs_primitive/pcs.rb +++ b/lib/puppet/provider/cs_primitive/pcs.rb @@ -36,15 +36,11 @@ def self.element_to_hash(e) operations: [], utilization: nvpairs_to_hash(e.elements['utilization']), metadata: nvpairs_to_hash(e.elements['meta_attributes']), - ms_metadata: {}, - promotable: :false, existing_resource: :true, existing_primitive_class: e.attributes['class'], existing_primitive_type: e.attributes['type'], - existing_promotable: :false, existing_provided_by: e.attributes['provider'], existing_metadata: nvpairs_to_hash(e.elements['meta_attributes']), - existing_ms_metadata: {}, existing_operations: [] } @@ -67,16 +63,6 @@ def self.element_to_hash(e) hash[:existing_operations] << operation end end - if e.parent.name == 'master' - hash[:promotable] = :true - hash[:existing_promotable] = :true - unless e.parent.elements['meta_attributes'].nil? - e.parent.elements['meta_attributes'].each_element do |m| - hash[:ms_metadata][m.attributes['name']] = m.attributes['value'] - end - hash[:existing_ms_metadata] = hash[:ms_metadata].dup - end - end hash end @@ -117,14 +103,12 @@ def create primitive_class: @resource[:primitive_class], provided_by: @resource[:provided_by], primitive_type: @resource[:primitive_type], - promotable: @resource[:promotable], existing_resource: :false } @property_hash[:parameters] = @resource[:parameters] unless @resource[:parameters].nil? @property_hash[:operations] = @resource[:operations] unless @resource[:operations].nil? @property_hash[:utilization] = @resource[:utilization] unless @resource[:utilization].nil? @property_hash[:metadata] = @resource[:metadata] unless @resource[:metadata].nil? - @property_hash[:ms_metadata] = @resource[:ms_metadata] unless @resource[:ms_metadata].nil? @property_hash[:existing_metadata] = {} end @@ -136,16 +120,6 @@ def destroy @property_hash.clear end - def promotable=(should) - case should - when :true - @property_hash[:promotable] = should - when :false - @property_hash[:promotable] = should - self.class.run_command_in_cib([command(:pcs), 'resource', 'delete', "ms_#{@resource[:name]}"], @resource[:cib]) - end - end - # Performs a subset of flush operations which are relevant only to stonith # resources. Non stonith resources will never call this method. # @@ -229,39 +203,18 @@ def _flush_resource(operations, parameters, utilization, metadatas) end if @property_hash[:existing_resource] == :false || force_reinstall == :true - cmd = if Facter.value(:osfamily) == 'RedHat' && Facter.value(:operatingsystemmajrelease).to_s == '7' - [command(:pcs), pcs_subcommand, 'create', '--force', '--no-default-ops', (@property_hash[:name]).to_s] - else - cmd = [command(:pcs), pcs_subcommand, 'create', '--force', (@property_hash[:name]).to_s] - end + cmd = [command(:pcs), pcs_subcommand, 'create', '--force', '--no-default-ops', (@property_hash[:name]).to_s] cmd << resource_type cmd += parameters unless parameters.nil? cmd += operations unless operations.nil? cmd += utilization unless utilization.nil? 
cmd += metadatas unless metadatas.nil? - self.class.run_command_in_cib(cmd, @resource[:cib]) - # if we are using a master/slave resource, prepend ms_ before its name - # and declare it as a master/slave resource - if @property_hash[:promotable] == :true - cmd = [command(:pcs), pcs_subcommand, 'master', "ms_#{@property_hash[:name]}", (@property_hash[:name]).to_s] - unless @property_hash[:ms_metadata].empty? - cmd << 'meta' - @property_hash[:ms_metadata].each_pair do |k, v| - cmd << "#{k}=#{v}" - end - end - self.class.run_command_in_cib(cmd, @resource[:cib]) - end - # try to remove the default monitor operation - default_op = { 'monitor' => { 'interval' => '60s' } } - unless @property_hash[:operations].include?(default_op) - cmd = [command(:pcs), pcs_subcommand, 'op', 'remove', (@property_hash[:name]).to_s, 'monitor', 'interval=60s'] - self.class.run_command_in_cib(cmd, @resource[:cib], false) - end + # default_op = { 'monitor' => { 'interval' => '60s' } } + # unless @property_hash[:operations].include?(default_op) + # cmd = [command(:pcs), pcs_subcommand, 'op', 'remove', (@property_hash[:name]).to_s, 'monitor', 'interval=60s'] + # end + self.class.run_command_in_cib(cmd, @resource[:cib], false) else - if @property_hash[:promotable] == :false && @property_hash[:existing_promotable] == :true - self.class.run_command_in_cib([command(:pcs), pcs_subcommand, 'delete', '--force', "ms_#{@property_hash[:name]}"], @resource[:cib]) - end @property_hash[:existing_operations].reject { |op| @property_hash[:operations].include?(op) }.each do |o| cmd = [command(:pcs), pcs_subcommand, 'op', 'remove', (@property_hash[:name]).to_s] cmd << o.keys.first.to_s @@ -276,19 +229,6 @@ def _flush_resource(operations, parameters, utilization, metadatas) cmd += utilization unless utilization.nil? cmd += metadatas unless metadatas.nil? self.class.run_command_in_cib(cmd, @resource[:cib]) - if @property_hash[:promotable] == :true - cmd = [command(:pcs), pcs_subcommand, 'update', "ms_#{@property_hash[:name]}"] - unless @property_hash[:ms_metadata].empty? && @property_hash[:existing_ms_metadata].empty? - cmd << 'meta' - @property_hash[:ms_metadata].each_pair do |k, v| - cmd << "#{k}=#{v}" - end - @property_hash[:existing_ms_metadata].keys.reject { |key| @property_hash[:ms_metadata].key?(key) }.each do |k| - cmd << "#{k}=" - end - end - self.class.run_command_in_cib(cmd, @resource[:cib]) - end end end @@ -332,8 +272,6 @@ def flush if @resource && @resource.class.name == :cs_primitive && @resource[:unmanaged_metadata] @resource[:unmanaged_metadata].each do |parameter_name| @property_hash[:metadata].delete(parameter_name) - @property_hash[:ms_metadata].delete(parameter_name) if @property_hash[:ms_metadata] - @property_hash[:existing_ms_metadata].delete(parameter_name) if @property_hash[:existing_ms_metadata] @property_hash[:existing_metadata].delete(parameter_name) if @property_hash[:existing_metadata] end end diff --git a/lib/puppet/type/cs_primitive.rb b/lib/puppet/type/cs_primitive.rb index 2c22f2a6..e13f41d9 100644 --- a/lib/puppet/type/cs_primitive.rb +++ b/lib/puppet/type/cs_primitive.rb @@ -243,62 +243,6 @@ def change_to_s(currentvalue, newvalue) # rubocop:enable Style/EmptyLiteral end - newproperty(:ms_metadata) do - desc 'A hash of metadata for the master/slave primitive state.' 
- - munge do |value_hash| - # Ruby 1.8.7 does not support each_with_object - # rubocop:disable Style/EachWithObject - value_hash.reduce({}) do |memo, (key, value)| - # rubocop:enable Style/EachWithObject - memo[key] = String(value) - memo - end - end - - def insync?(is) - super(is.reject { |k| @resource[:unmanaged_metadata].include?(k) }) - end - - # rubocop:disable Style/PredicateName - def is_to_s(is) - # rubocop:enable Style/PredicateName - super(is.reject { |k| @resource[:unmanaged_metadata].include?(k) }) - end - - def should_to_s(should) - super(should.reject { |k| @resource[:unmanaged_metadata].include?(k) }) - end - - def change_to_s(currentvalue, newvalue) - if @resource[:unmanaged_metadata].count.zero? - super - else - super + " (unmanaged parameters: #{@resource[:unmanaged_metadata].join(', ')})" - end - end - - validate do |value| - raise Puppet::Error, 'Puppet::Type::Cs_Primitive: ms_metadata property must be a hash' unless value.is_a? Hash - end - # rubocop:disable Style/EmptyLiteral - defaultto Hash.new - # rubocop:enable Style/EmptyLiteral - end - - newproperty(:promotable) do - desc "Designates if the primitive is capable of being managed in a master/slave - state. This will create a new ms resource in your Corosync config and add - this primitive to it. Concequently Corosync will be helpful and update all - your colocation and order resources too but Puppet won't. Currenlty we unmunge - configuraiton entries that start with ms_ so that you don't have to account for - name change in all our manifests." - - newvalues(:true, :false) - - defaultto :false - end - autorequire(:cs_shadow) do autos = [] autos << @parameters[:cib].value if @parameters[:cib] diff --git a/lib/puppet_x/voxpupuli/corosync/provider/pcs.rb b/lib/puppet_x/voxpupuli/corosync/provider/pcs.rb index d33235ff..3cb4e97b 100644 --- a/lib/puppet_x/voxpupuli/corosync/provider/pcs.rb +++ b/lib/puppet_x/voxpupuli/corosync/provider/pcs.rb @@ -99,4 +99,17 @@ def exists? debug(@property_hash.inspect) !(@property_hash[:ensure] == :absent || @property_hash.empty?) end + + def self.version + cmd = [command(:pcs), '--version'] + Puppet::Util::Execution.execute(cmd, { failonfail: true }) + end + + def self.syntax_010(old, new) + if Puppet::Util::Package.versioncmp(version, '0.10.0') >= 0 + new + else + old + end + end end diff --git a/manifests/init.pp b/manifests/init.pp index 42b9642e..1b4fe15f 100644 --- a/manifests/init.pp +++ b/manifests/init.pp @@ -46,7 +46,7 @@ # # @param unicast_addresses # An array of IP addresses that make up the cluster's members. These are -# used if you are not able to use multicast on your network and instead opt +# used if you are not able to use multicast on your network and instead opt # for the udpu transport. You need a relatively recent version of Corosync to # make this possible. # You can also have an array of arrays to have multiple rings. In that case, @@ -104,19 +104,9 @@ # @param package_corosync # Define if package corosync should be managed. # -# @param package_crmsh -# Define if package crmsh should be managed. -# Default (Debian): true -# Default: false -# # @param package_pacemaker # Define if package pacemaker should be managed. # -# @param package_pcs -# Define if package pcs should be managed. -# Default (Debian): false -# Default: true -# # @param package_fence_agents # Define if package fence-agents should be managed. # Default (Red Hat based): true @@ -142,23 +132,23 @@ # Additional install-options for the pcs package resource. 
# Default: undef # -# @param version_corosync +# @param ensure_corosync # Define what version of the corosync package should be installed. # Default: 'present' # -# @param version_crmsh +# @param ensure_crmsh # Define what version of the crmsh package should be installed. # Default: 'present' # -# @param version_pacemaker +# @param ensure_pacemaker # Define what version of the pacemaker package should be installed. # Default: 'present' # -# @param version_pcs +# @param ensure_pcs # Define what version of the pcs package should be installed. # Default: 'present' # -# @param version_fence_agents +# @param ensure_fence_agents # Define what version of the fence-agents-all package should be installed. # Default: 'present' # @@ -323,6 +313,9 @@ # Watchdog device to use, for example '/dev/watchdog' or 'off'. # Its presence (or lack thereof) shifted with corosync versions. # +# @param provider +# What command line utility provides corosync configuration capabilities. +# # @example Simple configuration without secauth # # class { 'corosync': @@ -364,20 +357,18 @@ Optional[Integer[0,255]] $ttl = undef, Optional[Enum['ykd', 'none']] $vsftype = undef, Boolean $package_corosync = $corosync::params::package_corosync, - Boolean $package_crmsh = $corosync::params::package_crmsh, Boolean $package_pacemaker = $corosync::params::package_pacemaker, - Boolean $package_pcs = $corosync::params::package_pcs, - Boolean $package_fence_agents = $corosync::params::package_fence_agents, + Boolean $package_fence_agents = false, Optional[Array[String[1]]] $packageopts_corosync = $corosync::params::package_install_options, Optional[Array[String[1]]] $packageopts_pacemaker = $corosync::params::package_install_options, Optional[Array[String[1]]] $packageopts_crmsh = $corosync::params::package_install_options, Optional[Array[String[1]]] $packageopts_pcs = $corosync::params::package_install_options, Optional[Array[String[1]]] $packageopts_fence_agents = $corosync::params::package_install_options, - String[1] $version_corosync = $corosync::params::version_corosync, - String[1] $version_crmsh = $corosync::params::version_crmsh, - String[1] $version_pacemaker = $corosync::params::version_pacemaker, - String[1] $version_pcs = $corosync::params::version_pcs, - String[1] $version_fence_agents = $corosync::params::version_fence_agents, + String[1] $ensure_corosync = $corosync::params::ensure_corosync, + String[1] $ensure_crmsh = $corosync::params::ensure_crmsh, + String[1] $ensure_pacemaker = $corosync::params::ensure_pacemaker, + String[1] $ensure_pcs = $corosync::params::ensure_pcs, + String[1] $ensure_fence_agents = $corosync::params::ensure_fence_agents, Boolean $set_votequorum = $corosync::params::set_votequorum, Optional[Integer] $votequorum_expected_votes = undef, Array $quorum_members = ['localhost'], @@ -410,6 +401,8 @@ String[1] $config_validate_cmd = '/usr/bin/env COROSYNC_MAIN_CONFIG_FILE=% /usr/sbin/corosync -t', Boolean $test_corosync_config = $corosync::params::test_corosync_config, Optional[Variant[Stdlib::Absolutepath, Enum['off']]] $watchdog_device = undef, + Enum['pcs', 'crm'] $provider = 'pcs', + String $pcs_version = '', ) inherits corosync::params { if $set_votequorum and (empty($quorum_members) and empty($multicast_address) and !$cluster_name) { fail('set_votequorum is true, so you must set either quorum_members, or one of multicast_address or cluster_name.') @@ -425,7 +418,7 @@ if $package_corosync { package { 'corosync': - ensure => $version_corosync, + ensure => $ensure_corosync, install_options => 
$packageopts_corosync, } $corosync_package_require = Package['corosync'] @@ -441,42 +434,44 @@ if $package_pacemaker { package { 'pacemaker': - ensure => $version_pacemaker, + ensure => $ensure_pacemaker, install_options => $packageopts_pacemaker, } } - if $package_crmsh { - package { 'crmsh': - ensure => $version_crmsh, - install_options => $packageopts_crmsh, - } - } - - if $package_pcs { - package { 'pcs': - ensure => $version_pcs, - install_options => $packageopts_pcs, + case $provider { + 'crm': { + package { 'crmsh': + ensure => $ensure_crmsh, + install_options => $packageopts_crmsh, + } } - - # Set the password for the hacluster user if it was provided - if $sensitive_hacluster_hash { - group { 'haclient': - ensure => 'present', - require => Package['pcs'], + 'pcs': { + package { 'pcs': + ensure => $ensure_pcs, + install_options => $packageopts_pcs, } + if $sensitive_hacluster_hash { + group { 'haclient': + ensure => 'present', + require => Package['pcs'], + } - user { 'hacluster': - ensure => 'present', - gid => 'haclient', - password => $sensitive_hacluster_hash.unwrap, + user { 'hacluster': + ensure => 'present', + gid => 'haclient', + password => $sensitive_hacluster_hash.unwrap, + } } } + default: { + fail("Unknown corosync provider ${provider}") + } } if $package_fence_agents { package { 'fence-agents-all': - ensure => $version_fence_agents, + ensure => $ensure_fence_agents, install_options => $packageopts_fence_agents, } } @@ -596,12 +591,18 @@ # addresses $node_string = join($quorum_members, ' ') + # Define the pcs auth command; its syntax changed with 0.10.0 as per #513 + $pcs_auth_command = versioncmp($pcs_version, '0.10.0') ? { + -1 => 'pcs cluster auth', + default => 'pcs host auth', + } + # Attempt to authorize all members. The command will return successfully # if they were already authenticated so it's safe to run every time this # is applied. # TODO - make it run only once - exec { 'pcs_cluster_auth': - command => "pcs cluster auth ${node_string} ${auth_credential_string}", + exec { 'authorize_members': + command => "${pcs_auth_command} ${node_string} ${auth_credential_string}", path => $exec_path, require => [ Service['pcsd'], @@ -624,14 +625,18 @@ } if $manage_quorum_device and $manage_pcsd_auth and $is_auth_node and $set_votequorum { + $pcs_cluster_setup_namearg = versioncmp($pcs_version, '0.10.0') ? { + -1 => '--name', + default => '', + } # If the cluster hasn't been configured yet, temporarily configure it so - # the pcs_cluster_auth_qdevice command doesn't fail. This should generate + # the authorize_qdevice command doesn't fail. This should generate # a temporary corosync.conf which will then be overwritten exec { 'pcs_cluster_temporary': - command => "pcs cluster setup --force --name ${cluster_name} ${node_string}", + command => "pcs cluster setup --force ${pcs_cluster_setup_namearg} ${cluster_name} ${node_string}", path => $exec_path, onlyif => 'test !
-f /etc/corosync/corosync.conf', - require => Exec['pcs_cluster_auth'], + require => Exec['authorize_members'], } # We need to do this so the temporary cluster doesn't delete our authkey if $enable_secauth { @@ -644,13 +649,13 @@ $qdevice_token_check = "${token_prefix} ${quorum_device_host} ${token_suffix}" $quorum_device_password = $sensitive_quorum_device_password.unwrap - exec { 'pcs_cluster_auth_qdevice': - command => "pcs cluster auth ${quorum_device_host} -u hacluster -p ${quorum_device_password}", + exec { 'authorize_qdevice': + command => "${pcs_auth_command} ${quorum_device_host} -u hacluster -p ${quorum_device_password}", path => $exec_path, onlyif => $qdevice_token_check, require => [ Package[$package_quorum_device], - Exec['pcs_cluster_auth'], + Exec['authorize_members'], Exec['pcs_cluster_temporary'], ], } @@ -666,7 +671,7 @@ onlyif => [ 'test 0 -ne $(pcs quorum config | grep "host:" >/dev/null 2>&1; echo $?)', ], - require => Exec['pcs_cluster_auth_qdevice'], + require => Exec['authorize_qdevice'], before => File['/etc/corosync/corosync.conf'], notify => Service['corosync-qdevice'], } diff --git a/manifests/params.pp b/manifests/params.pp index 6b9e3838..7f81489b 100644 --- a/manifests/params.pp +++ b/manifests/params.pp @@ -15,11 +15,11 @@ $log_function_name = false $package_corosync = true $package_pacemaker = true - $version_corosync = 'present' - $version_crmsh = 'present' - $version_pacemaker = 'present' - $version_pcs = 'present' - $version_fence_agents = 'present' + $ensure_corosync = 'present' + $ensure_crmsh = 'present' + $ensure_pacemaker = 'present' + $ensure_pcs = 'present' + $ensure_fence_agents = 'present' $enable_corosync_service = true $manage_corosync_service = true $enable_pacemaker_service = true @@ -28,38 +28,5 @@ $set_votequorum = true $manage_pacemaker_service = true $test_corosync_config = true - - case $facts['os']['family'] { - 'RedHat': { - $package_crmsh = false - $package_pcs = true - $package_fence_agents = true - $package_install_options = undef - } - - 'Debian': { - $package_crmsh = true - $package_pcs = false - $package_fence_agents = false - $package_install_options = undef - } - - 'Suse': { - case $facts['os']['name'] { - 'SLES': { - $package_crmsh = true - $package_pcs = false - $package_fence_agents = false - $package_install_options = undef - } - default: { - fail("Unsupported flavour of ${facts['os']['family']}: ${facts['os']['name']}") - } - } - } - - default: { - fail("Unsupported operating system: ${facts['os']['name']}") - } - } + $package_install_options = undef } diff --git a/manifests/qdevice.pp b/manifests/qdevice.pp index dd0d127c..a09b1c94 100644 --- a/manifests/qdevice.pp +++ b/manifests/qdevice.pp @@ -16,6 +16,9 @@ # @param package_corosync_qnetd # Name of the corosync qnetd package for this system. # +# @param provider +# What command line utility provides corosync configuration capabilities. +# # @summary Performs basic initial configuration of the qdevice daemon on a node. 
# # @example Quorum node with default password & configuring the firewall @@ -51,66 +54,74 @@ # # @see https://www.systutorials.com/docs/linux/man/8-corosync-qnetd/ class corosync::qdevice ( + String $provider, String[1] $package_pcs = 'pcs', String[1] $package_corosync_qnetd = 'corosync-qnetd', Optional[Sensitive[String]] $sensitive_hacluster_hash = undef, ) { - $cluster_group = 'haclient' - $cluster_user = 'hacluster' + case $provider { + 'pcs': { + $cluster_group = 'haclient' + $cluster_user = 'hacluster' - # Install the required packages - [$package_pcs, $package_corosync_qnetd].each |$package| { - package { $package: - ensure => present, - } - } + # Install the required packages + [$package_pcs, $package_corosync_qnetd].each |$package| { + package { $package: + ensure => present, + } + } - if $sensitive_hacluster_hash { - # Cluster control group - group { $cluster_group: - ensure => 'present', - require => Package[$package_pcs, $package_corosync_qnetd], - } + if $sensitive_hacluster_hash { + # Cluster control group + group { $cluster_group: + ensure => 'present', + require => Package[$package_pcs, $package_corosync_qnetd], + } - # Cluster admin credentials - user { $cluster_user: - ensure => 'present', - password => $sensitive_hacluster_hash.unwrap, - gid => $cluster_group, - } - } + # Cluster admin credentials + user { $cluster_user: + ensure => 'present', + password => $sensitive_hacluster_hash.unwrap, + gid => $cluster_group, + } + } - # Enable the PCS service - service { 'pcsd': - ensure => 'running', - enable => true, - require => [ - Package[$package_pcs], - Package[$package_corosync_qnetd], - ], - } + # Enable the PCS service + service { 'pcsd': + ensure => 'running', + enable => true, + require => [ + Package[$package_pcs], + Package[$package_corosync_qnetd], + ], + } - $exec_path = '/sbin:/bin:/usr/sbin:/usr/bin' + $exec_path = '/sbin:/bin:/usr/sbin:/usr/bin' - # Configure the quorum device - exec { 'pcs qdevice setup model net --enable --start': - path => $exec_path, - onlyif => [ - 'test ! -f /etc/corosync/qnetd/nssdb/qnetd-cacert.crt', - ], - require => Service['pcsd'], - } + # Configure the quorum device + exec { 'pcs qdevice setup model net --enable --start': + path => $exec_path, + onlyif => [ + 'test ! 
-f /etc/corosync/qnetd/nssdb/qnetd-cacert.crt', + ], + require => Service['pcsd'], + } - # Ensure the net device is running - exec { 'pcs qdevice start net': - path => $exec_path, - onlyif => [ - 'test -f /etc/corosync/qnetd/nssdb/qnetd-cacert.crt', - 'test 0 -ne $(pcs qdevice status net >/dev/null 2>&1; echo $?)', - ], - require => [ - Package['pcs'], - Package['corosync-qnetd'], - ], + # Ensure the net device is running + exec { 'pcs qdevice start net': + path => $exec_path, + onlyif => [ + 'test -f /etc/corosync/qnetd/nssdb/qnetd-cacert.crt', + 'test 0 -ne $(pcs qdevice status net >/dev/null 2>&1; echo $?)', + ], + require => [ + Package['pcs'], + Package['corosync-qnetd'], + ], + } + } + default: { + fail("qdevice not supported by this module with provider ${provider}") + } } } diff --git a/metadata.json b/metadata.json index 293cbea7..3061a670 100644 --- a/metadata.json +++ b/metadata.json @@ -23,20 +23,32 @@ { "operatingsystem": "Debian", "operatingsystemrelease": [ - "9" + "9", + "10", + "11" ] }, { "operatingsystem": "Ubuntu", "operatingsystemrelease": [ - "16.04" + "18.04", + "20.04" ] }, { "operatingsystem": "SLES", - "operatingsystemmajrelease": [ + "operatingsystemrelease": [ "12", - "15" + "12.1", + "12.2", + "12.3", + "12.4", + "12.5", + "15", + "15.1", + "15.2", + "15.3", + "15.4" ] } ], diff --git a/spec/acceptance/cs_colocation_spec.rb b/spec/acceptance/cs_colocation_spec.rb index 0af246e9..e6b428ea 100644 --- a/spec/acceptance/cs_colocation_spec.rb +++ b/spec/acceptance/cs_colocation_spec.rb @@ -65,8 +65,12 @@ class { 'corosync': end it 'creates the service resource' do - command = if fact('osfamily') == 'RedHat' - 'pcs resource show' + command = if fact('default_provider') == 'pcs' + if Gem::Version.new(fact('pcs_version')) < Gem::Version.new('0.10.0') + 'pcs resource show' + else + 'pcs resource status' + end else 'crm_resource --list' end @@ -76,8 +80,12 @@ class { 'corosync': end it 'creates the vip resource' do - command = if fact('osfamily') == 'RedHat' - 'pcs resource show' + command = if fact('default_provider') == 'pcs' + if Gem::Version.new(fact('pcs_version')) < Gem::Version.new('0.10.0') + 'pcs resource show' + else + 'pcs resource status' + end else 'crm_resource --list' end diff --git a/spec/acceptance/cs_commit_spec.rb b/spec/acceptance/cs_commit_spec.rb index c2fea3af..827f6cff 100644 --- a/spec/acceptance/cs_commit_spec.rb +++ b/spec/acceptance/cs_commit_spec.rb @@ -81,8 +81,12 @@ class { 'corosync': end it 'creates the service resource in the cib' do - command = if fact('osfamily') == 'RedHat' - 'pcs resource show' + command = if fact('default_provider') == 'pcs' + if Gem::Version.new(fact('pcs_version')) < Gem::Version.new('0.10.0') + 'pcs resource show' + else + 'pcs resource status' + end else 'crm_resource --list' end @@ -92,8 +96,12 @@ class { 'corosync': end it 'creates the vip resource in the cib' do - command = if fact('osfamily') == 'RedHat' - 'pcs resource show' + command = if fact('default_provider') == 'pcs' + if Gem::Version.new(fact('pcs_version')) < Gem::Version.new('0.10.0') + 'pcs resource show' + else + 'pcs resource status' + end else 'crm_resource --list' end @@ -115,7 +123,7 @@ class { 'corosync': end it 'creates the cib and a shadow cib' do - if fact('osfamily') == 'RedHat' + if fact('default_provider') == 'pcs' shell('pcs cluster cib') shell("pcs cluster cib -f #{pcs_shadow_cib}") else @@ -125,8 +133,12 @@ class { 'corosync': end it 'creates the vip resource in the shadow cib' do - command = if fact('osfamily') == 'RedHat' - "pcs 
resource show -f #{pcs_shadow_cib}" + command = if fact('default_provider') == 'pcs' + if Gem::Version.new(fact('pcs_version')) < Gem::Version.new('0.10.0') + "pcs resource show -f #{pcs_shadow_cib}" + else + "pcs resource status -f #{pcs_shadow_cib}" + end else 'CIB_shadow=puppet crm_resource --list' end @@ -136,8 +148,12 @@ class { 'corosync': end it 'creates the service resource in the shadow cib' do - command = if fact('osfamily') == 'RedHat' - "pcs resource show -f #{pcs_shadow_cib}" + command = if fact('default_provider') == 'pcs' + if Gem::Version.new(fact('pcs_version')) < Gem::Version.new('0.10.0') + "pcs resource show -f #{pcs_shadow_cib}" + else + "pcs resource status -f #{pcs_shadow_cib}" + end else 'CIB_shadow=puppet crm_resource --list' end @@ -147,7 +163,7 @@ class { 'corosync': end it 'creates the colocation in the shadow cib and apache2_vip is the "with" resource' do - command = if fact('osfamily') == 'RedHat' + command = if fact('default_provider') == 'pcs' "pcs cluster cib -f #{pcs_shadow_cib} | grep apache2_vip_with_service" else 'CIB_shadow=puppet cibadmin --query | grep apache2_vip_with_service' @@ -158,7 +174,7 @@ class { 'corosync': end it 'creates the colocation in the shadow cib and apache2_service is the main resource' do - command = if fact('osfamily') == 'RedHat' + command = if fact('default_provider') == 'pcs' "pcs cluster cib -f #{pcs_shadow_cib} | grep apache2_vip_with_service" else 'CIB_shadow=puppet cibadmin --query | grep apache2_vip_with_service' diff --git a/spec/acceptance/cs_primitive_spec.rb b/spec/acceptance/cs_primitive_spec.rb index a8369ac3..6d735de8 100644 --- a/spec/acceptance/cs_primitive_spec.rb +++ b/spec/acceptance/cs_primitive_spec.rb @@ -1,4 +1,5 @@ require 'spec_helper_acceptance' +require 'pry' describe 'corosync' do cert = '-----BEGIN CERTIFICATE----- @@ -37,19 +38,14 @@ class { 'corosync': cs_primitive { 'pgsql': primitive_class => 'ocf', primitive_type => 'pgsql', - promotable => true, provided_by => 'heartbeat', parameters => { 'pgctl' => '/bin/pg_ctl', 'psql' => '/bin/psql', 'pgdata' => '/var/lib/pgsql/data/', 'rep_mode' => 'sync', 'restore_command' => 'cp /var/lib/pgsql/pg_archive/%f %p', 'primary_conninfo_opt' => 'keepalives_idle=60 keepalives_interval=5 keepalives_count=5', 'restart_on_promote' => 'true' }, operations => [ { 'start' => { 'interval' => '0s', 'timeout' => '60s', 'on-fail' => 'restart' } }, { 'monitor' => { 'interval' => '4s', 'timeout' => '60s', 'on-fail' => 'restart' } }, - { 'monitor' => { 'interval' => '3s', 'timeout' => '60s', 'on-fail' => 'restart', 'role' => 'Master' } }, - { 'promote' => { 'interval' => '0s', 'timeout' => '60s', 'on-fail' => 'restart' } }, - { 'demote' => { 'interval' => '1s', 'timeout' => '60s', 'on-fail' => 'stop' } }, { 'stop' => { 'interval' => '0s', 'timeout' => '60s', 'on-fail' => 'block' } }, { 'notify' => { 'interval' => '0s', 'timeout' => '60s' } }, ], - ms_metadata => { 'master-max' => '1', 'master-node-max' => '1', 'clone-max' => '2', 'clone-node-max' => '1', 'notify' => 'true' }, } EOS @@ -62,8 +58,12 @@ class { 'corosync': end it 'creates the resources' do - command = if fact('osfamily') == 'RedHat' - 'pcs resource show' + command = if fact('default_provider') == 'pcs' + if Gem::Version.new(fact('pcs_version')) < Gem::Version.new('0.10.0') + 'pcs resource show' + else + 'pcs resource status' + end else 'crm_resource --list' end @@ -72,31 +72,6 @@ class { 'corosync': end end - it 'updates master/slave primitive parameters' do - pp_master_update = <<-EOS - cs_primitive { 'pgsql': 
- primitive_class => 'ocf', - primitive_type => 'pgsql', - promotable => true, - provided_by => 'heartbeat', - parameters => { 'pgctl' => '/bin/pg_ctl', 'psql' => '/bin/psql', 'pgdata' => '/var/lib/pgsql/data/', 'rep_mode' => 'sync', 'restore_command' => 'cp /var/lib/pgsql/pg_archive/%f %p', 'primary_conninfo_opt' => 'keepalives_idle=60 keepalives_interval=1 keepalives_count=5', 'restart_on_promote' => 'true' }, - operations => [ - { 'start' => { 'interval' => '0s', 'timeout' => '60s', 'on-fail' => 'restart' } }, - { 'monitor' => { 'interval' => '4s', 'timeout' => '60s', 'on-fail' => 'restart' } }, - { 'monitor' => { 'interval' => '3s', 'timeout' => '60s', 'on-fail' => 'restart', 'role' => 'Master' } }, - { 'promote' => { 'interval' => '0s', 'timeout' => '60s', 'on-fail' => 'restart' } }, - { 'demote' => { 'interval' => '1s', 'timeout' => '30s', 'on-fail' => 'stop' } }, - { 'stop' => { 'interval' => '0s', 'timeout' => '60s', 'on-fail' => 'block' } }, - { 'notify' => { 'interval' => '0s', 'timeout' => '60s' } }, - ], - ms_metadata => { 'master-max' => '1', 'master-node-max' => '1', 'clone-max' => '2', 'clone-node-max' => '1', 'notify' => 'true' }, - } - EOS - - apply_manifest(pp_master_update, expect_changes: true, debug: false, trace: true) - apply_manifest(pp_master_update, catch_changes: true, debug: false, trace: true) - end - it 'creates a haproxy_vip resources' do pp = <<-EOS cs_primitive { 'haproxy_vip': @@ -133,7 +108,11 @@ class { 'corosync': apply_manifest(pp, expect_changes: true, debug: false, trace: true) apply_manifest(pp, catch_changes: true, debug: false, trace: true) - shell('crm_resource -r test_stop -m -p target-role -v Stopped') + if fact('default_provider') == 'crm' + shell('crm_resource -r test_stop -m -p target-role -v Stopped') + else + shell('pcs resource update test_stop meta target-role=Stopped') + end apply_manifest(pp, expect_changes: true, debug: false, trace: true) apply_manifest(pp, catch_changes: true, debug: false, trace: true) @@ -153,8 +132,11 @@ class { 'corosync': apply_manifest(pp, expect_changes: true, debug: false, trace: true) apply_manifest(pp, catch_changes: true, debug: false, trace: true) - shell('crm_resource -r test_stop2 -m -p target-role -v Stopped') - + if fact('default_provider') == 'crm' + shell('crm_resource -r test_stop2 -m -p target-role -v Stopped') + else + shell('pcs resource update test_stop2 meta target-role=Stopped') + end apply_manifest(pp, catch_changes: true, debug: false, trace: true) pp = <<-EOS @@ -188,14 +170,36 @@ class { 'corosync': # rubocop:disable RSpec/RepeatedExample it 'does set is-managed in test_md' do - shell('crm_resource -r test_md -q') do |r| - expect(r.stdout).to match(%r{is-managed.*false}) + if fact('default_provider') == 'crm' + shell('crm_resource -r test_md -q') do |r| + expect(r.stdout).to match(%r{is-managed.*false}) + end + else + subcommand = if Gem::Version.new(fact('pcs_version')) < Gem::Version.new('0.10.0') + 'show' + else + 'config' + end + shell("pcs resource #{subcommand} test_md") do |r| + expect(r.stdout).to match(%r{is-managed.*false}) + end end end it 'does set target-role in test_md' do - shell('crm_resource -r test_md -q') do |r| - expect(r.stdout).to match(%r{target-role.*stopped}) + if fact('default_provider') == 'crm' + shell('crm_resource -r test_md -q') do |r| + expect(r.stdout).to match(%r{target-role.*stopped}) + end + else + subcommand = if Gem::Version.new(fact('pcs_version')) < Gem::Version.new('0.10.0') + 'show' + else + 'config' + end + shell("pcs resource #{subcommand} 
test_md") do |r| + expect(r.stdout).to match(%r{target-role.*stopped}) + end end end @@ -215,14 +219,36 @@ class { 'corosync': end it 'does not delete or change is-managed if it is in unmanaged_metadata' do - shell('crm_resource -r test_md -q') do |r| - expect(r.stdout).to match(%r{is-managed.*false}) + if fact('default_provider') == 'crm' + shell('crm_resource -r test_md -q') do |r| + expect(r.stdout).to match(%r{is-managed.*false}) + end + else + subcommand = if Gem::Version.new(fact('pcs_version')) < Gem::Version.new('0.10.0') + 'show' + else + 'config' + end + shell("pcs resource #{subcommand} test_md") do |r| + expect(r.stdout).to match(%r{is-managed.*false}) + end end end it 'does not delete or change target-role if it is in unmanaged_metadata' do - shell('crm_resource -r test_md -q') do |r| - expect(r.stdout).to match(%r{target-role.*stopped}) + if fact('default_provider') == 'crm' + shell('crm_resource -r test_md -q') do |r| + expect(r.stdout).to match(%r{target-role.*stopped}) + end + else + subcommand = if Gem::Version.new(fact('pcs_version')) < Gem::Version.new('0.10.0') + 'show' + else + 'config' + end + shell("pcs resource #{subcommand} test_md") do |r| + expect(r.stdout).to match(%r{target-role.*stopped}) + end end end @@ -243,17 +269,39 @@ class { 'corosync': end it 'does delete is-managed because it is no longer in unmanaged_metadata' do - shell('crm_resource -r test_md -q') do |r| - expect(r.stdout).not_to match(%r{is-managed.*false}) + if fact('default_provider') == 'crm' + shell('crm_resource -r test_md -q') do |r| + expect(r.stdout).not_to match(%r{is-managed.*false}) + end + else + subcommand = if Gem::Version.new(fact('pcs_version')) < Gem::Version.new('0.10.0') + 'show' + else + 'config' + end + shell("pcs resource #{subcommand} test_md") do |r| + expect(r.stdout).not_to match(%r{is-managed.*false}) + end end end it 'does not delete target-role because it is still in unmanaged_metadata' do - shell('crm_resource -r test_md -q') do |r| - expect(r.stdout).to match(%r{target-role.*stopped}) + if fact('default_provider') == 'crm' + shell('crm_resource -r test_md -q') do |r| + expect(r.stdout).to match(%r{target-role.*stopped}) + end + else + subcommand = if Gem::Version.new(fact('pcs_version')) < Gem::Version.new('0.10.0') + 'show' + else + 'config' + end + shell("pcs resource #{subcommand} test_md") do |r| + expect(r.stdout).to match(%r{target-role.*stopped}) + end end + # rubocop:enable RSpec/RepeatedExample end - # rubocop:enable RSpec/RepeatedExample context 'on RedHat derivitives' do it 'applies stonith resources without error' do diff --git a/spec/acceptance/cs_shadow_spec.rb b/spec/acceptance/cs_shadow_spec.rb index fd675198..5c7312f5 100644 --- a/spec/acceptance/cs_shadow_spec.rb +++ b/spec/acceptance/cs_shadow_spec.rb @@ -77,8 +77,12 @@ class { 'corosync': end it 'does not create the service resource in the cib' do - command = if fact('osfamily') == 'RedHat' - 'pcs resource show' + command = if fact('default_provider') == 'pcs' + if Gem::Version.new(fact('pcs_version')) < Gem::Version.new('0.10.0') + 'pcs resource show' + else + 'pcs resource status' + end else 'crm_resource --list' end @@ -88,8 +92,12 @@ class { 'corosync': end it 'does not create the vip resource in the cib' do - command = if fact('osfamily') == 'RedHat' - 'pcs resource show' + command = if fact('default_provider') == 'pcs' + if Gem::Version.new(fact('pcs_version')) < Gem::Version.new('0.10.0') + 'pcs resource show' + else + 'pcs resource status' + end else 'crm_resource --list' end @@ -111,8 
+119,12 @@ class { 'corosync': end it 'creates the service resource in the shadow cib' do - command = if fact('osfamily') == 'RedHat' - "pcs resource show -f #{pcs_shadow_cib}" + command = if fact('default_provider') == 'pcs' + if Gem::Version.new(fact('pcs_version')) < Gem::Version.new('0.10.0') + "pcs resource show -f #{pcs_shadow_cib}" + else + "pcs resource status -f #{pcs_shadow_cib}" + end else 'CIB_shadow=puppet crm_resource --list' end @@ -122,8 +134,12 @@ class { 'corosync': end it 'creates the vip resource in the shadow cib' do - command = if fact('osfamily') == 'RedHat' - "pcs resource show -f #{pcs_shadow_cib}" + command = if fact('default_provider') == 'pcs' + if Gem::Version.new(fact('pcs_version')) < Gem::Version.new('0.10.0') + "pcs resource show -f #{pcs_shadow_cib}" + else + "pcs resource status -f #{pcs_shadow_cib}" + end else 'CIB_shadow=puppet crm_resource --list' end @@ -133,7 +149,7 @@ class { 'corosync': end it 'creates the colocation identified by with-rsc="apache_vip" in the shadow cib' do - command = if fact('osfamily') == 'RedHat' + command = if fact('default_provider') == 'pcs' "pcs cluster cib -f #{pcs_shadow_cib} | grep apache_vip_with_service" else 'CIB_shadow=puppet cibadmin --query | grep apache_vip_with_service' @@ -144,7 +160,7 @@ class { 'corosync': end it 'creates the colocation identified by rsc="apache_service" in the shadow cib' do - command = if fact('osfamily') == 'RedHat' + command = if fact('default_provider') == 'pcs' "pcs cluster cib -f #{pcs_shadow_cib} | grep apache_vip_with_service" else 'CIB_shadow=puppet cibadmin --query | grep apache_vip_with_service' @@ -176,7 +192,7 @@ class { 'corosync': EOS apply_manifest(pp, catch_failures: true, debug: false, trace: true) apply_manifest(pp, expect_changes: true, debug: false, trace: true) - command = if fact('osfamily') == 'RedHat' + command = if fact('default_provider') == 'pcs' "pcs cluster cib -f #{pcs_shadow_cib} | grep apache3_vip_clone_primitive" else 'CIB_shadow=puppet cibadmin --query | grep apache3_vip_clone_primitive' diff --git a/spec/classes/corosync_qdevice_spec.rb b/spec/classes/corosync_qdevice_spec.rb index 66f88d51..ca20e505 100644 --- a/spec/classes/corosync_qdevice_spec.rb +++ b/spec/classes/corosync_qdevice_spec.rb @@ -7,60 +7,84 @@ } end - context 'standard quorum node install' do - ['pcs', 'corosync-qnetd'].each do |package| - it "does install #{package}" do - is_expected.to contain_package(package).with( - ensure: 'present' - ) + on_supported_os.each do |os, os_facts| + context "on #{os}" do + let(:facts) do + os_facts end - end - it 'creates the cluster group' do - is_expected.to contain_group('haclient').that_requires('Package[pcs]') - end + provider_package = case corosync_stack(os_facts)[:provider] + when 'pcs' + 'pcs' + else + 'crmsh' + end - it 'sets the hacluster password' do - is_expected.to contain_user('hacluster').with( - ensure: 'present', - password: 'some-secret-hash', - gid: 'haclient' - ) - end + case corosync_stack(os_facts)[:provider] + when 'pcs' + context 'standard quorum node install' do + [provider_package, 'corosync-qnetd'].each do |package| + it "does install #{package}" do + is_expected.to contain_package(package).with( + ensure: 'present' + ) + end + end - it 'configures the pcsd service' do - is_expected.to contain_service('pcsd').with( - ensure: 'running', - enable: 'true', - require: [ - 'Package[pcs]', - 'Package[corosync-qnetd]' - ] - ) - end + it 'creates the cluster group' do + is_expected.to 
contain_group('haclient').that_requires("Package[#{provider_package}]") end - it 'sets the hacluster password' do - is_expected.to contain_user('hacluster').with( - ensure: 'present', - password: 'some-secret-hash', - gid: 'haclient' - ) - end + it 'sets the hacluster password' do + is_expected.to contain_user('hacluster').with( + ensure: 'present', + password: 'some-secret-hash', + gid: 'haclient' + ) + end + + it 'configures the pcsd service' do + is_expected.to contain_service('pcsd').with( + ensure: 'running', + enable: 'true', + require: [ + 'Package[pcs]', + 'Package[corosync-qnetd]' + ] + ) + end - it 'configures the pcsd service' do - is_expected.to contain_service('pcsd').with( - ensure: 'running', - enable: 'true', - require: [ - 'Package[pcs]', - 'Package[corosync-qnetd]' - ] - ) - end - it 'configures the net quorum device' do - is_expected.to contain_exec('pcs qdevice setup model net --enable --start').with( - path: '/sbin:/bin:/usr/sbin:/usr/bin', - onlyif: [ - 'test ! -f /etc/corosync/qnetd/nssdb/qnetd-cacert.crt' - ], - require: 'Service[pcsd]' - ) - end + it 'configures the net quorum device' do + is_expected.to contain_exec('pcs qdevice setup model net --enable --start').with( + path: '/sbin:/bin:/usr/sbin:/usr/bin', + onlyif: [ + 'test ! -f /etc/corosync/qnetd/nssdb/qnetd-cacert.crt' + ], + require: 'Service[pcsd]' + ) + end - it 'makes sure the net quorum device is started' do - is_expected.to contain_exec('pcs qdevice start net').with( - path: '/sbin:/bin:/usr/sbin:/usr/bin', - onlyif: [ - 'test -f /etc/corosync/qnetd/nssdb/qnetd-cacert.crt', - 'test 0 -ne $(pcs qdevice status net >/dev/null 2>&1; echo $?)' - ], - require: [ - 'Package[pcs]', - 'Package[corosync-qnetd]' - ] - ) + it 'makes sure the net quorum device is started' do + is_expected.to contain_exec('pcs qdevice start net').with( + path: '/sbin:/bin:/usr/sbin:/usr/bin', + onlyif: [ + 'test -f /etc/corosync/qnetd/nssdb/qnetd-cacert.crt', + 'test 0 -ne $(pcs qdevice status net >/dev/null 2>&1; echo $?)' + ], + require: [ + 'Package[pcs]', + 'Package[corosync-qnetd]' + ] + ) + end + end + when 'crm' + it 'should fail because we do not have qdevice support for crm' do + is_expected.to compile.and_raise_error(%r{qdevice not supported by this module with provider crm}) + end + else + raise('Unknown provider!') + end end end end diff --git a/spec/classes/corosync_spec.rb b/spec/classes/corosync_spec.rb index e8ff37fa..ea87f527 100644 --- a/spec/classes/corosync_spec.rb +++ b/spec/classes/corosync_spec.rb @@ -558,7 +558,7 @@ let(:params) do super().merge( "package_#{package}" => true, - "version_#{package}" => '1.1.1' + "ensure_#{package}" => '1.1.1' ) end @@ -650,7 +650,40 @@ on_supported_os.each do |os, os_facts| context "on #{os}" do - let(:facts) { os_facts } + let(:facts) do + os_facts + end + + auth_command = if corosync_stack(os_facts)[:provider] == 'pcs' + if Gem::Version.new(corosync_stack(os_facts)[:pcs_version]) < Gem::Version.new('0.10.0') + 'cluster auth' + else + 'host auth' + end + else + 'cluster auth' + end + cluster_name_arg = if corosync_stack(os_facts)[:provider] == 'pcs' + if Gem::Version.new(corosync_stack(os_facts)[:pcs_version]) < Gem::Version.new('0.10.0') + '--name' + else + '' + end + else + '--name' + end + provider_package = case corosync_stack(os_facts)[:provider] + when 'pcs' + 'pcs' + else + 'crmsh' + end + + it 'has the correct pcs version' do + is_expected.to contain_class('corosync').with( + 'pcs_version' => corosync_stack(os_facts)[:pcs_version] + ) + end context 'without secauth' do let(:params) do @@ -692,25 +725,29 @@ it_configures 'corosync' - context 'on RH osfamily', if: os_facts[:os]['family'] == 'RedHat' do + # Check default package installations per platform + case os_facts[:os]['family'] + when 'RedHat' it 'installs fence-agents-all' do is_expected.to contain_package('fence-agents-all') end + end - it 'installs the pcs
package' do - is_expected.to contain_package('pcs').with( - ensure: 'present', - install_options: nil - ) - end + it 'installs the provider package' do + is_expected.to contain_package(provider_package).with( + ensure: 'present', + install_options: nil + ) + end - it 'does manage the pacemaker service' do - is_expected.to contain_service('pacemaker').with( - ensure: 'running' - ) - end + it 'does manage the pacemaker service' do + is_expected.to contain_service('pacemaker').with( + ensure: 'running' + ) + end - # Tests for pcsd_auth management + # Tests for pcsd_auth management + if corosync_stack(os_facts)[:provider] == 'pcs' context 'when mananging pcsd authorization' do let(:params) do super().merge( @@ -746,12 +783,12 @@ let(:node) { 'node2.test.org' } it 'does not perform the auth' do - is_expected.not_to contain_exec('pcs_cluster_auth') + is_expected.not_to contain_exec('authorize_members') end end it 'configures the hacluster user and haclient group' do - is_expected.to contain_group('haclient').that_requires('Package[pcs]') + is_expected.to contain_group('haclient').that_requires("Package[#{provider_package}]") is_expected.to contain_user('hacluster').with( ensure: 'present', gid: 'haclient', @@ -768,8 +805,8 @@ end it 'authorizes all nodes' do - is_expected.to contain_exec('pcs_cluster_auth').with( - command: 'pcs cluster auth node1.test.org node2.test.org node3.test.org -u hacluster -p some-secret-sauce', + is_expected.to contain_exec('authorize_members').with( + command: "pcs #{auth_command} node1.test.org node2.test.org node3.test.org -u hacluster -p some-secret-sauce", path: '/sbin:/bin:/usr/sbin:/usr/bin', require: [ 'Service[pcsd]', @@ -800,8 +837,8 @@ let(:facts) { override_facts(super(), networking: { ip: '192.168.0.10' }) } it 'match ip and auth nodes by member names' do - is_expected.to contain_exec('pcs_cluster_auth').with( - command: 'pcs cluster auth 192.168.0.10 192.168.0.12 192.168.0.13 -u hacluster -p some-secret-sauce', + is_expected.to contain_exec('authorize_members').with( + command: "pcs #{auth_command} 192.168.0.10 192.168.0.12 192.168.0.13 -u hacluster -p some-secret-sauce", path: '/sbin:/bin:/usr/sbin:/usr/bin', require: [ 'Service[pcsd]', @@ -827,13 +864,15 @@ end it 'still detects that this is the auth-node' do - is_expected.to contain_exec('pcs_cluster_auth') + is_expected.to contain_exec('authorize_members') end end end end + end - # Corosync qnet device is enabled + # Corosync qnet device is enabled + if corosync_stack(os_facts)[:provider] == 'pcs' context 'when quorum device is configured' do let(:params) do super().merge( @@ -881,7 +920,7 @@ ) end - it 'fails to delpoy' do + it 'fails to deploy' do is_expected.to raise_error( Puppet::Error, %r{Quorum device host cannot also be a member of the cluster!} @@ -909,7 +948,7 @@ end it 'does not attempt to authorize or configure the quorum node' do - is_expected.not_to contain_exec('pcs_cluster_auth_qdevice') + is_expected.not_to contain_exec('authorize_qdevice') is_expected.not_to contain_exec('pcs_cluster_add_qdevice') end end @@ -950,7 +989,7 @@ end it 'does not authorize or add the quorum device' do - is_expected.not_to contain_exec('pcs_cluster_auth_qdevice') + is_expected.not_to contain_exec('authorize_qdevice') is_expected.not_to contain_exec('pcs_cluster_add_qdevice') end end @@ -971,39 +1010,41 @@ ) end - it 'configures a temporary cluster if corosync.conf is missing' do - is_expected.to contain_exec('pcs_cluster_temporary').with( - command: 'pcs cluster setup --force --name cluster_test
-            path: '/sbin:/bin:/usr/sbin:/usr/bin',
-            onlyif: 'test ! -f /etc/corosync/corosync.conf',
-            require: 'Exec[pcs_cluster_auth]'
-          )
-        end
-
-        it 'authorizes and adds the quorum device' do
-          is_expected.to contain_exec('pcs_cluster_auth_qdevice').with(
-            command: 'pcs cluster auth quorum1.test.org -u hacluster -p quorum-secret-password',
-            path: '/sbin:/bin:/usr/sbin:/usr/bin',
-            onlyif: 'test 0 -ne $(grep quorum1.test.org /var/lib/pcsd/tokens >/dev/null 2>&1; echo $?)',
-            require: [
-              'Package[corosync-qdevice]',
-              'Exec[pcs_cluster_auth]',
-              'Exec[pcs_cluster_temporary]'
-            ]
-          )
-          is_expected.to contain_exec('pcs_cluster_add_qdevice').with(
-            command: 'pcs quorum device add model net host=quorum1.test.org algorithm=ffsplit',
-            path: '/sbin:/bin:/usr/sbin:/usr/bin',
-            onlyif: [
-              'test 0 -ne $(pcs quorum config | grep "host:" >/dev/null 2>&1; echo $?)'
-            ],
-            require: 'Exec[pcs_cluster_auth_qdevice]'
-          )
-        end
+        case corosync_stack(os_facts)[:provider]
+        when 'pcs'
+          it 'configures a temporary cluster if corosync.conf is missing' do
+            is_expected.to contain_exec('pcs_cluster_temporary').with(
+              command: "pcs cluster setup --force #{cluster_name_arg} cluster_test node1.test.org node2.test.org node3.test.org",
+              path: '/sbin:/bin:/usr/sbin:/usr/bin',
+              onlyif: 'test ! -f /etc/corosync/corosync.conf',
+              require: 'Exec[authorize_members]'
+            )
+          end
 
-        it 'contains the quorum configuration' do
-          is_expected.to contain_file('/etc/corosync/corosync.conf').with_content(
-            %r!quorum {
+          it 'authorizes and adds the quorum device' do
+            is_expected.to contain_exec('authorize_qdevice').with(
+              command: "pcs #{auth_command} quorum1.test.org -u hacluster -p quorum-secret-password",
+              path: '/sbin:/bin:/usr/sbin:/usr/bin',
+              onlyif: 'test 0 -ne $(grep quorum1.test.org /var/lib/pcsd/tokens >/dev/null 2>&1; echo $?)',
+              require: [
+                'Package[corosync-qdevice]',
+                'Exec[authorize_members]',
+                'Exec[pcs_cluster_temporary]'
+              ]
+            )
+
+            is_expected.to contain_exec('pcs_cluster_add_qdevice').with(
+              command: 'pcs quorum device add model net host=quorum1.test.org algorithm=ffsplit',
+              path: '/sbin:/bin:/usr/sbin:/usr/bin',
+              onlyif: [
+                'test 0 -ne $(pcs quorum config | grep "host:" >/dev/null 2>&1; echo $?)'
+              ],
+              require: 'Exec[authorize_qdevice]'
+            )
+          end
+          it 'contains the quorum configuration' do
+            is_expected.to contain_file('/etc/corosync/corosync.conf').with_content(
+              %r!quorum {
   provider: corosync_votequorum
   device {
     model: net
@@ -1015,7 +1056,8 @@
     }
   }
 }!m
-          )
+            )
+          end
         end
       end
 
@@ -1034,6 +1076,7 @@
             %r{two_node: 1\n}
           )
         end
+        # TODO: implement equivalent checks for the crm provider
       end
     end
   end
diff --git a/spec/spec_helper.rb b/spec/spec_helper.rb
index 50265422..b978b128 100644
--- a/spec/spec_helper.rb
+++ b/spec/spec_helper.rb
@@ -23,3 +23,31 @@
 
 require 'spec_helper_corosync'
 require 'spec_helper_methods'
+
+# add_custom_fact :corosync_stack, lambda { |os, facts|
+#   case facts[:os]['family']
+#   when 'RedHat'
+#     'pcs'
+#   when 'Debian'
+#     case facts[:os]['name']
+#     when 'Debian'
+#       if facts[:os]['release']['major'].to_i > 9
+#         'pcs'
+#       else
+#         'crm'
+#       end
+#     when 'Ubuntu'
+#       if facts[:os]['release']['major'].to_i > 18
+#         'pcs'
+#       elsif facts[:os]['release']['major'].to_i > 16
+#         'pcs'
+#       else
+#         'crm'
+#       end
+#     end
+#   when 'Suse'
+#     'crm'
+#   else
+#     'crm'
+#   end
+# }
diff --git a/spec/spec_helper_acceptance.rb b/spec/spec_helper_acceptance.rb
index 91bb4c1c..fa2686ad 100644
--- a/spec/spec_helper_acceptance.rb
+++ b/spec/spec_helper_acceptance.rb
@@ -1,6 +1,45 @@
 require 'voxpupuli/acceptance/spec_helper_acceptance'
 
 configure_beaker do |host|
+  case fact_on(host, 'os.family')
+  when 'RedHat'
+    default_provider = 'pcs'
+    pcs_version = if fact_on(host, 'os.release.major').to_i > 7
+                    '0.10.0'
+                  else
+                    '0.9.0'
+                  end
+  when 'Debian'
+    case fact_on(host, 'os.name')
+    when 'Debian'
+      if fact_on(host, 'os.release.major').to_i > 9
+        default_provider = 'pcs'
+        pcs_version = '0.10.0'
+      else
+        default_provider = 'crm'
+        pcs_version = ''
+      end
+    when 'Ubuntu'
+      if fact_on(host, 'os.release.major').to_i > 18
+        default_provider = 'pcs'
+        pcs_version = '0.10.0'
+      elsif fact_on(host, 'os.release.major').to_i > 16
+        default_provider = 'pcs'
+        pcs_version = '0.9.0'
+      else
+        default_provider = 'crm'
+        pcs_version = ''
+      end
+    end
+  when 'Suse'
+    default_provider = 'crm'
+    pcs_version = ''
+  else
+    default_provider = 'crm'
+    pcs_version = ''
+  end
+  on host, "echo default_provider=#{default_provider} > /opt/puppetlabs/facter/facts.d/pacemaker-provider.txt"
+  on host, "echo pcs_version=#{pcs_version} >> /opt/puppetlabs/facter/facts.d/pacemaker-provider.txt"
   # On Debian-based, service state transitions (restart, stop) hang indefinitely and
   # lead to test timeouts if there is a service unit of Type=notify involved.
   # Use Type=simple as a workaround. See issue 455.
diff --git a/spec/spec_helper_corosync.rb b/spec/spec_helper_corosync.rb
index 91355bc5..97fc9a9f 100644
--- a/spec/spec_helper_corosync.rb
+++ b/spec/spec_helper_corosync.rb
@@ -1,5 +1,46 @@
 # This file contains helpers that are specific to this module
 
+def corosync_stack(facts)
+  case facts[:os]['family']
+  when 'RedHat'
+    corosync_stack = 'pcs'
+    pcs_version = if facts[:os]['release']['major'].to_i > 7
+                    '0.10.0'
+                  else
+                    '0.9.0'
+                  end
+  when 'Debian'
+    case facts[:os]['name']
+    when 'Debian'
+      if facts[:os]['release']['major'].to_i > 9
+        corosync_stack = 'pcs'
+        pcs_version = '0.10.0'
+      else
+        corosync_stack = 'crm'
+        pcs_version = ''
+      end
+    when 'Ubuntu'
+      if facts[:os]['release']['major'].to_i > 18
+        corosync_stack = 'pcs'
+        pcs_version = '0.10.0'
+      elsif facts[:os]['release']['major'].to_i > 16
+        corosync_stack = 'pcs'
+        pcs_version = '0.9.0'
+      else
+        corosync_stack = 'crm'
+        pcs_version = ''
+      end
+    end
+  when 'Suse'
+    corosync_stack = 'crm'
+    pcs_version = ''
+  else
+    corosync_stack = 'crm'
+    pcs_version = ''
+  end
+  { provider: corosync_stack, pcs_version: pcs_version }
+end
+
 def expect_commands(patterns)
   command_suite = sequence('pcs commands')
   Array(patterns).each do |pattern|
diff --git a/spec/unit/puppet/provider/cs_primitive_crm_spec.rb b/spec/unit/puppet/provider/cs_primitive_crm_spec.rb
index 2b21b507..a4316303 100644
--- a/spec/unit/puppet/provider/cs_primitive_crm_spec.rb
+++ b/spec/unit/puppet/provider/cs_primitive_crm_spec.rb
@@ -96,14 +96,6 @@
 
       expect(instance.metadata).to eq('target-role' => 'Started', 'priority' => '7')
     end
-
-    it 'has an ms_metadata property' do
-      expect(instance).to respond_to(:ms_metadata)
-    end
-
-    it 'has a promotable property that is :false' do
-      expect(instance.promotable).to eq(:false)
-    end
   end
 end
diff --git a/spec/unit/puppet/provider/cs_primitive_pcs_spec.rb b/spec/unit/puppet/provider/cs_primitive_pcs_spec.rb
index 2a0f399b..2c2960b7 100644
--- a/spec/unit/puppet/provider/cs_primitive_pcs_spec.rb
+++ b/spec/unit/puppet/provider/cs_primitive_pcs_spec.rb
@@ -71,14 +71,6 @@
 
       expect(instance.metadata).to eq('target-role' => 'Started', 'priority' => '7')
     end
-
-    it 'has an ms_metadata property' do
-      expect(instance).to respond_to(:ms_metadata)
-    end
-
-    it 'has a promotable property that is :false' do
-      expect(instance.promotable).to eq(:false)
-    end
   end
 end
@@ -192,38 +184,37 @@
     it 'sets operations' do
       instance.operations = [{ 'monitor' => { 'interval' => '20s' } }]
       expect_commands([
-        %r{^pcs resource create --force testResource ocf:heartbeat:IPaddr2 op monitor interval=20s$},
-        %r{^pcs resource op remove testResource monitor interval=60s$}
+        %r{^pcs resource create --force --no-default-ops testResource ocf:heartbeat:IPaddr2 op monitor interval=20s$},
       ])
       instance.flush
     end
 
     it 'do not remove default operations if explicitely set' do
       instance.operations = [{ 'monitor' => { 'interval' => '60s' } }]
-      expect_commands(%r{^pcs resource create --force testResource ocf:heartbeat:IPaddr2 op monitor interval=60s$})
+      expect_commands(%r{^pcs resource create --force --no-default-ops testResource ocf:heartbeat:IPaddr2 op monitor interval=60s$})
       instance.flush
     end
 
     it 'sets utilization' do
       instance.utilization = { 'waffles' => '5' }
-      expect_commands(%r{^pcs resource create --force testResource ocf:heartbeat:IPaddr2 op .* utilization waffles=5$})
+      expect_commands(%r{^pcs resource create --force --no-default-ops testResource ocf:heartbeat:IPaddr2 op .* utilization waffles=5$})
       instance.flush
     end
 
     it 'sets parameters' do
      instance.parameters = { 'fluffyness' => '12' }
-      expect_commands(%r{^pcs resource create --force testResource ocf:heartbeat:IPaddr2 fluffyness=12 op.*})
+      expect_commands(%r{^pcs resource create --force --no-default-ops testResource ocf:heartbeat:IPaddr2 fluffyness=12 op.*})
       instance.flush
     end
 
     it 'sets metadata' do
       instance.metadata = { 'target-role' => 'Started' }
-      expect_commands(%r{^pcs resource create --force testResource ocf:heartbeat:IPaddr2 op .* meta target-role=Started$})
+      expect_commands(%r{^pcs resource create --force --no-default-ops testResource ocf:heartbeat:IPaddr2 op .* meta target-role=Started$})
       instance.flush
     end
 
     it 'sets the primitive name and type' do
-      expect_commands(%r{^pcs resource create --force testResource ocf:heartbeat:IPaddr2})
+      expect_commands(%r{^pcs resource create --force --no-default-ops testResource ocf:heartbeat:IPaddr2})
       instance.flush
     end
 
@@ -245,8 +236,7 @@
       expect_commands([
         %r{^pcs resource unclone example_vip$},
         %r{^pcs resource delete --force example_vip$},
-        %r{^pcs resource create --force example_vip systemd:heartbeat:IPaddr2},
-        %r{^pcs resource op remove example_vip monitor interval=60s$}
+        %r{^pcs resource create --force --no-default-ops example_vip systemd:heartbeat:IPaddr2},
       ])
       vip_instance.flush
     end
@@ -256,8 +246,7 @@
       expect_commands([
         %r{^pcs resource unclone example_vip$},
         %r{^pcs resource delete --force example_vip$},
-        %r{^pcs resource create --force example_vip ocf:voxpupuli:IPaddr2},
-        %r{^pcs resource op remove example_vip monitor interval=60s$}
+        %r{^pcs resource create --force --no-default-ops example_vip ocf:voxpupuli:IPaddr2},
       ])
       vip_instance.flush
     end
@@ -267,8 +256,7 @@
       expect_commands([
         %r{^pcs resource unclone example_vip$},
         %r{^pcs resource delete --force example_vip$},
-        %r{^pcs resource create --force example_vip ocf:heartbeat:IPaddr3},
-        %r{^pcs resource op remove example_vip monitor interval=60s$}
+        %r{^pcs resource create --force --no-default-ops example_vip ocf:heartbeat:IPaddr3},
       ])
       vip_instance.flush
     end
@@ -280,8 +268,7 @@
       expect_commands([
         %r{^pcs resource unclone example_vip$},
         %r{^pcs resource delete --force example_vip$},
-        %r{^pcs resource create --force example_vip systemd:httpd},
-        %r{^pcs resource op remove example_vip monitor interval=60s$}
+        %r{^pcs resource create --force --no-default-ops example_vip systemd:httpd},
       ])
       vip_instance.flush
     end
@@ -291,8 +278,7 @@
       expect_commands([
         %r{^pcs resource unclone example_vip$},
         %r{^pcs resource delete --force example_vip$},
-        %r{^pcs resource create --force example_vip},
-        %r{^pcs resource op remove example_vip monitor interval=60s$}
+        %r{^pcs resource create --force --no-default-ops example_vip},
       ])
       vip_instance.flush
     end
diff --git a/spec/unit/puppet/type/cs_primitive_spec.rb b/spec/unit/puppet/type/cs_primitive_spec.rb
index d13749ff..4ce95343 100644
--- a/spec/unit/puppet/type/cs_primitive_spec.rb
+++ b/spec/unit/puppet/type/cs_primitive_spec.rb
@@ -27,7 +27,7 @@
     end
   end
 
-  [:parameters, :operations, :metadata, :ms_metadata, :promotable].each do |property|
+  [:parameters, :operations, :metadata].each do |property|
     it "should have a #{property} property" do
       expect(subject).to be_validproperty(property)
     end
@@ -39,7 +39,7 @@
   end
 
   describe 'when validating attributes' do
-    [:parameters, :operations, :metadata, :ms_metadata].each do |attribute|
+    [:parameters, :operations, :metadata].each do |attribute|
       it "should validate that the #{attribute} attribute defaults to a hash" do
         expect(subject.new(name: 'mock_primitive')[:parameters]).to eq({})
       end
@@ -53,26 +53,6 @@
       end.to raise_error Puppet::Error, %r{hash}
     end
   end
-
-  it 'validates that the promotable attribute can be true/false' do
-    [true, false].each do |value|
-      expect(subject.new(
-        name: 'mock_primitive',
-        promotable: value
-      )[:promotable]).to eq(value.to_s.to_sym)
-    end
-  end
-
-  it 'validates that the promotable attribute cannot be other values' do
-    ['fail', 42].each do |value|
-      expect do
-        subject.new(
-          name: 'mock_primitive',
-          promotable: value
-        )
-      end.to raise_error Puppet::Error, %r{(true|false)}
-    end
-  end
 end
 
 describe 'when munging the operations attributes' do

From 2603790649be8b4ccced008f963977a5b0b06e25 Mon Sep 17 00:00:00 2001
From: Tobias Wolter
Date: Thu, 30 Dec 2021 13:29:09 +0100
Subject: [PATCH 2/2] Apply @bastelfreak's suggestions.

---
 data/os/Debian/9.yaml | 2 +-
 data/os/RedHat/6.yaml | 2 --
 data/os/SLES.yaml     | 2 +-
 3 files changed, 2 insertions(+), 4 deletions(-)
 delete mode 100644 data/os/RedHat/6.yaml

diff --git a/data/os/Debian/9.yaml b/data/os/Debian/9.yaml
index 397da8ee..2cdfebbf 100644
--- a/data/os/Debian/9.yaml
+++ b/data/os/Debian/9.yaml
@@ -1,3 +1,3 @@
 ---
 corosync::provider: 'crm'
-corosync::pcs_version: NULL
+corosync::pcs_version: ~
diff --git a/data/os/RedHat/6.yaml b/data/os/RedHat/6.yaml
deleted file mode 100644
index baf9491f..00000000
--- a/data/os/RedHat/6.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-corosync::pcs_version: '0.9.0'
diff --git a/data/os/SLES.yaml b/data/os/SLES.yaml
index 397da8ee..2cdfebbf 100644
--- a/data/os/SLES.yaml
+++ b/data/os/SLES.yaml
@@ -1,3 +1,3 @@
 ---
 corosync::provider: 'crm'
-corosync::pcs_version: NULL
+corosync::pcs_version: ~