diff --git a/crmsh/bootstrap.py b/crmsh/bootstrap.py
index a56156711..3e4b6e5f8 100644
--- a/crmsh/bootstrap.py
+++ b/crmsh/bootstrap.py
@@ -499,7 +499,7 @@ def is_online():
         return False
 
     # if peer_node is None, this is in the init process
-    if _context.cluster_node is None:
+    if not _context or _context.cluster_node is None:
         return True
     # In join process
     # If the joining node is already online but can't find the init node
@@ -2373,9 +2373,7 @@ def remove_qdevice() -> None:
     if qdevice_reload_policy == qdevice.QdevicePolicy.QDEVICE_RELOAD:
         invoke("crm cluster run 'crm corosync reload'")
     elif qdevice_reload_policy == qdevice.QdevicePolicy.QDEVICE_RESTART:
-        logger.info("Restarting cluster service")
-        utils.cluster_run_cmd("crm cluster restart")
-        wait_for_cluster()
+        restart_cluster()
     else:
         logger.warning("To remove qdevice service, need to restart cluster service manually on each node")
 
@@ -2796,4 +2794,10 @@ def sync_file(path):
         utils.cluster_copy_file(path, nodes=_context.node_list_in_cluster, output=False)
     else:
         csync2_update(path)
+
+
+def restart_cluster():
+    logger.info("Restarting cluster service")
+    utils.cluster_run_cmd("crm cluster restart")
+    wait_for_cluster()
 # EOF
diff --git a/crmsh/qdevice.py b/crmsh/qdevice.py
index 32c8cb5f9..fd1dac002 100644
--- a/crmsh/qdevice.py
+++ b/crmsh/qdevice.py
@@ -599,9 +599,7 @@ def start_qdevice_service(self):
             logger.info("Starting corosync-qdevice.service in cluster")
             utils.cluster_run_cmd("systemctl restart corosync-qdevice")
         elif self.qdevice_reload_policy == QdevicePolicy.QDEVICE_RESTART:
-            logger.info("Restarting cluster service")
-            utils.cluster_run_cmd("crm cluster restart")
-            bootstrap.wait_for_cluster()
+            bootstrap.restart_cluster()
         else:
             logger.warning("To use qdevice service, need to restart cluster service manually on each node")
 
diff --git a/crmsh/sbd.py b/crmsh/sbd.py
index d7f569e68..cc0df1d19 100644
--- a/crmsh/sbd.py
+++ b/crmsh/sbd.py
@@ -456,9 +456,7 @@ def _restart_cluster_and_configure_sbd_ra(self):
         Try to configure sbd resource, restart cluster on needed
         """
         if not xmlutil.CrmMonXmlParser().is_any_resource_running():
-            logger.info("Restarting cluster service")
-            utils.cluster_run_cmd("crm cluster restart")
-            bootstrap.wait_for_cluster()
+            bootstrap.restart_cluster()
             self.configure_sbd_resource_and_properties()
         else:
             logger.warning("To start sbd.service, need to restart cluster service manually on each node")
diff --git a/crmsh/ui_cluster.py b/crmsh/ui_cluster.py
index d951e5f14..e328c9d0d 100644
--- a/crmsh/ui_cluster.py
+++ b/crmsh/ui_cluster.py
@@ -16,6 +16,7 @@
 from . import bootstrap
 from . import corosync
 from . import qdevice
+from . import xmlutil
 from .cibconfig import cib_factory
 from .prun import prun
 from .service_manager import ServiceManager
@@ -564,8 +565,17 @@ def do_rename(self, context, new_name):
         '''
         Rename the cluster.
         '''
-        if not ServiceManager(sh.ClusterShellAdaptorForLocalShell(sh.LocalShell())).service_is_active("corosync.service"):
+        service_manager = ServiceManager()
+        if not service_manager.service_is_active("corosync.service"):
             context.fatal_error("Can't rename cluster when cluster service is stopped")
+        if service_manager.service_is_active("corosync-qdevice.service"):
+            logger.error("Can't rename cluster when QDevice service is running")
+            suggestion = '''Please run `crm cluster remove --qdevice` on any node in the cluster to remove the QDevice configuration;
+Then rename the cluster;
+Finally run `crm cluster init qdevice` on any node in the cluster to re-deploy the QDevice.'''
+            logger.info(suggestion)
+            return
+
         old_name = cib_factory.get_property('cluster-name')
         if old_name and new_name == old_name:
             context.fatal_error("Expected a different name")
@@ -583,8 +593,11 @@ def do_rename(self, context, new_name):
         if not cib_factory.commit():
             context.fatal_error("Change property cluster-name failed!")
 
-        # it's a safe way to give user a hints that need to restart service
-        context.info("To apply the change, restart the cluster service at convenient time")
+        if xmlutil.CrmMonXmlParser().is_any_resource_running():
+            context.info("To apply the change, restart the cluster service at convenient time")
+        else:
+            bootstrap.restart_cluster()
+
 
     def _parse_clustermap(self, clusters):
         '''