diff --git a/_cmd.py b/_cmd.py
index ad6f4f6..bcff2bc 100644
--- a/_cmd.py
+++ b/_cmd.py
@@ -32,17 +32,22 @@
 from core import ObdHome
 from _stdio import IO, FormtatText
 from _lock import LockMode
+from _types import Capacity
 from tool import DirectoryUtil, FileUtil, NetUtil, COMMAND_ENV
 from _errno import DOC_LINK_MSG, LockError
 import _environ as ENV
 from ssh import LocalClient
-from const import *
+from const import (
+    CONST_OBD_HOME, CONST_OBD_INSTALL_PATH, CONST_OBD_INSTALL_PRE,
+    VERSION, REVISION, BUILD_BRANCH, BUILD_TIME, FORBIDDEN_VARS,
+    COMP_OCEANBASE_DIAGNOSTIC_TOOL
+)
 
 ROOT_IO = IO(1)
 OBD_HOME_PATH = os.path.join(os.environ.get(CONST_OBD_HOME, os.getenv('HOME')), '.obd')
-OBDIAG_HOME_PATH = os.path.join(os.environ.get(CONST_OBD_HOME, os.getenv('HOME')), 'oceanbase-diagnostic-tool')
+OBDIAG_HOME_PATH = os.path.join(os.environ.get(CONST_OBD_HOME, os.getenv('HOME')), COMP_OCEANBASE_DIAGNOSTIC_TOOL)
 
 COMMAND_ENV.load(os.path.join(OBD_HOME_PATH, '.obd_environ'), ROOT_IO)
 ROOT_IO.default_confirm = COMMAND_ENV.get(ENV.ENV_DEFAULT_CONFIRM, '0') == '1'
@@ -187,6 +192,8 @@ def init_home(self):
         version_fobj = FileUtil.open(version_path, 'a+', stdio=ROOT_IO)
         version_fobj.seek(0)
         version = version_fobj.read()
+        if not COMMAND_ENV.get(ENV.ENV_OBD_ID):
+            COMMAND_ENV.set(ENV.ENV_OBD_ID, uuid())
         if VERSION != version:
             for part in ['plugins', 'config_parser', 'optimize', 'mirror/remote']:
                 obd_part_dir = os.path.join(self.OBD_PATH, part)
@@ -630,8 +637,8 @@ def lock_mode(self):
     def show_repo(self, repos, name=None):
         ROOT_IO.print_list(
             repos,
-            ['name', 'version', 'release', 'arch', 'md5', 'tags'],
-            lambda x: [x.name, x.version, x.release, x.arch, x.md5, ', '.join(x.tags)],
+            ['name', 'version', 'release', 'arch', 'md5', 'tags', 'size'],
+            lambda x: [x.name, x.version, x.release, x.arch, x.md5, ', '.join(x.tags), Capacity(x.size, 2).value],
             title='%s Local Repository List' % name if name else 'Local Repository List'
         )
@@ -749,7 +756,7 @@ def _do_command(self, obd):
             url = '/#/updateWelcome' if self.cmds and self.cmds[0] in ('upgrade', 'update') else ''
             ROOT_IO.print('start OBD WEB in 0.0.0.0:%s' % self.opts.port)
-            ROOT_IO.print('please open http://{0}:{1}{2}'.format(NetUtil.get_host_ip(), self.opts.port, url))
+            ROOT_IO.print('please open http://{0}:{1}{2}'.format(NetUtil.get_host_ip(), self.opts.port, url))
             try:
                 COMMAND_ENV.set(ENV.ENV_DISABLE_PARALLER_EXTRACT, True, stdio=obd.stdio)
                 OBDWeb(obd, None, self.OBD_INSTALL_PATH).start(self.opts.port)
@@ -1543,13 +1550,17 @@ def _do_command(self, obd):
         return self._show_help()
 
 
-class ToolCommand(HiddenMajorCommand):
+class ToolCommand(MajorCommand):
 
     def __init__(self):
         super(ToolCommand, self).__init__('tool', 'Tools')
         self.register_command(DbConnectCommand())
         self.register_command(CommandsCommand())
         self.register_command(DoobaCommand())
+        self.register_command(ToolListCommand())
+        self.register_command(ToolInstallCommand())
+        self.register_command(ToolUninstallCommand())
+        self.register_command(ToolUpdateCommand())
 
 
 class BenchMajorCommand(MajorCommand):
@@ -1616,6 +1627,8 @@ def __init__(self):
         self.register_command(ObdiagGatherCommand())
         self.register_command(ObdiagAnalyzeCommand())
         self.register_command(ObdiagCheckCommand())
+        self.register_command(ObdiagRcaCommand())
+        self.register_command(ObdiagUpdateSceneCommand())
 
 
 class ObdiagDeployCommand(ObdCommand):
@@ -1627,8 +1640,8 @@ def __init__(self):
         self.parser.undefine_warn = False
 
     def _do_command(self, obd):
-        obd.set_options(self.opts)
-        return obd.obdiag_deploy()
+        ROOT_IO.print("Use 'obd tool install %s' instead" % COMP_OCEANBASE_DIAGNOSTIC_TOOL)
+        return obd.install_tool(COMP_OCEANBASE_DIAGNOSTIC_TOOL)
 
 
 class ObdiagGatherMirrorCommand(ObdCommand):
@@ -1658,6 +1671,24 @@ def __init__(self):
         self.register_command(ObdiagGatherClogCommand())
         self.register_command(ObdiagGatherPlanMonitorCommand())
         self.register_command(ObdiagGatherObproxyLogCommand())
+        self.register_command(ObdiagGatherSceneCommand())
+
+
+class ObdiagGatherSceneCommand(MajorCommand):
+
+    def __init__(self):
+        super(ObdiagGatherSceneCommand, self).__init__('scene', 'Gather scene diagnostic info')
+        self.register_command(ObdiagGatherSceneListCommand())
+        self.register_command(ObdiagGatherSceneRunCommand())
+
+
+class ObdiagRcaCommand(MajorCommand):
+
+    def __init__(self):
+        super(ObdiagRcaCommand, self).__init__('rca', 'root cause analysis of oceanbase problem')
+        self.register_command(ObdiagRcaListCommand())
+        self.register_command(ObdiagRcaRunCommand())
+
 
 class ObdiagGatherAllCommand(ObdiagGatherMirrorCommand):
@@ -1803,11 +1834,9 @@ def lock_mode(self):
 
     def __init__(self):
         super(ObdiagGatherPlanMonitorCommand, self).__init__('plan_monitor', 'Gather Parallel SQL information')
-        self.parser.add_option('-c', '--component', type='string', help="Component name to connect.", default='oceanbase-ce')
         self.parser.add_option('--trace_id', type='string', help='sql trace id')
         self.parser.add_option('--store_dir', type='string', help='the dir to store gather result, current dir by default.', default='./')
-        self.parser.add_option('-u', '--user', type='string', help='The username used by database connection. [root]',default='root')
-        self.parser.add_option('-p', '--password', type='string', help='The password used by database connection.',default='')
+        self.parser.add_option('--env', type='string', help='env, eg: "{env1=xxx, env2=xxx}"')
         self.parser.add_option('--obdiag_dir', type='string', help="obdiag install dir",default=OBDIAG_HOME_PATH)
@@ -1832,6 +1861,54 @@ def __init__(self):
         self.parser.add_option('--store_dir', type='string', help='the dir to store gather result, current dir by default.', default='./')
         self.parser.add_option('--obdiag_dir', type='string', help="obdiag install dir",default=OBDIAG_HOME_PATH)
 
+
+class ObdiagGatherSceneListCommand(ObdCommand):
+
+    def __init__(self):
+        super(ObdiagGatherSceneListCommand, self).__init__('list', 'list the supported gather scenes')
+        self.parser.add_option('--obdiag_dir', type='string', help="obdiag install dir",default=OBDIAG_HOME_PATH)
+
+    def init(self, cmd, args):
+        super(ObdiagGatherSceneListCommand, self).init(cmd, args)
+        return self
+
+    @property
+    def lock_mode(self):
+        return LockMode.NO_LOCK
+
+    def _do_command(self, obd):
+        return obd.obdiag_offline_func("gather_scene_list", self.opts)
+
+
+class ObdiagGatherSceneRunCommand(ObdCommand):
+
+    def __init__(self):
+        super(ObdiagGatherSceneRunCommand, self).__init__('run', 'gather diagnostic info for the specified scene')
+        self.parser.add_option('--scene', type='string', help="Specify the scene to be gathered")
+        self.parser.add_option('--from', type='string', help="specify the start of the time range. format: yyyy-mm-dd hh:mm:ss")
+        self.parser.add_option('--to', type='string', help="specify the end of the time range. format: yyyy-mm-dd hh:mm:ss")
+        self.parser.add_option('--since', type='string', help="specify a time range of the last 'n' [d]ays, [h]ours or [m]inutes before now. format: <n><d|h|m>. example: 1h.",default='30m')
+        self.parser.add_option('--env', type='string', help='env, eg: "{env1=xxx, env2=xxx}"')
+        self.parser.add_option('--dis_update', type='string', help='Boolean option; any assigned value is treated as true', default='true')
+        self.parser.add_option('--store_dir', type='string', help='the dir to store gather result, current dir by default.', default='./')
+        self.parser.add_option('--obdiag_dir', type='string', help="obdiag install dir",default=OBDIAG_HOME_PATH)
+
+    def init(self, cmd, args):
+        super(ObdiagGatherSceneRunCommand, self).init(cmd, args)
+        self.parser.set_usage('%s [options]' % self.prev_cmd)
+        return self
+
+    @property
+    def lock_mode(self):
+        return LockMode.NO_LOCK
+
+    def _do_command(self, obd):
+        if len(self.cmds) > 0:
+            return obd.obdiag_online_func(self.cmds[0], "gather_scene_run", self.opts)
+        else:
+            return self._show_help()
+
+
 class ObdiagAnalyzeMirrorCommand(ObdCommand):
 
     def init(self, cmd, args):
@@ -1909,7 +1986,8 @@ class ObdiagCheckCommand(ObdCommand):
 
     def __init__(self):
        super(ObdiagCheckCommand, self).__init__('check', 'check oceanbase cluster')
        self.parser.add_option('--cases', type='string', help="The name of the check task set that needs to be executed")
-        self.parser.add_option('--report_path', type='string', help='ouput report path', default='./check_report/')
+        self.parser.add_option('--store_dir', type='string', help='output report path', default='./check_report/')
+        self.parser.add_option('--dis_update', type='string', help='Boolean option; any assigned value is treated as true', default='true')
        self.parser.add_option('--obdiag_dir', type='string', help="obdiag install dir", default=OBDIAG_HOME_PATH)
 
     def init(self, cmd, args):
@@ -1924,6 +2002,143 @@ def _do_command(self, obd):
         else:
             return self._show_help()
 
+class ObdiagRcaListCommand(ObdCommand):
+
+    def __init__(self):
+        super(ObdiagRcaListCommand, self).__init__('list', 'list the supported root cause analysis scenes')
+        self.parser.add_option('--obdiag_dir', type='string', help="obdiag install dir", default=OBDIAG_HOME_PATH)
+
+    @property
+    def lock_mode(self):
+        return LockMode.NO_LOCK
+
+    def _do_command(self, obd):
+        return obd.obdiag_offline_func("rca_list", self.opts)
+
+
+class ObdiagRcaRunCommand(ObdCommand):
+
+    def __init__(self):
+        super(ObdiagRcaRunCommand, self).__init__('run', 'run root cause analysis of an oceanbase problem')
+        self.parser.add_option('--scene', type='string', help="The name of the rca scene set that needs to be executed")
+        self.parser.add_option('--store_dir', type='string', help='output result path', default='./rca/')
+        self.parser.add_option('--parameters', type='string', help='parameters')
+        self.parser.add_option('--obdiag_dir', type='string', help="obdiag install dir", default=OBDIAG_HOME_PATH)
+
+    def init(self, cmd, args):
+        super(ObdiagRcaRunCommand, self).init(cmd, args)
+        self.parser.set_usage('%s [options]' % self.prev_cmd)
+        return self
+
+    def _do_command(self, obd):
+        if len(self.cmds) > 0:
+            return obd.obdiag_online_func(self.cmds[0], "rca_run", self.opts)
+        else:
+            return self._show_help()
+
+
+class ObdiagUpdateSceneCommand(ObdCommand):
+
+    def __init__(self):
+        super(ObdiagUpdateSceneCommand, self).__init__('update', 'update obdiag scenes')
+        self.parser.add_option('--file', type='string', help="obdiag update cheat file path")
+        self.parser.add_option('--force', type='string', help='Force Update')
+        self.parser.add_option('--obdiag_dir', type='string', help="obdiag install dir", default=OBDIAG_HOME_PATH)
+
+    def init(self, cmd, args):
+        super(ObdiagUpdateSceneCommand, self).init(cmd, args)
+        self.parser.set_usage('%s [options]' % self.prev_cmd)
+        return self
+
+    def _do_command(self, obd):
+        return obd.obdiag_offline_func("update_scene", self.opts)
+
+
+class ToolListCommand(ObdCommand):
+
+    def __init__(self):
+        super(ToolListCommand, self).__init__('list', 'list tools')
+
+    @property
+    def lock_mode(self):
+        return LockMode.NO_LOCK
+
+    def _do_command(self, obd):
+        if self.cmds:
+            return self._show_help()
+        else:
+            return obd.list_tools()
+
+
+class ToolInstallCommand(ObdCommand):
+
+    def __init__(self):
+        super(ToolInstallCommand, self).__init__('install', 'install tool')
+        self.parser.add_option('-V', '--version', type='string', help="The version of tool.")
+        self.parser.add_option('-p', '--prefix', type='string', help="The install prefix path of tool.")
+        self.parser.add_option('-y', '--assumeyes', action='store_true', help="answer yes for all questions", default=False)
+        self.parser.add_option('-f', '--force', action='store_true', help="Force installation even if the tool is already present or conflicts with other tools.", default=False)
+
+    def init(self, cmd, args):
+        super(ToolInstallCommand, self).init(cmd, args)
+        self.parser.set_usage('%s [options]' % self.prev_cmd)
+        return self
+
+    def _do_command(self, obd):
+        if self.cmds:
+            if self.opts.assumeyes:
+                ROOT_IO.default_confirm = True
+            res = obd.install_tool(self.cmds[0])
+            return res
+        else:
+            return self._show_help()
+
+
+class ToolUninstallCommand(ObdCommand):
+
+    def __init__(self):
+        super(ToolUninstallCommand, self).__init__('uninstall', 'uninstall tool')
+        self.parser.add_option('-y', '--assumeyes', action='store_true', help="answer yes for all questions", default=False)
+        self.parser.add_option('-f', '--force', action='store_true', help="Force uninstallation even if the tool is required by other tools.", default=False)
+
+    def init(self, cmd, args):
+        super(ToolUninstallCommand, self).init(cmd, args)
+        self.parser.set_usage('%s [options]' % self.prev_cmd)
+        return self
+
+    def _do_command(self, obd):
+        if self.cmds:
+            if self.opts.assumeyes:
+                ROOT_IO.default_confirm = True
+            res = obd.uninstall_tool(self.cmds[0])
+            return res
+        else:
+            return self._show_help()
+
+
+class ToolUpdateCommand(ObdCommand):
+
+    def __init__(self):
+        super(ToolUpdateCommand, self).__init__('update', 'update tool')
+        self.parser.add_option('-V', '--version', type='string', help="The version of tool.")
+        self.parser.add_option('-p', '--prefix', type='string', help="The install prefix path of tool.")
+        self.parser.add_option('-y', '--assumeyes', action='store_true', help="answer yes for all questions", default=False)
+        self.parser.add_option('-f', '--force', action='store_true', help="Force installation even if the tool is already present or conflicts with other tools.", default=False)
+
+    def init(self, cmd, args):
+        super(ToolUpdateCommand, self).init(cmd, args)
+        self.parser.set_usage('%s [options]' % self.prev_cmd)
+        return self
+
+    def _do_command(self, obd):
+        if self.cmds:
+            if self.opts.assumeyes:
+                ROOT_IO.default_confirm = True
+            res = obd.update_tool(self.cmds[0])
+            return res
+        else:
+            return self._show_help()
+
 
 class MainCommand(MajorCommand):
diff --git a/_deploy.py b/_deploy.py
index f1a41c1..ef51580 100644
--- a/_deploy.py
+++ b/_deploy.py
@@ -642,6 +642,9 @@ def get_deploy_added_components(self):
     def get_deploy_changed_components(self):
         return self._deploy_config.changed_components
+
+    def get_deploy_removed_components(self):
+        return self._deploy_config.removed_components
 
     def get_depend_config(self, name, server=None, with_default=True):
         if name not in self._depends:
@@ -1052,7 +1055,7 @@ def __init__(self, yaml_path, yaml_loader=yaml, inner_config=None, config_parser
         self._load()
         self._added_components = []
         self._changed_components = []
-        self._removed_components = []
+        self._removed_components = set()
         self._do_not_dump = False
         self._mem_mode = False
@@ -1256,7 +1259,6 @@ def del_components(self, components, dryrun=False):
         ret = True
         src_data = deepcopy(self._src_data) if dryrun else self._src_data
         component_map = deepcopy(self.components) if dryrun else self.components
-        removed_components = deepcopy(self._removed_components) if dryrun else self._removed_components
         for del_comp in components:
             if del_comp not in component_map:
                 self.stdio.error(err.EC_COMPONENT_NOT_EXISTS.format(component=del_comp))
@@ -1264,7 +1266,7 @@
                 continue
             del component_map[del_comp]
             del src_data[del_comp]
-            removed_components.append(del_comp)
+            self.removed_components.add(del_comp)
         for comp_name in component_map:
             for del_comp in components:
                 if del_comp in component_map[comp_name].depends:
@@ -1521,7 +1523,7 @@ def __init__(self, config_dir, config_parser_manager=None, stdio=None):
     def use_model(self, name, repository, dump=True):
         self.deploy_info.components[name] = {
             'hash': repository.hash,
-            'version': repository.version,
+            'version': repository.version
         }
         return self.dump_deploy_info() if dump else True
diff --git a/_environ.py b/_environ.py
index c8ab40e..2485f16 100644
--- a/_environ.py
+++ b/_environ.py
@@ -20,6 +20,9 @@
 
 from __future__ import absolute_import, division, print_function
 
+# obd id
+ENV_OBD_ID = "OBD_ID"
+
 # obd dev mode. {0/1}
 ENV_DEV_MODE = "OBD_DEV_MODE"
diff --git a/_errno.py b/_errno.py
index 83c0c09..8ebe55d 100644
--- a/_errno.py
+++ b/_errno.py
@@ -138,6 +138,7 @@ class InitDirFailedErrorMessage(object):
 EC_COMPONENT_REMOVE_DEPENDS = OBDErrorCodeTemplate(1022, 'Component {component1} still depends by {component2}, could not remove')
 EC_COMPONENT_FAILED_TO_MERGE_CONFIG = OBDErrorCodeTemplate(1023, 'Failed to merge config: {message}')
 EC_COMPONENT_NO_REMAINING_COMPS = OBDErrorCodeTemplate(1024, 'The cluster will have no remaining components. If you are absolutely sure about DELETING ALL COMPONENTS, please use "obd cluster destroy <deploy>" command to completely destroy the cluster')
+EC_COMPONENT_PASSWD_ERROR = OBDErrorCodeTemplate(1025, '({ip}) {component} {key} invalid. (Rule: {rule})')
 
 WC_ULIMIT_CHECK = OBDErrorCodeTemplate(1007, '({server}) The recommended number of {key} is {need} (Current value: {now})')
 WC_AIO_NOT_ENOUGH = OBDErrorCodeTemplate(1011, '({ip}) The recommended value of fs.aio-max-nr is 1048576 (Current value: {current})')
@@ -178,9 +179,11 @@ class InitDirFailedErrorMessage(object):
 # obagent
 EC_OBAGENT_RELOAD_FAILED = OBDErrorCodeTemplate(4000, 'Fail to reload {server}')
 EC_OBAGENT_SEND_CONFIG_FAILED = OBDErrorCodeTemplate(4001, 'Fail to send config file to {server}')
+
 # obproxy
 EC_OBPROXY_NEED_CONFIG = OBDErrorCodeTemplate(4100, '{server} need config "rs_list" or "obproxy_config_server_url"')
 EC_OBPROXY_START_FAILED = OBDErrorCodeTemplate(4101, 'failed to start {server} obproxy: {stderr}')
+EC_OBPROXY_ID_OVER_LIMIT = OBDErrorCodeTemplate(4102, 'When the value of client_session_id_version is set to {id}, the valid range of proxy_id is {limit}')
 # grafana
 EC_GRAFANA_DEFAULT_PWD = OBDErrorCodeTemplate(4200, "{server} grafana admin password should not be 'admin'")
 EC_GRAFANA_PWD_LESS_5 = OBDErrorCodeTemplate(4201, "{server} grafana admin password length should not be less than 5")
@@ -195,7 +198,6 @@ class InitDirFailedErrorMessage(object):
 EC_OCP_EXPRESS_META_DB_NOT_ENOUGH_LOG_DISK_AVAILABLE = OBDErrorCodeTemplate(4305, 'There is not enough log disk for ocp meta tenant. (Avail: {avail}, Need: {need})')
 EC_OCP_EXPRESS_META_DB_NOT_ENOUGH_LOG_DISK = OBDErrorCodeTemplate(4305, 'There is not enough log disk for ocp meta tenant.')
 EC_OCP_EXPRESS_META_DB_NOT_ENOUGH_MEM = OBDErrorCodeTemplate(4305, 'There is not enough memory for ocp meta tenant')
-EC_OCP_EXPRESS_ADMIN_PASSWD_ERROR = OBDErrorCodeTemplate(4306, '({ip}) ocp-express admin_passwd invalid.(Current :{current})')
 
 
 # ocp-server
@@ -219,6 +221,8 @@ class InitDirFailedErrorMessage(object):
 EC_OCP_SERVER_NOT_ENOUGH_MEMORY = OBDErrorCodeTemplate(4364, '({ip}) not enough memory. (Free: {free}, Need: {need})')
 EC_OCP_SERVER_NOT_ENOUGH_DISK = OBDErrorCodeTemplate(4365, '({ip}) {disk} not enough disk space. (Avail: {avail}, Need: {need})')
 
+
+
 WC_OCP_EXPRESS_FAILED_TO_GET_DISK_INFO = OBDErrorCodeTemplate(4303, '({ip}) failed to get disk information, skip disk space check')
 WC_OCP_SERVER_FAILED_TO_GET_DISK_INFO = OBDErrorCodeTemplate(4365, '({ip}) failed to get disk information, skip disk space check')
@@ -286,13 +290,15 @@ class InitDirFailedErrorMessage(object):
 SUG_OCP_EXPRESS_COMP_VERSION = OBDErrorSuggestionTemplate('Please use {comp} with version {version} or above')
 SUG_OCP_EXPRESS_REDUCE_META_DB_MEM = OBDErrorSuggestionTemplate('Please reduce the `ocp_meta_tenant_memory_size`', fix_eval=[FixEval(FixEval.DEL, 'ocp_meta_tenant_memory_size')])
 SUG_OCP_EXPRESS_REDUCE_META_DB_LOG_DISK = OBDErrorSuggestionTemplate('Please reduce the `ocp_meta_tenant_log_disk_size`', fix_eval=[FixEval(FixEval.DEL, 'ocp_meta_tenant_log_disk_size')])
-SUG_OCP_EXPRESS_EDIT_ADMIN_PASSWD_ERROR = OBDErrorSuggestionTemplate('Please edit the `admin_passwd`, must be 8 to 32 characters in length, and must contain at least two digits, two uppercase letters, two lowercase letters, and two of the following special characters:~!@#%^&*_-+=|(){{}}[]:;,.?/)', fix_eval=[FixEval(FixEval.DEL, 'admin_passwd')], auto_fix=True)
+SUG_OCP_EXPRESS_EDIT_ADMIN_PASSWD = OBDErrorSuggestionTemplate('Please edit the `admin_passwd`, must be 8 to 32 characters in length, and must contain at least two digits, two uppercase letters, two lowercase letters, and two of the following special characters:~!@#%^&*_-+=|(){{}}[]:;,.?/)', fix_eval=[FixEval(FixEval.DEL, 'admin_passwd')], auto_fix=True)
 SUG_RESTART_OR_RELOAD = OBDErrorSuggestionTemplate('Please restart or reload the cluster manually')
 SUG_OCP_SERVER_JDBC_URL_CONFIG_ERROR = OBDErrorSuggestionTemplate('Please ensure that the `jdbc_url` in the `config.yaml` configuration file is set correctly to establish a successful connection with your database')
 SUG_OCP_SERVER_SUDO_NOPASSWD = OBDErrorSuggestionTemplate('Please execute `bash -c \'echo "{user} ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers`\' as root in {ip}.')
 SUG_OCP_SERVER_INSTALL_JAVA_WITH_VERSION = OBDErrorSuggestionTemplate('Please install java with version {version}. If java is already installed, please set `java_bin` to the expected java binary path')
 SUG_OCP_SERVER_REDUCE_MEM = OBDErrorSuggestionTemplate('Please reduce the `memory_size`', fix_eval=[FixEval(FixEval.DEL, 'memory_size')])
 SUG_OCP_SERVER_REDUCE_DISK = OBDErrorSuggestionTemplate('Please reduce the `logging_file_total_size_cap`', fix_eval=[FixEval(FixEval.DEL, 'logging_file_total_size_cap')])
+SUG_OCP_SERVER_EDIT_ADMIN_PASSWD_ERROR = OBDErrorSuggestionTemplate('Please edit the `admin_password`, must be 8 to 32 characters in length, containing at least 3 types from digits, lowercase letters, uppercase letters and the following special characters: ~!@#%^&*_-+=|(){{}}[]:;,.?/)', fix_eval=[FixEval(FixEval.DEL, 'admin_password')], auto_fix=True)
 SUG_SUDO_NOPASSWD = OBDErrorSuggestionTemplate('Please execute `bash -c \'echo "{user} ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers`\' as root in {ip}.')
 SUG_OB_SYS_USERNAME = OBDErrorSuggestionTemplate('Please delete the "ob_sys_username" parameter.')
 SUG_OB_SYS_PASSWORD = OBDErrorSuggestionTemplate('''Please set the "ob_sys_password" for oblogproxy by configuring the "cdcro_password" parameter in the "oceanbase" or "oceanbase-ce" component.''')
+SUG_OBAGENT_EDIT_HTTP_BASIC_AUTH_PASSWORD = OBDErrorSuggestionTemplate('Please edit the `http_basic_auth_password`, cannot contain characters other than uppercase letters, lowercase characters, digits, special characters:~^*{{}}[]_-+', fix_eval=[FixEval(FixEval.DEL, 'http_basic_auth_password')], auto_fix=True)
\ No newline at end of file
diff --git a/_mirror.py b/_mirror.py
index 5d26a3b..ce155ea 100644
--- a/_mirror.py
+++ b/_mirror.py
@@ -59,6 +59,7 @@
     'anolis': {'23': 7},
     'openEuler': {'22.03': 7},
     'kylin': {'V10': 8},
+    'alinux': {'2': 7, '3': 8}
 }
 _SERVER_VARS = {
     'basearch': getBaseArch(),
@@ -148,7 +149,7 @@ def __init__(self, elem):
         self.checksum = (None,None) # type,value
         self.openchecksum = (None,None) # type,value
         self.time = (None, None)
-        super(RemotePackageInfo, self).__init__(None, None, None, None, None)
+        super(RemotePackageInfo, self).__init__(None, None, None, None, None, None)
         self._parser(elem)
 
     @property
@@ -488,8 +489,12 @@ def get_exact_pkg_info(self, **pattern):
         self.stdio and getattr(self.stdio, 'verbose', print)('arch is %s' % arch)
         release = pattern['release'] if 'release' in pattern else None
         self.stdio and getattr(self.stdio, 'verbose', print)('release is %s' % release)
-        version = pattern['version'] if 'version' in pattern else None
+        version = ConfigUtil.get_value_from_dict(pattern, 'version', transform_func=Version)
         self.stdio and getattr(self.stdio, 'verbose', print)('version is %s' % version)
+        min_version = ConfigUtil.get_value_from_dict(pattern, 'min_version', transform_func=Version)
+        self.stdio and getattr(self.stdio, 'verbose', print)('min_version is %s' % min_version)
+        max_version = ConfigUtil.get_value_from_dict(pattern, 'max_version', transform_func=Version)
+        self.stdio and getattr(self.stdio, 'verbose', print)('max_version is %s' % max_version)
         pkgs = []
         for key in self.db:
             info = self.db[key]
@@ -501,12 +506,22 @@ def get_exact_pkg_info(self, **pattern):
                 continue
             if version and version != info.version:
                 continue
+            if min_version and min_version > info.version:
+                continue
+            if max_version and max_version <= info.version:
+                continue
             pkgs.append(info)
         if pkgs:
             pkgs.sort()
             return pkgs[-1]
         else:
             return None
+
+    def get_best_pkg_info_with_score(self, **pattern):
+        matchs = self.get_pkgs_info_with_score(**pattern)
+        if matchs:
+            return [info[0] for info in sorted(matchs, key=lambda x: x[1], reverse=True)]
+        return None
 
     def get_pkgs_info_with_score(self, **pattern):
         matchs = []
@@ -541,12 +556,16 @@ def get_pkgs_info_with_score(self, **pattern):
             matchs.append([info, score])
         return matchs
 
-    def match_score(self, info, name, arch, version=None, release=None):
+    def match_score(self, info, name, arch, version=None, min_version=None, max_version=None, release=None):
         if info.arch not in arch:
             return [0, ]
         info_version = '%s.' % info.version
         if version and info_version.find(version) != 0:
             return [0 ,]
+        if min_version and Version(info_version) <= Version(min_version):
+            return [0 ,]
+        if max_version and Version(info_version) > Version(max_version):
+            return [0 ,]
         if release and info.release != release:
             raise Exception ('break')
             return [0 ,]
@@ -605,6 +624,8 @@ class LocalMirrorRepository(MirrorRepository):
 
     MIRROR_TYPE = MirrorRepositoryType.LOCAL
     _DB_FILE = '.db'
+    __VERSION_KEY__ = '__version__'
+    __VERSION__ = Version("1.0")
 
     def __init__(self, mirror_path, stdio=None):
         super(LocalMirrorRepository, self).__init__(mirror_path, stdio=stdio)
@@ -623,21 +644,31 @@ def _load_db(self):
             if os.path.isfile(self.db_path):
                 with open(self.db_path, 'rb') as f:
                     db = pickle.load(f)
-                    for key in db:
-                        data = db[key]
-                        path = getattr(data, 'path', False)
-                        if not path or not os.path.exists(path):
-                            continue
-                        self.db[key] = data
+                    self._flush_db(db)
         except:
             self.stdio.exception('')
             pass
+
+    def _flush_db(self, db):
+        need_flush = self.__VERSION__ > Version(db.get(self.__VERSION_KEY__, '0'))
+        for key in db:
+            data = db[key]
+            path = getattr(data, 'path', False)
+            if not path or not os.path.exists(path):
+                continue
+            if need_flush:
+                data = Package(path)
+            self.db[key] = data
+        if need_flush:
+            self._dump_db()
 
     def _dump_db(self):
         # all dump approaches here are temporary
         try:
+            data = deepcopy(self.db)
+            data[self.__VERSION_KEY__] = self.__VERSION__
             with open(self.db_path, 'wb') as f:
-                pickle.dump(self.db, f)
+                pickle.dump(data, f)
             return True
         except:
             self.stdio.exception('')
@@ -721,8 +752,12 @@ def get_exact_pkg_info(self, **pattern):
         self.stdio and getattr(self.stdio, 'verbose', print)('arch is %s' % arch)
         release = pattern['release'] if 'release' in pattern else None
         self.stdio and getattr(self.stdio, 'verbose', print)('release is %s' % release)
-        version = pattern['version'] if 'version' in pattern else None
+        version = ConfigUtil.get_value_from_dict(pattern, 'version', transform_func=Version)
         self.stdio and getattr(self.stdio, 'verbose', print)('version is %s' % version)
+        min_version = ConfigUtil.get_value_from_dict(pattern, 'min_version', transform_func=Version)
+        self.stdio and getattr(self.stdio, 'verbose', print)('min_version is %s' % min_version)
+        max_version = ConfigUtil.get_value_from_dict(pattern, 'max_version', transform_func=Version)
+        self.stdio and getattr(self.stdio, 'verbose', print)('max_version is %s' % max_version)
         pkgs = []
         for key in self.db:
             info = self.db[key]
@@ -734,6 +769,10 @@ def get_exact_pkg_info(self, **pattern):
             if version and version != info.version:
                 continue
+            if min_version and min_version > info.version:
+                continue
+            if max_version and max_version <= info.version:
+                continue
             pkgs.append(info)
         if pkgs:
             pkgs.sort()
@@ -779,12 +818,16 @@ def get_pkgs_info_with_score(self, **pattern):
             matchs.append([info, score])
         return matchs
 
-    def match_score(self, info, name, arch, version=None, release=None):
+    def match_score(self, info, name, arch, version=None, min_version=None, max_version=None, release=None):
         if info.arch not in arch:
             return [0, ]
         info_version = '%s.' % info.version
         if version and info_version.find(version) != 0:
             return [0 ,]
+        if min_version and Version(info_version) <= Version(min_version):
+            return [0 ,]
+        if max_version and Version(info_version) > Version(max_version):
+            return [0 ,]
         if release and info.release != release:
             return [0 ,]
@@ -958,7 +1001,7 @@ def get_remote_mirrors(self, is_enabled=True):
 
     def get_mirrors(self, is_enabled=True):
         self._lock()
         mirrors = self.get_remote_mirrors(is_enabled=is_enabled)
-        mirrors.append(self.local_mirror)
+        mirrors.insert(0, self.local_mirror)
         return mirrors
 
     def get_exact_pkg(self, **pattern):
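For reference, the `min_version`/`max_version` filters added to both `get_exact_pkg_info` implementations form a half-open range: a package survives only when `min_version <= pkg.version < max_version`. A minimal standalone sketch of that check, with plain tuples standing in for obd's `Version` objects:

```python
# Half-open version-range filter, mirroring the two `continue` guards in
# get_exact_pkg_info: reject below min, and at or above max.
def in_range(version, min_version=None, max_version=None):
    if min_version and min_version > version:
        return False
    if max_version and max_version <= version:
        return False
    return True

assert in_range((4, 2, 1), min_version=(4, 2)) is True   # min is inclusive
assert in_range((4, 3, 0), max_version=(4, 3)) is False  # max is exclusive
```

Note that the score-based `match_score` paths compare `'%s.' % info.version` against the bounds and reject equality with `min_version`, so the lower bound is treated slightly differently there than in the exact-match path.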
diff --git a/_plugin.py b/_plugin.py
index 1d0e08b..f6fe261 100644
--- a/_plugin.py
+++ b/_plugin.py
@@ -30,7 +30,7 @@
 from copy import deepcopy, copy
 
 from _manager import Manager
-from _rpm import Version
+from _rpm import Version, get_prefix_version, add_sub_version
 from ssh import ConcurrentExecutor
 from tool import ConfigUtil, DynamicLoading, YamlLoader, FileUtil
 from _types import *
@@ -613,6 +613,7 @@ class FileItemType(Enum):
         FILE = 0
         DIR = 1
         BIN = 2
+        JAR = 3
 
     class InstallMethod(Enum):
@@ -621,16 +622,33 @@ class InstallMethod(Enum):
 
     class FileItem(object):
 
-        def __init__(self, src_path, target_path, _type, install_method):
+        def __init__(self, src_path, target_path, _type, install_method, require):
             self.src_path = src_path
             self.target_path = target_path
             self.type = _type if _type else InstallPlugin.FileItemType.FILE
             self.install_method = install_method or InstallPlugin.InstallMethod.ANY
+            self.require = require
+
+    class RequirementItem(object):
+
+        def __init__(self, name, version, min_version, max_version):
+            self.name = name
+            self.version = version
+            self.min_version = min_version
+            self.max_version = max_version
+
+        def __hash__(self):
+            return hash(tuple(sorted(self.__dict__.items())))
+
+        def __eq__(self, other):
+            return self.__dict__ == other.__dict__
 
     PLUGIN_TYPE = PluginType.INSTALL
     FILES_MAP_YAML = 'file_map.yaml'
     FLAG_FILE = FILES_MAP_YAML
+    REQUIREMENT_YAML = 'requirement.yaml'
     _KEYCRE = re.compile(r"\$(\w+)")
+    _VERSION_KEYCRE = re.compile(r"\$version(\[(\d+)\])?")
 
     def __init__(self, component_name, plugin_path, version, dev_mode):
         super(InstallPlugin, self).__init__(component_name, plugin_path, version, dev_mode)
@@ -638,6 +656,9 @@ def __init__(self, component_name, plugin_path, version, dev_mode):
         self._file_map = {}
         self._file_map_data = None
         self._check_value = None
+        self.requirement_path = os.path.join(self.plugin_path, self.REQUIREMENT_YAML)
+        self._requirement = {}
+        self._requirement_data = None
 
     @classmethod
     def var_replace(cls, string, var):
@@ -650,7 +671,6 @@ def var_replace(cls, string, var):
             if not m:
                 done.append(string)
                 break
-
             varname = m.group(1).lower()
             replacement = var.get(varname, m.group())
@@ -673,6 +693,16 @@ def file_map_data(self):
             with open(self.file_map_path, 'rb') as f:
                 self._file_map_data = yaml.load(f)
         return self._file_map_data
+
+    @property
+    def requirement_data(self):
+        if self._requirement_data is None:
+            if os.path.exists(self.requirement_path):
+                with open(self.requirement_path, 'rb') as f:
+                    self._requirement_data = yaml.load(f)
+            else:
+                self._requirement_data = {}
+        return self._requirement_data
 
     def file_map(self, package_info):
         var = {
@@ -697,6 +727,7 @@ def file_map(self, package_info):
                         ConfigUtil.get_value_from_dict(data, 'target_path', k),
                         getattr(InstallPlugin.FileItemType, ConfigUtil.get_value_from_dict(data, 'type', 'FILE').upper(), None),
                         getattr(InstallPlugin.InstallMethod, ConfigUtil.get_value_from_dict(data, 'install_method', 'ANY').upper(), None),
+                        ConfigUtil.get_value_from_dict(data, 'require', None),
                     )
                     self._file_map[key] = file_map
         except:
@@ -706,8 +737,70 @@ def file_map(self, package_info):
     def file_list(self, package_info):
         file_map = self.file_map(package_info)
         return [file_map[k] for k in file_map]
-
-
+
+    def version_replace(cls, string, main_component_version):
+        """
+        Replace the version when the "$version" variable is configured in requirement.yaml.
+
+        :param string: the version config from requirement.yaml
+        :param main_component_version: the main component version
+        :return: the replaced version and the offset
+        """
+        if not main_component_version:
+            return string, 0
+        m = cls._VERSION_KEYCRE.search(string)
+        if not m:
+            return string, 0
+
+        if not m.group(2):
+            return main_component_version, 0
+
+        offset = int(m.group(2))
+        return get_prefix_version(Version(main_component_version), offset), offset
+
+    def requirement_map(self, package_info):
+        var = {
+            'name': package_info.name,
+            'version': package_info.version,
+            'release': package_info.release,
+            'arch': package_info.arch,
+            'md5': package_info.md5,
+        }
+        key = str(var)
+        if not self._requirement.get(key):
+            try:
+                requirement_map = {}
+                if self.requirement_data:
+                    for component_name in self.requirement_data:
+                        value = self.requirement_data[component_name]
+                        # once a version is configured, obd only accepts a package exactly equal to that version
+                        version = ConfigUtil.get_value_from_dict(value, 'version', transform_func=Version)
+                        min_version = max_version = None
+                        if not version:
+                            min_version = ConfigUtil.get_value_from_dict(value, 'min_version', transform_func=Version)
+                            max_version = ConfigUtil.get_value_from_dict(value, 'max_version', transform_func=Version)
+                        else:
+                            version_replace, offset = self.version_replace(version, var['version'])
+                            if version_replace != version:
+                                if offset == 0:
+                                    version = version_replace
+                                else:
+                                    version = None
+                                    min_version = version_replace
+                                    max_version = add_sub_version(Version(version_replace), offset, 1)
+                        requirement_map[component_name] = InstallPlugin.RequirementItem(
+                            component_name,
+                            str(version) if version else None,
+                            str(min_version) if min_version else None,
+                            str(max_version) if max_version else None,
+                        )
+                self._requirement[key] = requirement_map
+            except:
+                pass
+        return self._requirement[key]
+
+    def requirement_list(self, package_info):
+        requirement_map = self.requirement_map(package_info)
+        return [requirement_map[k] for k in requirement_map]
 
 
 class ComponentPluginLoader(object):
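The `$version` handling above is easiest to see with a concrete value. Below is a self-contained sketch of how a `requirement.yaml` entry such as `version: $version[2]` is expanded by `requirement_map`: the first two parts of the main component's version become `min_version`, and the bumped prefix becomes the exclusive `max_version`. The local helpers are stand-ins for `get_prefix_version`/`add_sub_version`, not the obd implementations:

```python
import re

def parts(version):
    # mirrors Version.__cmp_value__: (numeric, suffix) pairs, '.'/'_' as separators
    return [(int(i), s) for i, s in re.findall(r'(\d+)([^._]*)', version)]

def prefix(version, offset):
    # get_prefix_version analogue: keep the first `offset` parts
    return '.'.join(str(i) + s for i, s in parts(version)[:offset])

def bump(version, offset):
    # add_sub_version analogue: add 1 to the `offset`-th part
    p = parts(version)
    p[offset - 1] = (p[offset - 1][0] + 1, p[offset - 1][1])
    return '.'.join(str(i) + s for i, s in p)

main_version = '4.2.1.1'        # version of the component being installed
offset = 2                      # from 'version: $version[2]'
min_version = prefix(main_version, offset)
max_version = bump(min_version, offset)
print(min_version, max_version)  # '4.2' '4.3' -> any 4.2.x requirement matches
```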
diff --git a/_repository.py b/_repository.py
index f53616d..54f7791 100644
--- a/_repository.py
+++ b/_repository.py
@@ -62,7 +62,7 @@ def extractfile(self, name):
             self.opens[path] = open(path, 'rb')
         return self.opens[path]
 
-    def __init__(self, path, name, version, files, release=None, arch=None):
+    def __init__(self, path, name, version, files, release=None, arch=None, size=None):
         self.name = name
         self.set_version(version)
         self.set_release(release if release else time.strftime("%Y%m%d%H%M%S", time.localtime(time.time())))
@@ -71,6 +71,7 @@ def __init__(self, path, name, version, files, release=None, arch=None):
         self.headers = {}
         self.files = self.get_all_files(files)
         self.path = path
+        self.size = size if size else self.get_path_size(path)
         self.package()
 
     def __hash__(self):
@@ -98,6 +99,16 @@ def list_dir(path):
             else:
                 files += LocalPackage.list_dir(fp)
         return files
+
+    def get_path_size(self, path):
+        total_size = 0
+        with os.scandir(path) as entries:
+            for entry in entries:
+                if entry.is_file():
+                    total_size += entry.stat().st_size
+                elif entry.is_dir():
+                    total_size += self.get_path_size(entry.path)
+        return total_size
 
     def package(self):
         count = 0
@@ -244,7 +255,7 @@ class Repository(PackageInfo):
 
     def __init__(self, name, repository_dir, stdio=None):
         self.repository_dir = repository_dir
-        super(Repository, self).__init__(name, None, None, None, None)
+        super(Repository, self).__init__(name, None, None, None, None, None)
         self.stdio = stdio
         self._load()
@@ -308,6 +319,7 @@ def _load(self):
                 self.set_release(data.get('release'))
                 self.md5 = data.get('hash')
                 self.arch = data.get('arch')
+                self.size = data.get('size')
                 self.install_time = data.get('install_time', 0)
         except:
             pass
@@ -324,7 +336,8 @@ def _parse_path(self):
         self.set_version(version)
 
     def _dump(self):
-        data = {'version': self.version, 'hash': self.hash, 'release': self.release, 'arch': self.arch}
+        data = {'version': self.version, 'hash': self.hash,
+                'release': self.release, 'arch': self.arch, 'size': self.size}
         if self.install_time:
             data['install_time'] = self.install_time
         try:
@@ -415,6 +428,7 @@ def load_pkg(self, pkg, plugin):
             self.set_release(pkg.release)
             self.md5 = pkg.md5
             self.arch = pkg.arch
+            self.size = pkg.size
             self.install_time = time.time()
             if self._dump():
                 return True
@@ -433,7 +447,7 @@ def clear(self):
 
 class RepositoryVO(object):
 
-    def __init__(self, name, version, release, arch, md5, path, tags=[]):
+    def __init__(self, name, version, release, arch, md5, path, tags=[], size=0):
         self.name = name
         self.version = version
         self.release = release
@@ -441,6 +455,7 @@ def __init__(self, name, version, release, arch, md5, path, tags=[]):
         self.md5 = md5
         self.path = path
         self.tags = tags
+        self.size = size
 
 
 class ComponentRepository(object):
@@ -533,7 +548,8 @@ def _get_repository_vo(self, repository):
             repository.arch,
             repository.md5,
             repository.repository_dir,
-            []
+            [],
+            repository.size
         )
 
     def get_repositories(self, name, version=None, instance=True):
diff --git a/_rpm.py b/_rpm.py
index edf9593..d908074 100644
--- a/_rpm.py
+++ b/_rpm.py
@@ -44,7 +44,7 @@ def __init__(self, bytes_or_buffer, encoding=None, errors=None):
 
     @property
     def __cmp_value__(self):
-        return [(int(_i), _s) for _i, _s in re.findall('(\d+)([^\.]*)', self.__str__())]
+        return [(int(_i), _s) for _i, _s in re.findall('(\d+)([^\._]*)', self.__str__())]
 
     def __eq__(self, value):
         if value is None:
@@ -71,6 +71,7 @@ def __le__(self, value):
             return False
         return self.__eq__(value) or self.__lt__(value)
 
+
 class Release(Version):
 
     @property
@@ -84,12 +85,13 @@ def simple(self):
 
 class PackageInfo(object):
 
-    def __init__(self, name, version, release, arch, md5):
+    def __init__(self, name, version, release, arch, md5, size):
         self.name = name
         self.set_version(version)
         self.set_release(release)
         self.arch = arch
         self.md5 = md5
+        self.size = size
 
     def set_version(self, version):
         self.version = Version(str(version) if version else '')
@@ -142,11 +144,12 @@ def __init__(self, path):
             version = rpm.headers.get('version').decode(),
             release = rpm.headers.get('release').decode(),
             arch = rpm.headers.get('arch').decode(),
-            md5 = rpm.headers.get('md5').decode()
+            md5 = rpm.headers.get('md5').decode(),
+            size = rpm.headers.get('size')
         )
 
     def __str__(self):
-        return 'name: %s\nversion: %s\nrelease:%s\narch: %s\nmd5: %s' % (self.name, self.version, self.release, self.arch, self.md5)
+        return 'name: %s\nversion: %s\nrelease:%s\narch: %s\nmd5: %s\nsize: %s' % (self.name, self.version, self.release, self.arch, self.md5, self.size)
 
     def __hash__(self):
         return hash(self.path)
@@ -157,4 +160,39 @@ def file_name(self):
 
     def open(self):
         return rpmfile.open(self.path)
+
+
+def get_version_from_array(array):
+    version = ''
+    for _i, _s in array:
+        # join parts with '.'; get_prefix_version strips the trailing separator
+        version = version + str(_i) + _s + '.'
+    return Version(version)
+
+def add_sub_version(version, offset=1, add=1):
+    """
+    Increase one part of the version.
+
+    :param version: the version
+    :param offset: the 1-based index of the version part to increase
+    :param add: the value to add
+    :return: the new version
+    """
+    version_array = version.__cmp_value__
+    version_array[offset-1] = (version_array[offset-1][0] + add, version_array[offset-1][1])
+    return get_version_from_array(version_array)
+
+def get_prefix_version(version, offset=0):
+    """
+    Get the prefix sub version.
+
+    :param version: the version
+    :param offset: the number of leading version parts to keep
+    :return: the prefix version
+    """
+    if not offset:
+        return version
+    if offset >= len(version.__cmp_value__):
+        return version
+    version_array = version.__cmp_value__[:offset]
+    return get_version_from_array(version_array)[:-1]
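The `__cmp_value__` pattern change above (from `[^\.]*` to `[^\._]*`) makes `_` behave as a separator like `.` when versions are compared. A quick standalone check of the difference, using plain `re` rather than the obd class:

```python
import re

def cmp_value(version, suffix_pattern):
    # same tokenization as Version.__cmp_value__, parameterized by suffix pattern
    return [(int(i), s) for i, s in re.findall(r'(\d+)(%s)' % suffix_pattern, version)]

print(cmp_value('4.2.1_1', r'[^.]*'))   # old: [(4, ''), (2, ''), (1, '_1')]
print(cmp_value('4.2.1_1', r'[^._]*'))  # new: [(4, ''), (2, ''), (1, ''), (1, '')]
```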
diff --git a/_stdio.py b/_stdio.py
index c73cd59..517d7d8 100644
--- a/_stdio.py
+++ b/_stdio.py
@@ -670,11 +670,11 @@ def read(self, msg='', blocked=False):
         return self.get_input_stream().read(blocked)
 
     def confirm(self, msg):
-        msg = '%s [y/n]: ' % msg
-        self.print(msg, end='')
         if self.default_confirm:
-            self.verbose("default confirm: True")
+            self.verbose("%s and then auto confirm yes" % msg)
             return True
+        msg = '%s [y/n]: ' % msg
+        self.print(msg, end='')
         if self.isatty() and not self.syncing:
             while True:
                 try:
@@ -686,6 +686,7 @@ def confirm(self, msg):
                 except Exception as e:
                     if not e:
                         return False
+                    self.print(msg, end='')
         else:
             self.verbose("isatty: %s, syncing: %s, auto confirm: False" % (self.isatty(), self.syncing))
             return False
diff --git a/_tool.py b/_tool.py
new file mode 100644
index 0000000..2e09c84
--- /dev/null
+++ b/_tool.py
@@ -0,0 +1,237 @@
+# coding: utf-8
+# OceanBase Deploy.
+# Copyright (C) 2021 OceanBase
+#
+# This file is part of OceanBase Deploy.
+#
+# OceanBase Deploy is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# OceanBase Deploy is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
+
+
+from __future__ import absolute_import, division, print_function
+
+import os
+
+from _manager import Manager
+from _rpm import PackageInfo
+from _stdio import SafeStdio
+from tool import YamlLoader, DirectoryUtil
+from const import COMP_OBCLIENT, COMP_OCEANBASE_DIAGNOSTIC_TOOL, COMP_OBDIAG
+
+
+yaml = YamlLoader()
+
+TOOLS = [COMP_OBCLIENT, COMP_OCEANBASE_DIAGNOSTIC_TOOL]
+TOOL_ALIAS = {
+    COMP_OBDIAG: COMP_OCEANBASE_DIAGNOSTIC_TOOL,
+}
+
+
+class ToolConfig(SafeStdio, PackageInfo):
+
+    def __init__(self, config_path, repo_manager, stdio=None):
+        self.config_path = config_path
+        self.name = os.path.basename(os.path.split(config_path)[0])
+        self._data = None
+        self.repo_manager = repo_manager
+        self.stdio = stdio
+
+    @property
+    def data(self):
+        if self._data is None:
+            # load .config from the tool
+            if not os.path.exists(self.config_path):
+                self._data = {}
+            else:
+                with open(self.config_path, 'rb') as f:
+                    self._data = yaml.load(f)
+            # load arch and size from the repository manager
+            if self._data and self.repo_manager:
+                repo = self.repo_manager.get_repository(self.name, self._data.get('version'), package_hash=self._data.get('hash'))
+                self._data['arch'] = repo.arch
+                self._data['size'] = repo.size
+        return self._data
+
+    @property
+    def version(self):
+        return self.data.get('version') if self.data else None
+
+    @property
+    def path(self):
+        return self.data.get('path') if self.data else None
+
+    @property
+    def arch(self):
+        return self.data.get('arch') if self.data else None
+
+    @property
+    def size(self):
+        return self.data.get('size') if self.data else None
+
+    @property
+    def hash(self):
+        return self.data.get('hash') if self.data else None
+
+    def save(self, version, repo_hash, path):
+        self.stdio.verbose('dump tool info to %s' % self.config_path)
+        try:
+            with open(self.config_path, 'w') as f:
+                data = {
+                    'version': version,
+                    'hash': repo_hash,
+                    'path': path
+                }
+                yaml.dump(data, f)
+            return True
+        except:
+            self.stdio.exception('dump tool info to %s failed' % self.config_path)
+            return False
+
+
+class Tool(SafeStdio):
+
+    CONFIG_YAML = '.config'
+
+    def __init__(self, config_dir, repo_manager, stdio=None):
+        self.config_path = os.path.join(config_dir, self.CONFIG_YAML)
+        self.name = os.path.split(config_dir)[1]
+        self._config = None
+        self.stdio = stdio
+        self.force = False
+        self.repo_manager = repo_manager
+
+    def set_force(self, force):
+        self.force = force
+
+    @property
+    def config(self):
+        return ToolConfig(self.config_path, self.repo_manager, self.stdio)
+
+    def save_config(self, version, repo_hash, path):
+        return self.config.save(version, repo_hash, path)
+
+    def install(self, repository, install_path):
+        if self.config.path == "" or self.config.path == "/" or self.config.path == os.getenv('HOME') or self.config.path == "/etc" or self.config.path == "/var":
+            self.stdio.error('Refuse a high-risk deletion operation of tool %s' % self.name)
+            return False
+        elif self.config.path:
+            if not self.uninstall():
+                self.stdio.error('Failed to uninstall the old version of tool %s' % self.name)
+                return False
+        else:
+            pass
+        if DirectoryUtil.copy(repository.repository_dir, install_path, self.stdio):
+            return True
+        else:
+            return False
+
+    def uninstall(self):
+        if not self.config.path:
+            self.stdio.error('Tool %s has no install folder' % self.name)
+            return False
+        if self.config.path == "" or self.config.path == "/" or self.config.path == os.getenv('HOME') or self.config.path == "/etc" or self.config.path == "/var":
+            self.stdio.error('Refuse a high-risk deletion operation of tool %s' % self.name)
+            return False
+        if not DirectoryUtil.rm(self.config.path, self.stdio):
+            self.stdio.error('remove tool %s failed' % self.name)
+            return False
+        return True
+
+
+class ToolManager(Manager):
+
+    RELATIVE_PATH = 'tool/'
+
+    def __init__(self, home_path, repo_manager, lock_manager=None, stdio=None):
+        super(ToolManager, self).__init__(home_path, stdio)
+        self.lock_manager = lock_manager
+        self.repo_manager = repo_manager
+
+    def _lock(self, read_only=False):
+        if self.lock_manager:
+            if read_only:
+                return self.lock_manager.mirror_and_repo_sh_lock()
+            else:
+                return self.lock_manager.mirror_and_repo_ex_lock()
+        return True
+
+    def get_tool_list(self):
+        tools = []
+        for name in os.listdir(self.path):
+            path = os.path.join(self.path, name)
+            if os.path.isdir(path):
+                tools.append(Tool(path, self.repo_manager, stdio=self.stdio))
+        return tools
+
+    def is_belong_tool(self, name):
+        name = TOOL_ALIAS.get(name, name)
+        if name in TOOLS:
+            return True
+        return False
+
+    def get_tool_offical_name(self, name):
+        offical_name = None
+        if not self.is_belong_tool(name):
+            return offical_name
+        name = TOOL_ALIAS.get(name, name)
+        return name
+
+    def get_support_tool_list(self):
+        return TOOLS
+
+    def get_tool_config_by_name(self, name):
+        self._lock(True)
+        path = os.path.join(self.path, name)
+        if os.path.isdir(path):
+            return Tool(path, self.repo_manager, stdio=self.stdio)
+        return None
+
+    def create_tool_config(self, name):
+        self._lock()
+        config_dir = os.path.join(self.path, name)
+        self._mkdir(config_dir)
+        return Tool(config_dir, self.repo_manager, stdio=self.stdio)
+
+    def remove_tool_config(self, name):
+        self._lock()
+        config_dir = os.path.join(self.path, name)
+        self._rm(config_dir)
+
+    def install_tool(self, tool, repository, install_path):
+        return tool.install(repository, install_path)
+
+    def install_requirement(self, repository, install_path):
+        if DirectoryUtil.copy(repository.repository_dir, install_path, self.stdio):
+            return True
+        else:
+            return False
+
+    def uninstall_tool(self, tool):
+        return tool.uninstall()
+
+    def update_tool(self, tool, repository, install_path):
+        if not self.uninstall_tool(tool):
+            return False
+        if not self.install_tool(tool, repository, install_path):
+            return False
+        return True
+
+    def is_tool_install(self, name):
+        config_dir = os.path.join(self.path, name)
+        if os.path.isdir(config_dir):
+            return True
+        return False
+
+    def check_if_avaliable_update(self, tool, package):
+        if package.version > tool.config.version:
+            return True
+        return False
\ No newline at end of file
diff --git a/_types.py b/_types.py
index a2eaecd..80f62a1 100644
--- a/_types.py
+++ b/_types.py
@@ -25,7 +25,7 @@
 import traceback
 
-__all__ = ("Moment", "Time", "Capacity", "CapacityMB", "StringList", "Dict", "List", "StringOrKvList", "Double", "Boolean", "Integer", "String", "Path", "SafeString", "PathList", "SafeStringList", "DBUrl", "WebUrl", "OBUser")
+__all__ = ("Moment", "Time", "Capacity", "CapacityWithB", "CapacityMB", "StringList", "Dict", "List", "StringOrKvList", "Double", "Boolean", "Integer", "String", "Path", "SafeString", "PathList", "SafeStringList", "DBUrl", "WebUrl", "OBUser")
 
 
 class Null(object):
@@ -33,7 +33,9 @@ class Null(object):
 
     def __init__(self):
         pass
 
+
 class ConfigItemType(object):
+
     TYPE_STR = None
 
     NULL = Null()
@@ -92,6 +94,7 @@ def __le__(self, value):
             return False
         return self.__eq__(value) or self.__lt__(value)
 
+
 class Moment(ConfigItemType):
 
     def _format(self):
@@ -111,6 +114,7 @@
 
 class Time(ConfigItemType):
+
     UNITS = {
         'ns': 0.000000001,
         'us': 0.000001,
@@ -139,32 +143,102 @@ def _format(self):
             self._value = 0
 
 
+class DecimalValue:
+
+    def __init__(self, value, precision=None):
+        if isinstance(value, str):
+            self.value = float(value)
+        else:
+            self.value = value
+        self.precision = precision
+
+    def __repr__(self):
+        if self.precision is not None:
+            return "%.*f" % (self.precision, self.value)
+        return str(self.value)
+
+    def __add__(self, other):
+        if isinstance(other, DecimalValue):
+            return DecimalValue(self.value + other.value, self.precision if self.precision is not None else other.precision)
+        return DecimalValue(self.value + other, self.precision)
+
+    def __sub__(self, other):
+        if isinstance(other, DecimalValue):
+            return DecimalValue(self.value - other.value, self.precision if self.precision is not None else other.precision)
+        return DecimalValue(self.value - other, self.precision)
+
+    def __mul__(self, other):
+        if isinstance(other, DecimalValue):
+            return DecimalValue(self.value * other.value, self.precision if self.precision is not None else other.precision)
+        return DecimalValue(self.value * other, self.precision)
+
+    def __truediv__(self, other):
+        if isinstance(other, DecimalValue):
+            return DecimalValue(self.value / other.value, self.precision if self.precision is not None else other.precision)
+        return DecimalValue(self.value / other, self.precision)
+
+
 class Capacity(ConfigItemType):
-    UNITS = {"B": 1, "K": 1 << 10, "M": 1 << 20, "G": 1 << 30, "T": 1 << 40, 'P': 1 << 50}
+
+    UNITS = {"B": 1, "K": 1 << 10, "M": 1 << 20, "G": 1 << 30, "T": 1 << 40, "P": 1 << 50}
+
+    LENGTHS = {"B": 4, "K": 8, "M": 12, "G": 16, "T": 20, "P": 24}
+
+    def __init__(self, s, precision = 0):
+        self.precision = precision
+        super(Capacity, self).__init__(s)
+
+    def __str__(self):
+        return str(self.value)
+
+    @property
+    def btyes(self):
+        return self._value
 
     def _format(self):
         if self._origin:
-            self._origin = str(self._origin).strip()
-            if self._origin.isdigit():
+            if not isinstance(self._origin, str) or self._origin.strip().isdigit():
+                self._origin = int(float(self._origin))
                 n = self._origin
-                unit = self.UNITS['M']
+                unit = self.UNITS['B']
+                for u in self.LENGTHS:
+                    if len(str(self._origin)) < self.LENGTHS[u]:
+                        break
+                else:
+                    u = 'P'
             else:
-                r = re.match('^(\d+)(\w)(I?B)?$', self._origin.upper())
-                n, u, _ = r.groups()
+                groups = re.match("^(\d+)\s*([BKMGTP])((IB)|B)?\s*$", self._origin.upper())
+                if not groups:
+                    raise ValueError("Invalid capacity string: %s" % self._origin)
+                n, u, _, _ = groups.groups()
                 unit = self.UNITS.get(u.upper())
             if unit:
                 self._value = int(n) * unit
+                self.value = str(DecimalValue(self._value, self.precision) / self.UNITS[u]) + u
             else:
                 raise Exception('Invalid Value')
         else:
             self._value = 0
+            self.value = str(DecimalValue(0, self.precision))
+
+
+class CapacityWithB(Capacity):
+
+    def __init__(self, s):
+        super(CapacityWithB, self).__init__(s, precision=0)
+
+    def _format(self):
+        super(CapacityWithB, self)._format()
+        self.value = self.value + 'B'
 
 
 class CapacityMB(Capacity):
+
     def _format(self):
         super(CapacityMB, self)._format()
         if isinstance(self._origin, str) and self._origin.isdigit():
             self.value = self._origin + 'M'
+            self._value *= self.UNITS['M']
         if not self._origin:
             self.value = '0M'
@@ -263,25 +337,34 @@
 class String(ConfigItemType):
 
     def _format(self):
         self.value = self._value = str(self._origin) if self._origin else ''
 
+
 # this type is used to ensure the parameter is a valid oceanbase user
 class OBUser(ConfigItemType):
+
     OB_USER_PATTERN = re.compile("^[a-zA-Z0-9_\.-]+(@[a-zA-Z0-9_\.-]+)?(#[a-zA-Z0-9_\.-]+)?$")
+
     def _format(self):
         if not self.OB_USER_PATTERN.match(str(self._origin)):
             raise Exception("%s is not a valid config" % self._origin)
         self.value = self._value = str(self._origin) if self._origin else ''
 
+
 # this type is used to ensure the parameter not containing special characters to inject command
class SafeString(ConfigItemType):
+
     SAFE_STRING_PATTERN = re.compile("^[a-zA-Z0-9\u4e00-\u9fa5\-_:@/\.]*$")
+
     def _format(self):
         if not self.SAFE_STRING_PATTERN.match(str(self._origin)):
             raise Exception("%s is not a valid config" % self._origin)
         self.value = self._value = str(self._origin) if self._origin else ''
 
+
 # this type is used to ensure the parameter not containing special characters to inject command
 class SafeStringList(ConfigItemType):
+
     SAFE_STRING_PATTERN = re.compile("^[a-zA-Z0-9\u4e00-\u9fa5\-_:@/\.]*$")
+
     def _format(self):
         if self._origin:
             self._origin = str(self._origin).strip()
@@ -292,9 +375,12 @@ def _format(self):
         else:
             self._value = []
 
+
 # this type is used to ensure the parameter is a valid path by checking it's only containing certain characters and not crossing path
 class Path(ConfigItemType):
+
     PATH_PATTERN = re.compile("^[a-zA-Z0-9\u4e00-\u9fa5\-_:@/\.]*$")
+
     def _format(self):
         parent_path = "/{0}".format(uuid.uuid4().hex)
         absolute_path = "/".join([parent_path, str(self._origin)])
@@ -304,9 +390,12 @@ def _format(self):
             raise Exception("%s is not a valid path" % self._origin)
         self.value = self._value = str(self._origin) if self._origin else ''
 
+
 # this type is used to ensure the parameter is a valid path by checking it's only containing certain characters and not crossing path
 class PathList(ConfigItemType):
+
     PATH_PATTERN = re.compile("^[a-zA-Z0-9\u4e00-\u9fa5\-_:@/\.]*$")
+
     def _format(self):
         parent_path = "/{0}".format(uuid.uuid4().hex)
         if self._origin:
@@ -320,17 +409,23 @@ def _format(self):
         else:
             self._value = []
 
+
 # this type is used to ensure the parameter is a valid database connection url
 class DBUrl(ConfigItemType):
+
     DBURL_PATTERN = re.compile("^jdbc:(mysql|oceanbase):(\/\/)([a-zA-Z0-9_.-]+)(:[0-9]{1,5})?\/([a-zA-Z0-9_\-]+)(\?[a-zA-Z0-9_&;=.-]*)?$")
+
     def _format(self):
         if not self.DBURL_PATTERN.match(str(self._origin)):
             raise Exception("%s is not a valid config" % self._origin)
         self.value = self._value = str(self._origin) if self._origin else ''
 
+
 # this type is used to ensure the parameter is a valid web url
 class WebUrl(ConfigItemType):
+
     WEBURL_PATTERN = re.compile("^(https?:\/\/)?([\da-z_.-]+)(:[0-9]{1,5})?([\/\w \.-]*)*\/?(?:\?[\w&=_.-]*)?$")
+
     def _format(self):
         if not self.WEBURL_PATTERN.match(str(self._origin)):
             raise Exception("%s is not a valid config" % self._origin)
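The reworked `Capacity._format` is worth a concrete trace. A standalone sketch of the unit bucketing it applies to a bare integer: the display unit is chosen from the digit count of the byte value (`LENGTHS`), then rendered at the requested precision, as `show_repo` in `_cmd.py` does with `Capacity(x.size, 2).value`:

```python
# Mirrors the integer branch of Capacity._format: pick the first unit whose
# digit threshold the value stays under, then format with fixed precision.
LENGTHS = {"B": 4, "K": 8, "M": 12, "G": 16, "T": 20, "P": 24}
UNITS = {"B": 1, "K": 1 << 10, "M": 1 << 20, "G": 1 << 30, "T": 1 << 40, "P": 1 << 50}

def human_size(n_bytes, precision=2):
    for u in LENGTHS:
        if len(str(n_bytes)) < LENGTHS[u]:
            break
    else:
        u = 'P'
    return "%.*f%s" % (precision, n_bytes / UNITS[u], u)

print(human_size(512))         # '512.00B'
print(human_size(1073741824))  # '1024.00M' -- ten digits falls in the 'M' bucket
```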
'ocp-server-ce' +COMPS_OCP = [COMP_OCP_SERVER, COMP_OCP_SERVER_CE] + # service docs url -DISABLE_SWAGGER = '' \ No newline at end of file +DISABLE_SWAGGER = '' + +RSA_KEY_SIZE = 512 \ No newline at end of file diff --git a/core.py b/core.py index f78c978..df1510b 100644 --- a/core.py +++ b/core.py @@ -25,7 +25,7 @@ import time from optparse import Values from copy import deepcopy, copy -import requests +from collections import defaultdict import tempfile from subprocess import call as subprocess_call @@ -37,12 +37,14 @@ from _mirror import MirrorRepositoryManager, PackageInfo from _plugin import PluginManager, PluginType, InstallPlugin, PluginContextNamespace from _deploy import DeployManager, DeployStatus, DeployConfig, DeployConfigStatus, Deploy, ClusterStatus +from _tool import Tool, ToolManager from _repository import RepositoryManager, LocalPackage, Repository import _errno as err from _lock import LockManager, LockMode from _optimize import OptimizeManager from _environ import ENV_REPO_INSTALL_MODE, ENV_BASE_DIR -from const import OB_OFFICIAL_WEBSITE +from _types import Capacity +from const import COMP_OCEANBASE_DIAGNOSTIC_TOOL, COMP_OBCLIENT from ssh import LocalClient @@ -61,6 +63,7 @@ def __init__(self, home_path, dev_mode=False, lock_mode=None, stdio=None): self._plugin_manager = None self._lock_manager = None self._optimize_manager = None + self._tool_manager = None self.stdio = None self._stdio_func = None self.ssh_clients = {} @@ -110,6 +113,12 @@ def optimize_manager(self): if not self._optimize_manager: self._optimize_manager = OptimizeManager(self.home_path, stdio=self.stdio) return self._optimize_manager + + @property + def tool_manager(self): + if not self._tool_manager: + self._tool_manager = ToolManager(self.home_path, self.repository_manager, self.lock_manager, self.stdio) + return self._tool_manager def _global_ex_lock(self): self.lock_manager.global_ex_lock() @@ -345,10 +354,12 @@ def search_py_script_plugin(self, repositories, script_name, no_found_act='exit' self._call_stdio(msg_lv, 'No such %s plugin for %s-%s' % (script_name, repository.name, repository.version)) return plugins - def search_images(self, component_name, version, release=None, disable=[], usable=[], release_first=False, print_match=True): + def search_images(self, component_name, version=None, min_version=None, max_version=None, release=None, disable=[], + usable=[], release_first=False, print_match=True): matchs = {} usable_matchs = [] - for pkg in self.mirror_manager.get_pkgs_info(component_name, version=version, release=release): + for pkg in self.mirror_manager.get_pkgs_info(component_name, version=version, min_version=min_version, + max_version=max_version, release=release): if pkg.md5 in disable: self._call_stdio('verbose', 'Disable %s' % pkg.md5) else: @@ -391,6 +402,7 @@ def search_components_from_mirrors(self, deploy_config, fuzzy_match=False, only_ errors.append('No such component name: {}'.format(component)) continue config = deploy_config.components[component] + # First, check if the component exists in the repository. If exists, check if the version is available. If so, use the repository directly. 
@@ -391,6 +402,7 @@ def search_components_from_mirrors(self, deploy_config, fuzzy_match=False, only_ errors.append('No such component name: {}'.format(component)) continue config = deploy_config.components[component] + # First, check whether the component already exists in the local repository. If it does and the required version is available, use that repository directly. self._call_stdio('verbose', 'Get %s repository' % component) repository = self.repository_manager.get_repository(name=component, version=config.version, tag=config.tag, release=config.release, package_hash=config.package_hash) @@ -399,7 +411,7 @@ if not config.tag: self._call_stdio('verbose', 'Search %s package from mirror' % component) pkg = self.mirror_manager.get_best_pkg( - name=component, version=config.version, md5=config.package_hash, release=config.release, fuzzy_match=fuzzy_match, only_info=only_info) + name=component, version=config.version, md5=config.package_hash, release=config.release, fuzzy=fuzzy_match, only_info=only_info) else: pkg = None if repository or pkg: @@ -791,23 +803,28 @@ def get_install_plugin_and_install(self, repositories, pkgs): repositories.append(repository) return install_plugins - def install_lib_for_repositories(self, repositories): + def install_lib_for_repositories(self, need_libs): all_data = [] - temp_repositories = repositories - while temp_repositories: + temp_libs = need_libs + while temp_libs: data = {} temp_map = {} - repositories = temp_repositories - temp_repositories = [] - for repository in repositories: - lib_name = '%s-libs' % repository.name - if lib_name in data: - temp_repositories.append(repository) - continue - data[lib_name] = {'global': { - 'version': repository.version - }} - temp_map[lib_name] = repository + libs = temp_libs + temp_libs = [] + for lib in libs: + repository = lib['repository'] + for requirement in lib['requirement']: + lib_name = requirement.name + if lib_name in data: + # Defer this lib so a request for a different version of the same lib is not dropped + temp_libs.append(lib) + continue + data[lib_name] = { + 'version': requirement.version, + 'min_version': requirement.min_version, + 'max_version': requirement.max_version, + } + temp_map[lib_name] = repository all_data.append((data, temp_map)) try: repositories_lib_map = {} @@ -1088,14 +1105,17 @@ def genconfig(self, name): generate_consistent_config = getattr(self.options, 'generate_consistent_config', False) component_num = len(repositories) + # Sort repositories in reverse dependency order so that oceanbase is the last one to be processed + repositories = self.sort_repository_by_depend(repositories, deploy_config) + repositories.reverse() + for repository in repositories: ret = self.call_plugin(gen_config_plugins[repository], repository, generate_consistent_config=generate_consistent_config) if ret: component_num -= 1 - if component_num == 0 and deploy_config.dump(): return True - + self.deploy_manager.remove_deploy_config(name) return False @@ -1138,11 +1158,16 @@ def export_to_ocp(self, name): mock_ocp_repository = Repository("ocp-server-ce", "/") mock_ocp_repository.version = "4.2.1" repositories = [mock_ocp_repository] + connect_plugin = self.plugin_manager.get_best_py_script_plugin('connect', mock_ocp_repository.name, mock_ocp_repository.version) takeover_precheck_plugins = self.search_py_script_plugin(repositories, "takeover_precheck") self._call_stdio('verbose', 'Successfully got the takeover precheck plugin.') takeover_plugins = self.search_py_script_plugin(repositories, "takeover") self._call_stdio('verbose', 'Successfully got the takeover plugin.') + ret = self.call_plugin(connect_plugin, mock_ocp_repository) + if not ret or not ret.get_return('connect'): + return False + # take over the cluster by calling the takeover precheck plugin self._call_stdio('print', 'Precheck for exporting obcluster to OCP.')
precheck_ret = self.call_plugin(takeover_precheck_plugins[mock_ocp_repository], mock_ocp_repository, cluster_config=cluster_config, clients=ssh_clients) @@ -1160,7 +1185,7 @@ def export_to_ocp(self, name): return False self.set_deploy(None) self.set_repositories(None) - takeover_ret = self.call_plugin(takeover_plugins[mock_ocp_repository], mock_ocp_repository, cluster_config=cluster_config, deploy_config = deploy_config, clients=ssh_clients) + takeover_ret = self.call_plugin(takeover_plugins[mock_ocp_repository], mock_ocp_repository, cluster_config=cluster_config, deploy_config=deploy_config, clients=ssh_clients) if not takeover_ret: return False else: @@ -1256,17 +1281,17 @@ def check_for_ocp(self, name): def sort_repository_by_depend(self, repositories, deploy_config): sorted_repositories = [] - sorted_componets = {} + sorted_components = {} while repositories: temp_repositories = [] for repository in repositories: cluster_config = deploy_config.components.get(repository.name) - for componet_name in cluster_config.depends: - if componet_name not in sorted_componets: + for component_name in cluster_config.depends: + if component_name not in sorted_components: temp_repositories.append(repository) break else: - sorted_componets[repository.name] = 1 + sorted_components[repository.name] = 1 sorted_repositories.append(repository) if len(temp_repositories) == len(repositories): sorted_repositories += temp_repositories @@ -1356,9 +1381,6 @@ def demo(self): self._call_stdio('error', 'Deploy "%s" is %s. You could not deploy an %s cluster.' % (name, deploy_info.status.value, deploy_info.status.value)) return False - if 'ocp-server' in getattr(self.options, 'components', ''): - self._call_stdio('error', 'Not support ocp-server.') - return components = set() for component_name in getattr(self.options, 'components', '').split(','): if component_name: @@ -1584,13 +1606,15 @@ def _deploy_cluster(self, deploy, repositories, scale_out=False, dump=True): def install_repository_to_servers(self, components, cluster_config, repository, ssh_clients, unuse_lib_repository=False): install_repo_plugin = self.plugin_manager.get_best_py_script_plugin('install_repo', 'general', '0.1') install_plugins = self.search_plugins([repository], PluginType.INSTALL) + need_libs = [] if not install_plugins: return False install_plugin = install_plugins[repository] check_file_map = install_plugin.file_map(repository) + requirement_map = install_plugin.requirement_map(repository) ret = self.call_plugin(install_repo_plugin, repository, obd_home=self.home_path, install_repository=repository, install_plugin=install_plugin, check_repository=repository, - check_file_map=check_file_map, + check_file_map=check_file_map, requirement_map=requirement_map, msg_lv='error' if unuse_lib_repository else 'warn') if not ret: return False @@ -1599,7 +1623,11 @@ def install_repository_to_servers(self, components, cluster_config, repository, elif unuse_lib_repository: return False self._call_stdio('print', 'Try to get lib-repository') - repositories_lib_map = self.install_lib_for_repositories([repository]) + need_libs.append({ + 'repository': repository, + 'requirement': ret.get_return('requirements') + }) + repositories_lib_map = self.install_lib_for_repositories(need_libs) if repositories_lib_map is False: self._call_stdio('error', 'Failed to install lib package for local') return False @@ -1616,26 +1644,34 @@ def install_repositories_to_servers(self, deploy_config, repositories, install_p install_repo_plugin = 
self.plugin_manager.get_best_py_script_plugin('install_repo', 'general', '0.1') check_file_maps = {} need_lib_repositories = [] + need_libs = [] for repository in repositories: cluster_config = deploy_config.components[repository.name] install_plugin = install_plugins[repository] check_file_map = check_file_maps[repository] = install_plugin.file_map(repository) + + requirement_map = install_plugin.requirement_map(repository) target_servers = cluster_config.added_servers if cluster_config.added_servers else None ret = self.call_plugin(install_repo_plugin, repository, obd_home=self.home_path, install_repository=repository, install_plugin=install_plugin, check_repository=repository, check_file_map=check_file_map, + requirement_map = requirement_map, target_servers=target_servers, msg_lv='error' if deploy_config.unuse_lib_repository else 'warn') if not ret: return False if not ret.get_return('checked'): need_lib_repositories.append(repository) + need_libs.append({ + 'repository': repository, + 'requirement': ret.get_return('requirements') + }) if need_lib_repositories: if deploy_config.unuse_lib_repository: # self._call_stdio('print', 'You could try using -U to work around the problem') return False self._call_stdio('print', 'Try to get lib-repository') - repositories_lib_map = self.install_lib_for_repositories(need_lib_repositories) + repositories_lib_map = self.install_lib_for_repositories(need_libs) if repositories_lib_map is False: self._call_stdio('error', 'Failed to install lib package for local') return False @@ -1644,10 +1680,12 @@ def install_repositories_to_servers(self, deploy_config, repositories, install_p check_file_map = check_file_maps[need_lib_repository] lib_repository = repositories_lib_map[need_lib_repository]['repositories'] install_plugin = repositories_lib_map[need_lib_repository]['install_plugin'] + requirement_map = install_plugins[need_lib_repository].requirement_map(need_lib_repository) target_servers = cluster_config.added_servers if cluster_config.added_servers else None ret = self.call_plugin(install_repo_plugin, need_lib_repository, obd_home=self.home_path, install_repository=lib_repository, install_plugin=install_plugin, check_repository=need_lib_repository, target_servers=target_servers, - check_file_map=check_file_map, msg_lv='error') + check_file_map=check_file_map, requirement_map=requirement_map, msg_lv='error') + if not ret or not ret.get_return('checked'): self._call_stdio('error', 'Failed to install lib package for cluster servers') return False @@ -2044,6 +2082,7 @@ def _start_cluster(self, deploy, repositories, scale_out=False): self._call_stdio('start_loading', 'Search plugins') start_check_plugins = self.search_py_script_plugin(repositories, 'start_check', no_found_act='warn') create_tenant_plugins = self.search_py_script_plugin(repositories, 'create_tenant', no_found_act='ignore') + tenant_optimize_plugins = self.search_py_script_plugin(repositories, 'tenant_optimize', no_found_act='ignore') start_plugins = self.search_py_script_plugin(repositories, 'start') connect_plugins = self.search_py_script_plugin(repositories, 'connect') bootstrap_plugins = self.search_py_script_plugin(repositories, 'bootstrap') @@ -2130,10 +2169,16 @@ def _start_cluster(self, deploy, repositories, scale_out=False): if self.get_namespace(repository.name).get_variable("create_tenant_options"): if not self.call_plugin(create_tenant_plugins[repository], repository): return False - + if repository in tenant_optimize_plugins: + if not 
self.call_plugin(tenant_optimize_plugins[repository], repository): + return False if deploy_config.auto_create_tenant: - create_tenant_options = Values({"variables": "ob_tcp_invited_nodes='%'", "create_if_not_exists": True}) - self.call_plugin(create_tenant_plugins[repository], repository, create_tenant_options=create_tenant_options) + create_tenant_options = [Values({"variables": "ob_tcp_invited_nodes='%'", "create_if_not_exists": True})] + if not self.call_plugin(create_tenant_plugins[repository], repository, create_tenant_options=create_tenant_options): + return False + if repository in tenant_optimize_plugins: + if not self.call_plugin(tenant_optimize_plugins[repository], repository): + return False if not start_all: component_num -= 1 @@ -2181,6 +2226,7 @@ def create_tenant(self, name): connect_plugins = self.search_py_script_plugin(repositories, 'connect') create_tenant_plugins = self.search_py_script_plugin(repositories, 'create_tenant', no_found_act='ignore') + tenant_optimize_plugins = self.search_py_script_plugin(repositories, 'tenant_optimize', no_found_act='ignore') self._call_stdio('stop_loading', 'succeed') # Get the client @@ -2192,6 +2238,9 @@ if not self.call_plugin(create_tenant_plugins[repository], repository): return False + + if repository in tenant_optimize_plugins: + self.call_plugin(tenant_optimize_plugins[repository], repository) return True def get_component_repositories(self, deploy_info, components): @@ -3483,7 +3532,7 @@ def create_repository(self): files = {} success = True repo_path = attrs['path'] - info = PackageInfo(name=attrs['name'], version=attrs['version'], release=None, arch=None, md5=None) + info = PackageInfo(name=attrs['name'], version=attrs['version'], release=None, arch=None, md5=None, size=None) for item in plugin.file_list(info): path = os.path.join(repo_path, item.src_path) path = os.path.normcase(path) @@ -3504,7 +3553,10 @@ self._call_stdio('start_loading', 'Package') try: - pkg = LocalPackage(repo_path, attrs['name'], attrs['version'], files, getattr(self.options, 'release', None), getattr(self.options, 'arch', None)) + release = getattr(self.options, 'release', None) + arch = getattr(self.options, 'arch', None) + size = getattr(self.options, 'size', None) + pkg = LocalPackage(repo_path, attrs['name'], attrs['version'], files, release, arch, size) self._call_stdio('stop_loading', 'succeed') except: self._call_stdio('exception', 'Package failed') @@ -4188,7 +4240,7 @@ def update_obd(self, version, install_prefix='/'): self._call_stdio('critical', 'OBD upgrade plugin not found') return False pkg = self.mirror_manager.get_best_pkg(name=component_name) - if not (pkg and pkg > PackageInfo(component_name, version, pkg.release, pkg.arch, '')): + if not (pkg and pkg > PackageInfo(component_name, version, pkg.release, pkg.arch, '', 0)): self._call_stdio('print', 'No updates detected. OBD is already up to date.') return False @@ -4512,6 +4564,19 @@ def db_connect(self, name, opts): sync_config_plugin = self.plugin_manager.get_best_py_script_plugin('sync_cluster_config', 'general', '0.1') self.call_plugin(sync_config_plugin, repository) + + # Check whether obclient is available + ret = LocalClient.execute_command('%s --help' % opts.obclient_bin) + if not ret: + # install obclient + tool_name = COMP_OBCLIENT + if not self.tool_manager.is_tool_install(tool_name): + if not self.install_tool(tool_name): + self._call_stdio('error', '%s is not an executable file. 
Please use `--obclient-bin` to set.\nYou may not have obclient installed' % opts.obclient_bin) + return + tool = self.tool_manager.get_tool_config_by_name(tool_name) + opts.obclient_bin = os.path.join(tool.config.path, 'bin/obclient') + db_connect_plugin = self.plugin_manager.get_best_py_script_plugin('db_connect', 'general', '0.1') return self.call_plugin(db_connect_plugin, repository) @@ -4709,16 +4774,18 @@ def obdiag_online_func(self, name, fuction_type, opts): target_repository = repository break if fuction_type in ['gather_plan_monitor']: - setattr(opts, 'connect_cluster', True) + setattr(opts, 'connect_cluster', True) - diagnostic_component_name = 'oceanbase-diagnostic-tool' - diagnostic_component_version = '1.5' - deployed = self.obdiag_deploy(auto_deploy=True, version=diagnostic_component_version) - if deployed: - generate_config_plugin = self.plugin_manager.get_best_py_script_plugin('generate_config', diagnostic_component_name, diagnostic_component_version) + diagnostic_component_name = COMP_OCEANBASE_DIAGNOSTIC_TOOL + deployed = self.obdiag_deploy(fuction_type) + tool = self.tool_manager.get_tool_config_by_name(diagnostic_component_name) + if deployed and tool: + generate_config_plugin = self.plugin_manager.get_best_py_script_plugin('generate_config', diagnostic_component_name, tool.config.version) self.call_plugin(generate_config_plugin, target_repository, deploy_config=deploy_config) self._call_stdio('generate_config', 'succeed') - obdiag_plugin = self.plugin_manager.get_best_py_script_plugin(fuction_type, diagnostic_component_name, diagnostic_component_version) + scene_config_plugin = self.plugin_manager.get_best_py_script_plugin('scene_config', diagnostic_component_name, tool.config.version) + self.call_plugin(scene_config_plugin, target_repository) + obdiag_plugin = self.plugin_manager.get_best_py_script_plugin(fuction_type, diagnostic_component_name, tool.config.version) return self.call_plugin(obdiag_plugin, target_repository) else: self._call_stdio('error', err.EC_OBDIAG_FUCYION_FAILED.format(fuction=fuction_type)) @@ -4726,55 +4793,55 @@ def obdiag_online_func(self, name, fuction_type, opts): def obdiag_offline_func(self, fuction_type, opts): - component_name = 'oceanbase-diagnostic-tool' - pkg = self.mirror_manager.get_best_pkg(name=component_name) + tool_name = COMP_OCEANBASE_DIAGNOSTIC_TOOL + pkg = self.mirror_manager.get_best_pkg(name=tool_name) if not pkg: - self._call_stdio('critical', '%s package not found' % component_name) + self._call_stdio('critical', '%s package not found' % tool_name) return False repository = self.repository_manager.create_instance_repository(pkg.name, pkg.version, pkg.md5) - deployed = self.obdiag_deploy(auto_deploy=True, version=repository.version) - if deployed: - obdiag_plugin = self.plugin_manager.get_best_py_script_plugin(fuction_type, component_name, repository.version) + deployed = self.obdiag_deploy(fuction_type) + tool = self.tool_manager.get_tool_config_by_name(tool_name) + if deployed and tool: + scene_config_plugin = self.plugin_manager.get_best_py_script_plugin('scene_config', tool_name, repository.version) + self.call_plugin(scene_config_plugin, repository, clients={}) + obdiag_plugin = self.plugin_manager.get_best_py_script_plugin(fuction_type, tool_name, repository.version) return self.call_plugin(obdiag_plugin, repository, clients={}) else: self._call_stdio('error', err.EC_OBDIAG_FUCYION_FAILED.format(fuction=fuction_type)) return False - - - def obdiag_deploy(self, auto_deploy=False, install_prefix=None, version=None): - 
self._global_ex_lock() - component_name = 'oceanbase-diagnostic-tool' - if install_prefix is None: - install_prefix = os.path.join(os.getenv('HOME'), component_name) + + def obdiag_deploy(self, fuction_type): + component_name = COMP_OCEANBASE_DIAGNOSTIC_TOOL + # obdiag pre check pkg = self.mirror_manager.get_best_pkg(name=component_name) if not pkg: self._call_stdio('critical', '%s package not found' % component_name) return False - plugin = self.plugin_manager.get_best_plugin(PluginType.INSTALL, component_name, pkg.version) - self._call_stdio('print', 'obdiag plugin : %s' % plugin) repository = self.repository_manager.create_instance_repository(pkg.name, pkg.version, pkg.md5) - check_plugin = self.plugin_manager.get_best_py_script_plugin('pre_check', component_name, pkg.version) + pre_check_plugin = self.plugin_manager.get_best_py_script_plugin('pre_check', component_name, pkg.version) + if not pre_check_plugin: + self._call_stdio('info', '%s pre_check plugin not found' % component_name) + return True obd = self.fork() obd.set_deploy(deploy=None) - ret = obd.call_plugin(check_plugin, repository, clients={}, obdiag_path = install_prefix, obdiag_new_version = pkg.version, version_check = True) - if ret.get_return('obdiag_found'): - if ret.get_return('version_status'): - self._call_stdio('print', 'No updates detected. obdiag is already up to date.') - return True - else: - if not auto_deploy: - if not self._call_stdio('confirm', 'Found a higher version\n%s\nDo you want to use it?' % pkg): - return False - self._call_stdio('start_loading', 'Get local repositories and plugins') - repository = self.repository_manager.create_instance_repository(pkg.name, pkg.version, pkg.md5) - plugin = self.plugin_manager.get_best_plugin(PluginType.INSTALL, component_name, pkg.version) - repository.load_pkg(pkg, plugin) - src_path = os.path.join(repository.repository_dir, component_name) - if FileUtil.symlink(src_path, install_prefix, self.stdio): - self._call_stdio('stop_loading', 'succeed') - self._call_stdio('print', 'Deploy obdiag successful.\nCurrent version : %s. 
\nPath of obdiag : %s' % (pkg.version, install_prefix)) + ret = obd.call_plugin(pre_check_plugin, repository, clients={}) + if not ret.get_return('checked'): + self._call_stdio('error', 'Failed to get the pre-check result of tool %s' % component_name) + return False + # obdiag install + if not self.tool_manager.is_tool_install(component_name): + return self.install_tool(component_name, force=True) + else: + # try to update obdiag to the latest version + tool = self.tool_manager.get_tool_config_by_name(component_name) + obdiag_plugin = self.plugin_manager.get_best_py_script_plugin(fuction_type, component_name, tool.config.version) + if not obdiag_plugin: + self._call_stdio('warn', 'The obdiag version %s does not support the command "obd obdiag %s". Please update it' % (tool.config.version, fuction_type)) + if not self.update_tool(component_name): + if not obdiag_plugin: + self._call_stdio('error', 'Failed to update obdiag from version %s. Please update it manually' % (tool.config.version)) + return False return True - return False def get_repositories_utils(self, repositories): all_data = [] @@ -4831,9 +4898,263 @@ def install_utils_to_servers(self, repositories, repositories_utils_map, unuse_u utils_repository = repositories_utils_map[temp_repository]['repositories'] install_plugin = repositories_utils_map[temp_repository]['install_plugin'] check_file_map = check_file_maps[repository] = install_plugin.file_map(repository) + requirement_map = install_plugin.requirement_map(repository) ret = self.call_plugin(install_repo_plugin, repository, obd_home=self.home_path, install_repository=utils_repository, install_plugin=install_plugin, check_repository=repository, check_file_map=check_file_map, - msg_lv='error' if unuse_utils_repository else 'warn') + requirement_map=requirement_map, msg_lv='error' if unuse_utils_repository else 'warn') if not ret: return False return True + + def print_tools(self, tools, title): + if tools: + self._call_stdio('print_list', tools, + ['Name', 'Arch', 'Version', 'Install Path', 'Install Size'], + lambda x: [x.name, x.config.arch, x.config.version, x.config.path, Capacity(x.config.size, 2).value], + title=title, + ) + else: + self._call_stdio('print', 'No tools have been installed') + + def list_tools(self): + self._call_stdio('verbose', 'Get tool list') + tools = self.tool_manager.get_tool_list() + self.print_tools(tools, 'Tool List') + return True + + def check_requirement(self, tool_name, repository, package, file_map, requirement_map, install_path): + obd = self.fork() + obd.set_deploy(deploy=None) + check_requirement_plugin = self.plugin_manager.get_best_py_script_plugin('check_requirement', tool_name, package.version) + if not check_requirement_plugin: + self._call_stdio('verbose', '%s check_requirement plugin not found' % tool_name) + return True + ret = obd.call_plugin(check_requirement_plugin, + repository, + clients={}, + file_map = file_map, + requirement_map = requirement_map) + if not ret.get_return('checked'): + for requirement in ret.get_return('requirements'): + if not self.install_requirement(requirement.name, requirement.version, os.path.join(install_path, 'lib')): + self._call_stdio('error', 'Failed to install the requirement %s' % requirement.name) + return False + return True
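`print_tools` formats the install size with `Capacity(x.config.size, 2).value`, the same helper the repository listing now uses. A minimal sketch of what such a helper plausibly does, assuming it takes a raw byte count and a decimal precision (the real class lives in `_types.py` and its exact unit suffixes may differ):

```python
class Capacity(object):
    """Render a raw byte count as a human-readable size string."""

    UNITS = ['B', 'K', 'M', 'G', 'T', 'P']  # assumed suffixes

    def __init__(self, size, precision=0):
        self._size = float(size or 0)
        self._precision = precision

    @property
    def value(self):
        # Divide down by 1024 until the value fits the next unit.
        size, idx = self._size, 0
        while size >= 1024 and idx < len(self.UNITS) - 1:
            size /= 1024.0
            idx += 1
        return '%.*f%s' % (self._precision, size, self.UNITS[idx])


print(Capacity(73400320, 2).value)  # -> '70.00M'
```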
+ + def install_requirement(self, tool_name, version, install_path): + pkg = self.mirror_manager.get_best_pkg(name=tool_name, version=version) + if not pkg: + package_info = '%s-%s' % (tool_name, version) if version else tool_name + self._call_stdio('critical', 'No such package: %s' % package_info) + return False + + plugin = self.plugin_manager.get_best_plugin(PluginType.INSTALL, tool_name, pkg.version) + if not plugin: + self._call_stdio('critical', 'Unsupported requirement %s of version %s' % (tool_name, pkg.version)) + return False + + repository = self.repository_manager.create_instance_repository(pkg.name, pkg.version, pkg.md5) + + self._call_stdio('start_loading', 'Get local repositories and plugins') + if not repository.load_pkg(pkg, plugin): + self._call_stdio('error', 'Failed to extract file from %s' % pkg.path) + return False + self._call_stdio('stop_loading', 'succeed') + + self._call_stdio('start_loading', 'install requirement') + if not self.tool_manager.install_requirement(repository, install_path): + return False + self._call_stdio('stop_loading', 'succeed') + + file_map = plugin.file_map(pkg) + requirement_map = plugin.requirement_map(pkg) + if file_map and requirement_map: + if not self.check_requirement(tool_name, repository, pkg, file_map, requirement_map, install_path): + self._call_stdio('critical', 'Failed to check the requirements of tool %s' % tool_name) + return False + return True
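`install_requirement` and `check_requirement` are mutually recursive: checking a tool can surface missing libraries, installing a library re-runs the check for that library's own requirements, and the recursion bottoms out once a package declares nothing further. The control flow in isolation (a hypothetical `DEPS` table stands in for the plugin-provided requirement maps):

```python
# Hypothetical requirement table; in OBD these edges come from each
# package's install plugin via requirement_map().
DEPS = {
    'oceanbase-diagnostic-tool': ['openjdk-jre'],
    'openjdk-jre': [],
}

installed = set()


def install_requirement(name):
    # Install the package, then satisfy its own declared requirements.
    if name in installed:
        return True
    print('installing %s' % name)
    installed.add(name)
    return check_requirement(name)


def check_requirement(name):
    # Walk the declared requirements and install whatever is missing.
    for dep in DEPS.get(name, []):
        if dep not in installed and not install_requirement(dep):
            return False
    return True


install_requirement('oceanbase-diagnostic-tool')
# installing oceanbase-diagnostic-tool
# installing openjdk-jre
```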
+ + def _install_tool(self, tool_name, version, force, install_path): + pkg = self.mirror_manager.get_best_pkg(name=tool_name, version=version, only_info=True) + if not pkg: + package_info = '%s-%s' % (tool_name, version) if version else tool_name + self._call_stdio('critical', 'No such package: %s' % package_info) + return False + + plugin = self.plugin_manager.get_best_plugin(PluginType.INSTALL, tool_name, pkg.version) + if not plugin: + self._call_stdio('critical', 'Unsupported tool %s of version %s' % (tool_name, pkg.version)) + return False + + pkg = self.mirror_manager.get_best_pkg(name=tool_name, version=version) + if not pkg: + package_info = '%s-%s' % (tool_name, version) if version else tool_name + self._call_stdio('critical', 'No such package: %s' % package_info) + return False + + if not self._call_stdio('confirm', 'Found an available version\n%s\nDo you want to use it?' % pkg): + return False + + repository = self.repository_manager.create_instance_repository(pkg.name, pkg.version, pkg.md5) + + tool = self.tool_manager.create_tool_config(tool_name) + + self._call_stdio('start_loading', 'Get local repositories and plugins') + if not repository.load_pkg(pkg, plugin): + self._call_stdio('error', 'Failed to extract file from %s' % pkg.path) + self.tool_manager.remove_tool_config(tool_name) + return False + self._call_stdio('stop_loading', 'succeed') + + self._call_stdio('start_loading', 'install tool') + if not self.tool_manager.install_tool(tool, repository, install_path): + self.tool_manager.remove_tool_config(tool_name) + return False + self._call_stdio('stop_loading', 'succeed') + + file_map = plugin.file_map(pkg) + requirement_map = plugin.requirement_map(pkg) + if file_map and requirement_map: + if not self.check_requirement(tool_name, repository, pkg, file_map, requirement_map, install_path): + self._call_stdio('critical', 'Failed to check the requirements of tool %s' % tool_name) + self.tool_manager.remove_tool_config(tool_name) + return False + if not tool.save_config(pkg.version, repository.hash, install_path): + self._call_stdio('error', 'Failed to save tool config to %s' % tool.config_path) + self.tool_manager.remove_tool_config(tool_name) + return False + return True + + def install_tool(self, tool_name, force=None, version=None, install_prefix=None): + self._call_stdio('verbose', 'Try to install %s', tool_name) + self._global_ex_lock() + if not self.tool_manager.is_belong_tool(tool_name): + self._call_stdio('error', 'The tool %s is not supported' % tool_name) + self._call_stdio('print', 'Tool install only supports %s' % self.tool_manager.get_support_tool_list()) + return False + tool_name = self.tool_manager.get_tool_offical_name(tool_name) + if not tool_name: + return False + + if self.tool_manager.is_tool_install(tool_name): + self._call_stdio('print', 'The tool %s is already installed' % tool_name) + return True + + if not version: + version = getattr(self.options, 'version', None) + if not install_prefix: + install_prefix = self.options.prefix \ + if getattr(self.options, 'prefix', None) is not None else os.getenv('HOME') + force = self.options.force if getattr(self.options, 'force', None) is not None else force + + install_path = os.path.abspath(os.path.join(install_prefix, tool_name)) + + if not self._install_tool(tool_name, version, force, install_path): + self.tool_manager.remove_tool_config(tool_name) + return False + + tool = self.tool_manager.get_tool_config_by_name(tool_name) + self.print_tools([tool], 'Installed Tool') + self._call_stdio('print', 'Install tool %s completed.', tool_name) + + return True
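`install_tool` resolves every setting through the same precedence chain: an explicit argument wins, then the parsed CLI option, then a default such as `$HOME`; the final install path is always `<prefix>/<tool_name>`. The pattern in isolation (the `Opts` object is a hypothetical stand-in for the parsed optparse Values):

```python
import os


class Opts(object):
    """Hypothetical stand-in for the parsed command-line options."""
    prefix = None       # would be set by a --prefix flag
    version = '2.0.0'   # would be set by a --version flag


def resolve(explicit, option, default=None):
    # Explicit argument > CLI option > default.
    if explicit is not None:
        return explicit
    return option if option is not None else default


opts = Opts()
version = resolve(None, opts.version)
install_prefix = resolve(None, opts.prefix, os.getenv('HOME', '/root'))
install_path = os.path.abspath(os.path.join(install_prefix, 'obclient'))
print(version, install_path)  # e.g. 2.0.0 /root/obclient
```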
+ + def uninstall_tool(self, tool_name): + self._call_stdio('verbose', 'Try to uninstall %s', tool_name) + self._global_ex_lock() + force = self.options.force if getattr(self.options, 'force', None) is not None else False + + if not self.tool_manager.is_belong_tool(tool_name): + self._call_stdio('error', 'The tool %s is not supported' % tool_name) + return False + tool_name = self.tool_manager.get_tool_offical_name(tool_name) + if not tool_name: + return False + tool = self.tool_manager.get_tool_config_by_name(tool_name) + if not tool: + self._call_stdio('error', 'The tool %s is not installed' % tool_name) + return False + + self.print_tools([tool], 'Uninstall Tool') + if not self._call_stdio('confirm', 'Uninstall tool %s\nIs this ok ' % tool_name): + return False + if not self.tool_manager.uninstall_tool(tool): + self._call_stdio('error', 'Failed to uninstall the tool %s' % tool_name) + return False + self.tool_manager.remove_tool_config(tool_name) + self._call_stdio('print', 'Uninstall tool %s completed' % tool_name) + return True + + def _update_tool(self, tool, version, force, install_path): + pkg = self.mirror_manager.get_best_pkg(name=tool.name, version=version) + if not pkg: + package_info = '%s-%s' % (tool.name, version) if version else tool.name + self._call_stdio('critical', 'No such package: %s' % package_info) + return False + + plugin = self.plugin_manager.get_best_plugin(PluginType.INSTALL, tool.name, pkg.version) + if not plugin: + self._call_stdio('critical', 'Unsupported tool %s of version %s' % (tool.name, pkg.version)) + return False + + if self.tool_manager.check_if_avaliable_update(tool, pkg): + if not self._call_stdio('confirm', 'Found an available version\n%s\nDo you want to use it?' % pkg): + return False + else: + self._call_stdio('print', 'The tool %s is already at the latest version %s' % (tool.name, tool.config.version)) + return True + + repository = self.repository_manager.create_instance_repository(pkg.name, pkg.version, pkg.md5) + + self._call_stdio('start_loading', 'Get local repositories and plugins') + if not repository.load_pkg(pkg, plugin): + self._call_stdio('error', 'Failed to extract file from %s' % pkg.path) + return False + self._call_stdio('stop_loading', 'succeed') + + self._call_stdio('start_loading', 'install tool') + if not self.tool_manager.update_tool(tool, repository, install_path): + self.tool_manager.remove_tool_config(tool.name) + return False + self._call_stdio('stop_loading', 'succeed') + + file_map = plugin.file_map(pkg) + requirement_map = plugin.requirement_map(pkg) + if file_map and requirement_map: + if not self.check_requirement(tool.name, repository, pkg, file_map, requirement_map, install_path): + self._call_stdio('critical', 'Failed to check the requirements of tool %s' % tool.name) + return False + if not tool.save_config(pkg.version, repository.hash, install_path): + self._call_stdio('error', 'Failed to save tool config to %s' % tool.config_path) + return False + + self.print_tools([tool], 'Updated tool') + self._call_stdio('print', 'Update tool %s completed.', tool.name) + return True + + def update_tool(self, tool_name, force=False, version=None, install_prefix=None): + self._call_stdio('verbose', 'Try to update %s', tool_name) + self._global_ex_lock() + if not self.tool_manager.is_belong_tool(tool_name): + self._call_stdio('error', 'The tool %s is not supported' % tool_name) + self._call_stdio('print', 'Tool update only supports %s' % self.tool_manager.get_support_tool_list()) + return False + tool_name = self.tool_manager.get_tool_offical_name(tool_name) + if not tool_name: + return False + tool = self.tool_manager.get_tool_config_by_name(tool_name) + if not tool: + self._call_stdio('error', 'The tool %s is not installed' % tool_name) + return False + if not version: + version = getattr(self.options, 'version', None) + if not install_prefix: + previous_parent_path = os.path.dirname(tool.config.path) if tool.config.path else os.getenv('HOME') + install_prefix = self.options.prefix \ + if getattr(self.options, 'prefix', None) is not None else previous_parent_path + force = self.options.force if getattr(self.options, 'force', None) is not None else force + + install_path = os.path.abspath(os.path.join(install_prefix, tool_name)) + if not self._update_tool(tool, version, force, install_path): + return False + return True \ No 
newline at end of file diff --git a/example/all-components-min.yaml b/example/all-components-min.yaml index b238d2b..cc92fb1 100644 --- a/example/all-components-min.yaml +++ b/example/all-components-min.yaml @@ -49,6 +49,7 @@ oceanbase-ce: server1: mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started. + obshell_port: 2886 # Operation and maintenance port for Oceanbase Database. The default value is 2886. This parameter is valid only when the version of oceanbase-ce is 4.2.2.0 or later. # The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field. home_path: /root/observer # The directory for data storage. The default value is $home_path/store. @@ -59,6 +60,7 @@ oceanbase-ce: server2: mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started. + obshell_port: 2886 # Operation and maintenance port for Oceanbase Database. The default value is 2886. This parameter is valid only when the version of oceanbase-ce is 4.2.2.0 or later. # The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field. home_path: /root/observer # The directory for data storage. The default value is $home_path/store. @@ -69,6 +71,7 @@ oceanbase-ce: server3: mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started. + obshell_port: 2886 # Operation and maintenance port for Oceanbase Database. The default value is 2886. This parameter is valid only when the version of oceanbase-ce is 4.2.2.0 or later. # The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field. home_path: /root/observer # The directory for data storage. The default value is $home_path/store. diff --git a/example/all-components.yaml b/example/all-components.yaml index ea1c781..c899097 100644 --- a/example/all-components.yaml +++ b/example/all-components.yaml @@ -50,6 +50,7 @@ oceanbase-ce: server1: mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started. + obshell_port: 2886 # Operation and maintenance port for Oceanbase Database. The default value is 2886. This parameter is valid only when the version of oceanbase-ce is 4.2.2.0 or later. # The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field. home_path: /root/observer # The directory for data storage. The default value is $home_path/store. @@ -60,6 +61,7 @@ oceanbase-ce: server2: mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. rpc_port: 2882 # Internal port for OceanBase Database. 
The default value is 2882. DO NOT change this value after the cluster is started. + obshell_port: 2886 # Operation and maintenance port for Oceanbase Database. The default value is 2886. This parameter is valid only when the version of oceanbase-ce is 4.2.2.0 or later. # The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field. home_path: /root/observer # The directory for data storage. The default value is $home_path/store. @@ -70,6 +72,7 @@ oceanbase-ce: server3: mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started. + obshell_port: 2886 # Operation and maintenance port for Oceanbase Database. The default value is 2886. This parameter is valid only when the version of oceanbase-ce is 4.2.2.0 or later. # The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field. home_path: /root/observer # The directory for data storage. The default value is $home_path/store. diff --git a/example/autodeploy/all-components.yaml b/example/autodeploy/all-components.yaml index 540f2e4..80fd6b2 100644 --- a/example/autodeploy/all-components.yaml +++ b/example/autodeploy/all-components.yaml @@ -128,8 +128,8 @@ obagent: # monagent_log_max_backups: 15 # Username for HTTP authentication. The default value is admin. # http_basic_auth_user: admin - # Password for HTTP authentication. The default value is root. - # http_basic_auth_password: root + # Password for HTTP authentication. The default is a random password. + # http_basic_auth_password: ****** # Monitor password for OceanBase Database. The default value is empty. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the ocp_agent_monitor_password in oceanbase-ce. # monitor_password: # The SQL port for observer. The default value is 2881. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the mysql_port in oceanbase-ce. diff --git a/example/autodeploy/default-example.yaml b/example/autodeploy/default-example.yaml index b332638..1aac397 100644 --- a/example/autodeploy/default-example.yaml +++ b/example/autodeploy/default-example.yaml @@ -128,8 +128,8 @@ obagent: # monagent_log_max_backups: 15 # Username for HTTP authentication. The default value is admin. # http_basic_auth_user: admin - # Password for HTTP authentication. The default value is root. - # http_basic_auth_password: root + # Password for HTTP authentication. The default is a random password. + # http_basic_auth_password: ****** # Monitor password for OceanBase Database. The default value is empty. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the ocp_agent_monitor_password in oceanbase-ce. # monitor_password: # The SQL port for observer. The default value is 2881. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the mysql_port in oceanbase-ce. 
diff --git a/example/autodeploy/distributed-with-obproxy-and-obagent-example.yaml b/example/autodeploy/distributed-with-obproxy-and-obagent-example.yaml index a0604f2..9161649 100644 --- a/example/autodeploy/distributed-with-obproxy-and-obagent-example.yaml +++ b/example/autodeploy/distributed-with-obproxy-and-obagent-example.yaml @@ -128,8 +128,8 @@ obagent: # monagent_log_max_backups: 15 # Username for HTTP authentication. The default value is admin. # http_basic_auth_user: admin - # Password for HTTP authentication. The default value is root. - # http_basic_auth_password: root + # Password for HTTP authentication. The default is a random password. + # http_basic_auth_password: ****** # Monitor password for OceanBase Database. The default value is empty. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the ocp_agent_monitor_password in oceanbase-ce. # monitor_password: # The SQL port for observer. The default value is 2881. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the mysql_port in oceanbase-ce. diff --git a/example/default-components-min.yaml b/example/default-components-min.yaml index 98bf9c6..4ab876b 100644 --- a/example/default-components-min.yaml +++ b/example/default-components-min.yaml @@ -47,6 +47,7 @@ oceanbase-ce: server1: mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started. + obshell_port: 2886 # Operation and maintenance port for Oceanbase Database. The default value is 2886. This parameter is valid only when the version of oceanbase-ce is 4.2.2.0 or later. # The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field. home_path: /root/observer # The directory for data storage. The default value is $home_path/store. @@ -57,6 +58,7 @@ oceanbase-ce: server2: mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started. + obshell_port: 2886 # Operation and maintenance port for Oceanbase Database. The default value is 2886. This parameter is valid only when the version of oceanbase-ce is 4.2.2.0 or later. # The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field. home_path: /root/observer # The directory for data storage. The default value is $home_path/store. @@ -67,6 +69,7 @@ oceanbase-ce: server3: mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started. + obshell_port: 2886 # Operation and maintenance port for Oceanbase Database. The default value is 2886. This parameter is valid only when the version of oceanbase-ce is 4.2.2.0 or later. # The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field. home_path: /root/observer # The directory for data storage. The default value is $home_path/store. 
diff --git a/example/default-components.yaml b/example/default-components.yaml index 65edfd2..33f51b2 100644 --- a/example/default-components.yaml +++ b/example/default-components.yaml @@ -47,6 +47,7 @@ oceanbase-ce: server1: mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started. + obshell_port: 2886 # Operation and maintenance port for Oceanbase Database. The default value is 2886. This parameter is valid only when the version of oceanbase-ce is 4.2.2.0 or later. # The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field. home_path: /root/observer # The directory for data storage. The default value is $home_path/store. @@ -57,6 +58,7 @@ oceanbase-ce: server2: mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started. + obshell_port: 2886 # Operation and maintenance port for Oceanbase Database. The default value is 2886. This parameter is valid only when the version of oceanbase-ce is 4.2.2.0 or later. # The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field. home_path: /root/observer # The directory for data storage. The default value is $home_path/store. @@ -67,6 +69,7 @@ oceanbase-ce: server3: mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started. + obshell_port: 2886 # Operation and maintenance port for Oceanbase Database. The default value is 2886. This parameter is valid only when the version of oceanbase-ce is 4.2.2.0 or later. # The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field. home_path: /root/observer # The directory for data storage. The default value is $home_path/store. diff --git a/example/distributed-example.yaml b/example/distributed-example.yaml index 06aefaa..9a03bd8 100644 --- a/example/distributed-example.yaml +++ b/example/distributed-example.yaml @@ -35,6 +35,7 @@ oceanbase-ce: server1: mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started. + obshell_port: 2886 # Operation and maintenance port for Oceanbase Database. The default value is 2886. This parameter is valid only when the version of oceanbase-ce is 4.2.2.0 or later. # The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field. home_path: /root/observer # The directory for data storage. The default value is $home_path/store. @@ -45,6 +46,7 @@ oceanbase-ce: server2: mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. rpc_port: 2882 # Internal port for OceanBase Database. 
The default value is 2882. DO NOT change this value after the cluster is started. + obshell_port: 2886 # Operation and maintenance port for Oceanbase Database. The default value is 2886. This parameter is valid only when the version of oceanbase-ce is 4.2.2.0 or later. # The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field. home_path: /root/observer # The directory for data storage. The default value is $home_path/store. @@ -55,6 +57,7 @@ oceanbase-ce: server3: mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started. + obshell_port: 2886 # Operation and maintenance port for Oceanbase Database. The default value is 2886. This parameter is valid only when the version of oceanbase-ce is 4.2.2.0 or later. # The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field. home_path: /root/observer # The directory for data storage. The default value is $home_path/store. diff --git a/example/distributed-with-obproxy-example.yaml b/example/distributed-with-obproxy-example.yaml index bd34782..bfb4a28 100644 --- a/example/distributed-with-obproxy-example.yaml +++ b/example/distributed-with-obproxy-example.yaml @@ -38,6 +38,7 @@ oceanbase-ce: server1: mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started. + obshell_port: 2886 # Operation and maintenance port for Oceanbase Database. The default value is 2886. This parameter is valid only when the version of oceanbase-ce is 4.2.2.0 or later. # The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field. home_path: /root/observer # The directory for data storage. The default value is $home_path/store. @@ -48,6 +49,7 @@ oceanbase-ce: server2: mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started. + obshell_port: 2886 # Operation and maintenance port for Oceanbase Database. The default value is 2886. This parameter is valid only when the version of oceanbase-ce is 4.2.2.0 or later. # The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field. home_path: /root/observer # The directory for data storage. The default value is $home_path/store. @@ -58,6 +60,7 @@ oceanbase-ce: server3: mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started. + obshell_port: 2886 # Operation and maintenance port for Oceanbase Database. The default value is 2886. This parameter is valid only when the version of oceanbase-ce is 4.2.2.0 or later. # The working directory for OceanBase Database. OceanBase Database is started under this directory. 
This is a required field. home_path: /root/observer # The directory for data storage. The default value is $home_path/store. diff --git a/example/grafana/all-components-with-prometheus-and-grafana.yaml b/example/grafana/all-components-with-prometheus-and-grafana.yaml index 2b3676c..5e1dd3a 100644 --- a/example/grafana/all-components-with-prometheus-and-grafana.yaml +++ b/example/grafana/all-components-with-prometheus-and-grafana.yaml @@ -109,8 +109,8 @@ obagent: # log_compress: true # Username for HTTP authentication. The default value is admin. # http_basic_auth_user: admin - # Password for HTTP authentication. The default value is root. - # http_basic_auth_password: xxxxxx + # Password for HTTP authentication. The default is a random password. + # http_basic_auth_password: ****** # Username for debug service. The default value is admin. # pprof_basic_auth_user: admin # Password for debug service. The default value is root. diff --git a/example/local-example.yaml b/example/local-example.yaml index 43d501c..c464204 100644 --- a/example/local-example.yaml +++ b/example/local-example.yaml @@ -16,6 +16,7 @@ oceanbase-ce: # devname: eth0 mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started. + obshell_port: 2886 # Operation and maintenance port for Oceanbase Database. The default value is 2886. This parameter is valid only when the version of oceanbase-ce is 4.2.2.0 or later. zone: zone1 # if current hardware's memory capacity is smaller than 50G, please use the setting of "mini-single-example.yaml" and do a small adjustment. memory_limit: 64G # The maximum running memory for an observer diff --git a/example/mini-distributed-example.yaml b/example/mini-distributed-example.yaml index c8d669b..cc50fd3 100644 --- a/example/mini-distributed-example.yaml +++ b/example/mini-distributed-example.yaml @@ -37,6 +37,7 @@ oceanbase-ce: server1: mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started. + obshell_port: 2886 # Operation and maintenance port for Oceanbase Database. The default value is 2886. This parameter is valid only when the version of oceanbase-ce is 4.2.2.0 or later. # The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field. home_path: /root/observer # The directory for data storage. The default value is $home_path/store. @@ -47,6 +48,7 @@ oceanbase-ce: server2: mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started. + obshell_port: 2886 # Operation and maintenance port for Oceanbase Database. The default value is 2886. This parameter is valid only when the version of oceanbase-ce is 4.2.2.0 or later. # The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field. home_path: /root/observer # The directory for data storage. The default value is $home_path/store. 
@@ -57,6 +59,7 @@ oceanbase-ce: server3: mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started. + obshell_port: 2886 # Operation and maintenance port for Oceanbase Database. The default value is 2886. This parameter is valid only when the version of oceanbase-ce is 4.2.2.0 or later. # The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field. home_path: /root/observer # The directory for data storage. The default value is $home_path/store. diff --git a/example/mini-distributed-with-obproxy-example.yaml b/example/mini-distributed-with-obproxy-example.yaml index b31e49e..5b24de1 100644 --- a/example/mini-distributed-with-obproxy-example.yaml +++ b/example/mini-distributed-with-obproxy-example.yaml @@ -40,6 +40,7 @@ oceanbase-ce: server1: mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started. + obshell_port: 2886 # Operation and maintenance port for Oceanbase Database. The default value is 2886. This parameter is valid only when the version of oceanbase-ce is 4.2.2.0 or later. # The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field. home_path: /root/observer # The directory for data storage. The default value is $home_path/store. @@ -50,6 +51,7 @@ oceanbase-ce: server2: mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started. + obshell_port: 2886 # Operation and maintenance port for Oceanbase Database. The default value is 2886. This parameter is valid only when the version of oceanbase-ce is 4.2.2.0 or later. # The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field. home_path: /root/observer # The directory for data storage. The default value is $home_path/store. @@ -60,6 +62,7 @@ oceanbase-ce: server3: mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started. + obshell_port: 2886 # Operation and maintenance port for Oceanbase Database. The default value is 2886. This parameter is valid only when the version of oceanbase-ce is 4.2.2.0 or later. # The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field. home_path: /root/observer # The directory for data storage. The default value is $home_path/store. diff --git a/example/mini-local-example.yaml b/example/mini-local-example.yaml index 9043dfc..4ae0f7c 100755 --- a/example/mini-local-example.yaml +++ b/example/mini-local-example.yaml @@ -16,6 +16,7 @@ oceanbase-ce: # devname: eth0 mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. 
rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started. + obshell_port: 2886 # Operation and maintenance port for Oceanbase Database. The default value is 2886. This parameter is valid only when the version of oceanbase-ce is 4.2.2.0 or later. zone: zone1 cluster_id: 1 # please set memory limit to a suitable value which is matching resource. diff --git a/example/mini-single-example.yaml b/example/mini-single-example.yaml index 06b9355..1e7f429 100755 --- a/example/mini-single-example.yaml +++ b/example/mini-single-example.yaml @@ -23,6 +23,7 @@ oceanbase-ce: # devname: eth0 mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started. + obshell_port: 2886 # Operation and maintenance port for Oceanbase Database. The default value is 2886. This parameter is valid only when the version of oceanbase-ce is 4.2.2.0 or later. zone: zone1 cluster_id: 1 # please set memory limit to a suitable value which is matching resource. diff --git a/example/mini-single-with-obproxy-example.yaml b/example/mini-single-with-obproxy-example.yaml index 28a57e4..6b67898 100644 --- a/example/mini-single-with-obproxy-example.yaml +++ b/example/mini-single-with-obproxy-example.yaml @@ -23,6 +23,7 @@ oceanbase-ce: # devname: eth0 mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started. + obshell_port: 2886 # Operation and maintenance port for Oceanbase Database. The default value is 2886. This parameter is valid only when the version of oceanbase-ce is 4.2.2.0 or later. zone: zone1 cluster_id: 1 # please set memory limit to a suitable value which is matching resource. diff --git a/example/obagent/distributed-with-obproxy-and-obagent-example.yaml b/example/obagent/distributed-with-obproxy-and-obagent-example.yaml index a4b1b36..51cd3f5 100644 --- a/example/obagent/distributed-with-obproxy-and-obagent-example.yaml +++ b/example/obagent/distributed-with-obproxy-and-obagent-example.yaml @@ -124,8 +124,8 @@ obagent: monagent_log_max_backups: 15 # Username for HTTP authentication. The default value is admin. http_basic_auth_user: admin - # Password for HTTP authentication. The default value is root. - http_basic_auth_password: root + # Password for HTTP authentication. The default is a random password. + http_basic_auth_password: ****** # Monitor password for OceanBase Database. The default value is empty. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the ocp_agent_monitor_password in oceanbase-ce. # monitor_password: # The SQL port for observer. The default value is 2881. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the mysql_port in oceanbase-ce. diff --git a/example/obagent/obagent-only-1.2.0-example.yaml b/example/obagent/obagent-only-1.2.0-example.yaml index d6a95b4..5d8bcfb 100644 --- a/example/obagent/obagent-only-1.2.0-example.yaml +++ b/example/obagent/obagent-only-1.2.0-example.yaml @@ -36,8 +36,8 @@ obagent: # log_compress: true # Username for HTTP authentication. The default value is admin. 
http_basic_auth_user: admin - # Password for HTTP authentication. The default value is root. - http_basic_auth_password: root + # Password for HTTP authentication. The default is a random password. + http_basic_auth_password: ****** # Username for debug service. The default value is admin. pprof_basic_auth_user: admin # Password for debug service. The default value is root. diff --git a/example/obagent/obagent-only-example.yaml b/example/obagent/obagent-only-example.yaml index fc45667..350df57 100644 --- a/example/obagent/obagent-only-example.yaml +++ b/example/obagent/obagent-only-example.yaml @@ -38,8 +38,8 @@ obagent: monagent_log_max_backups: 15 # Username for HTTP authentication. The default value is admin. http_basic_auth_user: admin - # Password for HTTP authentication. The default value is root. - http_basic_auth_password: root + # Password for HTTP authentication. The default is a random password. + http_basic_auth_password: ****** # Monitor password for OceanBase Database. The default value is empty. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the ocp_agent_monitor_password in oceanbase-ce. monitor_password: # The SQL port for observer. The default value is 2881. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the mysql_port in oceanbase-ce. diff --git a/example/oceanbase-3.x/distributed-with-obproxy-and-obagent-example.yaml b/example/oceanbase-3.x/distributed-with-obproxy-and-obagent-example.yaml index 706bd18..fde624f 100644 --- a/example/oceanbase-3.x/distributed-with-obproxy-and-obagent-example.yaml +++ b/example/oceanbase-3.x/distributed-with-obproxy-and-obagent-example.yaml @@ -119,8 +119,8 @@ obagent: # log_compress: true # Username for HTTP authentication. The default value is admin. http_basic_auth_user: admin - # Password for HTTP authentication. The default value is root. - http_basic_auth_password: root + # Password for HTTP authentication. The default is a random password. + http_basic_auth_password: ****** # Username for debug service. The default value is admin. pprof_basic_auth_user: admin # Password for debug service. The default value is root. diff --git a/example/ocp/distributed-with-obproxy-and-ocp-example.yaml b/example/ocp/distributed-with-obproxy-and-ocp-example.yaml new file mode 100644 index 0000000..5c1b752 --- /dev/null +++ b/example/ocp/distributed-with-obproxy-and-ocp-example.yaml @@ -0,0 +1,118 @@ +## Only need to configure when remote login is required +# user: +# username: your username +# password: your password if need +# key_file: your ssh-key file path if need +# port: your ssh port, default 22 +# timeout: ssh connection timeout (second), default 30 +oceanbase-ce: + servers: + - name: server1 + # Please don't use hostname, only IP can be supported + ip: 192.168.1.2 + - name: server2 + ip: 192.168.1.3 + - name: server3 + ip: 192.168.1.4 + global: + # Starting from observer version 4.2, the network selection for the observer is based on the 'local_ip' parameter, and the 'devname' parameter is no longer mandatory. + # If the 'local_ip' parameter is set, the observer will first use this parameter for the configuration, regardless of the 'devname' parameter. + # If only the 'devname' parameter is set, the observer will use the 'devname' parameter for the configuration. + # If neither the 'devname' nor the 'local_ip' parameters are set, the 'local_ip' parameter will be automatically assigned the IP address configured above. 
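The four `local_ip`/`devname` comments above describe a precedence rule introduced with observer 4.2. Reduced to code, the selection logic looks roughly like this (a hypothetical helper for illustration only; `server_ip` stands for the address configured in the `servers` list, and the real decision is made by the observer and OBD, not by this function):

```python
# Hypothetical illustration of the observer 4.2+ network selection rule
# described in the comments above; not OBD code.
def resolve_network(server_ip, local_ip=None, devname=None):
    if local_ip:                    # local_ip wins; devname is ignored
        return {'local_ip': local_ip}
    if devname:                     # only devname set: configure by device name
        return {'devname': devname}
    return {'local_ip': server_ip}  # neither set: fall back to the configured IP

print(resolve_network('192.168.1.2'))                  # {'local_ip': '192.168.1.2'}
print(resolve_network('192.168.1.2', devname='eth0'))  # {'devname': 'eth0'}
```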
+ # devname: eth0 + # if current hardware's memory capacity is smaller than 50G, please use the setting of "mini-single-example.yaml" and do a small adjustment. + memory_limit: 64G # The maximum running memory for an observer + # The reserved system memory. system_memory is reserved for general tenants. The default value is 30G. + system_memory: 30G + datafile_size: 192G # Size of the data file. + log_disk_size: 192G # The size of disk space used by the clog files. + enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. + enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false. + max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0. + # observer cluster name, consistent with obproxy's cluster_name + appname: obcluster + # root_password: # root user password, can be empty + # proxyro_password: # proxyro user password, consistent with obproxy's observer_sys_password, can be empty + # OCP meta tenant definition, including tenant name, cpu and memory + ocp_meta_tenant: + tenant_name: ocp_meta + max_cpu: 2.0 + memory_size: 2G + # ocp_meta_username: root # User to use under ocp meta tenant + # ocp_meta_password: ****** # Password used to connect to ocp meta tenant + # ocp_meta_db: meta_database # Database used to store ocp meta data + # OCP monitor tenant definition, including tenant name, cpu and memory + ocp_monitor_tenant: + tenant_name: ocp_monitor + max_cpu: 2.0 + memory_size: 2G + # ocp_monitor_username: root # User to use under ocp monitor tenant + # ocp_monitor_password: ****** # Password used to connect to ocp monitor tenant + # ocp_monitor_db: monitor_database # Database used to store ocp monitor data + + # In this example, multiple observer processes are deployed on a single node, so each process uses different ports. + # If the cluster is deployed across multiple nodes, the port and path settings can be the same. + server1: + mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. + rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started. + # The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field. + home_path: /root/observer + # The directory for data storage. The default value is $home_path/store. + # data_dir: /data + # The directory for clog, ilog, and slog. The default value is the same as the data_dir value. + # redo_dir: /redo + zone: zone1 + server2: + mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. + rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started. + # The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field. + home_path: /root/observer + # The directory for data storage. The default value is $home_path/store. + # data_dir: /data + # The directory for clog, ilog, and slog. The default value is the same as the data_dir value. + # redo_dir: /redo + zone: zone2 + server3: + mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started.
+ rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started. + # The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field. + home_path: /root/observer + # The directory for data storage. The default value is $home_path/store. + # data_dir: /data + # The directory for clog, ilog, and slog. The default value is the same as the data_dir value. + # redo_dir: /redo + zone: zone3 +obproxy-ce: + # Set dependent components for the component. + # When the associated configurations are not done, OBD will automatically get these configurations from the dependent components. + depends: + - oceanbase-ce + servers: + - 192.168.1.5 + global: + listen_port: 2883 # External port. The default value is 2883. + prometheus_listen_port: 2884 # The Prometheus port. The default value is 2884. + home_path: /root/obproxy + # oceanbase root server list + # format: ip:mysql_port;ip:mysql_port. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. + # rs_list: 192.168.1.2:2881;192.168.1.3:2881;192.168.1.4:2881 + enable_cluster_checkout: false + # observer cluster name, consistent with oceanbase-ce's appname. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. + # cluster_name: obcluster + skip_proxy_sys_private_check: true + enable_strict_kernel_release: false + # obproxy_sys_password: # obproxy sys user password, can be empty. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. + # observer_sys_password: # proxyro user password, consistent with oceanbase-ce's proxyro_password, can be empty. When a depends exists, OBD gets this value from the oceanbase-ce of the depends.
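When `depends` is set as above, OBD fills `rs_list` itself in the `ip:mysql_port;ip:mysql_port` format from the commented line; the 4.2.3 start plugin later in this patch does it by keeping the first server of every zone. A condensed sketch of that derivation:

```python
# Condensed version of the rs_list derivation in the obproxy start plugin:
# one ip:mysql_port entry per zone (first server of the zone wins), joined by ';'.
def build_rs_list(servers):
    # servers: iterable of (ip, zone, mysql_port) tuples
    root_servers = {}
    for ip, zone, mysql_port in servers:
        if zone not in root_servers:
            root_servers[zone] = '%s:%s' % (ip, mysql_port)
    return ';'.join(root_servers.values())

print(build_rs_list([
    ('192.168.1.2', 'zone1', 2881),
    ('192.168.1.3', 'zone2', 2881),
    ('192.168.1.4', 'zone3', 2881),
]))  # -> 192.168.1.2:2881;192.168.1.3:2881;192.168.1.4:2881
```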
+ocp-server-ce: + depends: + - oceanbase-ce + - obproxy-ce + servers: + - 192.168.1.6 + global: + home_path: /root/ocp + memory_size: 8G + port: 8080 + soft_dir: /home/root/software # Directory used to store packages + log_dir: /home/root/logs # Directory used to temporarily store downloaded logs + # admin_password: ****** # Password of ocp's admin user diff --git a/example/ocp/ocp-only-example.yaml b/example/ocp/ocp-only-example.yaml new file mode 100644 index 0000000..f7a6170 --- /dev/null +++ b/example/ocp/ocp-only-example.yaml @@ -0,0 +1,34 @@ +## Only need to configure when remote login is required +# user: +# username: your username +# password: your password if need +# key_file: your ssh-key file path if need +# port: your ssh port, default 22 +# timeout: ssh connection timeout (second), default 30 +ocp-server-ce: + servers: + - 192.168.1.2 + global: + home_path: /root/ocp + memory_size: 8G + port: 8080 + soft_dir: /home/root/software # Directory used to store packages + log_dir: /home/root/logs # Directory used to temporarily store downloaded logs + # admin_password: ****** # Password of ocp's admin user + jdbc_url: jdbc:oceanbase://192.168.1.2:2881/ocp_meta # JDBC URL of the meta obcluster + # OCP meta tenant definition, including tenant name, cpu and memory + ocp_meta_tenant: + tenant_name: ocp_meta + max_cpu: 2.0 + memory_size: 2G + # ocp_meta_username: root # User to use under ocp meta tenant + # ocp_meta_password: ****** # Password used to connect to ocp meta tenant + # ocp_meta_db: meta_database # Database used to store ocp meta data + # OCP monitor tenant definition, including tenant name, cpu and memory + ocp_monitor_tenant: + tenant_name: ocp_monitor + max_cpu: 2.0 + memory_size: 2G + # ocp_monitor_username: root # User to use under ocp monitor tenant + # ocp_monitor_password: ****** # Password used to connect to ocp monitor tenant + # ocp_monitor_db: monitor_database # Database used to store ocp monitor data diff --git a/example/prometheus/distributed-with-obagent-and-prometheus-example.yaml b/example/prometheus/distributed-with-obagent-and-prometheus-example.yaml index 58756f6..409a042 100644 --- a/example/prometheus/distributed-with-obagent-and-prometheus-example.yaml +++ b/example/prometheus/distributed-with-obagent-and-prometheus-example.yaml @@ -103,8 +103,8 @@ obagent: monagent_log_max_backups: 15 # Username for HTTP authentication. The default value is admin. http_basic_auth_user: admin - # Password for HTTP authentication. The default value is root. - http_basic_auth_password: root + # Password for HTTP authentication. The default is a random password. + http_basic_auth_password: ****** # Monitor password for OceanBase Database. The default value is empty. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the ocp_agent_monitor_password in oceanbase-ce. # monitor_password: # The SQL port for observer. The default value is 2881. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the mysql_port in oceanbase-ce. diff --git a/example/single-example.yaml b/example/single-example.yaml index 527f5ac..26c0175 100644 --- a/example/single-example.yaml +++ b/example/single-example.yaml @@ -23,6 +23,7 @@ oceanbase-ce: # devname: eth0 mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882.
DO NOT change this value after the cluster is started. + obshell_port: 2886 # Operation and maintenance port for OceanBase Database. The default value is 2886. This parameter is valid only when the version of oceanbase-ce is 4.2.2.0 or later. zone: zone1 # if current hardware's memory capacity is smaller than 50G, please use the setting of "mini-single-example.yaml" and do a small adjustment. memory_limit: 64G # The maximum running memory for an observer diff --git a/example/single-with-obproxy-example.yaml b/example/single-with-obproxy-example.yaml index 76fadf8..4295831 100644 --- a/example/single-with-obproxy-example.yaml +++ b/example/single-with-obproxy-example.yaml @@ -23,6 +23,7 @@ oceanbase-ce: # devname: eth0 mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started. + obshell_port: 2886 # Operation and maintenance port for OceanBase Database. The default value is 2886. This parameter is valid only when the version of oceanbase-ce is 4.2.2.0 or later. zone: zone1 # if current hardware's memory capacity is smaller than 50G, please use the setting of "mini-single-example.yaml" and do a small adjustment. memory_limit: 64G # The maximum running memory for an observer diff --git a/optimize/obproxy/3.1.0/sysbench.yaml b/optimize/obproxy/3.1.0/sysbench.yaml index fdc76ee..fd81564 100644 --- a/optimize/obproxy/3.1.0/sysbench.yaml +++ b/optimize/obproxy/3.1.0/sysbench.yaml @@ -1,9 +1,9 @@ test: system_config: - name: proxy_mem_limited - value: format_size(min(max(threads * (8 << 10), 2 << 30), 4 << 30), 0) + value: Capacity(min(max(threads * (8 << 10), 2 << 30), 4 << 30), 0) expression: true - condition: "lambda n, o: parse_size(n) > parse_size(o)" + condition: "lambda n, o: Capacity(n).btyes > Capacity(o).btyes" - name: enable_prometheus value: false value_type: BOOL diff --git a/optimize/oceanbase-ce/4.3.0/sysbench.yaml b/optimize/oceanbase-ce/4.3.0/sysbench.yaml new file mode 100644 index 0000000..9e1e80c --- /dev/null +++ b/optimize/oceanbase-ce/4.3.0/sysbench.yaml @@ -0,0 +1,14 @@ +test: + system_config: + - name: enable_sql_audit + value: 'false' + - name: sleep + value: 3 + optimizer: sleep + - name: syslog_level + value: 'ERROR' + - name: enable_perf_event + value: false + value_type: BOOL + - name: enable_record_trace_log + value: 'false' \ No newline at end of file diff --git a/optimize/oceanbase-ce/4.3.0/tpcc.yaml b/optimize/oceanbase-ce/4.3.0/tpcc.yaml new file mode 100644 index 0000000..a0bbcce --- /dev/null +++ b/optimize/oceanbase-ce/4.3.0/tpcc.yaml @@ -0,0 +1,19 @@ +build: + variables: + - name: ob_query_timeout + value: 36000000000 + - name: ob_trx_timeout + value: 36000000000 + system_config: + - name: enable_sql_audit + value: 'false' + - name: sleep + value: 5 + optimizer: sleep + - name: syslog_level + value: 'ERROR' + - name: enable_perf_event + value: false + value_type: BOOL + - name: enable_record_trace_log + value: 'false' \ No newline at end of file diff --git a/optimize/oceanbase-ce/4.3.0/tpch.yaml b/optimize/oceanbase-ce/4.3.0/tpch.yaml new file mode 100644 index 0000000..2578e10 --- /dev/null +++ b/optimize/oceanbase-ce/4.3.0/tpch.yaml @@ -0,0 +1,27 @@ +test: + system_config: + - name: enable_sql_audit + value: false + - name: syslog_level + value: PERF + - name: enable_perf_event + value: false + - name: enable_record_trace_log + value: 'false' +
variables: + - name: ob_sql_work_area_percentage + value: 80 + - name: ob_query_timeout + value: 36000000000 + - name: ob_trx_timeout + value: 36000000000 + - name: max_allowed_packet + value: 67108864 + - name: secure_file_priv + value: '/' + - name: parallel_servers_target + value: int(max_cpu * server_num * 8) + expression: true + exec_sql: + - name: clean_cache + optimizer: clean_cache \ No newline at end of file diff --git a/plugins/general/0.1/db_connect.py b/plugins/general/0.1/db_connect.py index 5b5c3b4..6ab7f9a 100644 --- a/plugins/general/0.1/db_connect.py +++ b/plugins/general/0.1/db_connect.py @@ -92,13 +92,13 @@ def connect(): port = server_config.get("mysql_port") else: port = server_config.get("listen_port") - if not obclient_bin: - ret = local_execute_command('%s --help' % obclient_bin) - if not ret: - stdio.error( - '%s\n%s is not an executable file. Please use `--obclient-bin` to set.\nYou may not have obclient installed' % ( - ret.stderr, obclient_bin)) - return + # check that obclient is available + ret = local_execute_command('%s --help' % obclient_bin) + if not ret: + stdio.error( + '%s\n%s is not an executable file. Please use `--obclient-bin` to set.\nYou may not have obclient installed' % ( + ret.stderr, obclient_bin)) + return if not password: connected = test_connect() if not connected: diff --git a/plugins/general/0.1/install_repo.py b/plugins/general/0.1/install_repo.py index e81d7e3..ce419c8 100644 --- a/plugins/general/0.1/install_repo.py +++ b/plugins/general/0.1/install_repo.py @@ -26,16 +26,19 @@ from _plugin import InstallPlugin from _deploy import InnerConfigKeywords from tool import YamlLoader +from _rpm import Version def install_repo(plugin_context, obd_home, install_repository, install_plugin, check_repository, check_file_map, - msg_lv, *args, **kwargs): + requirement_map, msg_lv, *args, **kwargs): cluster_config = plugin_context.cluster_config def install_to_home_path(): repo_dir = install_repository.repository_dir.replace(obd_home, remote_obd_home, 1) if is_lib_repo: home_path = os.path.join(remote_home_path, 'lib') + elif is_jre_repo: + home_path = os.path.join(remote_home_path, 'jre') else: home_path = remote_home_path client.add_env("_repo_dir", repo_dir, True) @@ -60,11 +63,13 @@ def install_to_home_path(): else: success = client.execute_command("%(install_cmd)s ${source} ${target}" % {"install_cmd": install_cmd}) and success return success + stdio = plugin_context.stdio clients = plugin_context.clients servers = cluster_config.servers is_lib_repo = install_repository.name.endswith("-libs") is_utils_repo = install_repository.name.endswith("-utils") + is_jre_repo = install_repository.name.endswith("-jre") home_path_map = {} for server in servers: server_config = cluster_config.get_server_conf(server) @@ -154,10 +159,25 @@ def check_lib(): if not libs and not ret: stdio.error('Failed to execute repository lib check.') return - need_libs.update(libs) + if requirement_map and libs and file_item.require in requirement_map: + need_libs.add(requirement_map[file_item.require]) + elif file_item.type == InstallPlugin.FileItemType.JAR: + client.add_env('PATH', '%s/jre/bin:' % remote_home_path) + ret = client.execute_command('java -version') + if not ret: + need_libs.add(requirement_map[file_item.require]) + else: + pattern = r'version\s+\"(\d+\.\d+\.\d+_?\d*)' + match = re.search(pattern, ret.stderr) + if not match: + need_libs.add(requirement_map[file_item.require]) + else: + if Version(match.group(1)) <
Version(requirement_map[file_item.require].min_version) or \ + Version(match.group(1)) > Version(requirement_map[file_item.require].max_version): + need_libs.add(requirement_map[file_item.require]) if need_libs: for lib in need_libs: - getattr(stdio, msg_lv, '%s %s require: %s' % (server, check_repository, lib)) + getattr(stdio, msg_lv)('%s %s require: %s' % (server, check_repository, lib.name)) lib_check = False client.add_env('LD_LIBRARY_PATH', '', True) @@ -165,7 +185,7 @@ stdio.stop_loading('succeed' if lib_check else 'fail') elif msg_lv == 'warn': stdio.stop_loading('succeed' if lib_check else 'warn') - return plugin_context.return_true(checked=lib_check) + return plugin_context.return_true(checked=lib_check, requirements=need_libs) # check utils def check_utils(): diff --git a/plugins/general/0.1/telemetry_info_collect.py b/plugins/general/0.1/telemetry_info_collect.py index 1978978..53ae805 100644 --- a/plugins/general/0.1/telemetry_info_collect.py +++ b/plugins/general/0.1/telemetry_info_collect.py @@ -29,7 +29,7 @@ from tool import NetUtil, COMMAND_ENV from const import VERSION, REVISION, TELEMETRY_COMPONENT -from _environ import ENV_TELEMETRY_REPORTER +from _environ import ENV_TELEMETRY_REPORTER, ENV_OBD_ID shell_command_map = { "host_type": 'systemd-detect-virt', @@ -62,6 +62,11 @@ def wrapper(*args, **kwargs): class BaseInfo: + + @staticmethod + def uuid(): + return COMMAND_ENV.get(ENV_OBD_ID, None) + @staticmethod def reporter(): return COMMAND_ENV.get(ENV_TELEMETRY_REPORTER, TELEMETRY_COMPONENT) @@ -202,6 +207,8 @@ def init_telemetry_data(opt_data): def telemetry_base_data(): data = {} + if BaseInfo.uuid(): + data['obdID'] = BaseInfo.uuid() data['reporter'] = BaseInfo.reporter() data['reportTime'] = BaseInfo.report_time() data['eventId'] = BaseInfo.event_id() diff --git a/plugins/libobclient/2.0.1/file_map.yaml b/plugins/libobclient/2.0.1/file_map.yaml new file mode 100644 index 0000000..309f9b9 --- /dev/null +++ b/plugins/libobclient/2.0.1/file_map.yaml @@ -0,0 +1,9 @@ +- src_path: ./u01/obclient/bin + target_path: bin + type: dir +- src_path: ./u01/obclient/include + target_path: include + type: dir +- src_path: ./u01/obclient/lib + target_path: lib + type: dir diff --git a/plugins/mysqltest/3.1.0/init.py b/plugins/mysqltest/3.1.0/init.py index 6ddfe8e..2ba45e1 100644 --- a/plugins/mysqltest/3.1.0/init.py +++ b/plugins/mysqltest/3.1.0/init.py @@ -28,24 +28,14 @@ from ssh import LocalClient from tool import FileUtil from _errno import EC_MYSQLTEST_FAILE_NOT_FOUND, EC_MYSQLTEST_PARSE_CMD_FAILED - - -def parse_size(size): - _bytes = 0 - if not isinstance(size, str) or size.isdigit(): - _bytes = int(size) - else: - units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40} - match = re.match(r'(0|[1-9][0-9]*)\s*([B,K,M,G,T])', size.upper()) - _bytes = int(match.group(1)) * units[match.group(2)] - return _bytes +from _types import Capacity def get_memory_limit(cursor, client): try: memory_limit = cursor.fetchone('show parameters where name = \'memory_limit\'') if memory_limit and 'value' in memory_limit and memory_limit['value']: - return parse_size(memory_limit['value']) + return Capacity(memory_limit['value']).btyes ret = client.execute_command('free -b') if ret: ret = client.execute_command("cat /proc/meminfo | grep 'MemTotal:' | awk -F' ' '{print $2}'") @@ -127,7 +117,7 @@ def exec_sql(cmd): exec_init_user_for_oracle = 'init_user_oracle.sql|SYS@oracle|SYS' client = plugin_context.clients[server] memory_limit = get_memory_limit(cursor, client) - is_mini
= memory_limit and parse_size(memory_limit) < (16<<30) + is_mini = memory_limit and Capacity(memory_limit).btyes < (16<<30) if env['is_business']: init_sql = [exec_mini_init if is_mini else exec_init, exec_init_user_for_oracle, exec_init_user] else: diff --git a/plugins/mysqltest/4.0.0.0/init.py b/plugins/mysqltest/4.0.0.0/init.py index 2b77011..0c6034e 100644 --- a/plugins/mysqltest/4.0.0.0/init.py +++ b/plugins/mysqltest/4.0.0.0/init.py @@ -30,17 +30,6 @@ from _errno import EC_MYSQLTEST_FAILE_NOT_FOUND, EC_MYSQLTEST_PARSE_CMD_FAILED -def parse_size(size): - _bytes = 0 - if not isinstance(size, str) or size.isdigit(): - _bytes = int(size) - else: - units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40} - match = re.match(r'(0|[1-9][0-9]*)\s*([B,K,M,G,T])', size.upper()) - _bytes = int(match.group(1)) * units[match.group(2)] - return _bytes - - def init(plugin_context, env, *args, **kwargs): def get_root_server(cursor): while True: diff --git a/plugins/obagent/0.1/generate_config.py b/plugins/obagent/0.1/generate_config.py index efa9d1a..777fafb 100644 --- a/plugins/obagent/0.1/generate_config.py +++ b/plugins/obagent/0.1/generate_config.py @@ -81,4 +81,4 @@ def generate_random_password(cluster_config): add_components = cluster_config.get_deploy_added_components() global_config = cluster_config.get_original_global_conf() if cluster_config.name in add_components and 'http_basic_auth_password' not in global_config: - cluster_config.update_global_conf('http_basic_auth_password', ConfigUtil.get_random_pwd_by_total_length(), save=False) + cluster_config.update_global_conf('http_basic_auth_password', ConfigUtil.get_random_pwd_by_rule(lowercase_length=3, uppercase_length=3, digits_length=3, punctuation_length=0), save=False) diff --git a/plugins/obagent/1.3.0/start_check.py b/plugins/obagent/1.3.0/start_check.py index a2d0395..37fb9b2 100644 --- a/plugins/obagent/1.3.0/start_check.py +++ b/plugins/obagent/1.3.0/start_check.py @@ -21,6 +21,7 @@ from __future__ import absolute_import, division, print_function import os +import re from copy import deepcopy import _errno as err @@ -93,6 +94,12 @@ def prepare_parameters(cluster_config): return env +def password_check(password): + if not re.match(r'^[\w~^*{}\[\]_\-+]+$', password): + return False + return True + + def start_check(plugin_context, init_check_status=False, strict_check=False, work_dir_check=False, work_dir_empty_check=True, precheck=False, *args, **kwargs): def check_fail(item, error, suggests=[]): status = check_status[server][item] @@ -145,6 +152,7 @@ def wait_2_pass(): check_status[server] = { 'port': err.CheckStatus(), 'parameter': err.CheckStatus(), + 'password': err.CheckStatus() } if work_dir_check: check_status[server]['dir'] = err.CheckStatus() @@ -168,6 +176,12 @@ def wait_2_pass(): continue check_pass('parameter') + # http_basic_auth_password check + http_basic_auth_password = server_config.get('http_basic_auth_password') + if http_basic_auth_password: + if not password_check(http_basic_auth_password): + critical('password', err.EC_COMPONENT_PASSWD_ERROR.format(ip=server.ip, component='obagent', key='http_basic_auth_password', rule='^[\w~^*{}\[\]_\-+]+$'), suggests=[err.SUG_OBAGENT_EDIT_HTTP_BASIC_AUTH_PASSWORD.format()]) + if work_dir_check: stdio.verbose('%s dir check' % server) if ip not in servers_dirs: diff --git a/plugins/obclient/2.0.1/check_requirement.py b/plugins/obclient/2.0.1/check_requirement.py new file mode 100644 index 0000000..9ebe6bb --- /dev/null +++ b/plugins/obclient/2.0.1/check_requirement.py @@ 
-0,0 +1,33 @@ +# coding: utf-8 +# OceanBase Deploy. +# Copyright (C) 2021 OceanBase +# +# This file is part of OceanBase Deploy. +# +# OceanBase Deploy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# OceanBase Deploy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>. + + +from __future__ import absolute_import, division, print_function + +import os +from _plugin import InstallPlugin + + +def check_requirement(plugin_context, file_map=None, requirement_map=None, *args, **kwargs): + lib_check = False + need_libs = set() + for file_item in file_map.values(): + if file_item.type == InstallPlugin.FileItemType.BIN: + need_libs.add(requirement_map[file_item.require]) + return plugin_context.return_true(checked=lib_check, requirements=need_libs) diff --git a/plugins/obclient/2.0.1/file_map.yaml b/plugins/obclient/2.0.1/file_map.yaml new file mode 100644 index 0000000..0aeac4a --- /dev/null +++ b/plugins/obclient/2.0.1/file_map.yaml @@ -0,0 +1,8 @@ +- src_path: ./u01/obclient/bin/obclient + target_path: bin/obclient + type: bin + mode: 755 + require: libobclient +- src_path: ./u01/obclient/bin + target_path: bin + type: dir diff --git a/plugins/obclient/2.0.1/requirement.yaml b/plugins/obclient/2.0.1/requirement.yaml new file mode 100644 index 0000000..262f4ea --- /dev/null +++ b/plugins/obclient/2.0.1/requirement.yaml @@ -0,0 +1,2 @@ +libobclient: + version: $version diff --git a/plugins/oblogproxy/2.0.0/start.py b/plugins/oblogproxy/2.0.0/start.py index e2715cc..bf5bc8c 100644 --- a/plugins/oblogproxy/2.0.0/start.py +++ b/plugins/oblogproxy/2.0.0/start.py @@ -99,9 +99,11 @@ def prepare_conf(repositories, cluster_config, clients, stdio): config['ob_sys_password'] = client.execute_command("{}/bin/logproxy -x {}".format(home_path, ob_sys_password)).stdout.strip() if ob_sys_password else "" config['binlog_log_bin_basename'] = custom_config.get('binlog_dir') if custom_config.get('binlog_dir') else '%s/run' % home_path if not custom_config.get('binlog_obcdc_ce_path_template'): - config['binlog_obcdc_ce_path_template'] = '{}/obcdc/obcdc-ce-%d.x-access/libobcdc.so'.format(home_path) + source_binlog_path = config['binlog_obcdc_ce_path_template'] + config['binlog_obcdc_ce_path_template'] = os.path.join(home_path, source_binlog_path[source_binlog_path.find('/obcdc/') + 1:]) if not custom_config.get('oblogreader_obcdc_ce_path_template'): - config['oblogreader_obcdc_ce_path_template'] = '{}/obcdc/obcdc-ce-%d.x-access/libobcdc.so'.format(home_path) + source_oblogreader_path = config['oblogreader_obcdc_ce_path_template'] + config['oblogreader_obcdc_ce_path_template'] = os.path.join(home_path, source_oblogreader_path[source_oblogreader_path.find('/obcdc/') + 1:]) if not custom_config.get('bin_path'): config['bin_path'] = '{}/bin'.format(home_path) if not custom_config.get('oblogreader_path'): diff --git a/plugins/obproxy/3.1.0/display.py b/plugins/obproxy/3.1.0/display.py index 2531027..b79cb73 100644 --- a/plugins/obproxy/3.1.0/display.py +++ b/plugins/obproxy/3.1.0/display.py @@ -69,7 +69,7 @@ def display(plugin_context, cursor,
*args, **kwargs): cmd = 'obclient -h%s -P%s -u%s %s-Doceanbase -A \n' % (server.ip, server_config['listen_port'], user, '-p%s ' % passwd_format(password) if password else '') break - if not with_observer: + if (with_observer and server_config.get('obproxy_sys_password', '')) or not with_observer: user = 'root@proxysys' password = server_config.get('obproxy_sys_password', '') info_dict['user'] = user diff --git a/plugins/obproxy/3.1.0/generate_config.py b/plugins/obproxy/3.1.0/generate_config.py index d0aaf26..79ec04b 100644 --- a/plugins/obproxy/3.1.0/generate_config.py +++ b/plugins/obproxy/3.1.0/generate_config.py @@ -20,7 +20,7 @@ from __future__ import absolute_import, division, print_function -import hashlib +from collections import defaultdict from tool import ConfigUtil @@ -63,6 +63,14 @@ def generate_config(plugin_context, generate_config_mini=False, auto_depend=Fals generate_configs['global']['proxy_mem_limited'] = '500M' cluster_config.update_global_conf('proxy_mem_limited', '500M', False) + # write required memory into resource namespace + resource = plugin_context.namespace.get_variable("required_resource") + if resource is None: + resource = defaultdict(lambda: defaultdict(dict)) + plugin_context.namespace.set_variable("required_resource", resource) + for server in cluster_config.servers: + resource[cluster_config.name]['memory'][server.ip] = cluster_config.get_global_conf_with_default()['proxy_mem_limited'] + if auto_depend: for depend in ['oceanbase', 'oceanbase-ce']: if cluster_config.add_depend_component(depend): @@ -77,4 +85,4 @@ def generate_random_password(cluster_config): add_components = cluster_config.get_deploy_added_components() global_config = cluster_config.get_original_global_conf() if cluster_config.name in add_components and 'obproxy_sys_password' not in global_config: - cluster_config.update_global_conf('obproxy_sys_password', ConfigUtil.get_random_pwd_by_total_length(), False) \ No newline at end of file + cluster_config.update_global_conf('obproxy_sys_password', ConfigUtil.get_random_pwd_by_total_length(), False) diff --git a/plugins/obproxy/3.1.0/parameter.yaml b/plugins/obproxy/3.1.0/parameter.yaml index cd314b5..212ef91 100644 --- a/plugins/obproxy/3.1.0/parameter.yaml +++ b/plugins/obproxy/3.1.0/parameter.yaml @@ -443,7 +443,7 @@ type: STRING default: '' need_restart: false - description_en: password pf obproxy sys user + description_en: password of obproxy sys user - name: observer_sys_password type: STRING default: '' diff --git a/plugins/obproxy/3.1.0/restart.py b/plugins/obproxy/3.1.0/restart.py index ad82208..78d9209 100644 --- a/plugins/obproxy/3.1.0/restart.py +++ b/plugins/obproxy/3.1.0/restart.py @@ -121,7 +121,7 @@ def restart(self): cluster_config = self.new_cluster_config if self.new_cluster_config else self.cluster_config need_bootstrap = self.bootstrap_plugin is not None - if not self.call_plugin(self.start_plugin, clients=clients, cluster_config=cluster_config, local_home_path=self.local_home_path, repository=self.repository, need_bootstrap=need_bootstrap): + if not self.call_plugin(self.start_plugin, clients=clients, cluster_config=cluster_config, local_home_path=self.local_home_path, repository=self.repository): self.rollback() self.stdio.stop_loading('stop_loading', 'fail') return False diff --git a/plugins/obproxy/4.2.3/generate_config.py b/plugins/obproxy/4.2.3/generate_config.py new file mode 100644 index 0000000..f931cfd --- /dev/null +++ b/plugins/obproxy/4.2.3/generate_config.py @@ -0,0 +1,93 @@ +# coding: utf-8 +# OceanBase 
Deploy. +# Copyright (C) 2021 OceanBase +# +# This file is part of OceanBase Deploy. +# +# OceanBase Deploy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# OceanBase Deploy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>. + + +from __future__ import absolute_import, division, print_function + +import hashlib +import random + +from tool import ConfigUtil + + +def generate_config(plugin_context, generate_config_mini=False, auto_depend=False, return_generate_keys=False, only_generate_password=False, generate_password=True, *args, **kwargs): + if return_generate_keys: + generate_keys = [] + if generate_password: + generate_keys += ['obproxy_sys_password'] + if not only_generate_password: + generate_keys += ['skip_proxy_sys_private_check', 'enable_strict_kernel_release', 'enable_cluster_checkout', 'proxy_mem_limited'] + return plugin_context.return_true(generate_keys=generate_keys) + + cluster_config = plugin_context.cluster_config + random_num = random.randint(1, 8191 - len(cluster_config.servers)) + num = 0 + for server in cluster_config.servers: + server_config = cluster_config.get_server_conf(server) + client_session_id_version = server_config.get('client_session_id_version', 2) + + if client_session_id_version == 2: + if server_config.get('proxy_id', None) is None: + cluster_config.update_server_conf(server, 'proxy_id', random_num + num, False) + cluster_config.update_server_conf(server, 'client_session_id_version', client_session_id_version, False) + num += 1 + + if generate_password: + generate_random_password(cluster_config) + if only_generate_password: + return plugin_context.return_true() + + stdio = plugin_context.stdio + generate_configs = {'global': {}} + plugin_context.set_variable('generate_configs', generate_configs) + stdio.start_loading('Generate obproxy configuration') + + global_config = cluster_config.get_original_global_conf() + if 'skip_proxy_sys_private_check' not in global_config: + generate_configs['global']['skip_proxy_sys_private_check'] = True + cluster_config.update_global_conf('skip_proxy_sys_private_check', True, False) + + if 'enable_strict_kernel_release' not in global_config: + generate_configs['global']['enable_strict_kernel_release'] = False + cluster_config.update_global_conf('enable_strict_kernel_release', False, False) + + if 'enable_cluster_checkout' not in global_config: + generate_configs['global']['enable_cluster_checkout'] = False + cluster_config.update_global_conf('enable_cluster_checkout', False, False) + + if generate_config_mini: + if 'proxy_mem_limited' not in global_config: + generate_configs['global']['proxy_mem_limited'] = '500M' + cluster_config.update_global_conf('proxy_mem_limited', '500M', False) + + if auto_depend: + for depend in ['oceanbase', 'oceanbase-ce']: + if cluster_config.add_depend_component(depend): + stdio.stop_loading('succeed') + return plugin_context.return_true() + + stdio.stop_loading('succeed') + return plugin_context.return_true() + + +def generate_random_password(cluster_config): + add_components =
cluster_config.get_deploy_added_components() + global_config = cluster_config.get_original_global_conf() + if cluster_config.name in add_components and 'obproxy_sys_password' not in global_config: + cluster_config.update_global_conf('obproxy_sys_password', ConfigUtil.get_random_pwd_by_total_length(), False) \ No newline at end of file diff --git a/plugins/obproxy/4.2.3/obproxyd.sh b/plugins/obproxy/4.2.3/obproxyd.sh new file mode 100644 index 0000000..bd9b126 --- /dev/null +++ b/plugins/obproxy/4.2.3/obproxyd.sh @@ -0,0 +1,45 @@ + +path=$1 +ip=$2 +port=$3 + +function start() { + obproxyd_path=$path/run/obproxyd-$ip-$port.pid + obproxy_path=$path/run/obproxy-$ip-$port.pid + + cat $obproxyd_path | xargs kill -9 + + echo $$ > $obproxyd_path + if [ $? != 0 ]; then + exit $? + fi + + pid=`cat $obproxy_path` + ls /proc/$pid > /dev/null + if [ $? != 0 ]; then + exit $? + fi + kill -9 $pid + + while [ 1 ]; + do + sleep 1 + ls /proc/$pid > /dev/null + if [ $? != 0 ]; then + cd $path + $path/bin/obproxy --listen_port $port + pid=`ps -aux | egrep "$path/bin/obproxy --listen_port $port$" | grep -v grep | awk '{print $2}'` + echo $pid > $obproxy_path + if [ $? != 0 ]; then + exit $? + fi + fi + done +} + +if [ "$4" == "daemon" ] +then + start +else + nohup bash $0 $path $ip $port daemon > /dev/null 2>&1 & +fi \ No newline at end of file diff --git a/plugins/obproxy/4.2.3/parameter.yaml b/plugins/obproxy/4.2.3/parameter.yaml new file mode 100644 index 0000000..0eff7d6 --- /dev/null +++ b/plugins/obproxy/4.2.3/parameter.yaml @@ -0,0 +1,463 @@ +- name: home_path + name_local: 工作目录 + require: true + essential: true + type: PATH + need_redeploy: true + description_en: the directory for the work data file + description_local: ObProxy工作目录 +- name: listen_port + name_local: 服务端口 + require: true + essential: true + type: INT + default: 2883 + min_value: 1025 + max_value: 65535 + need_restart: true + description_en: port number for mysql connection + description_local: SQL服务协议端口号 +- name: prometheus_listen_port + name_local: Exporter 端口 + require: true + essential: true + type: INT + default: 2884 + min_value: 1025 + max_value: 65535 + need_restart: true + description_en: obproxy prometheus listen port + description_local: 提供prometheus服务端口号 +- name: appname + require: false + type: SAFE_STRING + need_restart: true + description_en: application name + description_local: 应用名 +- name: cluster_name + require: false + type: SAFE_STRING + need_restart: true + description_en: observer cluster name + description_local: 代理的observer集群名 +- name: rs_list + type: ARRAY + need_restart: true + description_en: root server list(format ip:sql_port) + description_local: observer列表(格式 ip:sql_port) +- name: proxy_mem_limited + name_local: 最大运行内存 + essential: true + type: CAPACITY + default: 2G + min_value: 100MB + max_value: 100GB + description_en: The upper limit of ODP runtime memory. If the ODP exceeds the upper limit, it will exit automatically. 
Please enter a capacity, such as 2G + description_local: ODP 运行时内存上限。超过上限 ODP 即自动退出。请输入带容量带单位的整数,如2G +- name: refresh_json_config + type: BOOL + default: false + min_value: false + max_value: true + need_restart: false + description_en: force update json info if refresh_json_config is true +- name: refresh_rslist + type: BOOL + default: false + min_value: false + max_value: true + need_restart: false + description_en: when refresh config server, update all rslist if refresh_rslist is true +- name: refresh_idc_list + type: BOOL + default: false + min_value: false + max_value: true + need_restart: false + description_en: when refresh config server, update all idc list if refresh_idc_list is true +- name: refresh_config + type: BOOL + default: false + min_value: false + max_value: true + need_restart: false + description_en: when table processor do check work, update all proxy config if refresh_config is true +- name: proxy_info_check_interval + type: TIME + default: 60s + min_value: 1s + max_value: 1h + need_restart: false + description_en: proxy info check task interval, [1s, 1h] +- name: cache_cleaner_clean_interval + type: TIME + default: 20s + min_value: 1s + max_value: 1d + need_restart: false + description_en: the interval for cache cleaner to clean cache, [1s, 1d] +- name: server_state_refresh_interval + type: TIME + default: 20s + min_value: 10ms + max_value: 1h + need_restart: false + description_en: the interval to refresh server state for getting zone or server newest state, [10ms, 1h] +- name: metadb_server_state_refresh_interval + type: TIME + default: 60s + min_value: 10ms + max_value: 1h + need_restart: false + description_en: the interval to refresh metadb server state for getting zone or server newest state, [10ms, 1h] +- name: config_server_refresh_interval + type: TIME + default: 60s + min_value: 10s + max_value: 1d + need_restart: false + description_en: config server info refresh task interval, [10s, 1d] +- name: idc_list_refresh_interval + type: TIME + default: 2h + min_value: 10s + max_value: 1d + need_restart: false + description_en: the interval to refresh idc list for getting newest region-idc, [10s, 1d] +- name: stat_table_sync_interval + type: TIME + default: 60s + min_value: 0s + max_value: 1d + need_restart: false + description_en: update sync statistic to ob_all_proxy_stat table interval, [0s, 1d], 0 means disable, if set a negative value, proxy treat it as 0 +- name: stat_dump_interval + type: TIME + default: 6000s + min_value: 0s + max_value: 1d + need_restart: false + description_en: dump statistic in log interval, [0s, 1d], 0 means disable, if set a negative value, proxy treat it as 0 +- name: partition_location_expire_relative_time + type: INT + default: 0 + min_value: -36000000 + max_value: 36000000 + need_restart: false + description_en: the unit is ms, 0 means do not expire, others will expire partition location base on relative time +- name: cluster_count_high_water_mark + type: INT + default: 256 + min_value: 2 + max_value: 102400 + need_restart: false + description_en: if cluster count is greater than this water mark, cluster will be kicked out by LRU +- name: cluster_expire_time + type: TIME + default: 1d + min_value: 0 + max_value: + need_restart: false + description_en: cluster resource expire time, 0 means never expire, cluster will be deleted if it has not been accessed for more than the time, [0, ] +- name: fetch_proxy_bin_random_time + type: TIME + default: 300s + min_value: 1s + max_value: 1h + need_restart: false + description_en: max random
waiting time of fetching proxy bin in hot upgrade, [1s, 1h] +- name: fetch_proxy_bin_timeout + type: TIME + default: 120s + min_value: 1s + max_value: 1200s + need_restart: false + description_en: default hot upgrade fetch binary timeout, proxy will stop fetching after such long time, [1s, 1200s] +- name: hot_upgrade_failure_retries + type: INT + default: 5 + min_value: 1 + max_value: 20 + need_restart: false + description_en: default hot upgrade failure retries, proxy will stop handle hot_upgrade command after such retries, [1, 20] +- name: hot_upgrade_rollback_timeout + type: TIME + default: 24h + min_value: 1s + max_value: 30d + need_restart: false + description_en: default hot upgrade rollback timeout, proxy will do rollback if receive no rollback command in such long time, [1s, 30d] +- name: hot_upgrade_graceful_exit_timeout + type: TIME + default: 120s + min_value: 0s + max_value: 30d + need_restart: false + description_en: graceful exit timeout, [0s, 30d], if set a value <= 0, proxy treat it as 0 +- name: delay_exit_time + type: TIME + default: 100ms + min_value: 100ms + max_value: 500ms + need_restart: false + description_en: delay exit time, [100ms,500ms] +- name: log_file_percentage + type: INT + default: 80 + min_value: 0 + max_value: 100 + need_restart: false + description_en: max percentage of avail size occupied by proxy log file, [0, 90], 0 means ignore such limit +- name: log_cleanup_interval + type: TIME + default: 10m + min_value: 5s + max_value: 30d + need_restart: false + description_en: log file clean up task schedule interval, set 1 day or longer, [5s, 30d] +- name: log_dir_size_threshold + type: CAPACITY + default: 64GB + min_value: 256M + max_value: 1T + need_restart: false + description_en: max usable space size of log dir, used to decide whether should clean up log file, [256MB, 1T] +- name: need_convert_vip_to_tname + type: BOOL + default: false + min_value: false + max_value: true + need_restart: false + description_en: convert vip to tenant name, which is useful in cloud +- name: long_async_task_timeout + type: TIME + default: 60s + min_value: 1s + max_value: 1h + need_restart: false + description_en: long async task timeout, [1s, 1h] +- name: short_async_task_timeout + type: TIME + default: 5s + min_value: 1s + max_value: 1h + need_restart: false + description_en: short async task timeout, [1s, 1h] +- name: username_separator + type: SAFE_STRING_LIST + default: :;-;. 
+ min_value: + max_value: + need_restart: false + description_en: username separator +- name: enable_client_connection_lru_disconnect + type: BOOL + default: false + min_value: false + max_value: true + need_restart: false + description_en: if client connections reach the throttle, true means a new connection will be accepted and an LRU client connection will be eliminated; false means the new connection will be disconnected and an error packet returned +- name: max_connections + type: INT + default: 60000 + min_value: 0 + max_value: 65535 + need_restart: false + description_en: max fd proxy could use +- name: client_max_connections + type: INT + default: 8192 + min_value: 0 + max_value: 65535 + need_restart: false + description_en: client max connections for one obproxy, [0, 65535] +- name: observer_query_timeout_delta + type: TIME + default: 20s + min_value: 1s + max_value: 30s + need_restart: false + description_en: the delta value for @@ob_query_timeout, to cover net round trip time(proxy<->server) and task schedule time(server), [1s, 30s] +- name: enable_cluster_checkout + type: BOOL + default: true + min_value: false + max_value: true + need_restart: false + description_en: if enable cluster checkout, proxy will send cluster name when login and server will check it +- name: enable_proxy_scramble + type: BOOL + default: false + min_value: false + max_value: true + need_restart: false + description_en: if enable proxy scramble, proxy will send client its variable scramble num, not support old observer +- name: enable_client_ip_checkout + type: BOOL + default: true + min_value: false + max_value: true + need_restart: false + description_en: if enabled, proxy send client ip when login +- name: connect_observer_max_retries + type: INT + default: 3 + min_value: 2 + max_value: 5 + need_restart: false + description_en: max retries to do connect +- name: frequent_accept + type: BOOL + default: true + min_value: false + max_value: true + need_restart: true + description_en: frequent accept +- name: net_accept_threads + type: INT + default: 2 + min_value: 0 + max_value: 8 + need_restart: true + description_en: net accept threads num, [0, 8] +- name: stack_size + type: CAPACITY + default: 1MB + min_value: 1MB + max_value: 10MB + need_restart: true + description_en: stack size of one thread, [1MB, 10MB] +- name: work_thread_num + type: INT + default: 128 + min_value: 1 + max_value: 128 + need_restart: true + description_en: proxy work thread num or max work thread num when automatic match, [1, 128] +- name: task_thread_num + type: INT + default: 2 + min_value: 1 + max_value: 4 + need_restart: true + description_en: proxy task thread num, [1, 4] +- name: block_thread_num + type: INT + default: 1 + min_value: 1 + max_value: 4 + need_restart: true + description_en: proxy block thread num, [1, 4] +- name: grpc_thread_num + type: INT + default: 8 + min_value: 8 + max_value: 16 + need_restart: true + description_en: proxy grpc thread num, [8, 16] +- name: grpc_client_num + type: INT + default: 9 + min_value: 9 + max_value: 16 + need_restart: true + description_en: proxy grpc client num, [9, 16] +- name: automatic_match_work_thread + type: BOOL + default: true + min_value: false + max_value: true + need_restart: true + description_en: ignore work_thread_num configuration item, use the count of cpu for current proxy work thread num +- name: enable_strict_kernel_release + require: true + type: BOOL + default: false + min_value: false + max_value: true + need_restart: true + description_en: If true, proxy only supports
5u/6u/7u redhat; otherwise the kernel release is not checked and proxy may be unstable +- name: enable_cpu_topology + type: BOOL + default: true + min_value: false + max_value: true + need_restart: true + description_en: enable cpu topology, work threads bind to cpu +- name: local_bound_ip + type: SAFE_STRING + default: 0.0.0.0 + need_restart: true + description_en: local bound ip(any) +- name: obproxy_config_server_url + type: WEB_URL + default: '' + need_restart: true + description_en: url of config info(rs list and so on) +- name: proxy_service_mode + type: SAFE_STRING + default: '' + need_restart: true + description_en: "proxy deploy and service mode: 1.client(default); 2.server" +- name: client_session_id_version + type: INT + default: 2 + max_value: 2 + min_value: 1 + need_reload: true + description_en: This parameter is used to specify whether to use the new logic to generate the client session ID. The parameter type is integer. The value range is [1, 2] and the default value is 2 (use the new logic). +- name: proxy_id + type: INT + default: 0 + max_value: 8191 + min_value: 0 + need_reload: true + description_en: This parameter is used to set the ID for an ODP. The parameter type is integer. The default value is 0 and the value range is [0, 8191]. +- name: app_name + type: SAFE_STRING + default: undefined + need_restart: true + description_en: current application name which proxy works for, need defined, only modified when restart +- name: enable_metadb_used + type: BOOL + default: true + max_value: true + min_value: false + need_restart: true + description_en: use MetaDataBase when proxy run +- name: rootservice_cluster_name + type: SAFE_STRING + default: undefined + need_restart: true + description_en: default cluster name for rootservice_list +- name: prometheus_cost_ms_unit + type: BOOL + default: true + max_value: true + min_value: false + need_restart: true + description_en: update sync metrics to prometheus exposer interval, [1s, 1h], 0 means disable, if set a negative value, proxy treat it as 0 +- name: bt_retry_times + type: INT + default: 3 + min_value: 0 + max_value: 100 + need_restart: true + description_en: beyond trust sdk retry times +- name: obproxy_sys_password + name_local: 密码 + essential: true + type: STRING + default: '' + need_restart: false + description_en: password of obproxy sys user +- name: observer_sys_password + type: STRING + default: '' + need_restart: false + description_en: password of observer proxyro user +- name: observer_root_password + type: STRING + default: '' + need_restart: false + description_en: password of observer root user diff --git a/plugins/obproxy/4.2.3/reload.py b/plugins/obproxy/4.2.3/reload.py new file mode 100644 index 0000000..a96ac5b --- /dev/null +++ b/plugins/obproxy/4.2.3/reload.py @@ -0,0 +1,130 @@ +# coding: utf-8 +# OceanBase Deploy. +# Copyright (C) 2021 OceanBase +# +# This file is part of OceanBase Deploy. +# +# OceanBase Deploy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# OceanBase Deploy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
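The new `reload.py` below diffs the old and new configuration per server, refuses keys whose parameter definition requires a restart or redeploy, and applies the rest over the sys tenant connection with `ALTER PROXYCONFIG SET`. Stripped of the bookkeeping, the apply loop reduces to this sketch (simplified to one server; `cursor.execute` returning False on failure matches how the plugin below uses OBD's cursor wrapper):

```python
# Simplified shape of the reload loop in the plugin below: diff old vs new
# config and apply each changed key via ALTER PROXYCONFIG SET.
def reload_one_server(cursor, old_config, new_config, stdio):
    ok = True
    for key, new_value in new_config.items():
        if old_config.get(key) == new_value:
            continue  # unchanged, nothing to apply
        if cursor.execute('alter proxyconfig set %s = %%s' % key, [new_value]) is False:
            stdio.verbose('failed to set %s' % key)
            ok = False
    return ok
```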
+ + +from __future__ import absolute_import, division, print_function +import _errno as err + + +def reload(plugin_context, new_cluster_config, *args, **kwargs): + stdio = plugin_context.stdio + cluster_config = plugin_context.cluster_config + servers = cluster_config.servers + cursor = plugin_context.get_return('connect').get_return('cursor') + cluster_server = {} + change_conf = {} + global_change_conf = {} + global_ret = True + + proxy_id_limits = { + 1: [1, 255], + 2: [1, 8191], + } + + for server in servers: + server_config = new_cluster_config.get_server_conf(server) + client_session_id_version = server_config.get('client_session_id_version') + proxy_id = server_config.get('proxy_id') + if proxy_id and client_session_id_version == 1: + limit_range = proxy_id_limits.get(client_session_id_version) + if limit_range: + min_limit, max_limit = limit_range + if not (min_limit <= proxy_id <= max_limit): + stdio.error(err.EC_OBPROXY_ID_OVER_LIMIT.format(id=client_session_id_version, limit=str(limit_range))) + return plugin_context.return_false() + + config_map = { + 'observer_sys_password': 'proxyro_password', + 'cluster_name': 'appname', + 'observer_root_password': 'root_password' + } + for comp in ['oceanbase', 'oceanbase-ce']: + if comp in cluster_config.depends: + root_servers = {} + ob_config = cluster_config.get_depend_config(comp) + new_ob_config = new_cluster_config.get_depend_config(comp) + ob_config = {} if ob_config is None else ob_config + new_ob_config = {} if new_ob_config is None else new_ob_config + for key in config_map: + if ob_config.get(key) != new_ob_config.get(key): + global_change_conf[config_map[key]] = new_ob_config.get(key) + + for server in servers: + change_conf[server] = {} + stdio.verbose('get %s old configuration' % (server)) + config = cluster_config.get_server_conf_with_default(server) + stdio.verbose('get %s new configuration' % (server)) + new_config = new_cluster_config.get_server_conf_with_default(server) + stdio.verbose('get %s cluster address' % (server)) + cluster_server[server] = '%s:%s' % (server.ip, config['listen_port']) + stdio.verbose('compare configuration of %s' % (server)) + reload_unused = ['observer_root_password'] + for key in new_config: + if key in reload_unused: + continue + if key not in config or config[key] != new_config[key]: + item = cluster_config.get_temp_conf_item(key) + if item: + if item.need_redeploy or item.need_restart: + stdio.verbose('%s can not be reload' % key) + global_ret = False + continue + try: + item.modify_limit(config.get(key), new_config.get(key)) + except Exception as e: + stdio.verbose('%s: %s' % (server, str(e))) + global_ret = False + continue + change_conf[server][key] = new_config[key] + if key not in global_change_conf: + global_change_conf[key] = 1 + else: + global_change_conf[key] += 1 + + servers_num = len(servers) + stdio.verbose('apply new configuration') + stdio.start_loading('Reload obproxy') + success_conf = {} + sql = '' + value = None + + for key in global_change_conf: + success_conf[key] = [] + for server in servers: + if key not in change_conf[server]: + continue + sql = 'alter proxyconfig set %s = %%s' % key + value = change_conf[server][key] if change_conf[server].get(key) is not None else '' + if cursor[server].execute(sql, [value]) is False: + global_ret = False + continue + success_conf[key].append(server) + for key in success_conf: + if global_change_conf[key] == servers_num == len(success_conf[key]): + cluster_config.update_global_conf(key, value, False) + for server in success_conf[key]: +
value = change_conf[server][key] + cluster_config.update_server_conf(server, key, value, False) + + if global_ret: + stdio.stop_loading('succeed') + return plugin_context.return_true() + else: + stdio.stop_loading('fail') + return diff --git a/plugins/obproxy/4.2.3/start.py b/plugins/obproxy/4.2.3/start.py new file mode 100644 index 0000000..f7d223b --- /dev/null +++ b/plugins/obproxy/4.2.3/start.py @@ -0,0 +1,303 @@ +# coding: utf-8 +# OceanBase Deploy. +# Copyright (C) 2021 OceanBase +# +# This file is part of OceanBase Deploy. +# +# OceanBase Deploy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# OceanBase Deploy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>. + + +from __future__ import absolute_import, division, print_function + +import os +import time +import random +import hashlib +from copy import deepcopy + +import re + +from _errno import EC_CONFLICT_PORT +from tool import NetUtil + +stdio = None + + +def get_port_socket_inode(client, port): + port = hex(port)[2:].zfill(4).upper() + cmd = "bash -c 'cat /proc/net/{tcp*,udp*}' | awk -F' ' '{print $2,$10}' | grep '00000000:%s' | awk -F' ' '{print $2}' | uniq" % port + res = client.execute_command(cmd) + if not res or not res.stdout.strip(): + return False + stdio.verbose(res.stdout) + return res.stdout.strip().split('\n') + + +def confirm_port(client, pid, port): + socket_inodes = get_port_socket_inode(client, port) + if not socket_inodes: + return False + ret = client.execute_command("ls -l /proc/%s/fd/ |grep -E 'socket:\[(%s)\]'" % (pid, '|'.join(socket_inodes))) + if ret and ret.stdout.strip(): + return True + return False + + +def confirm_command(client, pid, command): + command = command.replace(' ', '').strip() + if client.execute_command('bash -c \'cmd=`cat /proc/%s/cmdline`; if [ "$cmd" != "%s" ]; then exit 1; fi\'' % (pid, command)): + return True + return False + + +def confirm_home_path(client, pid, home_path): + if client.execute_command('path=`ls -l /proc/%s | grep cwd | awk -F\'-> \' \'{print $2}\'`; bash -c \'if [ "$path" != "%s" ]; then exit 1; fi\'' % (pid, home_path)): + return True + return False + + +def is_started(client, remote_bin_path, port, home_path, command): + username = client.config.username + ret = client.execute_command('pgrep -u %s -f "^%s"' % (username, remote_bin_path)) + if not ret: + return False + pids = ret.stdout.strip() + if not pids: + return False + pids = pids.split('\n') + for pid in pids: + if confirm_port(client, pid, port): + break + else: + return False + return confirm_home_path(client, pid, home_path) and confirm_command(client, pid, command) + + +def obproxyd(home_path, client, ip, port): + path = os.path.join(os.path.split(__file__)[0], 'obproxyd.sh') + remote_path = os.path.join(home_path, 'obproxyd.sh') + if os.path.exists(path): + shell = '''bash %s %s %s %s''' % (remote_path, home_path, ip, port) + return client.put_file(path, remote_path) and client.execute_command(shell) + return False + + +class EnvVariables(object): + + def __init__(self, environments, client): + self.environments = environments
+ self.client = client + self.env_done = {} + + def __enter__(self): + for env_key, env_value in self.environments.items(): + self.env_done[env_key] = self.client.get_env(env_key) + self.client.add_env(env_key, env_value, True) + + def __exit__(self, *args, **kwargs): + for env_key, env_value in self.env_done.items(): + if env_value is not None: + self.client.add_env(env_key, env_value, True) + else: + self.client.del_env(env_key) + + +def start(plugin_context, need_bootstrap=False, *args, **kwargs): + global stdio + cluster_config = plugin_context.cluster_config + clients = plugin_context.clients + stdio = plugin_context.stdio + options = plugin_context.options + clusters_cmd = {} + real_cmd = {} + pid_path = {} + obproxy_config_server_url = '' + + for comp in ['oceanbase', 'oceanbase-ce']: + if comp in cluster_config.depends: + root_servers = {} + ob_config = cluster_config.get_depend_config(comp) + if not ob_config: + continue + odp_config = cluster_config.get_global_conf() + for server in cluster_config.get_depend_servers(comp): + config = cluster_config.get_depend_config(comp, server) + zone = config['zone'] + if zone not in root_servers: + root_servers[zone] = '%s:%s' % (server.ip, config['mysql_port']) + depend_rs_list = ';'.join([root_servers[zone] for zone in root_servers]) + cluster_config.update_global_conf('rs_list', depend_rs_list, save=False) + + config_map = { + 'observer_sys_password': 'proxyro_password', + 'cluster_name': 'appname' + } + for key in config_map: + ob_key = config_map[key] + if key not in odp_config and ob_key in ob_config: + cluster_config.update_global_conf(key, ob_config.get(ob_key), save=False) + break + + obc_cluster_config = cluster_config.get_depend_config('ob-configserver') + if obc_cluster_config: + vip_address = obc_cluster_config.get('vip_address') + if vip_address: + obc_ip = vip_address + obc_port = obc_cluster_config.get('vip_port') + else: + server = cluster_config.get_depend_servers('ob-configserver')[0] + client = clients[server] + obc_ip = NetUtil.get_host_ip() if client.is_localhost() else server.ip + obc_port = obc_cluster_config.get('listen_port') + obproxy_config_server_url = "http://{0}:{1}/services?Action=GetObProxyConfig".format(obc_ip, obc_port) + + error = False + for server in cluster_config.servers: + server_config = cluster_config.get_server_conf(server) + if 'rs_list' not in server_config and 'obproxy_config_server_url' not in server_config and not obproxy_config_server_url: + error = True + stdio.error('%s need config "rs_list" or "obproxy_config_server_url"' % server) + if error: + return plugin_context.return_false() + + stdio.start_loading('Start obproxy') + + for server in cluster_config.servers: + client = clients[server] + server_config = cluster_config.get_server_conf(server) + home_path = server_config['home_path'] + if not client.execute_command('ls %s/etc/obproxy_config.bin' % home_path): + need_bootstrap = True + break + + if getattr(options, 'without_parameter', False) and need_bootstrap is False: + use_parameter = False + else: + # Bootstrap is required when starting with parameter, ensure the passwords are correct. 
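+        # When parameters are pushed, the start command below is assembled as
+        # 'obproxy -o key=value,...' from the server config (obproxy_sys_password is passed
+        # as its sha1 digest), so a bootstrap is forced to keep the stored passwords
+        # consistent; otherwise obproxy is started with '--listen_port' alone.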
+ need_bootstrap = True + use_parameter = True + + for server in cluster_config.servers: + client = clients[server] + server_config = cluster_config.get_server_conf(server) + home_path = server_config['home_path'] + if not server_config.get('obproxy_config_server_url') and obproxy_config_server_url: + server_config['obproxy_config_server_url'] = obproxy_config_server_url + + pid_path[server] = "%s/run/obproxy-%s-%s.pid" % (home_path, server.ip, server_config["listen_port"]) + + if use_parameter: + not_opt_str = [ + 'listen_port', + 'prometheus_listen_port', + 'rs_list', + 'cluster_name' + ] + start_unuse = ['home_path', 'observer_sys_password', 'obproxy_sys_password', 'observer_root_password'] + get_value = lambda key: "'%s'" % server_config[key] if isinstance(server_config[key], str) else server_config[key] + opt_str = [] + if server_config.get('obproxy_sys_password'): + obproxy_sys_password = hashlib.sha1(server_config['obproxy_sys_password'].encode("utf-8")).hexdigest() + else: + obproxy_sys_password = '' + if server_config.get('proxy_id'): + opt_str.append("client_session_id_version=%s,proxy_id=%s" % (server_config.get('client_session_id_version', 2), server_config.get('proxy_id'))) + opt_str.append("obproxy_sys_password='%s'" % obproxy_sys_password) + for key in server_config: + if key not in start_unuse and key not in not_opt_str: + value = get_value(key) + opt_str.append('%s=%s' % (key, value)) + cmd = ['-o %s' % ','.join(opt_str)] + for key in not_opt_str: + if key in server_config: + value = get_value(key) + cmd.append('--%s %s' % (key, value)) + else: + cmd = ['--listen_port %s' % server_config.get('listen_port')] + + real_cmd[server] = '%s/bin/obproxy %s' % (home_path, ' '.join(cmd)) + clusters_cmd[server] = 'cd %s; %s' % (home_path, real_cmd[server]) + + for server in clusters_cmd: + environments = deepcopy(cluster_config.get_environments()) + client = clients[server] + server_config = cluster_config.get_server_conf(server) + port = int(server_config["listen_port"]) + prometheus_port = int(server_config["prometheus_listen_port"]) + stdio.verbose('%s port check' % server) + remote_pid = client.execute_command("cat %s" % pid_path[server]).stdout.strip() + cmd = real_cmd[server].replace('\'', '') + if remote_pid: + ret = client.execute_command('ls /proc/%s/' % remote_pid) + if ret: + if confirm_port(client, remote_pid, port): + continue + stdio.stop_loading('fail') + stdio.error(EC_CONFLICT_PORT.format(server=server.ip, port=port)) + return plugin_context.return_false() + + stdio.verbose('starting %s obproxy', server) + if 'LD_LIBRARY_PATH' not in environments: + environments['LD_LIBRARY_PATH'] = '%s/lib:' % server_config['home_path'] + with EnvVariables(environments, client): + ret = client.execute_command(clusters_cmd[server]) + if not ret: + stdio.stop_loading('fail') + stdio.error('failed to start %s obproxy: %s' % (server, ret.stderr)) + return plugin_context.return_false() + client.execute_command('''ps -aux | grep -e '%s$' | grep -v grep | awk '{print $2}' > %s''' % (cmd, pid_path[server])) + stdio.stop_loading('succeed') + + stdio.start_loading('obproxy program health check') + failed = [] + servers = cluster_config.servers + count = 300 + while servers and count: + count -= 1 + tmp_servers = [] + for server in servers: + server_config = cluster_config.get_server_conf(server) + client = clients[server] + stdio.verbose('%s program health check' % server) + remote_pid = client.execute_command("cat %s" % pid_path[server]).stdout.strip() + if remote_pid: + for pid in 
re.findall('\d+',remote_pid): + confirm = confirm_port(client, pid, int(server_config["listen_port"])) + if confirm: + proxyd_Pid_path = os.path.join(server_config["home_path"], 'run/obproxyd-%s-%d.pid' % (server.ip, server_config["listen_port"])) + if client.execute_command("pid=`cat %s` && ls /proc/$pid" % proxyd_Pid_path): + stdio.verbose('%s obproxy[pid: %s] started', server, pid) + else: + client.execute_command('echo %s > %s' % (pid, pid_path[server])) + obproxyd(server_config["home_path"], client, server.ip, server_config["listen_port"]) + tmp_servers.append(server) + break + stdio.verbose('failed to start %s obproxy, remaining retries: %d' % (server, count)) + if count: + tmp_servers.append(server) + else: + failed.append('failed to start %s obproxy' % server) + else: + failed.append('failed to start %s obproxy' % server) + servers = tmp_servers + if servers and count: + time.sleep(1) + if failed: + stdio.stop_loading('fail') + for msg in failed: + stdio.warn(msg) + plugin_context.return_false() + else: + stdio.stop_loading('succeed') + plugin_context.return_true(need_bootstrap=need_bootstrap) diff --git a/plugins/obproxy/4.2.3/start_check.py b/plugins/obproxy/4.2.3/start_check.py new file mode 100644 index 0000000..4e99782 --- /dev/null +++ b/plugins/obproxy/4.2.3/start_check.py @@ -0,0 +1,216 @@ +# coding: utf-8 +# OceanBase Deploy. +# Copyright (C) 2021 OceanBase +# +# This file is part of OceanBase Deploy. +# +# OceanBase Deploy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# OceanBase Deploy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with OceanBase Deploy. If not, see . 
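+#
+# start_check validates obproxy 4.2.3 before start: listen/prometheus port conflicts (looked
+# up through socket inodes under /proc/net), home_path emptiness and permissions when
+# work_dir_check is set, and the proxy_id range implied by client_session_id_version.
+# A rough sketch of the port lookup below (hypothetical port value):
+#
+#   port_hex = hex(2883)[2:].zfill(4).upper()   # 2883 -> '0B43'
+#   # a /proc/net/tcp row in state 0A (LISTEN) whose local_address ends in ':0B43' means the
+#   # port is already taken; the row's inode column identifies the owning socket.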
+
+from __future__ import absolute_import, division, print_function
+
+import os
+import _errno as err
+
+
+stdio = None
+success = True
+
+
+def get_port_socket_inode(client, port):
+    port = hex(port)[2:].zfill(4).upper()
+    cmd = "bash -c 'cat /proc/net/{tcp*,udp*}' | awk -F' ' '{if($4==\"0A\") print $2,$4,$10}' | grep ':%s' | awk -F' ' '{print $3}' | uniq" % port
+    res = client.execute_command(cmd)
+    if not res or not res.stdout.strip():
+        return False
+    stdio.verbose(res.stdout)
+    return res.stdout.strip().split('\n')
+
+
+def start_check(plugin_context, init_check_status=False, strict_check=False, work_dir_check=False, work_dir_empty_check=True, precheck=False, *args, **kwargs):
+    def check_pass(item):
+        status = check_status[server]
+        if status[item].status == err.CheckStatus.WAIT:
+            status[item].status = err.CheckStatus.PASS
+    def check_fail(item, error, suggests=[]):
+        status = check_status[server][item]
+        if status.status == err.CheckStatus.WAIT:
+            status.error = error
+            status.suggests = suggests
+            status.status = err.CheckStatus.FAIL
+    def wait_2_pass():
+        status = check_status[server]
+        for item in status:
+            check_pass(item)
+    def alert(item, error, suggests=[]):
+        global success
+        if strict_check:
+            success = False
+            check_fail(item, error, suggests)
+            stdio.error(error)
+        else:
+            stdio.warn(error)
+    def critical(item, error, suggests=[]):
+        global success
+        success = False
+        check_fail(item, error, suggests)
+        stdio.error(error)
+
+    global stdio, success
+    success = True
+    cluster_config = plugin_context.cluster_config
+    clients = plugin_context.clients
+    stdio = plugin_context.stdio
+    servers_port = {}
+    check_status = {}
+    servers_dirs = {}
+    servers_check_dirs = {}
+
+    plugin_context.set_variable('start_check_status', check_status)
+    for server in cluster_config.servers:
+        check_status[server] = {
+            'port': err.CheckStatus(),
+        }
+        if work_dir_check:
+            check_status[server]['dir'] = err.CheckStatus()
+
+        for comp in ["oceanbase", "oceanbase-ce"]:
+            if comp in cluster_config.depends:
+                check_status[server]['password'] = err.CheckStatus()
+                check_status[server]['proxy_id'] = err.CheckStatus()
+
+    if init_check_status:
+        return plugin_context.return_true(start_check_status=check_status)
+
+    stdio.start_loading('Check before start obproxy')
+
+    global_config = cluster_config.get_original_global_conf()
+    key = 'observer_sys_password'
+    for comp in ["oceanbase", "oceanbase-ce"]:
+        if comp in cluster_config.depends:
+            if key in global_config:
+                alert('password',
+                      err.WC_PARAM_USELESS.format(key=key, current_comp=cluster_config.name, comp=comp),
+                      [err.SUG_OB_SYS_PASSWORD.format()]
+                      )
+            break
+
+    for server in cluster_config.servers:
+        ip = server.ip
+        client = clients[server]
+        server_config = cluster_config.get_server_conf(server)
+        port = int(server_config["listen_port"])
+        if not precheck:
+            remote_pid_path = "%s/run/obproxy-%s-%s.pid" % (server_config['home_path'], server.ip, server_config["listen_port"])
+            remote_pid = client.execute_command("cat %s" % remote_pid_path).stdout.strip()
+            if remote_pid:
+                if client.execute_command('ls /proc/%s/fd' % remote_pid):
+                    stdio.verbose('%s is running, skip' % server)
+                    wait_2_pass()
+                    continue
+
+        if work_dir_check:
+            stdio.verbose('%s dir check' % server)
+            if ip not in servers_dirs:
+                servers_dirs[ip] = {}
+                servers_check_dirs[ip] = {}
+            dirs = servers_dirs[ip]
+            check_dirs = servers_check_dirs[ip]
+            key = 'home_path'
+            path = server_config.get(key)
+            suggests = [err.SUG_CONFIG_CONFLICT_DIR.format(key=key, server=server)]
+            if
path in dirs and dirs[path]: + critical('dir', err.EC_CONFIG_CONFLICT_DIR.format(server1=server, path=path, server2=dirs[path]['server'], key=dirs[path]['key']), suggests) + dirs[path] = { + 'server': server, + 'key': key, + } + empty_check = work_dir_empty_check + while True: + if path in check_dirs: + if check_dirs[path] != True: + critical('dir', check_dirs[path], suggests) + break + + if client.execute_command('bash -c "[ -a %s ]"' % path): + is_dir = client.execute_command('[ -d {} ]'.format(path)) + has_write_permission = client.execute_command('[ -w {} ]'.format(path)) + if is_dir and has_write_permission: + if empty_check: + ret = client.execute_command('ls %s' % path) + if not ret or ret.stdout.strip(): + check_dirs[path] = err.EC_FAIL_TO_INIT_PATH.format(server=server, key=key, msg=err.InitDirFailedErrorMessage.NOT_EMPTY.format(path=path)) + else: + check_dirs[path] = True + else: + check_dirs[path] = True + else: + if not is_dir: + check_dirs[path] = err.EC_FAIL_TO_INIT_PATH.format(server=server, key=key, msg=err.InitDirFailedErrorMessage.NOT_DIR.format(path=path)) + else: + check_dirs[path] = err.EC_FAIL_TO_INIT_PATH.format(server=server, key=key, msg=err.InitDirFailedErrorMessage.PERMISSION_DENIED.format(path=path)) + else: + path = os.path.dirname(path) + empty_check = False + + if ip not in servers_port: + servers_port[ip] = {} + ports = servers_port[ip] + server_config = cluster_config.get_server_conf_with_default(server) + stdio.verbose('%s port check' % server) + for key in ['listen_port', 'prometheus_listen_port']: + port = int(server_config[key]) + alert_f = alert if key == 'prometheus_listen_port' else critical + if port in ports: + alert_f( + 'port', + err.EC_CONFIG_CONFLICT_PORT.format(server1=server, port=port, server2=ports[port]['server'], key=ports[port]['key']), + [err.SUG_PORT_CONFLICTS.format()] + ) + continue + ports[port] = { + 'server': server, + 'key': key + } + if get_port_socket_inode(client, port): + alert_f( + 'port', + err.EC_CONFLICT_PORT.format(server=ip, port=port), + [err.SUG_USE_OTHER_PORT.format()] + ) + + new_cluster_config = kwargs.get('new_cluster_config', None) + if new_cluster_config: + server_config = new_cluster_config.get_server_conf_with_default(server) + client_session_id_version = server_config.get('client_session_id_version') + proxy_id = server_config.get('proxy_id') + proxy_id_limits = { + 1: [1, 255], + 2: [1, 8191], + } + if proxy_id: + limit_range = proxy_id_limits.get(client_session_id_version) + if limit_range: + min_limit, max_limit = limit_range + if not (min_limit <= proxy_id <= max_limit): + critical('proxy_id', err.EC_OBPROXY_ID_OVER_LIMIT.format(id=client_session_id_version, limit=str(limit_range))) + + for server in cluster_config.servers: + wait_2_pass() + + if success: + stdio.stop_loading('succeed') + plugin_context.return_true() + else: + stdio.stop_loading('fail') \ No newline at end of file diff --git a/plugins/obproxy/4.2.3/upgrade.py b/plugins/obproxy/4.2.3/upgrade.py new file mode 100644 index 0000000..f78f097 --- /dev/null +++ b/plugins/obproxy/4.2.3/upgrade.py @@ -0,0 +1,84 @@ +# coding: utf-8 +# OceanBase Deploy. +# Copyright (C) 2021 OceanBase +# +# This file is part of OceanBase Deploy. +# +# OceanBase Deploy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+#
+# OceanBase Deploy is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with OceanBase Deploy. If not, see .
+
+
+from __future__ import absolute_import, division, print_function
+
+import random
+
+
+def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, install_repository_to_servers, *args, **kwargs):
+    namespace = plugin_context.namespace
+    namespaces = plugin_context.namespaces
+    deploy_name = plugin_context.deploy_name
+    deploy_status = plugin_context.deploy_status
+    repositories = plugin_context.repositories
+    plugin_name = plugin_context.plugin_name
+
+    components = plugin_context.components
+    clients = plugin_context.clients
+    cluster_config = plugin_context.cluster_config
+    cmds = plugin_context.cmds
+    options = plugin_context.options
+    dev_mode = plugin_context.dev_mode
+    stdio = plugin_context.stdio
+
+    upgrade_ctx = kwargs.get('upgrade_ctx')
+    local_home_path = kwargs.get('local_home_path')
+    upgrade_repositories = kwargs.get('upgrade_repositories')
+
+    cur_repository = upgrade_repositories[0]
+    dest_repository = upgrade_repositories[-1]
+    repository_dir = dest_repository.repository_dir
+    kwargs['repository_dir'] = repository_dir
+
+    stop_plugin = search_py_script_plugin([cur_repository], 'stop')[cur_repository]
+    start_plugin = search_py_script_plugin([dest_repository], 'start')[dest_repository]
+    connect_plugin = search_py_script_plugin([dest_repository], 'connect')[dest_repository]
+    display_plugin = search_py_script_plugin([dest_repository], 'display')[dest_repository]
+    bootstrap_plugin = search_py_script_plugin([dest_repository], 'bootstrap')[dest_repository]
+
+
+    apply_param_plugin(cur_repository)
+    if not stop_plugin(namespace, namespaces, deploy_name, deploy_status, repositories, components, clients, cluster_config, cmds, options, stdio, *args, **kwargs):
+        return
+    install_repository_to_servers(cluster_config.name, cluster_config, dest_repository, clients)
+
+    random_num = random.randint(1, 8191 - len(cluster_config.servers))
+    num = 0
+    for server in cluster_config.servers:
+        server_config = cluster_config.get_server_conf(server)
+        server_config_default = cluster_config.get_server_conf_with_default(server)
+        client_session_id_version = server_config.get('client_session_id_version', None)
+
+        if client_session_id_version in [None, 2]:
+            cluster_config.update_server_conf(server, 'client_session_id_version', 2, False)
+            if server_config.get('proxy_id', None) is None:
+                cluster_config.update_server_conf(server, 'proxy_id', random_num + num, False)
+                num += 1
+
+    apply_param_plugin(dest_repository)
+    if not start_plugin(namespace, namespaces, deploy_name, deploy_status, repositories, components, clients, cluster_config, cmds, options, stdio, need_bootstrap=True, *args, **kwargs):
+        return
+
+    ret = connect_plugin(namespace, namespaces, deploy_name, deploy_status, repositories, components, clients, cluster_config, cmds, options, stdio, *args, **kwargs)
+    if ret:
+        if bootstrap_plugin(namespace, namespaces, deploy_name, deploy_status, repositories, components, clients, cluster_config, cmds, options, stdio, ret.get_return('cursor'), *args, **kwargs) and display_plugin(namespace, namespaces, deploy_name, deploy_status, repositories, components, clients, cluster_config, cmds, options, stdio, ret.get_return('cursor'), *args, **kwargs):
+            upgrade_ctx['index'] = len(upgrade_repositories)
+            return plugin_context.return_true()
diff --git a/plugins/oceanbase-diagnostic-tool/1.0/file_map.yaml b/plugins/oceanbase-diagnostic-tool/1.0/file_map.yaml
index 5fc962f..5021b3b 100644
--- a/plugins/oceanbase-diagnostic-tool/1.0/file_map.yaml
+++ b/plugins/oceanbase-diagnostic-tool/1.0/file_map.yaml
@@ -1,3 +1,3 @@
-- src_path: ./usr/local/
+- src_path: ./usr/local/oceanbase-diagnostic-tool
   target_path: ''
   type: dir
\ No newline at end of file
diff --git a/plugins/oceanbase-diagnostic-tool/1.0/gather_plan_monitor.py b/plugins/oceanbase-diagnostic-tool/1.0/gather_plan_monitor.py
index 86e5237..4a426d4 100644
--- a/plugins/oceanbase-diagnostic-tool/1.0/gather_plan_monitor.py
+++ b/plugins/oceanbase-diagnostic-tool/1.0/gather_plan_monitor.py
@@ -44,6 +44,8 @@ def get_obdiag_cmd():
         )
     if store_dir_option:
         cmd = cmd + r" --store_dir {store_dir_option}".format(store_dir_option=store_dir_option)
+    if env_option:
+        cmd = cmd + r" --env '{env_option}'".format(env_option=env_option)
     return cmd
 
 def run():
@@ -57,6 +59,10 @@ def run():
     store_dir_option = os.path.abspath(get_option('store_dir'))
     obdiag_install_dir = get_option('obdiag_dir')
     trace_id = get_option('trace_id')
+    env_option = get_option('env')
+    if not trace_id:
+        stdio.error("failed to get --trace_id option, example: obd obdiag gather plan_monitor {0} --trace_id ".format(plugin_context.deploy_name))
+        return plugin_context.return_false()
 
     ret = local_execute_command('%s --help' % obdiag_bin)
     if not ret:
diff --git a/plugins/oceanbase-diagnostic-tool/1.0/pre_check.py b/plugins/oceanbase-diagnostic-tool/1.0/pre_check.py
index 2969d54..cc17d7a 100644
--- a/plugins/oceanbase-diagnostic-tool/1.0/pre_check.py
+++ b/plugins/oceanbase-diagnostic-tool/1.0/pre_check.py
@@ -21,7 +21,6 @@
 from __future__ import absolute_import, division, print_function
 
 import os
-import os
 from copy import deepcopy
 import re
 from ssh import LocalClient
@@ -30,7 +29,8 @@
 import _errno as err
 from tool import DirectoryUtil
 
-def pre_check(plugin_context, gather_type=None, obdiag_path='', obdiag_new_version='1.0', utils_work_dir_check=False, version_check=False, *args, **kwargs):
+def pre_check(plugin_context, gather_type=None, utils_work_dir_check=False, *args, **kwargs):
+
     def utils_work_dir_checker(util_name):
         clients = plugin_context.clients
         cluster_config = plugin_context.cluster_config
@@ -49,27 +49,6 @@ def utils_work_dir_checker(util_name):
         stdio.stop_loading('succeed')
         return True
 
-    def version_checker():
-        client = LocalClient
-        check_status = {}
-        ret = client.execute_command('cd {} && ./obdiag version'.format(obdiag_path))
-        if not ret:
-            check_status = {'version_checker_status': False, 'obdiag_version': obdiag_new_version, 'obdiag_found': False}
-            return check_status
-        version_pattern = r'OceanBase\sDiagnostic\sTool:\s+(\d+\.\d+.\d+)'
-        found = re.search(version_pattern, ret.stdout) or re.search(version_pattern, ret.stderr)
-        if not found:
-            check_status = {'version_checker_status': False, 'obdiag_version': obdiag_new_version, 'obdiag_found': False}
-            return check_status
-        else:
-            major_version = found.group(1)
-            if Version(major_version) > Version(obdiag_new_version):
-                check_status = {'version_checker_status': True, 'obdiag_version': major_version, 'obdiag_found': True}
-                return check_status
-            else:
-                check_status = {'version_checker_status': False, 'obdiag_version': major_version, 'obdiag_found': True}
-                return check_status
-
     def store_dir_checker_and_handler():
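+        # store_dir is optional: when the option is set, the directory is created up front
+        # (DirectoryUtil.mkdir) so obdiag has a writable place for its results; a mkdir
+        # failure fails the pre-check.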
         store_dir_option = getattr(plugin_context.options, 'store_dir', None)
         if (store_dir_option is not None) and (not DirectoryUtil.mkdir(store_dir_option, stdio=stdio)):
@@ -78,25 +57,18 @@ def store_dir_checker_and_handler():
         return True
 
     stdio = plugin_context.stdio
+
     utils_work_dir_check_status = True
-    version_check_status = True
-    obdiag_version = obdiag_new_version
-    obdiag_found = True
    skip = True
    if utils_work_dir_check:
        if gather_type in ['gather_clog', 'gather_slog', 'gather_all']:
            utils_work_dir_check_status = utils_work_dir_checker('ob_admin')
            if gather_type != 'gather_all':
                skip = False
-    if version_check:
-        res = version_checker()
-        version_check_status = res['version_checker_status']
-        obdiag_version = res['obdiag_version']
-        obdiag_found = res['obdiag_found']
    store_dir_checker_status = store_dir_checker_and_handler()
-    status = utils_work_dir_check_status and version_check_status and store_dir_checker_status
-    if status:
-        return plugin_context.return_true(version_status = version_check_status, utils_status = utils_work_dir_check_status, obdiag_version = obdiag_version, obdiag_found = obdiag_found, skip = skip)
+    checked = utils_work_dir_check_status and store_dir_checker_status
+    if checked:
+        return plugin_context.return_true(checked = checked, skip = skip)
    else:
-        return plugin_context.return_false(version_status = version_check_status, utils_status = utils_work_dir_check_status, obdiag_version = obdiag_version, obdiag_found = obdiag_found, skip = skip)
+        return plugin_context.return_false(checked = checked, skip = skip)
diff --git a/plugins/oceanbase-diagnostic-tool/1.4/checker.py b/plugins/oceanbase-diagnostic-tool/1.4/checker.py
index 204a6b2..26f849e 100644
--- a/plugins/oceanbase-diagnostic-tool/1.4/checker.py
+++ b/plugins/oceanbase-diagnostic-tool/1.4/checker.py
@@ -47,14 +47,14 @@ def local_execute_command(command, env=None, timeout=None):
 
     def get_obdiag_cmd():
         base_commond = r"{install_dir}/obdiag check".format(install_dir=obdiag_install_dir)
+        cmd = base_commond
+        options_dict = vars(options)
         # check options
-        if cases:
-            cmd = r"{base} --cases {cases}".format(
-                base=base_commond,
-                cases=cases,
-            )
-        else:
-            cmd = r"{base}".format(base=base_commond)
+        for option, value in options_dict.items():
+            if value is not None:
+                if option == "obdiag_dir":
+                    continue
+                cmd += ' --{}={}'.format(option, value)
         return cmd
 
     def run():
diff --git a/plugins/oceanbase-diagnostic-tool/1.5/generate_config.py b/plugins/oceanbase-diagnostic-tool/1.5/generate_config.py
index 489a458..10dc94b 100644
--- a/plugins/oceanbase-diagnostic-tool/1.5/generate_config.py
+++ b/plugins/oceanbase-diagnostic-tool/1.5/generate_config.py
@@ -112,17 +112,10 @@ def write_obdiag_config(data):
             yaml.dump(data, f)
         except:
             stdio.error('path %s dump obdiag config %s failed.\n' % (config_path, data))
-
-    def copy_check_config():
-        obdiag_install_dir = get_option('obdiag_dir')
-        init_shell_path = os.path.join(obdiag_install_dir, 'init.sh')
-        init_command = 'sh {0}'.format(init_shell_path)
-        LocalClient.execute_command(init_command, None, None, None)
 
     def run():
         config_data = get_obdiag_config()
         write_obdiag_config(config_data)
-        copy_check_config()
 
     try:
         if run():
diff --git a/plugins/oceanbase-diagnostic-tool/1.6/gather_scene_list.py b/plugins/oceanbase-diagnostic-tool/1.6/gather_scene_list.py
new file mode 100644
index 0000000..986175d
--- /dev/null
+++ b/plugins/oceanbase-diagnostic-tool/1.6/gather_scene_list.py
@@ -0,0 +1,64 @@
+# coding: utf-8
+# OceanBase Deploy.
+# Copyright (C) 2021 OceanBase +# +# This file is part of OceanBase Deploy. +# +# OceanBase Deploy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# OceanBase Deploy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with OceanBase Deploy. If not, see . + + +from __future__ import absolute_import, division, print_function +from ssh import LocalClient +import _errno as err + + +def gather_scene_list(plugin_context, *args, **kwargs): + def get_option(key, default=''): + value = getattr(options, key) + if value is None: + value = default + stdio.verbose('get option: %s value %s' % (key, value)) + return value + + def local_execute_command(command, env=None, timeout=None): + command = r"cd {install_dir} && ./".format(install_dir=obdiag_install_dir) + command + return LocalClient.execute_command(command, env, timeout, stdio) + + def get_obdiag_cmd(): + base_commond=r"cd {install_dir} && ./obdiag gather scene list".format(install_dir=obdiag_install_dir) + cmd = r"{base}".format( + base=base_commond, + ) + return cmd + + def run(): + obdiag_cmd = get_obdiag_cmd() + stdio.verbose('execute cmd: {}'.format(obdiag_cmd)) + return LocalClient.run_command(obdiag_cmd, env=None, stdio=stdio) + + options = plugin_context.options + obdiag_bin = "obdiag" + stdio = plugin_context.stdio + obdiag_install_dir = get_option('obdiag_dir') + + ret = local_execute_command('%s --help' % obdiag_bin) + if not ret: + stdio.error(err.EC_OBDIAG_NOT_FOUND.format()) + return plugin_context.return_false() + try: + if run(): + plugin_context.return_true() + except KeyboardInterrupt: + stdio.exception("obdiag gather scene list failed") + return plugin_context.return_false() \ No newline at end of file diff --git a/plugins/oceanbase-diagnostic-tool/1.6/gather_scene_run.py b/plugins/oceanbase-diagnostic-tool/1.6/gather_scene_run.py new file mode 100644 index 0000000..a91cd1c --- /dev/null +++ b/plugins/oceanbase-diagnostic-tool/1.6/gather_scene_run.py @@ -0,0 +1,87 @@ +# coding: utf-8 +# OceanBase Deploy. +# Copyright (C) 2021 OceanBase +# +# This file is part of OceanBase Deploy. +# +# OceanBase Deploy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# OceanBase Deploy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with OceanBase Deploy. If not, see . 
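+#
+# Thin wrapper over 'obdiag gather scene run': the --from/--to window is resolved through
+# TimeUtils.parse_time_from_to (falling back to --since), then the local obdiag install is
+# invoked. The assembled command looks like (hypothetical values):
+#
+#   cd <obdiag_dir> && ./obdiag gather scene run --scene=<scene> \
+#       --from <start time> --to <end time> --store_dir <dir>/gather_scene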
+
+from __future__ import absolute_import, division, print_function
+from ssh import LocalClient
+import os
+from tool import TimeUtils
+import _errno as err
+
+
+def gather_scene_run(plugin_context, *args, **kwargs):
+    def get_option(key, default=''):
+        value = getattr(options, key)
+        if value is None:
+            value = default
+        stdio.verbose('get option: %s value %s' % (key, value))
+        return value
+
+    def local_execute_command(command, env=None, timeout=None):
+        command = r"cd {install_dir} && ./".format(install_dir=obdiag_install_dir) + command
+        return LocalClient.execute_command(command, env, timeout, stdio)
+
+    def get_obdiag_cmd():
+        base_commond = r"cd {install_dir} && ./obdiag gather scene run --scene={scene}".format(install_dir=obdiag_install_dir, scene=scene_option)
+        cmd = r"{base} --from {from_option} --to {to_option} ".format(
+            base=base_commond,
+            from_option=from_option,
+            to_option=to_option,
+        )
+        if store_dir_option:
+            cmd = cmd + r" --store_dir {store_dir_option}".format(store_dir_option=store_dir_option)
+        if env_option:
+            cmd = cmd + r" --env '{env_option}'".format(env_option=env_option)
+        if dis_update_option:
+            cmd = cmd + r" --dis_update '{dis_update_option}'".format(dis_update_option=dis_update_option)
+        return cmd
+
+    def run():
+        obdiag_cmd = get_obdiag_cmd()
+        stdio.verbose('execute cmd: {}'.format(obdiag_cmd))
+        return LocalClient.run_command(obdiag_cmd, env=None, stdio=stdio)
+
+    options = plugin_context.options
+    obdiag_bin = "obdiag"
+    stdio = plugin_context.stdio
+    from_option = get_option('from')
+    to_option = get_option('to')
+    scene_option = get_option('scene')
+    env_option = get_option('env')
+    since_option = get_option('since')
+    if not scene_option:
+        stdio.error("failed to get --scene option, example: obd obdiag gather scene run {0} --scene ".format(plugin_context.deploy_name))
+        return plugin_context.return_false()
+    dis_update_option = get_option('dis_update')
+    store_dir_option = os.path.join(os.path.abspath(get_option('store_dir')), 'gather_scene')
+    obdiag_install_dir = get_option('obdiag_dir')
+    from_option, to_option, ok = TimeUtils.parse_time_from_to(from_time=from_option, to_time=to_option, stdio=stdio)
+    if not ok:
+        from_option, to_option = TimeUtils.parse_time_since(since=since_option)
+
+    ret = local_execute_command('%s --help' % obdiag_bin)
+    if not ret:
+        stdio.error(err.EC_OBDIAG_NOT_FOUND.format())
+        return plugin_context.return_false()
+    try:
+        if run():
+            plugin_context.return_true()
+    except KeyboardInterrupt:
+        stdio.exception("obdiag gather scene run failed")
+        return plugin_context.return_false()
\ No newline at end of file
diff --git a/plugins/oceanbase-diagnostic-tool/1.6/rca_list.py b/plugins/oceanbase-diagnostic-tool/1.6/rca_list.py
new file mode 100644
index 0000000..8b0e7ab
--- /dev/null
+++ b/plugins/oceanbase-diagnostic-tool/1.6/rca_list.py
@@ -0,0 +1,64 @@
+# coding: utf-8
+# OceanBase Deploy.
+# Copyright (C) 2021 OceanBase
+#
+# This file is part of OceanBase Deploy.
+#
+# OceanBase Deploy is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# OceanBase Deploy is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+# +# You should have received a copy of the GNU General Public License +# along with OceanBase Deploy. If not, see . + + +from __future__ import absolute_import, division, print_function +from ssh import LocalClient +import _errno as err + + +def rca_list(plugin_context, *args, **kwargs): + def get_option(key, default=''): + value = getattr(options, key) + if value is None: + value = default + stdio.verbose('get option: %s value %s' % (key, value)) + return value + + def local_execute_command(command, env=None, timeout=None): + command = r"cd {install_dir} && ./".format(install_dir=obdiag_install_dir) + command + return LocalClient.execute_command(command, env, timeout, stdio) + + def get_obdiag_cmd(): + base_commond=r"cd {install_dir} && ./obdiag rca list".format(install_dir=obdiag_install_dir) + cmd = r"{base}".format( + base=base_commond, + ) + return cmd + + def run(): + obdiag_cmd = get_obdiag_cmd() + stdio.verbose('execute cmd: {}'.format(obdiag_cmd)) + return LocalClient.run_command(obdiag_cmd, env=None, stdio=stdio) + + options = plugin_context.options + obdiag_bin = "obdiag" + stdio = plugin_context.stdio + obdiag_install_dir = get_option('obdiag_dir') + + ret = local_execute_command('%s --help' % obdiag_bin) + if not ret: + stdio.error(err.EC_OBDIAG_NOT_FOUND.format()) + return plugin_context.return_false() + try: + if run(): + plugin_context.return_true() + except KeyboardInterrupt: + stdio.exception("obdiag rca list failed") + return plugin_context.return_false() \ No newline at end of file diff --git a/plugins/oceanbase-diagnostic-tool/1.6/rca_run.py b/plugins/oceanbase-diagnostic-tool/1.6/rca_run.py new file mode 100644 index 0000000..6c86571 --- /dev/null +++ b/plugins/oceanbase-diagnostic-tool/1.6/rca_run.py @@ -0,0 +1,75 @@ +# coding: utf-8 +# OceanBase Deploy. +# Copyright (C) 2021 OceanBase +# +# This file is part of OceanBase Deploy. +# +# OceanBase Deploy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# OceanBase Deploy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with OceanBase Deploy. If not, see . 
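+#
+# Thin wrapper over 'obdiag rca run': --scene is mandatory, while --store_dir and
+# --parameters are appended only when given. The assembled command looks like
+# (hypothetical values):
+#
+#   cd <obdiag_dir> && ./obdiag rca run --scene=<scene> --store_dir <dir> --parameters '<json>'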
+
+from __future__ import absolute_import, division, print_function
+from ssh import LocalClient
+import _errno as err
+import os
+
+
+def rca_run(plugin_context, *args, **kwargs):
+    def get_option(key, default=''):
+        value = getattr(options, key)
+        if value is None:
+            value = default
+        stdio.verbose('get option: %s value %s' % (key, value))
+        return value
+
+    def local_execute_command(command, env=None, timeout=None):
+        command = r"cd {install_dir} && ./".format(install_dir=obdiag_install_dir) + command
+        return LocalClient.execute_command(command, env, timeout, stdio)
+
+    def get_obdiag_cmd():
+        base_commond = r"cd {install_dir} && ./obdiag rca run --scene={scene}".format(install_dir=obdiag_install_dir, scene=scene_option)
+        cmd = r"{base}".format(
+            base=base_commond,
+        )
+        if store_dir_option:
+            cmd = cmd + r" --store_dir {store_dir}".format(store_dir=store_dir_option)
+        if parameters_option:
+            cmd = cmd + r" --parameters '{parameters}'".format(parameters=parameters_option)
+        return cmd
+
+    def run():
+        obdiag_cmd = get_obdiag_cmd()
+        stdio.verbose('execute cmd: {}'.format(obdiag_cmd))
+        return LocalClient.run_command(obdiag_cmd, env=None, stdio=stdio)
+
+    options = plugin_context.options
+    obdiag_bin = "obdiag"
+    stdio = plugin_context.stdio
+    obdiag_install_dir = get_option('obdiag_dir')
+    scene_option = get_option('scene')
+    if not scene_option:
+        stdio.error("failed to get --scene option, example: obd obdiag rca run {0} --scene ".format(plugin_context.deploy_name))
+        return plugin_context.return_false()
+    parameters_option = get_option('parameters')
+    store_dir_option = os.path.abspath(get_option('store_dir'))
+
+    ret = local_execute_command('%s --help' % obdiag_bin)
+    if not ret:
+        stdio.error(err.EC_OBDIAG_NOT_FOUND.format())
+        return plugin_context.return_false()
+    try:
+        if run():
+            plugin_context.return_true()
+    except KeyboardInterrupt:
+        stdio.exception("obdiag rca run failed")
+        return plugin_context.return_false()
\ No newline at end of file
diff --git a/plugins/oceanbase-diagnostic-tool/1.6/scene_config.py b/plugins/oceanbase-diagnostic-tool/1.6/scene_config.py
new file mode 100644
index 0000000..6e962c7
--- /dev/null
+++ b/plugins/oceanbase-diagnostic-tool/1.6/scene_config.py
@@ -0,0 +1,84 @@
+# coding: utf-8
+# OceanBase Deploy.
+# Copyright (C) 2021 OceanBase
+#
+# This file is part of OceanBase Deploy.
+#
+# OceanBase Deploy is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# OceanBase Deploy is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with OceanBase Deploy. If not, see .
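+#
+# Ensures the local ~/.obdiag working tree exists before scene-based commands run:
+# check_config() verifies the check/tasks, example and gather/tasks directories, and init()
+# falls back to executing the init.sh bundled in the obdiag install dir (--obdiag_dir).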
+
+from __future__ import absolute_import, division, print_function
+import os
+from ssh import LocalClient
+
+def scene_config(plugin_context, *args, **kwargs):
+
+    def get_option(key, default=''):
+        value = getattr(options, key)
+        if value is None:
+            value = default
+        stdio.verbose('get option: %s value %s' % (key, value))
+        return value
+
+    def check_config():
+        home = os.path.expanduser("~")
+        base_path = os.path.join(home, ".obdiag")
+        required = {
+            "check": {"tasks": True},
+            "example": True,
+            "gather": {"tasks": True}
+        }
+        for item, content in required.items():
+            full_path = os.path.join(base_path, item)
+            if isinstance(content, dict):
+                if not os.path.isdir(full_path):
+                    return False
+                for sub_item, is_dir in content.items():
+                    sub_full_path = os.path.join(full_path, sub_item)
+                    if is_dir:
+                        if not os.path.isdir(sub_full_path):
+                            return False
+                    else:
+                        if not os.path.isfile(sub_full_path):
+                            return False
+            else:
+                if content:
+                    if not os.path.isdir(full_path):
+                        return False
+                else:
+                    if not os.path.isfile(full_path):
+                        return False
+        return True
+
+    def init():
+        obdiag_install_dir = get_option('obdiag_dir')
+        init_shell_path = os.path.join(obdiag_install_dir, 'init.sh')
+        init_command = 'sh {0}'.format(init_shell_path)
+        if LocalClient.execute_command(init_command, None, None, None):
+            return True
+        else:
+            stdio.error("execute command: {0} failed".format(init_command))
+            return False
+
+    stdio = plugin_context.stdio
+    options = plugin_context.options
+    if check_config():
+        init_status = True
+    else:
+        init_status = init()
+    if init_status:
+        return plugin_context.return_true()
+    else:
+        return plugin_context.return_false()
+
diff --git a/plugins/oceanbase-diagnostic-tool/1.6/update_scene.py b/plugins/oceanbase-diagnostic-tool/1.6/update_scene.py
new file mode 100644
index 0000000..e165d91
--- /dev/null
+++ b/plugins/oceanbase-diagnostic-tool/1.6/update_scene.py
@@ -0,0 +1,73 @@
+# coding: utf-8
+# OceanBase Deploy.
+# Copyright (C) 2021 OceanBase
+#
+# This file is part of OceanBase Deploy.
+#
+# OceanBase Deploy is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# OceanBase Deploy is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with OceanBase Deploy. If not, see .
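+#
+# Thin wrapper over 'obdiag update', which refreshes the local scene/task files. --file (an
+# offline package path, made absolute first) and --force are appended only when given:
+#
+#   cd <obdiag_dir> && ./obdiag update --file <package> --force <force>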
+ + +from __future__ import absolute_import, division, print_function +from ssh import LocalClient +import _errno as err +import os + + +def update_scene(plugin_context, *args, **kwargs): + def get_option(key, default=''): + value = getattr(options, key) + if value is None: + value = default + stdio.verbose('get option: %s value %s' % (key, value)) + return value + + def local_execute_command(command, env=None, timeout=None): + command = r"cd {install_dir} && ./".format(install_dir=obdiag_install_dir) + command + return LocalClient.execute_command(command, env, timeout, stdio) + + def get_obdiag_cmd(): + base_commond=r"cd {install_dir} && ./obdiag update ".format(install_dir=obdiag_install_dir) + cmd = r"{base}".format( + base=base_commond, + ) + if file_option: + cmd = cmd + r" --file {option}".format(option=file_option) + if force_option: + cmd = cmd + r" --force {option}".format(option=force_option) + return cmd + + def run(): + obdiag_cmd = get_obdiag_cmd() + stdio.verbose('execute cmd: {}'.format(obdiag_cmd)) + return LocalClient.run_command(obdiag_cmd, env=None, stdio=stdio) + + options = plugin_context.options + obdiag_bin = "obdiag" + stdio = plugin_context.stdio + obdiag_install_dir = get_option('obdiag_dir') + file_option = get_option('file') + if file_option: + file_option = os.path.abspath(get_option('file')) + force_option = get_option('force') + + ret = local_execute_command('%s --help' % obdiag_bin) + if not ret: + stdio.error(err.EC_OBDIAG_NOT_FOUND.format()) + return plugin_context.return_false() + try: + if run(): + plugin_context.return_true() + except KeyboardInterrupt: + stdio.exception("obdiag update failed") + return plugin_context.return_false() \ No newline at end of file diff --git a/plugins/oceanbase/3.1.0/bootstrap.py b/plugins/oceanbase/3.1.0/bootstrap.py index 232b185..d67f49d 100644 --- a/plugins/oceanbase/3.1.0/bootstrap.py +++ b/plugins/oceanbase/3.1.0/bootstrap.py @@ -36,8 +36,8 @@ def bootstrap(plugin_context, *args, **kwargs): InnerConfigItem('$_zone_idc'): 'idc' } has_obproxy = False - for componet_name in ['obproxy', 'obproxy-ce']: - if componet_name in added_components and componet_name in be_depend: + for component_name in ['obproxy', 'obproxy-ce']: + if component_name in added_components and component_name in be_depend: has_obproxy = True break for server in cluster_config.servers: diff --git a/plugins/oceanbase/3.1.0/create_tenant.py b/plugins/oceanbase/3.1.0/create_tenant.py index a57bc0a..8fa0587 100644 --- a/plugins/oceanbase/3.1.0/create_tenant.py +++ b/plugins/oceanbase/3.1.0/create_tenant.py @@ -21,43 +21,15 @@ from __future__ import absolute_import, division, print_function -import re import time from _errno import EC_OBSERVER_CAN_NOT_MIGRATE_IN +from _types import Capacity tenant_cursor = None -def parse_size(size): - _bytes = 0 - if isinstance(size, str): - size = size.strip() - if not isinstance(size, str) or size.isdigit(): - _bytes = int(size) - else: - units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40} - match = re.match(r'^(0|[1-9][0-9]*)\s*([B,K,M,G,T])$', size.upper()) - _bytes = int(match.group(1)) * units[match.group(2)] - return _bytes - - -def format_size(size, precision=1): - units = ['B', 'K', 'M', 'G', 'T', 'P'] - idx = 0 - if precision: - div = 1024.0 - format = '%.' 
+ str(precision) + 'f%s' - else: - div = 1024 - format = '%d%s' - while idx < 5 and size >= 1024: - size /= 1024.0 - idx += 1 - return format % (size, units[idx]) - - -def exec_sql_in_tenant(sql, cursor, tenant, mode, retries=10): +def exec_sql_in_tenant(sql, cursor, tenant, mode, retries=10, args=[]): global tenant_cursor if not tenant_cursor: user = 'SYS' if mode == 'oracle' else 'root' @@ -65,11 +37,11 @@ def exec_sql_in_tenant(sql, cursor, tenant, mode, retries=10): if not tenant_cursor and retries: retries -= 1 time.sleep(2) - return exec_sql_in_tenant(sql, cursor, tenant, mode, retries) - return tenant_cursor.execute(sql) + return exec_sql_in_tenant(sql, cursor, tenant, mode, retries=retries, args=args) + return tenant_cursor.execute(sql, args=args) -def create_tenant(plugin_context, create_tenant_options=None, cursor=None, *args, **kwargs): +def create_tenant(plugin_context, create_tenant_options=[], cursor=None, *args, **kwargs): def get_option(key, default=''): if key in kwargs: return kwargs[key] @@ -81,7 +53,7 @@ def get_option(key, default=''): def get_parsed_option(key, default=''): value = get_option(key=key, default=default) try: - parsed_value = parse_size(value) + parsed_value = Capacity(value).btyes except: stdio.exception("") raise Exception("Invalid option {}: {}".format(key, value)) @@ -93,242 +65,239 @@ def error(*arg, **kwargs): cluster_config = plugin_context.cluster_config stdio = plugin_context.stdio - options = create_tenant_options if create_tenant_options else plugin_context.options - cursor = plugin_context.get_return('connect').get_return('cursor') if not cursor else cursor - create_if_not_exists = get_option('create_if_not_exists', False) - tenant_exists = False - global tenant_cursor - tenant_cursor = None - - mode = get_option('mode', 'mysql').lower() - if not mode in ['mysql', 'oracle']: - error('No such tenant mode: %s.\n--mode must be `mysql` or `oracle`' % mode) - return - - name = get_option('tenant_name', 'test') - unit_name = '%s_unit' % name - pool_name = '%s_pool' % name - sql = "select tenant_name from oceanbase.gv$tenant where tenant_name = '%s'" % name - res = cursor.fetchone(sql) - if res: - if create_if_not_exists: - return plugin_context.return_true() - else: - error('Tenant %s already exists' % name) - return - elif res is False: - return - if not tenant_exists: - stdio.start_loading('Create tenant %s' % name) - zone_list = get_option('zone_list', set()) - zone_obs_num = {} - sql = "select zone, count(*) num from oceanbase.__all_server where status = 'active' group by zone" - res = cursor.fetchall(sql) - if res is False: - stdio.stop_loading('fail') - return - for row in res: - zone_obs_num[str(row['zone'])] = row['num'] - if not zone_list: - zone_list = zone_obs_num.keys() - if isinstance(zone_list, str): - zones = zone_list.replace(';', ',').split(',') - else: - zones = zone_list - zone_list = "('%s')" % "','".join(zones) - - min_unit_num = min(zone_obs_num.items(), key=lambda x: x[1])[1] - unit_num = get_option('unit_num', min_unit_num) - if unit_num > min_unit_num: - return error('resource pool unit num is bigger than zone server count') - - sql = "select count(*) num from oceanbase.__all_server where status = 'active' and start_service_time > 0" - count = 30 - try: - while count: - num = cursor.fetchone(sql, raise_exception=True)['num'] - if num >= unit_num: - break - count -= 1 - time.sleep(1) - if count == 0: - stdio.error(EC_OBSERVER_CAN_NOT_MIGRATE_IN) + multi_options = create_tenant_options if create_tenant_options else 
[plugin_context.options] + for options in multi_options: + cursor = plugin_context.get_return('connect').get_return('cursor') if not cursor else cursor + create_if_not_exists = get_option('create_if_not_exists', False) + tenant_exists = False + global tenant_cursor + tenant_cursor = None + + mode = get_option('mode', 'mysql').lower() + if not mode in ['mysql', 'oracle']: + error('No such tenant mode: %s.\n--mode must be `mysql` or `oracle`' % mode) + return + + name = get_option('tenant_name', 'test') + unit_name = '%s_unit' % name + pool_name = '%s_pool' % name + sql = "select tenant_name from oceanbase.gv$tenant where tenant_name = '%s'" % name + res = cursor.fetchone(sql) + if res: + if create_if_not_exists: + continue + else: + error('Tenant %s already exists' % name) return - except: - stdio.stop_loading('fail') - return - - cpu_total = 0 - mem_total = 0 - disk_total = 0 - sql = "SELECT min(cpu_total) cpu_total, min(mem_total) mem_total, min(disk_total) disk_total FROM oceanbase.__all_virtual_server_stat where zone in %s" % zone_list - resource = cursor.fetchone(sql) - if resource is False: - stdio.stop_loading('fail') + elif res is False: return - cpu_total = resource['cpu_total'] - mem_total = resource['mem_total'] - disk_total = resource['disk_total'] + if not tenant_exists: + stdio.start_loading('Create tenant %s' % name) + zone_list = get_option('zone_list', set()) + zone_obs_num = {} + sql = "select zone, count(*) num from oceanbase.__all_server where status = 'active' group by zone" + res = cursor.fetchall(sql) + if res is False: + stdio.stop_loading('fail') + return + for row in res: + zone_obs_num[str(row['zone'])] = row['num'] + if not zone_list: + zone_list = zone_obs_num.keys() + if isinstance(zone_list, str): + zones = zone_list.replace(';', ',').split(',') + else: + zones = zone_list + zone_list = "('%s')" % "','".join(zones) + + min_unit_num = min(zone_obs_num.items(), key=lambda x: x[1])[1] + unit_num = get_option('unit_num', min_unit_num) + if unit_num > min_unit_num: + return error('resource pool unit num is bigger than zone server count') + + sql = "select count(*) num from oceanbase.__all_server where status = 'active' and start_service_time > 0" + count = 30 + try: + while count: + num = cursor.fetchone(sql, raise_exception=True)['num'] + if num >= unit_num: + break + count -= 1 + time.sleep(1) + if count == 0: + stdio.error(EC_OBSERVER_CAN_NOT_MIGRATE_IN) + return + except: + stdio.stop_loading('fail') + return - sql = 'select * from oceanbase.__all_resource_pool order by name' + cpu_total = 0 + mem_total = 0 + disk_total = 0 + sql = "SELECT min(cpu_total) cpu_total, min(mem_total) mem_total, min(disk_total) disk_total FROM oceanbase.__all_virtual_server_stat where zone in %s" % zone_list + resource = cursor.fetchone(sql) + if resource is False: + stdio.stop_loading('fail') + return + cpu_total = resource['cpu_total'] + mem_total = resource['mem_total'] + disk_total = resource['disk_total'] - units_id = {} - res = cursor.fetchall(sql) - if res is False: - stdio.stop_loading('fail') - return - for row in res: - if str(row['name']) == unit_name: - unit_name += '1' - if row['tenant_id'] < 1: - continue - for zone in str(row['zone_list']).replace(';', ',').split(','): - if zone in zones: - unit_config_id = row['unit_config_id'] - units_id[unit_config_id] = units_id.get(unit_config_id, 0) + 1 - break - - sql = 'select * from oceanbase.__all_unit_config order by name' - res = cursor.fetchall(sql) - if res is False: - stdio.stop_loading('fail') - return - for row in 
res: - if str(row['name']) == unit_name: - unit_name += '1' - if row['unit_config_id'] in units_id: - cpu_total -= row['max_cpu'] * units_id[row['unit_config_id']] - mem_total -= row['max_memory'] * units_id[row['unit_config_id']] - # disk_total -= row['max_disk_size'] - - MIN_CPU = 2 - MIN_MEMORY = 1073741824 - MIN_DISK_SIZE = 536870912 - MIN_IOPS = 128 - MIN_SESSION_NUM = 64 - if cpu_total < MIN_CPU: - return error('%s: resource not enough: cpu count less than %s' % (zone_list, MIN_CPU)) - if mem_total < MIN_MEMORY: - return error('%s: resource not enough: memory less than %s' % (zone_list, format_size(MIN_MEMORY))) - if disk_total < MIN_DISK_SIZE: - return error('%s: resource not enough: disk space less than %s' % (zone_list, format_size(MIN_DISK_SIZE))) + sql = 'select * from oceanbase.__all_resource_pool order by name' - try: - max_memory = get_parsed_option('max_memory', mem_total) - max_disk_size = get_parsed_option('max_disk_size', disk_total) - min_memory = get_parsed_option('min_memory', max_memory) - except Exception as e: - error(e) - return + units_id = {} + res = cursor.fetchall(sql) + if res is False: + stdio.stop_loading('fail') + return + for row in res: + if str(row['name']) == unit_name: + unit_name += '1' + if row['tenant_id'] < 1: + continue + for zone in str(row['zone_list']).replace(';', ',').split(','): + if zone in zones: + unit_config_id = row['unit_config_id'] + units_id[unit_config_id] = units_id.get(unit_config_id, 0) + 1 + break + + sql = 'select * from oceanbase.__all_unit_config order by name' + res = cursor.fetchall(sql) + if res is False: + stdio.stop_loading('fail') + return + for row in res: + if str(row['name']) == unit_name: + unit_name += '1' + if row['unit_config_id'] in units_id: + cpu_total -= row['max_cpu'] * units_id[row['unit_config_id']] + mem_total -= row['max_memory'] * units_id[row['unit_config_id']] + # disk_total -= row['max_disk_size'] + + MIN_CPU = 2 + MIN_MEMORY = 1073741824 + MIN_DISK_SIZE = 536870912 + MIN_IOPS = 128 + MIN_SESSION_NUM = 64 + if cpu_total < MIN_CPU: + return error('%s: resource not enough: cpu count less than %s' % (zone_list, MIN_CPU)) + if mem_total < MIN_MEMORY: + return error('%s: resource not enough: memory less than %s' % (zone_list, Capacity(MIN_MEMORY))) + if disk_total < MIN_DISK_SIZE: + return error('%s: resource not enough: disk space less than %s' % (zone_list, Capacity(MIN_DISK_SIZE))) + + try: + max_memory = get_parsed_option('max_memory', mem_total) + max_disk_size = get_parsed_option('max_disk_size', disk_total) + min_memory = get_parsed_option('min_memory', max_memory) + except Exception as e: + error(e) + return - max_cpu = get_option('max_cpu', cpu_total) - max_iops = get_option('max_iops', MIN_IOPS) - max_session_num = get_option('max_session_num', MIN_SESSION_NUM) - min_cpu = get_option('min_cpu', max_cpu) - min_iops = get_option('min_iops', max_iops) - - if cpu_total < max_cpu: - return error('resource not enough: cpu (Avail: %s, Need: %s)' % (cpu_total, max_cpu)) - if mem_total < max_memory: - return error('resource not enough: memory (Avail: %s, Need: %s)' % (format_size(mem_total), format_size(max_memory))) - if disk_total < max_disk_size: - return error('resource not enough: disk space (Avail: %s, Need: %s)' % (format_size(disk_total), format_size(max_disk_size))) - - if max_iops < MIN_IOPS: - return error('max_iops must greater than %d' % MIN_IOPS) - if max_session_num < MIN_SESSION_NUM: - return error('max_session_num must greater than %d' % MIN_SESSION_NUM) - - if max_cpu < min_cpu: - 
return error('min_cpu must less then max_cpu')
-        if max_memory < min_memory:
-            return error('min_memory must less then max_memory')
-        if max_iops < min_iops:
-            return error('min_iops must less then max_iops')
-
-
-        zone_num = len(zones)
-        charset = get_option('charset', '')
-        collate = get_option('collate', '')
-        replica_num = get_option('replica_num', zone_num)
-        logonly_replica_num = get_option('logonly_replica_num', 0)
-        tablegroup = get_option('tablegroup', '')
-        primary_zone = get_option('primary_zone', 'RANDOM')
-        locality = get_option('locality', '')
-        variables = get_option('variables', '')
-
-        if replica_num == 0:
-            replica_num = zone_num
-        elif replica_num > zone_num:
-            return error('replica_num cannot be greater than zone num (%s)' % zone_num)
-        if not primary_zone:
-            primary_zone = 'RANDOM'
-        if logonly_replica_num > replica_num:
-            return error('logonly_replica_num cannot be greater than replica_num (%s)' % replica_num)
-
-        # create resource unit
-        sql = 'create resource unit %s max_cpu %.1f, max_memory %d, max_iops %d, max_disk_size %d, max_session_num %d, min_cpu %.1f, min_memory %d, min_iops %d'
-        sql = sql % (unit_name, max_cpu, max_memory, max_iops, max_disk_size, max_session_num, min_cpu, min_memory, min_iops)
-        res = cursor.execute(sql)
-        if res is False:
-            stdio.stop_loading('fail')
-            return
+            max_cpu = get_option('max_cpu', cpu_total)
+            max_iops = get_option('max_iops', MIN_IOPS)
+            max_session_num = get_option('max_session_num', MIN_SESSION_NUM)
+            min_cpu = get_option('min_cpu', max_cpu)
+            min_iops = get_option('min_iops', max_iops)
+
+            if cpu_total < max_cpu:
+                return error('resource not enough: cpu (Avail: %s, Need: %s)' % (cpu_total, max_cpu))
+            if mem_total < max_memory:
+                return error('resource not enough: memory (Avail: %s, Need: %s)' % (Capacity(mem_total), Capacity(max_memory)))
+            if disk_total < max_disk_size:
+                return error('resource not enough: disk space (Avail: %s, Need: %s)' % (Capacity(disk_total), Capacity(max_disk_size)))
+
+            if max_iops < MIN_IOPS:
+                return error('max_iops must be greater than %d' % MIN_IOPS)
+            if max_session_num < MIN_SESSION_NUM:
+                return error('max_session_num must be greater than %d' % MIN_SESSION_NUM)
+
+            if max_cpu < min_cpu:
+                return error('min_cpu must be less than max_cpu')
+            if max_memory < min_memory:
+                return error('min_memory must be less than max_memory')
+            if max_iops < min_iops:
+                return error('min_iops must be less than max_iops')
+
+
+            zone_num = len(zones)
+            charset = get_option('charset', '')
+            collate = get_option('collate', '')
+            replica_num = get_option('replica_num', zone_num)
+            logonly_replica_num = get_option('logonly_replica_num', 0)
+            tablegroup = get_option('tablegroup', '')
+            primary_zone = get_option('primary_zone', 'RANDOM')
+            locality = get_option('locality', '')
+            variables = get_option('variables', "ob_tcp_invited_nodes='%'")
+
+            if replica_num == 0:
+                replica_num = zone_num
+            elif replica_num > zone_num:
+                return error('replica_num cannot be greater than zone num (%s)' % zone_num)
+            if not primary_zone:
+                primary_zone = 'RANDOM'
+            if logonly_replica_num > replica_num:
+                return error('logonly_replica_num cannot be greater than replica_num (%s)' % replica_num)
+
+            # create resource unit
+            sql = 'create resource unit %s max_cpu %.1f, max_memory %d, max_iops %d, max_disk_size %d, max_session_num %d, min_cpu %.1f, min_memory %d, min_iops %d'
+            sql = sql % (unit_name, max_cpu, max_memory, max_iops, max_disk_size, max_session_num, min_cpu, min_memory, min_iops)
+            res = cursor.execute(sql)
+            if res is False:
+
stdio.stop_loading('fail') + return - # create resource pool - sql = "create resource pool %s unit='%s', unit_num=%d, zone_list=%s" % (pool_name, unit_name, unit_num, zone_list) - res = cursor.execute(sql) - if res is False: - stdio.stop_loading('fail') - return + # create resource pool + sql = "create resource pool %s unit='%s', unit_num=%d, zone_list=%s" % (pool_name, unit_name, unit_num, zone_list) + res = cursor.execute(sql) + if res is False: + stdio.stop_loading('fail') + return - # create tenant - sql = "create tenant %s replica_num=%d,zone_list=%s,primary_zone='%s',resource_pool_list=('%s')" - sql = sql % (name, replica_num, zone_list, primary_zone, pool_name) - if charset: - sql += ", charset = '%s'" % charset - if collate: - sql += ", collate = '%s'" % collate - if logonly_replica_num: - sql += ", logonly_replica_num = %d" % logonly_replica_num - if tablegroup: - sql += ", default tablegroup ='%s'" % tablegroup - if locality: - sql += ", locality = '%s'" % locality - - set_mode = "ob_compatibility_mode = '%s'" % mode - if variables: - sql += "set %s, %s" % (variables, set_mode) - else: - sql += "set %s" % set_mode - res = cursor.execute(sql) - if res is False: - stdio.stop_loading('fail') - return + # create tenant + sql = "create tenant %s replica_num=%d,zone_list=%s,primary_zone='%s',resource_pool_list=('%s')" + sql = sql % (name, replica_num, zone_list, primary_zone, pool_name) + if charset: + sql += ", charset = '%s'" % charset + if collate: + sql += ", collate = '%s'" % collate + if logonly_replica_num: + sql += ", logonly_replica_num = %d" % logonly_replica_num + if tablegroup: + sql += ", default tablegroup ='%s'" % tablegroup + if locality: + sql += ", locality = '%s'" % locality + + set_mode = "ob_compatibility_mode = '%s'" % mode + if variables: + sql += "set %s, %s" % (variables, set_mode) + else: + sql += "set %s" % set_mode + res = cursor.execute(sql) + if res is False: + stdio.stop_loading('fail') + return - stdio.stop_loading('succeed') + stdio.stop_loading('succeed') - database = get_option('database') - if database: - sql = 'create database {}'.format(database) - if not exec_sql_in_tenant(sql=sql, cursor=cursor, tenant=name, mode=mode) and not create_if_not_exists: - stdio.error('failed to create database {}'.format(database)) - return + database = get_option('database') + if database: + sql = 'create database {}'.format(database) + if not exec_sql_in_tenant(sql=sql, cursor=cursor, tenant=name, mode=mode) and not create_if_not_exists: + stdio.error('failed to create database {}'.format(database)) + return - db_username = get_option('db_username') - db_password = get_option('db_password', '') - if db_username: - if mode == "mysql": - sql = """create user if not exists '{username}' IDENTIFIED BY '{password}'; - grant all on *.* to '{username}' WITH GRANT OPTION;""".format( - username=db_username, password=db_password) - else: - # todo: fix oracle user create - sql = """create {username} IDENTIFIED BY {password}; -grant all on *.* to {username} WITH GRANT OPTION; -grant dba to {username}; -grant all privileges to {username};""" - if not exec_sql_in_tenant(sql=sql, cursor=cursor, tenant=name, mode=mode): - stdio.error('failed to create user {}'.format(db_username)) - return + db_username = get_option('db_username') + db_password = get_option('db_password', '') + if db_username: + if mode == "mysql": + sql = """create user if not exists '{username}' IDENTIFIED BY %s; + grant all on *.* to '{username}' WITH GRANT OPTION;""".format( + username=db_username) + else: + 
error("Create user in oracle tenant is not supported") + if not exec_sql_in_tenant(sql=sql, cursor=cursor, tenant=name, mode=mode, args=[db_password]): + stdio.error('failed to create user {}'.format(db_username)) + return - return plugin_context.return_true() \ No newline at end of file + return plugin_context.return_true() diff --git a/plugins/oceanbase/3.1.0/file_map.yaml b/plugins/oceanbase/3.1.0/file_map.yaml index 6296b3a..4f87f4f 100644 --- a/plugins/oceanbase/3.1.0/file_map.yaml +++ b/plugins/oceanbase/3.1.0/file_map.yaml @@ -2,10 +2,11 @@ target_path: bin/observer type: bin mode: 755 + require: oceanbase-ce-libs - src_path: ./home/admin/oceanbase/bin target_path: bin type: dir - src_path: ./home/admin/oceanbase/etc target_path: etc type: dir - install_method: cp \ No newline at end of file + install_method: cp diff --git a/plugins/oceanbase/3.1.0/generate_config.py b/plugins/oceanbase/3.1.0/generate_config.py index e36e248..bf98533 100644 --- a/plugins/oceanbase/3.1.0/generate_config.py +++ b/plugins/oceanbase/3.1.0/generate_config.py @@ -26,35 +26,7 @@ from _errno import EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE, EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED from tool import ConfigUtil - - -def parse_size(size): - _bytes = 0 - if not isinstance(size, str) or size.isdigit(): - _bytes = int(size) - else: - units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40} - match = re.match(r'(0|[1-9][0-9]*)\s*([B,K,M,G,T])', size.upper()) - _bytes = int(match.group(1)) * units[match.group(2)] - return _bytes - - -def format_size(size, precision=1): - units = ['B', 'K', 'M', 'G'] - units_num = len(units) - 1 - idx = 0 - if precision: - div = 1024.0 - format = '%.' + str(precision) + 'f%s' - limit = 1024 - else: - div = 1024 - limit = 1024 - format = '%d%s' - while idx < units_num and size >= limit: - size /= div - idx += 1 - return format % (size, units[idx]) +from _types import Capacity def get_system_memory(memory_limit): @@ -65,7 +37,7 @@ def get_system_memory(memory_limit): else: system_memory = memory_limit * 0.3 system_memory = max(4 << 30, system_memory) - return format_size(system_memory, 0) + return str(Capacity(system_memory, 0)) def generate_config(plugin_context, generate_config_mini=False, generate_check=True, return_generate_keys=False, generate_consistent_config=False, only_generate_password=False, generate_password=True, *args, **kwargs): @@ -138,7 +110,7 @@ def summit_config(): if generate_config_mini: if not global_config.get('memory_limit_percentage') and not global_config.get('memory_limit'): - update_global_conf('memory_limit', format_size(MIN_MEMORY, 0)) + update_global_conf('memory_limit', str(Capacity(MIN_MEMORY, 0))) if not global_config.get('datafile_size') and not global_config.get('datafile_disk_percentage'): update_global_conf('datafile_size', '20G') if not global_config.get('clog_disk_utilization_threshold'): @@ -179,7 +151,7 @@ def summit_config(): total_memory = 0 for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout): if k == 'MemTotal': - total_memory = parse_size(str(v)) + total_memory = Capacity(str(v)).btyes memory_limit = int(total_memory * user_server_config.get('memory_limit_percentage') / 100) else: if not server_config.get('memory_limit'): @@ -198,21 +170,21 @@ def summit_config(): for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout): if k in memory_key_map: key = memory_key_map[k] - server_memory_stats[key] = parse_size(str(v)) + server_memory_stats[key] = Capacity(str(v)).btyes if generate_check: if server_memory_stats['available'] < 
diff --git a/plugins/oceanbase/3.1.0/file_map.yaml b/plugins/oceanbase/3.1.0/file_map.yaml
index 6296b3a..4f87f4f 100644
--- a/plugins/oceanbase/3.1.0/file_map.yaml
+++ b/plugins/oceanbase/3.1.0/file_map.yaml
@@ -2,10 +2,11 @@
   target_path: bin/observer
   type: bin
   mode: 755
+  require: oceanbase-ce-libs
 - src_path: ./home/admin/oceanbase/bin
   target_path: bin
   type: dir
 - src_path: ./home/admin/oceanbase/etc
   target_path: etc
   type: dir
-install_method: cp
\ No newline at end of file
+install_method: cp
diff --git a/plugins/oceanbase/3.1.0/generate_config.py b/plugins/oceanbase/3.1.0/generate_config.py
index e36e248..bf98533 100644
--- a/plugins/oceanbase/3.1.0/generate_config.py
+++ b/plugins/oceanbase/3.1.0/generate_config.py
@@ -26,35 +26,7 @@
 from _errno import EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE, EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED
 from tool import ConfigUtil
-
-
-def parse_size(size):
-    _bytes = 0
-    if not isinstance(size, str) or size.isdigit():
-        _bytes = int(size)
-    else:
-        units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40}
-        match = re.match(r'(0|[1-9][0-9]*)\s*([B,K,M,G,T])', size.upper())
-        _bytes = int(match.group(1)) * units[match.group(2)]
-    return _bytes
-
-
-def format_size(size, precision=1):
-    units = ['B', 'K', 'M', 'G']
-    units_num = len(units) - 1
-    idx = 0
-    if precision:
-        div = 1024.0
-        format = '%.' + str(precision) + 'f%s'
-        limit = 1024
-    else:
-        div = 1024
-        limit = 1024
-        format = '%d%s'
-    while idx < units_num and size >= limit:
-        size /= div
-        idx += 1
-    return format % (size, units[idx])
+from _types import Capacity
 
 
 def get_system_memory(memory_limit):
@@ -65,7 +37,7 @@
     else:
         system_memory = memory_limit * 0.3
     system_memory = max(4 << 30, system_memory)
-    return format_size(system_memory, 0)
+    return str(Capacity(system_memory, 0))
 
 
 def generate_config(plugin_context, generate_config_mini=False, generate_check=True, return_generate_keys=False, generate_consistent_config=False, only_generate_password=False, generate_password=True, *args, **kwargs):
@@ -138,7 +110,7 @@ def summit_config():
     if generate_config_mini:
         if not global_config.get('memory_limit_percentage') and not global_config.get('memory_limit'):
-            update_global_conf('memory_limit', format_size(MIN_MEMORY, 0))
+            update_global_conf('memory_limit', str(Capacity(MIN_MEMORY, 0)))
         if not global_config.get('datafile_size') and not global_config.get('datafile_disk_percentage'):
             update_global_conf('datafile_size', '20G')
         if not global_config.get('clog_disk_utilization_threshold'):
@@ -179,7 +151,7 @@
                 total_memory = 0
                 for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout):
                     if k == 'MemTotal':
-                        total_memory = parse_size(str(v))
+                        total_memory = Capacity(str(v)).btyes
                 memory_limit = int(total_memory * user_server_config.get('memory_limit_percentage') / 100)
         else:
             if not server_config.get('memory_limit'):
@@ -198,21 +170,21 @@
                     for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout):
                         if k in memory_key_map:
                             key = memory_key_map[k]
-                            server_memory_stats[key] = parse_size(str(v))
+                            server_memory_stats[key] = Capacity(str(v)).btyes
                     if generate_check:
                         if server_memory_stats['available'] < START_NEED_MEMORY:
-                            stdio.error(EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE.format(ip=ip, available=format_size(server_memory_stats['available']), need=format_size(START_NEED_MEMORY)))
+                            stdio.error(EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE.format(ip=ip, available=Capacity(server_memory_stats['available']), need=Capacity(START_NEED_MEMORY)))
                             success = False
                             continue
 
                         if server_memory_stats['free'] + server_memory_stats['buffers'] + server_memory_stats['cached'] < MIN_MEMORY:
-                            stdio.error(EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED.format(ip=ip, free=format_size(server_memory_stats['free']), cached=format_size(server_memory_stats['buffers'] + server_memory_stats['cached']), need=format_size(MIN_MEMORY)))
+                            stdio.error(EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED.format(ip=ip, free=Capacity(server_memory_stats['free']), cached=Capacity(server_memory_stats['buffers'] + server_memory_stats['cached']), need=Capacity(MIN_MEMORY)))
                             success = False
                             continue
-                    memory_limit = max(MIN_MEMORY, server_memory_stats['available'] * 0.9)
-                    server_config['memory_limit'] = format_size(memory_limit, 0)
+                    memory_limit = max(MIN_MEMORY, int(server_memory_stats['available'] * 0.9))
+                    server_config['memory_limit'] = str(Capacity(memory_limit, 0))
                     update_server_conf(server, 'memory_limit', server_config['memory_limit'])
                     auto_set_memory = True
                 else:
@@ -220,7 +192,7 @@
                     success = False
                     continue
             else:
-                memory_limit = parse_size(server_config.get('memory_limit'))
+                memory_limit = server_config.get('memory_limit')
 
         auto_set_system_memory = False
         if not user_server_config.get('system_memory'):
@@ -306,13 +278,13 @@
                     if auto_set_system_memory:
                         min_size = MIN_MEMORY * 7
                     else:
-                        min_size = max(MIN_MEMORY, parse_size(user_server_config.get('system_memory')) * 2) * 7
+                        min_size = max(MIN_MEMORY, Capacity(user_server_config.get('system_memory')).btyes * 2) * 7
                     min_need = padding_size + min_size
                     if min_need <= disk_free:
                         memory_limit = (disk_free - padding_size) / 7
-                        server_config['memory_limit'] = format_size(memory_limit, 0)
+                        server_config['memory_limit'] = str(Capacity(memory_limit, 0))
                         update_server_conf(server, 'memory_limit', server_config['memory_limit'])
-                        memory_limit = parse_size(server_config['memory_limit'])
+                        memory_limit = Capacity(server_config['memory_limit']).btyes
                         clog_disk_size = memory_limit * 4
                         clog_size = int(round(clog_disk_size * 0.64))
                         if auto_set_system_memory:
@@ -322,12 +294,12 @@
                         disk_flag = True
 
                 if generate_check and not disk_flag:
-                    stdio.error('(%s) %s not enough disk space. (Avail: %s, Need: %s). Use `redo_dir` to set other disk for clog' % (ip, kp, format_size(disk_free), format_size(min_need)))
+                    stdio.error('(%s) %s not enough disk space. (Avail: %s, Need: %s). Use `redo_dir` to set other disk for clog' % (ip, kp, Capacity(disk_free), Capacity(min_need)))
                     success = False
                     continue
 
-                datafile_size_format = format_size(disk_total - clog_disk_size - disk_used, 0)
-                datafile_size = parse_size(datafile_size_format)
+                datafile_size_format = str(Capacity(disk_total - clog_disk_size - disk_used, 0))
+                datafile_size = Capacity(datafile_size_format).btyes
                 clog_disk_utilization_threshold = max(80, int(100.0 * (disk_used + datafile_size + padding_size + clog_disk_size * 0.8) / real_disk_total))
                 clog_disk_utilization_threshold = min(clog_disk_utilization_threshold, clog_disk_utilization_threshold_max)
                 clog_disk_usage_limit_percentage = min(int(clog_disk_utilization_threshold / 80.0 * 95), clog_disk_usage_limit_percentage_max)
@@ -337,7 +309,7 @@
                 update_server_conf(server, 'clog_disk_usage_limit_percentage', clog_disk_usage_limit_percentage)
             else:
                 datafile_size = max(5 << 30, data_dir_disk['avail'] * 0.8, 0)
-                update_server_conf(server, 'datafile_size', format_size(datafile_size, 0))
+                update_server_conf(server, 'datafile_size', str(Capacity(datafile_size, 0)))
 
     if generate_password:
         generate_random_password(cluster_config)
@@ -356,13 +328,13 @@
             if key in generate_configs.get(server, {}):
                 value = generate_configs[server][key]
                 servers.append(server)
-                values.append(parse_size(value) if is_capacity_key else value)
+                values.append(Capacity(value).btyes if is_capacity_key else value)
         if values:
             if len(values) != server_num and key in generate_global_config:
                 continue
             comp = min if key in MIN_KEY else max
             value = comp(values)
-            generate_global_config[key] = format_size(value, 0) if is_capacity_key else value
+            generate_global_config[key] = str(Capacity(value, 0)) if is_capacity_key else value
             for server in servers:
                 del generate_configs[server][key]
 
diff --git a/plugins/oceanbase/3.1.0/list_tenant.py b/plugins/oceanbase/3.1.0/list_tenant.py
index 48e8822..72fe28e 100644
--- a/plugins/oceanbase/3.1.0/list_tenant.py
+++ b/plugins/oceanbase/3.1.0/list_tenant.py
@@ -19,36 +19,7 @@
 from __future__ import absolute_import, division, print_function
-
-import re
-
-
-def parse_size(size):
-    _bytes = 0
-    if isinstance(size, str):
-        size = size.strip()
-    if not isinstance(size, str) or size.isdigit():
-        _bytes = int(size)
-    else:
-        units = {"B": 1, "K": 1 << 10, "M": 1 << 20, "G": 1 << 30, "T": 1 << 40}
-        match = re.match(r'^([1-9][0-9]*)\s*([B,K,M,G,T])$', size.upper())
-        _bytes = int(match.group(1)) * units[match.group(2)]
-    return _bytes
-
-
-def format_size(size, precision=1):
-    units = ['B', 'K', 'M', 'G', 'T', 'P']
-    idx = 0
-    if precision:
-        div = 1024.0
-        format = '%.' + str(precision) + 'f%s'
-    else:
-        div = 1024
-        format = '%d%s'
-    while idx < 5 and size >= 1024:
-        size /= 1024.0
-        idx += 1
-    return format % (size, units[idx])
+from _types import Capacity
 
 
 def list_tenant(plugin_context, cursor, *args, **kwargs):
@@ -79,8 +50,8 @@
     stdio.print_list(tenant_infos, ['tenant_name', 'zone_list', 'primary_zone', 'max_cpu', 'min_cpu', 'max_memory',
                                     'min_memory', 'max_iops', 'min_iops', 'max_disk_size', 'max_session_num'],
                      lambda x: [x['tenant_name'], x['zone_list'], x['primary_zone'], x['max_cpu'], x['min_cpu'],
-                                format_size(x['max_memory']), format_size(x['min_memory']), x['max_iops'],
-                                x['min_iops'], format_size(x['max_disk_size']), x['max_session_num']],
+                                str(Capacity(x['max_memory'])), str(Capacity(x['min_memory'])), x['max_iops'],
+                                x['min_iops'], str(Capacity(x['max_disk_size'])), x['max_session_num']],
                      title='tenant')
     stdio.stop_loading('succeed')
     return plugin_context.return_true()
diff --git a/plugins/oceanbase/3.1.0/parameter.yaml b/plugins/oceanbase/3.1.0/parameter.yaml
index c72594d..aacd008 100644
--- a/plugins/oceanbase/3.1.0/parameter.yaml
+++ b/plugins/oceanbase/3.1.0/parameter.yaml
@@ -306,7 +306,7 @@
   name_local: 数据文件大小
   require: false
   essential: true
-  type: CAPACITY
+  type: CAPACITY_MB
   default: 0
   min_value: 0M
   max_value: NULL
@@ -347,7 +347,7 @@
   description_local: SQL审计数据可占用内存限制
 - name: cache_wash_threshold
   require: false
-  type: CAPACITY
+  type: CAPACITY_MB
   default: 4GB
   min_value: 0B
   max_value: NULL
@@ -477,7 +477,7 @@
   description_local: 系统可以使用的最小CPU配额,将会预留
 - name: memory_reserved
   require: false
-  type: CAPACITY
+  type: CAPACITY_MB
   default: 500M
   min_value: 10M
   max_value: NULL
@@ -597,7 +597,7 @@
   description_local: 负载均衡的时候,是否允许配置的资源权重生效
 - name: multiblock_read_size
   require: false
-  type: CAPACITY
+  type: CAPACITY_MB
   default: 128K
   min_value: 0K
   max_value: 2M
@@ -627,7 +627,7 @@
   description_local: 因磁盘满等原因导致某个节点数据迁入失败时,暂停迁入时长
 - name: tablet_size
   require: false
-  type: CAPACITY
+  type: CAPACITY_MB
   default: 128M
   min_value: NULL
   max_value: NULL
@@ -756,7 +756,7 @@
   description_local: 数据块缓存在缓存系统中的优先级
 - name: syslog_io_bandwidth_limit
   require: false
-  type: CAPACITY
+  type: CAPACITY_MB
   default: 30MB
   min_value: NULL
   max_value: NULL
@@ -838,7 +838,7 @@
   description_local: 事务日志的磁盘IO最大可用的磁盘利用率
 - name: px_task_size
   require: false
-  type: CAPACITY
+  type: CAPACITY_MB
   default: 2M
   min_value: 2M
   max_value: NULL
@@ -1388,7 +1388,7 @@
   description_local: 控制租户CPU调度中每次预留多少比例的空闲token数给租户
 - name: stack_size
   require: false
-  type: CAPACITY
+  type: CAPACITY_MB
   default: 1M
   min_value: 512K
   max_value: 20M
@@ -1410,7 +1410,7 @@
   name_local: 最大运行内存
   require: false
   essential: true
-  type: CAPACITY
+  type: CAPACITY_MB
   default: 0
   min_value: NULL
   max_value: NULL
@@ -1422,7 +1422,7 @@
 - name: system_memory
   name_local: 集群系统内存
   essential: true
-  type: CAPACITY
+  type: CAPACITY_MB
   default: 30G
   min_value: 0M
   max_value: NULL
@@ -1621,7 +1621,7 @@
   description_local: 单个节点迁入数据最大并发数
 - name: rootservice_memory_limit
   require: false
-  type: CAPACITY
+  type: CAPACITY_MB
   default: 2G
   min_value: 2G
   max_value: NULL
@@ -1631,7 +1631,7 @@
   description_local: RootService最大内存限制
 - name: plan_cache_low_watermark
   require: false
-  type: CAPACITY
+  type: CAPACITY_MB
   default: 1500M
   min_value: NULL
   max_value: NULL
@@ -1713,7 +1713,7 @@
   description_local: 控制内存大页的行为,"true"表示在操作系统开启内存大页并且有空闲大页时,数据库总是申请内存大页,否则申请普通内存页, "false"表示数据库不使用大页, "only"表示数据库总是分配大页
 - name: dtl_buffer_size
   require: false
-  type: CAPACITY
+  type: CAPACITY_MB
   default: 64K
   min_value: 4K
   max_value: 2M
@@ -2074,7 +2074,7 @@
   description_local: 手工合并开关
 - name: memory_chunk_cache_size
   require: false
-  type: CAPACITY
+  type: CAPACITY_MB
   default: 0M
   min_value: 0M
   max_value: NULL
diff --git a/plugins/oceanbase/3.1.0/requirement.yaml b/plugins/oceanbase/3.1.0/requirement.yaml
new file mode 100644
index 0000000..d74b386
--- /dev/null
+++ b/plugins/oceanbase/3.1.0/requirement.yaml
@@ -0,0 +1,2 @@
+oceanbase-ce-libs:
+  version: $version
diff --git a/plugins/oceanbase/3.1.0/start.py b/plugins/oceanbase/3.1.0/start.py
index cb6a26f..7b5da23 100644
--- a/plugins/oceanbase/3.1.0/start.py
+++ b/plugins/oceanbase/3.1.0/start.py
@@ -175,6 +175,7 @@ def start(plugin_context, *args, **kwargs):
     not_cmd_opt = [
         'home_path', 'obconfig_url', 'root_password', 'proxyro_password',
         'redo_dir', 'clog_dir', 'ilog_dir', 'slog_dir', '$_zone_idc',
+        'ocp_monitor_tenant', 'ocp_monitor_username', 'ocp_monitor_password', 'ocp_monitor_db',
         'ocp_meta_tenant', 'ocp_meta_username', 'ocp_meta_password', 'ocp_meta_db', 'ocp_agent_monitor_password', 'ocp_root_password'
     ]
     get_value = lambda key: "'%s'" % server_config[key] if isinstance(server_config[key], str) else server_config[key]
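Note: start.py turns the remaining server_config keys into observer startup options, so not_cmd_opt has to list every key that is deployment metadata rather than an observer parameter; the hunk above adds the new ocp_* credentials to that deny-list. A toy sketch of the filtering idiom, with invented config values (get_value mirrors the lambda shown above):

    # Toy illustration of the not_cmd_opt deny-list; config values are invented.
    not_cmd_opt = [
        'home_path', 'obconfig_url', 'root_password', 'proxyro_password',
        'ocp_monitor_tenant', 'ocp_monitor_username', 'ocp_monitor_password', 'ocp_monitor_db',
    ]
    server_config = {'home_path': '/home/admin/oceanbase', 'cpu_count': 16, 'ocp_monitor_password': 'secret'}
    get_value = lambda key: "'%s'" % server_config[key] if isinstance(server_config[key], str) else server_config[key]
    opt_str = ','.join('%s=%s' % (key, get_value(key)) for key in server_config if key not in not_cmd_opt)
    print(opt_str)  # -> cpu_count=16 ; credentials and paths never become CLI flags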
diff --git a/plugins/oceanbase/3.1.0/start_check.py b/plugins/oceanbase/3.1.0/start_check.py
index 6793cbe..b4bd2e5 100644
--- a/plugins/oceanbase/3.1.0/start_check.py
+++ b/plugins/oceanbase/3.1.0/start_check.py
@@ -25,6 +25,7 @@
 import time
 
 import _errno as err
+from _types import Capacity
 
 stdio = None
@@ -41,26 +42,6 @@ def get_port_socket_inode(client, port):
     return res.stdout.strip().split('\n')
 
 
-def parse_size(size):
-    _bytes = 0
-    if not isinstance(size, str) or size.isdigit():
-        _bytes = int(size)
-    else:
-        units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40}
-        match = re.match(r'(0|[1-9][0-9]*)\s*([B,K,M,G,T])', size.upper())
-        _bytes = int(match.group(1)) * units[match.group(2)]
-    return _bytes
-
-
-def format_size(size):
-    units = ['B', 'K', 'M', 'G', 'T', 'P']
-    idx = 0
-    while idx < 5 and size >= 1024:
-        size /= 1024.0
-        idx += 1
-    return '%.1f%s' % (size, units[idx])
-
-
 def time_delta(client):
     time_st = time.time() * 1000
     time_srv = int(client.execute_command('date +%s%N').stdout) / 1000000
@@ -346,10 +327,10 @@ def system_memory_check():
         memory_limit = 0
         percentage = 0
         if server_config.get('memory_limit'):
-            memory_limit = parse_size(server_config['memory_limit'])
+            memory_limit = Capacity(server_config['memory_limit']).btyes
             memory['num'] += memory_limit
         elif 'memory_limit_percentage' in server_config:
-            percentage = int(parse_size(server_config['memory_limit_percentage']))
+            percentage = server_config['memory_limit_percentage']
             memory['percentage'] += percentage
         else:
             percentage = 80
@@ -357,7 +338,7 @@
         memory['servers'][server] = {
             'num': memory_limit,
             'percentage': percentage,
-            'system_memory': parse_size(server_config.get('system_memory', 0))
+            'system_memory': Capacity(server_config.get('system_memory', 0)).btyes
         }
 
         data_path = server_config['data_dir'] if server_config.get('data_dir') else os.path.join(server_config['home_path'], 'store')
@@ -528,14 +509,14 @@ def system_memory_check():
             for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout):
                 if k in memory_key_map:
                     key = memory_key_map[k]
-                    server_memory_stats[key] = parse_size(str(v))
+                    server_memory_stats[key] = Capacity(str(v)).btyes
 
             server_memory_stat = servers_memory[ip]
             min_start_need = server_num * START_NEED_MEMORY
-            total_use = server_memory_stat['percentage'] * server_memory_stats['total'] / 100 + server_memory_stat['num']
+            total_use = int(server_memory_stat['percentage'] * server_memory_stats['total'] / 100 + server_memory_stat['num'])
             if min_start_need > server_memory_stats['available']:
                 for server in ip_servers:
-                    error('mem', err.EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE.format(ip=ip, available=format_size(server_memory_stats['available']), need=format_size(min_start_need)), [err.SUG_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE.format(ip=ip)])
+                    error('mem', err.EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE.format(ip=ip, available=str(Capacity(server_memory_stats['available'])), need=str(Capacity(min_start_need))), [err.SUG_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE.format(ip=ip)])
             elif total_use > server_memory_stats['free'] + server_memory_stats['buffers'] + server_memory_stats['cached']:
                 for server in ip_servers:
                     server_generate_config = generate_configs.get(server, {})
@@ -545,11 +526,11 @@
                         if key in global_generate_config or key in server_generate_config:
                             suggest.auto_fix = False
                             break
-                    error('mem', err.EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED.format(ip=ip, free=format_size(server_memory_stats['free']), cached=format_size(server_memory_stats['buffers'] + server_memory_stats['cached']), need=format_size(total_use)), [suggest])
+                    error('mem', err.EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED.format(ip=ip, free=str(Capacity(server_memory_stats['free'])), cached=str(Capacity(server_memory_stats['buffers'] + server_memory_stats['cached'])), need=str(Capacity(total_use))), [suggest])
             elif total_use > server_memory_stats['free']:
                 system_memory_check()
                 for server in ip_servers:
-                    alert('mem', err.EC_OBSERVER_NOT_ENOUGH_MEMORY.format(ip=ip, free=format_size(server_memory_stats['free']), need=format_size(total_use)), [err.SUG_OBSERVER_REDUCE_MEM.format()])
+                    alert('mem', err.EC_OBSERVER_NOT_ENOUGH_MEMORY.format(ip=ip, free=str(Capacity(server_memory_stats['free'])), need=str(Capacity(total_use))), [err.SUG_OBSERVER_REDUCE_MEM.format()])
             else:
                 system_memory_check()
 
@@ -567,7 +548,7 @@
                 if isinstance(need, int):
                     disk[kp]['need'] += disk[kp]['total'] * need / 100
                 else:
-                    disk[kp]['need'] += parse_size(need)
+                    disk[kp]['need'] += Capacity(need).btyes
 
             for path in servers_clog_mount[ip]:
                 kp = '/'
@@ -596,7 +577,7 @@
                     if key in global_generate_config or key in server_generate_config:
                         suggest.auto_fix = False
                         break
-                critical('disk', err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=p, avail=format_size(avail), need=format_size(need)), [suggest] + suggests)
+                critical('disk', err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=p, avail=str(Capacity(avail)), need=str(Capacity(need))), [suggest] + suggests)
             elif 1.0 * (total - avail + need) / total > disk[p]['threshold']:
                 # msg = '(%s) %s not enough disk space for clog. Use `redo_dir` to set other disk for clog' % (ip, p)
                 # msg += ', or reduce the value of `datafile_size`' if need > 0 else '.'
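Note: every hunk above swaps the per-plugin parse_size/format_size helpers for the shared _types.Capacity class. The call sites rely on two behaviors: Capacity(x).btyes yields an integer byte count (the btyes spelling is exactly what the call sites use), and str(Capacity(x, precision)) renders a human-readable size. A minimal sketch of such a class, inferred from those call sites only; the real implementation lives in _types.py and may differ in detail:

    import re

    class Capacity(object):
        # Parses '30MB', '64K', 2147483648, ... and renders them back.
        UNITS = {'B': 1, 'K': 1 << 10, 'M': 1 << 20, 'G': 1 << 30, 'T': 1 << 40, 'P': 1 << 50}

        def __init__(self, size, precision=1):
            self.precision = precision
            if isinstance(size, str) and not size.isdigit():
                match = re.match(r'^(0|[1-9][0-9]*)\s*([BKMGTP])B?$', size.strip().upper())
                if not match:
                    raise ValueError('invalid capacity: %s' % size)
                self._bytes = int(match.group(1)) * self.UNITS[match.group(2)]
            else:
                self._bytes = int(size)

        @property
        def btyes(self):  # spelling matches the call sites in this patch
            return self._bytes

        def __str__(self):
            size, idx, units = float(self._bytes), 0, 'BKMGTP'
            while idx < len(units) - 1 and size >= 1024:
                size /= 1024.0
                idx += 1
            fmt = ('%%.%df%%s' % self.precision) if self.precision else '%d%s'
            return fmt % (size, units[idx])

    print(Capacity('64K').btyes)         # -> 65536
    print(str(Capacity(9663676416, 0)))  # -> 9G

Centralizing this also removes the subtle drift between the old copies, which disagreed on unit lists, regex anchors, and zero handling.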
diff --git a/plugins/oceanbase/3.1.0/upgrade_route.py b/plugins/oceanbase/3.1.0/upgrade_route.py
index 023645c..00715cd 100644
--- a/plugins/oceanbase/3.1.0/upgrade_route.py
+++ b/plugins/oceanbase/3.1.0/upgrade_route.py
@@ -35,7 +35,7 @@ def __init__(self, version, deprecated = False, require_from_binary = False):
         version = version.split('_')
         release = version[1] if len(version) > 1 else self.RELEASE_NULL
         version = version[0]
-        super(VersionNode, self).__init__('', version, release, '', md5)
+        super(VersionNode, self).__init__('', version, release, '', md5, 0)
         self.next = []
         self.can_be_upgraded_to = []
         self.can_be_upgraded_to = []
diff --git a/plugins/oceanbase/4.0.0.0/bootstrap.py b/plugins/oceanbase/4.0.0.0/bootstrap.py
index fdf8eff..8155e18 100644
--- a/plugins/oceanbase/4.0.0.0/bootstrap.py
+++ b/plugins/oceanbase/4.0.0.0/bootstrap.py
@@ -119,25 +119,39 @@ def is_bootstrap():
             stdio.verbose(sql)
             raise_cursor.execute(sql, [value])
 
-    has_ocp = 'ocp-express' in added_components and 'ocp-express' in be_depend
-    if any([key in global_conf for key in ["ocp_meta_tenant", "ocp_meta_db", "ocp_meta_username", "ocp_meta_password"]]):
-        has_ocp = True
-    if has_ocp:
-        global_conf_with_default = deepcopy(cluster_config.get_global_conf_with_default())
-        original_global_conf = cluster_config.get_original_global_conf()
-        ocp_meta_tenant_prefix = 'ocp_meta_tenant_'
+    # check the requirements of ocp meta and monitor tenant
+    global_conf_with_default = deepcopy(cluster_config.get_global_conf_with_default())
+    original_global_conf = cluster_config.get_original_global_conf()
+
+    ocp_tenants = []
+    tenants_componets_map = {
+        "meta": ["ocp-express", "ocp-server", "ocp-server-ce"],
+        "monitor": ["ocp-server", "ocp-server-ce"],
+    }
+    ocp_tenant_keys = ['tenant', 'db', 'username', 'password']
+    for tenant in tenants_componets_map:
+        components = tenants_componets_map[tenant]
+        prefix = "ocp_%s_" % tenant
+        if not any([component in added_components and component in be_depend for component in components]):
+            for key in ocp_tenant_keys:
+                config_key = prefix + key
+                if config_key in global_conf:
+                    break
+            else:
+                continue
+        # set create tenant variable
         for key in global_conf_with_default:
-            if key.startswith(ocp_meta_tenant_prefix) and original_global_conf.get(key, None):
-                global_conf_with_default['ocp_meta_tenant'][key.replace(ocp_meta_tenant_prefix, '', 1)] = global_conf_with_default[key]
-        tenant_info = global_conf_with_default["ocp_meta_tenant"]
+            if key.startswith(prefix) and original_global_conf.get(key, None):
+                global_conf_with_default[prefix + 'tenant'][key.replace(prefix, '', 1)] = global_conf_with_default[key]
+        tenant_info = global_conf_with_default[prefix + "tenant"]
         tenant_info["variables"] = "ob_tcp_invited_nodes='%'"
         tenant_info["create_if_not_exists"] = True
-        tenant_info["database"] = global_conf_with_default["ocp_meta_db"]
-        tenant_info["db_username"] = global_conf_with_default["ocp_meta_username"]
-        tenant_info["db_password"] = global_conf_with_default.get("ocp_meta_password", "")
-        tenant_info["ocp_root_password"] = global_conf_with_default.get("ocp_root_password", "")
-        tenant_options = Values(tenant_info)
-        plugin_context.set_variable("create_tenant_options", tenant_options)
+        tenant_info["database"] = global_conf_with_default[prefix + "db"]
+        tenant_info["db_username"] = global_conf_with_default[prefix + "username"]
+        tenant_info["db_password"] = global_conf_with_default.get(prefix + "password", "")
+        tenant_info["{0}_root_password".format(tenant_info['tenant_name'])] = global_conf_with_default.get(prefix + "password", "")
+        ocp_tenants.append(Values(tenant_info))
+    plugin_context.set_variable("create_tenant_options", ocp_tenants)
 
     # wait for server online
     all_server_online = False
diff --git a/plugins/oceanbase/4.0.0.0/create_tenant.py b/plugins/oceanbase/4.0.0.0/create_tenant.py
index 0d22096..a443f6b 100644
--- a/plugins/oceanbase/4.0.0.0/create_tenant.py
+++ b/plugins/oceanbase/4.0.0.0/create_tenant.py
@@ -21,44 +21,16 @@
 from __future__ import absolute_import, division, print_function
 
-import re
 import time
 from collections import defaultdict
 
 from _errno import EC_OBSERVER_CAN_NOT_MIGRATE_IN
+from _types import Capacity
 
 tenant_cursor_cache = defaultdict(dict)
 
 
-def parse_size(size):
-    _bytes = 0
-    if isinstance(size, str):
-        size = size.strip()
-    if not isinstance(size, str) or size.isdigit():
-        _bytes = int(size)
-    else:
-        units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40}
-        match = re.match(r'^(0|[1-9][0-9]*)\s*([B,K,M,G,T])$', size.upper())
-        _bytes = int(match.group(1)) * units[match.group(2)]
-    return _bytes
-
-
-def format_size(size, precision=1):
-    units = ['B', 'K', 'M', 'G', 'T', 'P']
-    idx = 0
-    if precision:
-        div = 1024.0
-        format = '%.' + str(precision) + 'f%s'
-    else:
-        div = 1024
-        format = '%d%s'
-    while idx < 5 and size >= 1024:
-        size /= 1024.0
-        idx += 1
-    return format % (size, units[idx])
-
-
-def exec_sql_in_tenant(sql, cursor, tenant, mode, user='', password='', print_exception=True, retries=20):
+def exec_sql_in_tenant(sql, cursor, tenant, mode, user='', password='', print_exception=True, retries=20, args=[]):
     if not user:
         user = 'SYS' if mode == 'oracle' else 'root'
     # find tenant ip, port
@@ -79,11 +51,11 @@
             break
     if not tenant_cursor and retries:
         time.sleep(1)
-        return exec_sql_in_tenant(sql, cursor, tenant, mode, user, password, print_exception=print_exception, retries=retries-1)
-    return tenant_cursor.execute(sql, raise_exception=False, exc_level='verbose') if tenant_cursor else False
+        return exec_sql_in_tenant(sql, cursor, tenant, mode, user, password, print_exception=print_exception, retries=retries-1, args=args)
+    return tenant_cursor.execute(sql, args=args, raise_exception=False, exc_level='verbose') if tenant_cursor else False
 
 
-def create_tenant(plugin_context, create_tenant_options=None, cursor=None, *args, **kwargs):
+def create_tenant(plugin_context, create_tenant_options=[], cursor=None, *args, **kwargs):
     def get_option(key, default=''):
         value = getattr(options, key, default)
         if not value:
@@ -95,249 +67,253 @@ def get_parsed_option(key, default=''):
         if value is None:
             return value
         try:
-            parsed_value = parse_size(value)
+            parsed_value = Capacity(value).btyes
         except:
             stdio.exception("")
             raise Exception("Invalid option {}: {}".format(key, value))
         return parsed_value
 
-    def error(*arg, **kwargs):
-        stdio.error(*arg, **kwargs)
+    def error(msg='', *arg, **kwargs):
+        msg and stdio.error(msg, *arg, **kwargs)
         stdio.stop_loading('fail')
-    
+
     cluster_config = plugin_context.cluster_config
     stdio = plugin_context.stdio
-    options = create_tenant_options if create_tenant_options else plugin_context.options
-    create_if_not_exists = get_option('create_if_not_exists', False)
-    cursor = plugin_context.get_return('connect').get_return('cursor') if not cursor else cursor
-    global tenant_cursor
-    tenant_cursor = None
-
-    mode = get_option('mode', 'mysql').lower()
-    if not mode in ['mysql', 'oracle']:
-        error('No such tenant mode: %s.\n--mode must be `mysql` or `oracle`' % mode)
-        return
-
-    # options not support
-    deserted_options = ('max_session_num', 'max_memory', 'min_memory', 'max_disk_size')
-    for opt in deserted_options:
-        if get_option(opt, None) is not None:
-            stdio.warn("option {} is no longer supported".format(opt))
-
-    name = get_option('tenant_name', 'test')
-    unit_name = '%s_unit' % name
-    sql = 'select * from oceanbase.DBA_OB_UNIT_CONFIGS order by name'
-    res = cursor.fetchall(sql)
-    if res is False:
-        return
-    for row in res:
-        if str(row['NAME']) == unit_name:
-            unit_name += '1'
-
-    pool_name = '%s_pool' % name
-
-    sql = "select * from oceanbase.DBA_OB_TENANTS where TENANT_NAME = %s"
-    tenant_exists = False
-    res = cursor.fetchone(sql, [name])
-    if res:
-        if create_if_not_exists:
-            return plugin_context.return_true()
-        else:
-            error('Tenant %s already exists' % name)
-            return
-    elif res is False:
-        return
-    if not tenant_exists:
-        stdio.start_loading('Create tenant %s' % name)
-        zone_list = get_option('zone_list', set())
-        zone_obs_num = {}
-        sql = "select zone, count(*) num from oceanbase.__all_server where status = 'active' group by zone"
+    multi_options = create_tenant_options if create_tenant_options else [plugin_context.options]
+    for options in multi_options:
+        create_if_not_exists = get_option('create_if_not_exists', False)
+        cursor = plugin_context.get_return('connect').get_return('cursor') if not cursor else cursor
+        global tenant_cursor
+        tenant_cursor = None
+
+        mode = get_option('mode', 'mysql').lower()
+        if not mode in ['mysql', 'oracle']:
+            error('No such tenant mode: %s.\n--mode must be `mysql` or `oracle`' % mode)
+            return
+
+        # options not support
+        deserted_options = ('max_session_num', 'max_memory', 'min_memory', 'max_disk_size')
+        for opt in deserted_options:
+            if get_option(opt, None) is not None:
+                stdio.warn("option {} is no longer supported".format(opt))
+
+        name = get_option('tenant_name', 'test')
+        unit_name = '%s_unit' % name
+        sql = 'select * from oceanbase.DBA_OB_UNIT_CONFIGS order by name'
+        res = cursor.fetchall(sql)
+        if res is False:
+            return
+        for row in res:
+            if str(row['NAME']) == unit_name:
+                unit_name += '1'
+
+        pool_name = '%s_pool' % name
+
+        sql = "select * from oceanbase.DBA_OB_TENANTS where TENANT_NAME = %s"
+        tenant_exists = False
+        res = cursor.fetchone(sql, [name])
+        if res:
+            if create_if_not_exists:
+                continue
+            else:
+                error('Tenant %s already exists' % name)
+                return
+        elif res is False:
+            return
+        if not tenant_exists:
+            stdio.start_loading('Create tenant %s' % name)
+            zone_list = get_option('zone_list', set())
+            zone_obs_num = {}
+            sql = "select zone, count(*) num from oceanbase.__all_server where status = 'active' group by zone"
+            res = cursor.fetchall(sql)
+            if res is False:
+                error()
+                return
-        res = cursor.fetchall(sql)
-        if res is False:
-            error()
-            return
-        for row in res:
-            zone_obs_num[str(row['zone'])] = row['num']
-
-        if not zone_list:
-            zone_list = zone_obs_num.keys()
-        if isinstance(zone_list, str):
-            zones = zone_list.replace(';', ',').split(',')
-        else:
-            zones = zone_list
-        zone_list = "('%s')" % "','".join(zones)
-
-        min_unit_num = min(zone_obs_num.items(), key=lambda x: x[1])[1]
-        unit_num = get_option('unit_num', min_unit_num)
-        if unit_num > min_unit_num:
-            return error('resource pool unit num is bigger than zone server count')
-
-        sql = "select count(*) num from oceanbase.__all_server where status = 'active' and start_service_time > 0"
-        count = 30
-        while count:
-            num = cursor.fetchone(sql)
-            if num is False:
-                error()
-                return
-            num = num['num']
-            if num >= unit_num:
-                break
-            count -= 1
-            time.sleep(1)
-        if count == 0:
-            stdio.error(EC_OBSERVER_CAN_NOT_MIGRATE_IN)
-            return
-
-        sql = "SELECT * FROM oceanbase.GV$OB_SERVERS where zone in %s" % zone_list
-        servers_stats = cursor.fetchall(sql)
-        if servers_stats is False:
-            error()
-            return
-        cpu_available = servers_stats[0]['CPU_CAPACITY_MAX'] - servers_stats[0]['CPU_ASSIGNED_MAX']
-        mem_available = servers_stats[0]['MEM_CAPACITY'] - servers_stats[0]['MEM_ASSIGNED']
-        disk_available = servers_stats[0]['DATA_DISK_CAPACITY'] - servers_stats[0]['DATA_DISK_IN_USE']
-        log_disk_available = servers_stats[0]['LOG_DISK_CAPACITY'] - servers_stats[0]['LOG_DISK_ASSIGNED']
-        for servers_stat in servers_stats[1:]:
-            cpu_available = min(servers_stat['CPU_CAPACITY_MAX'] - servers_stat['CPU_ASSIGNED_MAX'], cpu_available)
-            mem_available = min(servers_stat['MEM_CAPACITY'] - servers_stat['MEM_ASSIGNED'], mem_available)
-            disk_available = min(servers_stat['DATA_DISK_CAPACITY'] - servers_stat['DATA_DISK_IN_USE'], disk_available)
-            log_disk_available = min(servers_stat['LOG_DISK_CAPACITY'] - servers_stat['LOG_DISK_ASSIGNED'], log_disk_available)
-
-        MIN_CPU = 1
-        MIN_MEMORY = 1073741824
-        MIN_LOG_DISK_SIZE = 2147483648
-        MIN_IOPS = 1024
-
-        if cpu_available < MIN_CPU:
-            return error('%s: resource not enough: cpu count less than %s' % (zone_list, MIN_CPU))
-        if mem_available < MIN_MEMORY:
-            return error('%s: resource not enough: memory less than %s' % (zone_list, format_size(MIN_MEMORY)))
-        if log_disk_available < MIN_LOG_DISK_SIZE:
-            return error('%s: resource not enough: log disk size less than %s' % (zone_list, format_size(MIN_MEMORY)))
-
-        # cpu options
-        max_cpu = get_option('max_cpu', cpu_available)
-        min_cpu = get_option('min_cpu', max_cpu)
-        if cpu_available < max_cpu:
-            return error('resource not enough: cpu (Avail: %s, Need: %s)' % (cpu_available, max_cpu))
-        if max_cpu < min_cpu:
-            return error('min_cpu must less then max_cpu')
-
-        # memory options
-        memory_size = get_parsed_option('memory_size', None)
-        log_disk_size = get_parsed_option('log_disk_size', None)
-
-        if memory_size is None:
-            memory_size = mem_available
-        if log_disk_size is None:
-            log_disk_size = log_disk_available
-
-        if mem_available < memory_size:
-            return error('resource not enough: memory (Avail: %s, Need: %s)' % (format_size(mem_available), format_size(memory_size)))
-
-        # log disk size options
-        if log_disk_size is not None and log_disk_available < log_disk_size:
-            return error('resource not enough: log disk space (Avail: %s, Need: %s)' % (format_size(disk_available), format_size(log_disk_size)))
-
-        # iops options
-        max_iops = get_option('max_iops', None)
-        min_iops = get_option('min_iops', None)
-        iops_weight = get_option('iops_weight', None)
-        if max_iops is not None and max_iops < MIN_IOPS:
-            return error('max_iops must greater than %d' % MIN_IOPS)
-        if max_iops is not None and min_iops is not None and max_iops < min_iops:
-            return error('min_iops must less then max_iops')
-
-        zone_num = len(zones)
-        charset = get_option('charset', '')
-        collate = get_option('collate', '')
-        replica_num = get_option('replica_num', zone_num)
-        logonly_replica_num = get_option('logonly_replica_num', 0)
-        tablegroup = get_option('tablegroup', '')
-        primary_zone = get_option('primary_zone', 'RANDOM')
-        locality = get_option('locality', '')
-        variables = get_option('variables', '')
-
-        if replica_num == 0:
-            replica_num = zone_num
-        elif replica_num > zone_num:
-            return error('replica_num cannot be greater than zone num (%s)' % zone_num)
-        if not primary_zone:
-            primary_zone = 'RANDOM'
-        if logonly_replica_num > replica_num:
-            return error('logonly_replica_num cannot be greater than replica_num (%s)' % replica_num)
-
-        # create resource unit
-        sql = "create resource unit %s max_cpu %.1f, memory_size %d" % (unit_name, max_cpu, memory_size)
-        if min_cpu is not None:
-            sql += ', min_cpu %.1f' % min_cpu
-        if max_iops is not None:
-            sql += ', max_iops %d' % max_iops
-        if min_iops is not None:
-            sql += ', min_iops %d' % min_iops
-        if iops_weight is not None:
-            sql += ', iops_weight %d' % iops_weight
-        if log_disk_size is not None:
-            sql += ', log_disk_size %d' % log_disk_size
-
-        res = cursor.execute(sql)
-        if res is False:
-            error()
-            return
+            for row in res:
+                zone_obs_num[str(row['zone'])] = row['num']
+
+            if not zone_list:
+                zone_list = zone_obs_num.keys()
+            if isinstance(zone_list, str):
+                zones = zone_list.replace(';', ',').split(',')
+            else:
+                zones = zone_list
+            zone_list = "('%s')" % "','".join(zones)
+
+            min_unit_num = min(zone_obs_num.items(), key=lambda x: x[1])[1]
+            unit_num = get_option('unit_num', min_unit_num)
+            if unit_num > min_unit_num:
+                return error('resource pool unit num is bigger than zone server count')
+
+            sql = "select count(*) num from oceanbase.__all_server where status = 'active' and start_service_time > 0"
+            count = 30
+            while count:
+                num = cursor.fetchone(sql)
+                if num is False:
+                    error()
+                    return
+                num = num['num']
+                if num >= unit_num:
+                    break
+                count -= 1
+                time.sleep(1)
+            if count == 0:
+                stdio.error(EC_OBSERVER_CAN_NOT_MIGRATE_IN)
+                return
+
+            sql = "SELECT * FROM oceanbase.GV$OB_SERVERS where zone in %s" % zone_list
+            servers_stats = cursor.fetchall(sql)
+            if servers_stats is False:
+                error()
+                return
+            cpu_available = servers_stats[0]['CPU_CAPACITY_MAX'] - servers_stats[0]['CPU_ASSIGNED_MAX']
+            mem_available = servers_stats[0]['MEM_CAPACITY'] - servers_stats[0]['MEM_ASSIGNED']
+            disk_available = servers_stats[0]['DATA_DISK_CAPACITY'] - servers_stats[0]['DATA_DISK_IN_USE']
+            log_disk_available = servers_stats[0]['LOG_DISK_CAPACITY'] - servers_stats[0]['LOG_DISK_ASSIGNED']
+            for servers_stat in servers_stats[1:]:
+                cpu_available = min(servers_stat['CPU_CAPACITY_MAX'] - servers_stat['CPU_ASSIGNED_MAX'], cpu_available)
+                mem_available = min(servers_stat['MEM_CAPACITY'] - servers_stat['MEM_ASSIGNED'], mem_available)
+                disk_available = min(servers_stat['DATA_DISK_CAPACITY'] - servers_stat['DATA_DISK_IN_USE'], disk_available)
+                log_disk_available = min(servers_stat['LOG_DISK_CAPACITY'] - servers_stat['LOG_DISK_ASSIGNED'], log_disk_available)
+
+            MIN_CPU = 1
+            MIN_MEMORY = 1073741824
+            MIN_LOG_DISK_SIZE = 2147483648
+            MIN_IOPS = 1024
+
+            if cpu_available < MIN_CPU:
+                return error('%s: resource not enough: cpu count less than %s' % (zone_list, MIN_CPU))
+            if mem_available < MIN_MEMORY:
+                return error('%s: resource not enough: memory less than %s' % (zone_list, Capacity(MIN_MEMORY)))
+            if log_disk_available < MIN_LOG_DISK_SIZE:
+                return error('%s: resource not enough: log disk size less than %s' % (zone_list, Capacity(MIN_LOG_DISK_SIZE)))
+
+            # cpu options
+            max_cpu = get_option('max_cpu', cpu_available)
+            min_cpu = get_option('min_cpu', max_cpu)
+            if cpu_available < max_cpu:
+                return error('resource not enough: cpu (Avail: %s, Need: %s)' % (cpu_available, max_cpu))
+            if max_cpu < min_cpu:
+                return error('min_cpu must be less than max_cpu')
+
+            # memory options
+            memory_size = get_parsed_option('memory_size', None)
+            log_disk_size = get_parsed_option('log_disk_size', None)
+
+            if memory_size is None:
+                memory_size = mem_available
+            if log_disk_size is None:
+                log_disk_size = log_disk_available
+
+            if mem_available < memory_size:
+                return error('resource not enough: memory (Avail: %s, Need: %s)' % (Capacity(mem_available), Capacity(memory_size)))
+
+            # log disk size options
+            if log_disk_size is not None and log_disk_available < log_disk_size:
+                return error('resource not enough: log disk space (Avail: %s, Need: %s)' % (Capacity(log_disk_available), Capacity(log_disk_size)))
+
+            # iops options
+            max_iops = get_option('max_iops', None)
+            min_iops = get_option('min_iops', None)
+            iops_weight = get_option('iops_weight', None)
+            if max_iops is not None and max_iops < MIN_IOPS:
+                return error('max_iops must be greater than %d' % MIN_IOPS)
+            if max_iops is not None and min_iops is not None and max_iops < min_iops:
+                return error('min_iops must be less than max_iops')
+
+            zone_num = len(zones)
+            charset = get_option('charset', '')
+            collate = get_option('collate', '')
+            replica_num = get_option('replica_num', zone_num)
+            logonly_replica_num = get_option('logonly_replica_num', 0)
+            tablegroup = get_option('tablegroup', '')
+            primary_zone = get_option('primary_zone', 'RANDOM')
+            locality = get_option('locality', '')
+            variables = get_option('variables', "ob_tcp_invited_nodes='%'")
+
+            if replica_num == 0:
+                replica_num = zone_num
+            elif replica_num > zone_num:
+                return error('replica_num cannot be greater than zone num (%s)' % zone_num)
+            if not primary_zone:
+                primary_zone = 'RANDOM'
+            if logonly_replica_num > replica_num:
+                return error('logonly_replica_num cannot be greater than replica_num (%s)' % replica_num)
+
+            # create resource unit
+            sql = "create resource unit %s max_cpu %.1f, memory_size %d" % (unit_name, max_cpu, memory_size)
+            if min_cpu is not None:
+                sql += ', min_cpu %.1f' % min_cpu
+            if max_iops is not None:
+                sql += ', max_iops %d' % max_iops
+            if min_iops is not None:
+                sql += ', min_iops %d' % min_iops
+            if iops_weight is not None:
+                sql += ', iops_weight %d' % iops_weight
+            if log_disk_size is not None:
+                sql += ', log_disk_size %d' % log_disk_size
+
+            res = cursor.execute(sql)
+            if res is False:
+                error()
+                return
-
-        # create resource pool
-        sql = "create resource pool %s unit='%s', unit_num=%d, zone_list=%s" % (pool_name, unit_name, unit_num, zone_list)
-        res = cursor.execute(sql)
-        if res is False:
-            error()
-            return
-
-        # create tenant
-        sql = "create tenant %s replica_num=%d,zone_list=%s,primary_zone='%s',resource_pool_list=('%s')"
-        sql = sql % (name, replica_num, zone_list, primary_zone, pool_name)
-        if charset:
-            sql += ", charset = '%s'" % charset
-        if collate:
-            sql += ", collate = '%s'" % collate
-        if logonly_replica_num:
-            sql += ", logonly_replica_num = %d" % logonly_replica_num
-        if tablegroup:
-            sql += ", default tablegroup ='%s'" % tablegroup
-        if locality:
-            sql += ", locality = '%s'" % locality
-
-        set_mode = "ob_compatibility_mode = '%s'" % mode
-        if variables:
-            sql += "set %s, %s" % (variables, set_mode)
-        else:
-            sql += "set %s" % set_mode
-        res = cursor.execute(sql)
-        if res is False:
-            error()
-            return
-    stdio.stop_loading('succeed')
-    database = get_option('database')
-    if database:
-        sql = 'create database {}'.format(database)
-        if not exec_sql_in_tenant(sql=sql, cursor=cursor, tenant=name, mode=mode) and not create_if_not_exists:
-            stdio.error('failed to create database {}'.format(database))
-            return
-
-    db_username = get_option('db_username')
-    db_password = get_option('db_password', '')
-    if db_username:
-        if mode == "mysql":
-            sql = """create user if not exists '{username}' IDENTIFIED BY '{password}';
-                grant all on *.* to '{username}' WITH GRANT OPTION;""".format(
-                username=db_username, password=db_password)
-        else:
-            # todo: fix oracle user create
-            sql = """create {username} IDENTIFIED BY {password};
-grant all on *.* to {username} WITH GRANT OPTION;
-grant dba to {username};
-grant all privileges to {username};"""
-        if not exec_sql_in_tenant(sql=sql, cursor=cursor, tenant=name, mode=mode):
-            stdio.error('failed to create user {}'.format(db_username))
-            return
-    return plugin_context.return_true()
\ No newline at end of file
+            # create resource pool
+            sql = "create resource pool %s unit='%s', unit_num=%d, zone_list=%s" % (pool_name, unit_name, unit_num, zone_list)
+            res = cursor.execute(sql)
+            if res is False:
+                error()
+                return
+
+            # create tenant
+            sql = "create tenant %s replica_num=%d,zone_list=%s,primary_zone='%s',resource_pool_list=('%s')"
+            sql = sql % (name, replica_num, zone_list, primary_zone, pool_name)
+            if charset:
+                sql += ", charset = '%s'" % charset
+            if collate:
+                sql += ", collate = '%s'" % collate
+            if logonly_replica_num:
+                sql += ", logonly_replica_num = %d" % logonly_replica_num
+            if tablegroup:
+                sql += ", default tablegroup ='%s'" % tablegroup
+            if locality:
+                sql += ", locality = '%s'" % locality
+
+            set_mode = "ob_compatibility_mode = '%s'" % mode
+            if variables:
+                sql += "set %s, %s" % (variables, set_mode)
+            else:
+                sql += "set %s" % set_mode
+            res = cursor.execute(sql)
+            if res is False:
+                error()
+                return
+        stdio.stop_loading('succeed')
+        root_password = get_option(name+'_root_password', "")
+        if root_password:
+            sql = "alter user root IDENTIFIED BY %s"
+            stdio.verbose(sql)
+            if not exec_sql_in_tenant(sql=sql, cursor=cursor, tenant=name, mode=mode, args=[root_password]) and not create_if_not_exists:
+                stdio.error('failed to set root@{} password'.format(name))
+                return
+        database = get_option('database')
+        if database:
+            sql = 'create database {}'.format(database)
+            if not exec_sql_in_tenant(sql=sql, cursor=cursor, tenant=name, mode=mode) and not create_if_not_exists:
+                stdio.error('failed to create database {}'.format(database))
+                return
+
+        db_username = get_option('db_username')
+        db_password = get_option('db_password', '')
+        if db_username:
+            if mode == "mysql":
+                sql = """create user if not exists '{username}' IDENTIFIED BY %s;
+                grant all on *.* to '{username}' WITH GRANT OPTION;""".format(
+                    username=db_username)
+            else:
+                error("Create user in oracle tenant is not supported")
+                return
+            if not exec_sql_in_tenant(sql=sql, cursor=cursor, tenant=name, mode=mode, args=[db_password]):
+                stdio.error('failed to create user {}'.format(db_username))
+                return
+    return plugin_context.return_true()
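Note: the args parameter threaded through exec_sql_in_tenant means passwords are now bound as statement parameters instead of being interpolated into the SQL text, which avoids quoting bugs and keeps secrets out of the statement string. A minimal illustration of the pattern, assuming a DB-API style cursor such as the tenant cursor used here:

    # Passwords travel as bound parameters, never as part of the SQL text.
    def create_user(cursor, username, password):
        # username is an identifier, so it still has to be formatted in;
        # the password goes through args and is quoted by the driver.
        sql = "create user if not exists '{username}' IDENTIFIED BY %s".format(username=username)
        cursor.execute(sql, [password])

    # create_user(tenant_cursor, 'monitor', "p@ss'word")  # quotes in the secret are safe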
diff --git a/plugins/oceanbase/4.0.0.0/file_map.yaml b/plugins/oceanbase/4.0.0.0/file_map.yaml
index bfe5236..eb867ff 100644
--- a/plugins/oceanbase/4.0.0.0/file_map.yaml
+++ b/plugins/oceanbase/4.0.0.0/file_map.yaml
@@ -2,6 +2,7 @@
   target_path: bin/observer
   type: bin
   mode: 755
+  require: oceanbase-ce-libs
 - src_path: ./home/admin/oceanbase/bin
   target_path: bin
   type: dir
diff --git a/plugins/oceanbase/4.0.0.0/generate_config.py b/plugins/oceanbase/4.0.0.0/generate_config.py
index 67c0893..431859c 100644
--- a/plugins/oceanbase/4.0.0.0/generate_config.py
+++ b/plugins/oceanbase/4.0.0.0/generate_config.py
@@ -27,38 +27,10 @@
 from _errno import EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE, EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED, EC_OBSERVER_GET_MEMINFO_FAIL
 import _errno as err
+from _types import Capacity
 from tool import ConfigUtil
 
 
-def parse_size(size):
-    _bytes = 0
-    if not isinstance(size, str) or size.isdigit():
-        _bytes = int(size)
-    else:
-        units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40}
-        match = re.match(r'(0|[1-9][0-9]*)\s*([B,K,M,G,T])', size.upper())
-        _bytes = int(match.group(1)) * units[match.group(2)]
-    return _bytes
-
-
-def format_size(size, precision=1):
-    units = ['B', 'K', 'M', 'G']
-    units_num = len(units) - 1
-    idx = 0
-    if precision:
-        div = 1024.0
-        format = '%.' + str(precision) + 'f%s'
-        limit = 1024
-    else:
-        div = 1024
-        limit = 1024
-        format = '%d%s'
-    while idx < units_num and size >= limit:
-        size /= div
-        idx += 1
-    return format % (size, units[idx])
-
-
 def get_system_memory(memory_limit, min_pool_memory, generate_config_mini):
     if generate_config_mini and memory_limit <= 6 << 30:
         system_memory = 1 << 30
@@ -177,7 +149,7 @@
         auto_set_min_pool_memory = False
         system_memory = 0
         if user_server_config.get('system_memory'):
-            system_memory = parse_size(user_server_config.get('system_memory'))
+            system_memory = Capacity(user_server_config.get('system_memory')).btyes
         if generate_config_mini and '__min_full_resource_pool_memory' not in user_server_config:
             auto_set_min_pool_memory = True
         min_pool_memory = server_config['__min_full_resource_pool_memory']
@@ -198,11 +170,11 @@
                 for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout):
                     if k in memory_key_map:
                         key = memory_key_map[k]
-                        server_memory_stats[key] = parse_size(str(v))
+                        server_memory_stats[key] = Capacity(str(v)).btyes
 
         if user_server_config.get('memory_limit_percentage'):
             if ip in ip_server_memory_info:
-                total_memory = parse_size(ip_server_memory_info[ip]['total'])
+                total_memory = Capacity(ip_server_memory_info[ip]['total']).btyes
                 memory_limit = int(total_memory * user_server_config.get('memory_limit_percentage') / 100)
             elif generate_check:
                 stdio.error(EC_OBSERVER_GET_MEMINFO_FAIL.format(server=server))
@@ -213,7 +185,7 @@
         elif not server_config.get('memory_limit'):
             if generate_config_mini:
                 memory_limit = MINI_MEMORY_SIZE
-                update_server_conf(server, 'memory_limit', format_size(memory_limit, 0))
+                update_server_conf(server, 'memory_limit', str(Capacity(memory_limit, 0)))
                 update_server_conf(server, 'production_mode', False)
                 if auto_set_min_pool_memory:
                     min_pool_memory = 1073741824
@@ -223,16 +195,16 @@
                 server_memory_stats = ip_server_memory_info[ip]
                 if generate_check:
                     if server_memory_stats['available'] < START_NEED_MEMORY:
-                        stdio.error(EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE.format(ip=ip, available=format_size(server_memory_stats['available']), need=format_size(START_NEED_MEMORY)))
+                        stdio.error(EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE.format(ip=ip, available=str(Capacity(server_memory_stats['available'])), need=str(Capacity(START_NEED_MEMORY))))
                         success = False
                        continue
 
                    if server_memory_stats['free'] + server_memory_stats['buffers'] + server_memory_stats['cached'] < MIN_MEMORY:
-                        stdio.error(EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED.format(ip=ip, free=format_size(server_memory_stats['free']), cached=format_size(server_memory_stats['buffers'] + server_memory_stats['cached']), need=format_size(MIN_MEMORY)))
+                        stdio.error(EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED.format(ip=ip, free=str(Capacity(server_memory_stats['free'])), cached=str(Capacity(server_memory_stats['buffers'] + server_memory_stats['cached'])), need=str(Capacity(MIN_MEMORY))))
                         success = False
                         continue
-                memory_limit = max(MIN_MEMORY, int(server_memory_stats['available'] * 0.9))
-                update_server_conf(server, 'memory_limit', format_size(memory_limit, 0))
+                memory_limit = max(MIN_MEMORY, int(server_memory_stats['available'] * 0.9))
+                update_server_conf(server, 'memory_limit', str(Capacity(memory_limit, 0)))
                 auto_set_memory = True
             elif generate_check:
                 stdio.error(EC_OBSERVER_GET_MEMINFO_FAIL.format(server=server))
@@ -241,12 +213,12 @@
             else:
                 memory_limit = MIN_MEMORY
         else:
-            memory_limit = parse_size(server_config.get('memory_limit'))
+            memory_limit = Capacity(server_config.get('memory_limit')).btyes
 
         if system_memory == 0:
             auto_set_system_memory = True
             system_memory = get_system_memory(memory_limit, min_pool_memory, generate_config_mini)
-            update_server_conf(server, 'system_memory', format_size(system_memory, 0))
+            update_server_conf(server, 'system_memory', str(Capacity(system_memory, 0)))
 
         # cpu
         if not server_config.get('cpu_count'):
@@ -262,8 +234,8 @@
             stdio.warn('(%s): automatically adjust the cpu_count %s' % (server, MIN_CPU_COUNT))
 
         # disk
-        datafile_size = parse_size(server_config.get('datafile_size', 0))
-        log_disk_size = parse_size(server_config.get('log_disk_size', 0))
+        datafile_size = Capacity(server_config.get('datafile_size', 0)).btyes
+        log_disk_size = Capacity(server_config.get('log_disk_size', 0)).btyes
         if not server_config.get('datafile_size') or not server_config.get('log_disk_size'):
             disk = {'/': 0}
             ret = client.execute_command('df --block-size=1024')
@@ -315,17 +287,17 @@
                     datafile_size = data_dir_disk['total'] * datafile_disk_percentage / 100
                 elif generate_config_mini:
                     datafile_size = MINI_DATA_FILE_SIZE
-                    update_server_conf(server, 'datafile_size', format_size(datafile_size, 0))
+                    update_server_conf(server, 'datafile_size', str(Capacity(datafile_size, 0)))
                 else:
                     auto_set_datafile_size = True
 
             if not log_disk_size:
                 log_disk_percentage = int(user_server_config.get('log_disk_percentage', 0))
                 if log_disk_percentage:
-                    log_disk_size = clog_dir_disk['total'] * log_disk_percentage / 100
+                    log_disk_size = int(clog_dir_disk['total'] * log_disk_percentage / 100)
                 elif generate_config_mini:
                     log_disk_size = MINI_LOG_DISK_SIZE
-                    update_server_conf(server, 'log_disk_size', format_size(log_disk_size, 0))
+                    update_server_conf(server, 'log_disk_size', str(Capacity(log_disk_size, 0)))
                 else:
                     auto_set_log_disk_size = True
 
@@ -342,19 +314,19 @@
                         MIN_NEED += min_memory * 3
                     else:
                         min_datafile_size = datafile_size
-                        MIN_NEED += datafile_size
+                        MIN_NEED += Capacity(datafile_size).btyes
                     if auto_set_log_disk_size:
                         min_log_disk_size = memory_limit * 3
                         MIN_NEED += min_memory * 3
                     else:
                         min_log_disk_size = log_disk_size
-                        MIN_NEED += log_disk_size
-                    min_need = min_log_size + min_datafile_size + min_log_disk_size
+                        MIN_NEED += Capacity(min_log_disk_size).btyes
+                    min_need = min_log_size + Capacity(min_datafile_size).btyes + Capacity(min_log_disk_size).btyes
 
                     disk_free = data_dir_disk['avail']
                     if MIN_NEED > disk_free:
                         if generate_check:
-                            stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=format_size(disk_free), need=format_size(MIN_NEED)))
+                            stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=str(Capacity(disk_free)), need=str(Capacity(MIN_NEED))))
                             success = False
                             continue
                         else:
@@ -364,13 +336,13 @@
                                 log_disk_size = MIN_MEMORY * 3
                             if auto_set_memory:
                                 memory_limit = MIN_MEMORY
-                                update_server_conf(server, 'memory_limit', format_size(memory_limit, 0))
+                                update_server_conf(server, 'memory_limit', str(Capacity(memory_limit, 0)))
                             if auto_set_system_memory:
                                 system_memory = get_system_memory(memory_limit, min_pool_memory, generate_config_mini)
-                                update_server_conf(server, 'system_memory', format_size(system_memory, 0))
+                                update_server_conf(server, 'system_memory', str(Capacity(system_memory, 0)))
                     elif min_need > disk_free:
                         if generate_check and not auto_set_memory:
-                            stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=format_size(disk_free), need=format_size(min_need)))
+                            stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=str(Capacity(disk_free)), need=str(Capacity(min_need))))
                             success = False
                             continue
 
@@ -382,12 +354,12 @@
                         if auto_set_log_disk_size is False:
                             disk_free -= min_log_disk_size
                             memory_factor -= 3
-                        memory_limit = format_size(disk_free / max(1, memory_factor), 0)
+                        memory_limit = str(Capacity(disk_free / max(1, memory_factor), 0))
                         update_server_conf(server, 'memory_limit', memory_limit)
-                        memory_limit = parse_size(memory_limit)
+                        memory_limit = Capacity(memory_limit).btyes
                         if auto_set_system_memory:
                             system_memory = get_system_memory(memory_limit, min_pool_memory, generate_config_mini)
-                            update_server_conf(server, 'system_memory', format_size(system_memory, 0))
+                            update_server_conf(server, 'system_memory', str(Capacity(system_memory, 0)))
                         log_disk_size = memory_limit * 3
                         datafile_size = max(disk_free - log_disk_size, log_disk_size)
                     else:
@@ -395,9 +367,9 @@
                         datafile_size = max(disk_free - log_size - SLOG_SIZE - log_disk_size, log_disk_size)
 
                     if auto_set_datafile_size:
-                        update_server_conf(server, 'datafile_size', format_size(datafile_size, 0))
+                        update_server_conf(server, 'datafile_size', str(Capacity(datafile_size, 0)))
                     if auto_set_log_disk_size:
-                        update_server_conf(server, 'log_disk_size', format_size(log_disk_size, 0))
+                        update_server_conf(server, 'log_disk_size', str(Capacity(log_disk_size, 0)))
                 else:
                     datafile_min_memory_limit = memory_limit
                     if auto_set_datafile_size:
@@ -407,15 +379,15 @@
                         min_need = min_log_size + datafile_size + SLOG_SIZE
                         if generate_check and min_need > disk_free:
                             if not auto_set_memory:
-                                stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=format_size(disk_free), need=format_size(min_need)))
+                                stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=str(Capacity(disk_free)), need=str(Capacity(min_need))))
                                 success = False
                                 continue
                             datafile_min_memory_limit = (disk_free - log_size - SLOG_SIZE) / 3
                             if datafile_min_memory_limit < min_memory:
-                                stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=format_size(disk_free), need=format_size(min_need)))
+                                stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=str(Capacity(disk_free)), need=str(Capacity(min_need))))
                                 success = False
                                 continue
-                        datafile_min_memory_limit = parse_size(format_size(datafile_min_memory_limit, 0))
+                        datafile_min_memory_limit = Capacity(str(Capacity(datafile_min_memory_limit, 0))).btyes
                     datafile_size = datafile_min_memory_limit * 3
 
                     log_disk_min_memory_limit = memory_limit
@@ -426,27 +398,27 @@
                         min_need = min_log_size + log_disk_size
                         if generate_check and min_need > disk_free:
                             if not auto_set_memory:
-                                stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=format_size(disk_free), need=format_size(min_need)))
+                                stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=str(Capacity(disk_free)), need=str(Capacity(min_need))))
need=str(Capacity(min_need)))) success = False continue log_disk_min_memory_limit = (disk_free - log_size) / 3 if log_disk_min_memory_limit < min_memory: - stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=format_size(disk_free), need=format_size(min_need))) + stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=str(Capacity(disk_free)), need=str(Capacity(min_need)))) success = False continue - log_disk_min_memory_limit = parse_size(format_size(log_disk_min_memory_limit, 0)) + log_disk_min_memory_limit = Capacity(str(Capacity(log_disk_min_memory_limit, 0))).btyes log_disk_size = log_disk_min_memory_limit * 3 if auto_set_memory: - update_server_conf(server, 'memory_limit', format_size(memory_limit, 0)) + update_server_conf(server, 'memory_limit', str(Capacity(memory_limit, 0))) if auto_set_system_memory: system_memory = get_system_memory(memory_limit, min_pool_memory, generate_config_mini) update_server_conf(server, 'system_memory', system_memory) if auto_set_datafile_size: - update_server_conf(server, 'datafile_size', format_size(datafile_size, 0)) + update_server_conf(server, 'datafile_size', str(Capacity(datafile_size, 0))) if auto_set_log_disk_size: - update_server_conf(server, 'log_disk_size', format_size(log_disk_size, 0)) + update_server_conf(server, 'log_disk_size', str(Capacity(log_disk_size, 0))) if memory_limit < PRO_MEMORY_MIN: update_server_conf(server, 'production_mode', False) @@ -479,10 +451,10 @@ def summit_config(): server_info = servers_info.get(server) if not server_info: continue - memory_limit = server_info['memory_limit'] - system_memory = server_info['system_memory'] - log_disk_size = server_info['log_disk_size'] - min_pool_memory = server_info['min_pool_memory'] + memory_limit = Capacity(server_info['memory_limit']).btyes + system_memory = Capacity(server_info['system_memory']).btyes + log_disk_size = Capacity(server_info['log_disk_size']).btyes + min_pool_memory = Capacity(server_info['min_pool_memory']).btyes if not sys_log_disk_size: if not sys_memory_size: sys_memory_size = max(min_pool_memory, min(int((memory_limit - system_memory) * 0.25), 16 << 30)) @@ -493,7 +465,7 @@ def summit_config(): if expect_log_disk_size > max_available and generate_check: stdio.error(err.EC_OCP_EXPRESS_META_DB_NOT_ENOUGH_LOG_DISK_AVAILABLE.format(avail=max_available, need=expect_log_disk_size)) success = False - cluster_config.update_global_conf('ocp_meta_tenant_log_disk_size', format_size(expect_log_disk_size, 0), save=False) + cluster_config.update_global_conf('ocp_meta_tenant_log_disk_size', str(Capacity(expect_log_disk_size, 0)), save=False) if generate_config_mini and 'ocp_meta_tenant_memory_size' not in global_config and 'memory_size' not in global_config.get('ocp_meta_tenant', {}): update_global_conf('ocp_meta_tenant_memory_size', '1536M') @@ -512,12 +484,12 @@ def summit_config(): if key in generate_configs.get(server, {}): value = generate_configs[server][key] servers.append(server) - values.append(parse_size(value) if is_capacity_key else value) + values.append(Capacity(value).btyes if is_capacity_key else value) if values: if len(values) != server_num and key in generate_global_config: continue value = min(values) - generate_global_config[key] = format_size(value, 0) if is_capacity_key else value + generate_global_config[key] = str(Capacity(value, 0)) if is_capacity_key else value for server in servers: del generate_configs[server][key]
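A sketch of the Capacity interface these hunks migrate to may help when reading the call sites. The class itself is not part of this patch; the btyes spelling mirrors the property name used at every call site above, while the constructor and precision semantics are assumptions inferred from usage, not the actual _types implementation:

    import re

    class Capacity(object):
        # Sketch only: parse '64G' or 68719476736 into bytes; str() renders a unit suffix.
        UNITS = {'B': 1, 'K': 1 << 10, 'M': 1 << 20, 'G': 1 << 30, 'T': 1 << 40, 'P': 1 << 50}

        def __init__(self, size, precision=1):
            self.precision = precision
            if isinstance(size, str) and not size.strip().isdigit():
                match = re.match(r'^(0|[1-9][0-9]*)\s*([BKMGTP])$', size.strip().upper())
                self._bytes = int(match.group(1)) * self.UNITS[match.group(2)]
            else:
                self._bytes = int(size)

        @property
        def btyes(self):  # property name spelled as in the patch
            return self._bytes

        def __str__(self):
            size, idx, units = float(self._bytes), 0, 'BKMGTP'
            while idx < len(units) - 1 and size >= 1024:
                size /= 1024.0
                idx += 1
            fmt = '%d%s' if not self.precision else '%%.%df%%s' % self.precision
            return fmt % (size, units[idx])

    assert Capacity('64G').btyes == 64 << 30    # parse, replacing parse_size()
    assert str(Capacity(64 << 30, 0)) == '64G'  # format, replacing format_size(size, 0)
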
diff --git a/plugins/oceanbase/4.0.0.0/list_tenant.py b/plugins/oceanbase/4.0.0.0/list_tenant.py index cd03539..86ad9a6 100644 --- a/plugins/oceanbase/4.0.0.0/list_tenant.py +++ b/plugins/oceanbase/4.0.0.0/list_tenant.py @@ -20,35 +20,7 @@ from __future__ import absolute_import, division, print_function -import re - - -def parse_size(size): - _bytes = 0 - if isinstance(size, str): - size = size.strip() - if not isinstance(size, str) or size.isdigit(): - _bytes = int(size) - else: - units = {"B": 1, "K": 1 << 10, "M": 1 << 20, "G": 1 << 30, "T": 1 << 40} - match = re.match(r'^([1-9][0-9]*)\s*([B,K,M,G,T])$', size.upper()) - _bytes = int(match.group(1)) * units[match.group(2)] - return _bytes - - -def format_size(size, precision=1): - units = ['B', 'K', 'M', 'G', 'T', 'P'] - idx = 0 - if precision: - div = 1024.0 - format = '%.' + str(precision) + 'f%s' - else: - div = 1024 - format = '%d%s' - while idx < 5 and size >= 1024: - size /= 1024.0 - idx += 1 - return format % (size, units[idx]) +from _types import Capacity def list_tenant(plugin_context, cursor, *args, **kwargs): @@ -82,8 +54,8 @@ def list_tenant(plugin_context, cursor, *args, **kwargs): 'min_cpu', 'memory_size', 'max_iops', 'min_iops', 'log_disk_size', 'iops_weight'], lambda x: [x['TENANT_NAME'], x['TENANT_TYPE'], x['COMPATIBILITY_MODE'], x['PRIMARY_ZONE'], - x['MAX_CPU'], x['MIN_CPU'], format_size(x['MEMORY_SIZE']), x['MAX_IOPS'], x['MIN_IOPS'], - format_size(x['LOG_DISK_SIZE']), x['IOPS_WEIGHT']], + x['MAX_CPU'], x['MIN_CPU'], str(Capacity(x['MEMORY_SIZE'])), x['MAX_IOPS'], x['MIN_IOPS'], + str(Capacity(x['LOG_DISK_SIZE'])), x['IOPS_WEIGHT']], title='tenant') stdio.stop_loading('succeed') return plugin_context.return_true() diff --git a/plugins/oceanbase/4.0.0.0/parameter.yaml b/plugins/oceanbase/4.0.0.0/parameter.yaml index 680ef8c..f775d63 100644 --- a/plugins/oceanbase/4.0.0.0/parameter.yaml +++ b/plugins/oceanbase/4.0.0.0/parameter.yaml @@ -1905,3 +1905,61 @@ need_redeploy: true description_en: The password for obagent monitor user description_local: obagent 监控用户的密码 +- name: ocp_monitor_tenant + require: false + type: DICT + default: + tenant_name: ocp_monitor + max_cpu: 1 + memory_size: 2147483648 + need_redeploy: true + description_en: The tenant specifications for ocp monitor db + description_local: ocp 的监控数据库使用的租户定义 +- name: ocp_monitor_tenant_max_cpu + name_local: OCP 监控数据库租户的CPU数 + essential: true + require: false + type: INT + default: 1 + need_redeploy: true + description_en: The tenant cpu count for ocp monitor db + description_local: ocp 监控数据库使用的CPU数量 +- name: ocp_monitor_tenant_memory_size + name_local: OCP 监控数据库租户内存 + essential: true + require: false + type: CAPACITY_MB + default: 2G + need_redeploy: true + description_en: The tenant memory size for ocp monitor db + description_local: ocp 监控数据库使用的租户内存大小 +- name: ocp_monitor_tenant_log_disk_size + name_local: OCP 监控数据库租户日志磁盘大小 + essential: true + require: false + type: CAPACITY_MB + default: 6656M + need_redeploy: true + description_en: The tenant log disk size for ocp monitor db + description_local: ocp 监控数据库使用的租户日志磁盘大小 +- name: ocp_monitor_db + require: false + type: SAFE_STRING + default: ocp_monitor + need_redeploy: true + description_en: The database name for ocp monitor db + description_local: ocp 的监控数据库使用的数据库名 +- name: ocp_monitor_username + require: false + type: SAFE_STRING + default: monitor + need_redeploy: true + description_en: The user name for ocp monitor db + description_local: ocp 的监控数据库使用的用户名 +- name: ocp_monitor_password + require: false + type: STRING + default: oceanbase + need_redeploy: true +
description_en: The password for ocp monitor db + description_local: ocp 的监控数据库使用的密码 diff --git a/plugins/oceanbase/4.0.0.0/requirement.yaml b/plugins/oceanbase/4.0.0.0/requirement.yaml new file mode 100644 index 0000000..d74b386 --- /dev/null +++ b/plugins/oceanbase/4.0.0.0/requirement.yaml @@ -0,0 +1,2 @@ +oceanbase-ce-libs: + version: $version diff --git a/plugins/oceanbase/4.0.0.0/start.py b/plugins/oceanbase/4.0.0.0/start.py index d06ae7d..8b0492b 100644 --- a/plugins/oceanbase/4.0.0.0/start.py +++ b/plugins/oceanbase/4.0.0.0/start.py @@ -174,6 +174,7 @@ def start(plugin_context, *args, **kwargs): not_cmd_opt = [ 'home_path', 'obconfig_url', 'root_password', 'proxyro_password', 'redo_dir', 'clog_dir', 'ilog_dir', 'slog_dir', '$_zone_idc', 'production_mode', + 'ocp_monitor_tenant', 'ocp_monitor_username', 'ocp_monitor_password', 'ocp_monitor_db', 'ocp_meta_tenant', 'ocp_meta_username', 'ocp_meta_password', 'ocp_meta_db', 'ocp_agent_monitor_password', 'ocp_root_password' ] get_value = lambda key: "'%s'" % server_config[key] if isinstance(server_config[key], str) else server_config[key] diff --git a/plugins/oceanbase/4.0.0.0/start_check.py b/plugins/oceanbase/4.0.0.0/start_check.py index 6159663..5d21e95 100644 --- a/plugins/oceanbase/4.0.0.0/start_check.py +++ b/plugins/oceanbase/4.0.0.0/start_check.py @@ -27,6 +27,7 @@ from math import sqrt import _errno as err +from _types import Capacity stdio = None @@ -43,26 +44,6 @@ def get_port_socket_inode(client, port): return res.stdout.strip().split('\n') -def parse_size(size): - _bytes = 0 - if not isinstance(size, str) or size.isdigit(): - _bytes = int(size) - else: - units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40} - match = re.match(r'(0|[1-9][0-9]*)\s*([B,K,M,G,T])', size.upper()) - _bytes = int(match.group(1)) * units[match.group(2)] - return _bytes - - -def format_size(size): - units = ['B', 'K', 'M', 'G', 'T', 'P'] - idx = 0 - while idx < 5 and size >= 1024: - size /= 1024.0 - idx += 1 - return '%.1f%s' % (size, units[idx]) - - def time_delta(client): time_st = time.time() * 1000 time_srv = int(client.execute_command('date +%s%N').stdout) / 1000000 @@ -383,12 +364,12 @@ def system_memory_check(): memory_limit = 0 percentage = 0 if server_config.get('memory_limit'): - memory_limit = parse_size(server_config['memory_limit']) + memory_limit = Capacity(server_config['memory_limit']).btyes if production_mode and memory_limit < PRO_MEMORY_MIN: - error('mem', err.EC_OBSERVER_PRODUCTION_MODE_LIMIT.format(server=server, key='memory_limit', limit=format_size(PRO_MEMORY_MIN)), [err.SUB_SET_NO_PRODUCTION_MODE.format()]) + error('mem', err.EC_OBSERVER_PRODUCTION_MODE_LIMIT.format(server=server, key='memory_limit', limit=str(Capacity(PRO_MEMORY_MIN))), [err.SUB_SET_NO_PRODUCTION_MODE.format()]) memory['num'] += memory_limit elif 'memory_limit_percentage' in server_config: - percentage = int(parse_size(server_config['memory_limit_percentage'])) + percentage = server_config['memory_limit_percentage'] memory['percentage'] += percentage else: percentage = 80 @@ -396,7 +377,7 @@ def system_memory_check(): memory['servers'][server] = { 'num': memory_limit, 'percentage': percentage, - 'system_memory': parse_size(server_config.get('system_memory', 0)) + 'system_memory': Capacity(server_config.get('system_memory', 0)).btyes } data_path = server_config['data_dir'] if server_config.get('data_dir') else os.path.join(server_config['home_path'], 'store') @@ -405,14 +386,14 @@ def system_memory_check(): if not client.execute_command('ls 
%s/sstable/block_file' % data_path): disk[data_path] = {'server': server} clog_mount[clog_dir] = {'server': server} - if 'datafile_size' in server_config and server_config['datafile_size'] and parse_size(server_config['datafile_size']): + if 'datafile_size' in server_config and server_config['datafile_size']: # if need is string, it means use datafile_size disk[data_path]['need'] = server_config['datafile_size'] elif 'datafile_disk_percentage' in server_config and server_config['datafile_disk_percentage']: # if need is integer, it means use datafile_disk_percentage disk[data_path]['need'] = int(server_config['datafile_disk_percentage']) - if 'log_disk_size' in server_config and server_config['log_disk_size'] and parse_size(server_config['log_disk_size']): + if 'log_disk_size' in server_config and server_config['log_disk_size']: # if need is string, it means use log_disk_size clog_mount[clog_dir]['need'] = server_config['log_disk_size'] elif 'log_disk_percentage' in server_config and server_config['log_disk_percentage']: @@ -571,15 +552,15 @@ def system_memory_check(): for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout): if k in memory_key_map: key = memory_key_map[k] - server_memory_stats[key] = parse_size(str(v)) + server_memory_stats[key] = Capacity(str(v)).btyes ip_server_memory_info[ip] = server_memory_stats server_memory_stat = servers_memory[ip] min_start_need = server_num * START_NEED_MEMORY - total_use = server_memory_stat['percentage'] * server_memory_stats['total'] / 100 + server_memory_stat['num'] + total_use = int(server_memory_stat['percentage'] * server_memory_stats['total'] / 100 + server_memory_stat['num']) if min_start_need > server_memory_stats['available']: for server in ip_servers: - error('mem', err.EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE.format(ip=ip, available=format_size(server_memory_stats['available']), need=format_size(min_start_need)), [err.SUG_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE.format(ip=ip)]) + error('mem', err.EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE.format(ip=ip, available=str(Capacity(server_memory_stats['available'])), need=str(Capacity(min_start_need))), [err.SUG_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE.format(ip=ip)]) elif total_use > server_memory_stats['free'] + server_memory_stats['buffers'] + server_memory_stats['cached']: for server in ip_servers: server_generate_config = generate_configs.get(server, {}) @@ -589,11 +570,11 @@ def system_memory_check(): if key in global_generate_config or key in server_generate_config: suggest.auto_fix = False break - error('mem', err.EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED.format(ip=ip, free=format_size(server_memory_stats['free']), cached=format_size(server_memory_stats['buffers'] + server_memory_stats['cached']), need=format_size(total_use)), [suggest]) + error('mem', err.EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED.format(ip=ip, free=str(Capacity(server_memory_stats['free'])), cached=str(Capacity(server_memory_stats['buffers'] + server_memory_stats['cached'])), need=str(Capacity(total_use))), [suggest]) elif total_use > server_memory_stats['free']: system_memory_check() for server in ip_servers: - alert('mem', err.EC_OBSERVER_NOT_ENOUGH_MEMORY.format(ip=ip, free=format_size(server_memory_stats['free']), need=format_size(total_use)), [err.SUG_OBSERVER_REDUCE_MEM.format()]) + alert('mem', err.EC_OBSERVER_NOT_ENOUGH_MEMORY.format(ip=ip, free=str(Capacity(server_memory_stats['free'])), need=str(Capacity(total_use))),
[err.SUG_OBSERVER_REDUCE_MEM.format()]) else: system_memory_check() @@ -619,7 +600,7 @@ def system_memory_check(): # slog need 10G disk[mount_path]['need'] += max(disk[mount_path]['total'] - slog_size, 0) * need / 100 else: - disk[mount_path]['need'] += parse_size(need) + disk[mount_path]['need'] += Capacity(need).btyes disk[mount_path]['need'] += slog_size disk[mount_path]['is_data_disk'] = True @@ -639,7 +620,7 @@ def system_memory_check(): log_disk_size = disk[mount_path]['total'] * need / 100 else: # log_disk_size - log_disk_size = parse_size(need) + log_disk_size = Capacity(need).btyes servers_log_disk_size[servers_clog_mount[ip][path]['server']] = log_disk_size disk[mount_path]['need'] += log_disk_size disk[mount_path]['is_clog_disk'] = True @@ -679,7 +660,7 @@ def system_memory_check(): break tmp_suggests.append(suggest) tmp_suggests = sorted(tmp_suggests, key=lambda suggest: suggest.auto_fix, reverse=True) - critical('disk', err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=p, avail=format_size(avail), need=format_size(need)), tmp_suggests + suggests) + critical('disk', err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=p, avail=str(Capacity(avail)), need=str(Capacity(need))), tmp_suggests + suggests) global_conf = cluster_config.get_global_conf() has_ocp = 'ocp-express' in plugin_context.components @@ -692,7 +673,7 @@ def system_memory_check(): for key in global_conf_with_default: if key.startswith(ocp_meta_tenant_prefix) and original_global_conf.get(key, None): global_conf_with_default['ocp_meta_tenant'][key.replace(ocp_meta_tenant_prefix, '', 1)] = global_conf_with_default[key] - meta_db_memory_size = parse_size(global_conf_with_default['ocp_meta_tenant'].get('memory_size')) + meta_db_memory_size = Capacity(global_conf_with_default['ocp_meta_tenant'].get('memory_size')).btyes servers_sys_memory = {} if meta_db_memory_size: sys_memory_size = None @@ -708,7 +689,7 @@ def system_memory_check(): if system_memory == 0: system_memory = get_system_memory(memory_limit, min_pool_memory) if not sys_memory_size: - sys_memory_size = servers_sys_memory[server] = max(min_pool_memory, min((memory_limit - system_memory) * 0.25, parse_size('16G'))) + sys_memory_size = servers_sys_memory[server] = max(min_pool_memory, min((memory_limit - system_memory) * 0.25, Capacity('16G').btyes)) if meta_db_memory_size + system_memory + sys_memory_size <= memory_limit: break else: @@ -719,7 +700,7 @@ def system_memory_check(): error('ocp meta db', err.EC_OCP_EXPRESS_META_DB_NOT_ENOUGH_MEM.format(), [suggest]) meta_db_log_disk_size = global_conf_with_default['ocp_meta_tenant'].get('log_disk_size') - meta_db_log_disk_size = parse_size(meta_db_log_disk_size) if meta_db_log_disk_size else meta_db_log_disk_size + meta_db_log_disk_size = Capacity(meta_db_log_disk_size).btyes if meta_db_log_disk_size else meta_db_log_disk_size if not meta_db_log_disk_size and meta_db_memory_size: meta_db_log_disk_size = meta_db_memory_size * 3 if meta_db_log_disk_size: diff --git a/plugins/oceanbase/4.0.0.0/upgrade_route.py b/plugins/oceanbase/4.0.0.0/upgrade_route.py index 1ac3de1..ebf3c02 100644 --- a/plugins/oceanbase/4.0.0.0/upgrade_route.py +++ b/plugins/oceanbase/4.0.0.0/upgrade_route.py @@ -35,7 +35,7 @@ def __init__(self, version, deprecated = False, require_from_binary = False): version = version.split('_') release = version[1] if len(version) > 1 else self.RELEASE_NULL version = version[0] - super(VersionNode, self).__init__('', version, release, '', md5) + super(VersionNode, self).__init__('', version, release, '', md5, 
0) self.next = [] self.can_be_upgraded_to = [] self.can_be_upgraded_to = [] @@ -196,7 +196,7 @@ def upgrade_route(plugin_context, current_repository, dest_repository, *args, ** stdio = plugin_context.stdio repository_dir = dest_repository.repository_dir - if dest_repository.version >= Version("4.3"): + if dest_repository.version >= Version("4.4"): stdio.error('upgrade observer to version {} is not support, please upgrade obd first.'.format(dest_repository.version)) return diff --git a/plugins/oceanbase/4.2.0.0/create_standby_tenant_pre.py b/plugins/oceanbase/4.2.0.0/create_standby_tenant_pre.py index 62f20a9..a297c5a 100644 --- a/plugins/oceanbase/4.2.0.0/create_standby_tenant_pre.py +++ b/plugins/oceanbase/4.2.0.0/create_standby_tenant_pre.py @@ -21,7 +21,7 @@ from collections import defaultdict from tool import ConfigUtil - +from _stdio import FormtatText tenant_cursor_cache = defaultdict(dict) @@ -82,12 +82,12 @@ def get_option(key, default=''): if not mode in ['mysql', 'oracle']: error('No such tenant mode: %s.\n--mode must be `mysql` or `oracle`' % mode) return - + primary_cluster_config = cluster_configs.get(primary_deploy_name) if not primary_cluster_config: stdio.error('No such deploy: %s.' % primary_deploy_name) return False - + root_password = get_option('tenant_root_password', '') standbyro_password_input = get_option('standbyro_password', None) # check standbyro_password @@ -142,8 +142,9 @@ def get_option(key, default=''): # check primary tenant have full log sql = 'select MAX(BEGIN_LSN) as max_begin_lsn from oceanbase.GV$OB_LOG_STAT as a WHERE a.tenant_id =%s' res = primary_cursor.fetchone(sql, (primary_tenant_info['tenant_id'], )) - if not res: + if not res or res['max_begin_lsn'] is None: error('Check primary tenant have full log failed.') + stdio.print(FormtatText.success('Please try again in a moment.')) return if res['max_begin_lsn'] > 0: error('Primary cluster have not full log, not support create standby cluster.') @@ -201,7 +202,7 @@ def get_option(key, default=''): if not primary_cluster_config.update_component_attr('standbyro_password', standbyro_password_dict, save=True): error('Dump standbyro password failed.') return - + plugin_context.set_variable('standbyro_password', standbyro_password) stdio.stop_loading('succeed') return plugin_context.return_true() diff --git a/plugins/oceanbase/4.2.0.0/create_tenant.py b/plugins/oceanbase/4.2.0.0/create_tenant.py index 00cfbf1..f1b288c 100644 --- a/plugins/oceanbase/4.2.0.0/create_tenant.py +++ b/plugins/oceanbase/4.2.0.0/create_tenant.py @@ -20,45 +20,18 @@ from __future__ import absolute_import, division, print_function -import re import time from collections import defaultdict from copy import deepcopy from _errno import EC_OBSERVER_CAN_NOT_MIGRATE_IN - -tenant_cursor_cache = defaultdict(dict) +from _types import Capacity -def parse_size(size): - _bytes = 0 - if isinstance(size, str): - size = size.strip() - if not isinstance(size, str) or size.isdigit(): - _bytes = int(size) - else: - units = {"B": 1, "K": 1 << 10, "M": 1 << 20, "G": 1 << 30, "T": 1 << 40} - match = re.match(r'^(0|[1-9][0-9]*)\s*([B,K,M,G,T])$', size.upper()) - _bytes = int(match.group(1)) * units[match.group(2)] - return _bytes - - -def format_size(size, precision=1): - units = ['B', 'K', 'M', 'G', 'T', 'P'] - idx = 0 - if precision: - div = 1024.0 - format = '%.' 
+ str(precision) + 'f%s' - else: - div = 1024 - format = '%d%s' - while idx < 5 and size >= 1024: - size /= 1024.0 - idx += 1 - return format % (size, units[idx]) +tenant_cursor_cache = defaultdict(dict) -def exec_sql_in_tenant(sql, cursor, tenant, mode, user='', password='', print_exception=True, retries=20): +def exec_sql_in_tenant(sql, cursor, tenant, mode, user='', password='', print_exception=True, retries=20, args=[]): if not user: user = 'SYS' if mode == 'oracle' else 'root' # find tenant ip, port @@ -79,8 +52,8 @@ def exec_sql_in_tenant(sql, cursor, tenant, mode, user='', password='', print_ex break if not tenant_cursor and retries: time.sleep(1) - return exec_sql_in_tenant(sql, cursor, tenant, mode, user, password, print_exception=print_exception, retries=retries-1) - return tenant_cursor.execute(sql, raise_exception=False, exc_level='verbose') if tenant_cursor else False + return exec_sql_in_tenant(sql, cursor, tenant, mode, user, password, print_exception=print_exception, retries=retries-1, args=args) + return tenant_cursor.execute(sql, args=args, raise_exception=False, exc_level='verbose') if tenant_cursor else False def dump_standby_relation(relation_tenants, cluster_configs, dump_relation_tenants, stdio): @@ -129,7 +102,7 @@ def dump_standbyro_password(deploy_name, tenant_name, standbyro_password, cluste return True -def create_tenant(plugin_context, cursor = None, create_tenant_options=None, relation_tenants={}, cluster_configs={}, primary_tenant_info={}, standbyro_password='', *args, **kwargs): +def create_tenant(plugin_context, cursor = None, create_tenant_options=[], relation_tenants={}, cluster_configs={}, primary_tenant_info={}, standbyro_password='', *args, **kwargs): def get_option(key, default=''): value = getattr(options, key, default) if not value: @@ -141,7 +114,7 @@ def get_parsed_option(key, default=''): if value is None: return value try: - parsed_value = parse_size(value) + parsed_value = Capacity(value).btyes except: stdio.exception("") raise Exception("Invalid option {}: {}".format(key, value)) @@ -151,400 +124,395 @@ def error(msg='', *arg, **kwargs): msg and stdio.error(msg, *arg, **kwargs) stdio.stop_loading('fail') - cluster_config = plugin_context.cluster_config stdio = plugin_context.stdio - options = create_tenant_options if create_tenant_options else plugin_context.options - create_if_not_exists = get_option('create_if_not_exists', False) - standby_deploy_name = plugin_context.cluster_config.deploy_name - cursor = plugin_context.get_return('connect').get_return('cursor') if not cursor else cursor - cursor = cursor if cursor else plugin_context.get_variable('cursors').get(standby_deploy_name) - global tenant_cursor - tenant_cursor = None + multi_options = create_tenant_options if create_tenant_options else [plugin_context.options] + for options in multi_options: + create_if_not_exists = get_option('create_if_not_exists', False) + standby_deploy_name = plugin_context.cluster_config.deploy_name + cursor = plugin_context.get_return('connect').get_return('cursor') if not cursor else cursor + cursor = cursor if cursor else plugin_context.get_variable('cursors').get(standby_deploy_name) + global tenant_cursor + tenant_cursor = None - if primary_tenant_info: - primary_deploy_name = primary_tenant_info.get('primary_deploy_name') - primary_tenant = primary_tenant_info.get('primary_tenant') - primary_cursor = plugin_context.get_variable('cursors').get(primary_deploy_name) - primary_memory_size = primary_tenant_info['memory_size'] - primary_log_disk_size = 
primary_tenant_info['log_disk_size'] - primary_params = ['max_cpu', 'min_cpu', 'unit_num', 'memory_size', 'log_disk_size', 'max_iops', 'min_iops', 'iops_weight'] - for param in primary_params: - if get_option(param, None) is None and param in primary_tenant_info: - setattr(options, param, primary_tenant_info[param]) - - mode = get_option('mode', 'mysql').lower() - if not mode in ['mysql', 'oracle']: - error('No such tenant mode: %s.\n--mode must be `mysql` or `oracle`' % mode) - return - - # options not support - deserted_options = ('max_session_num', 'max_memory', 'min_memory', 'max_disk_size') - for opt in deserted_options: - if get_option(opt, None) is not None: - stdio.warn("option {} is no longer supported".format(opt)) - - if primary_tenant_info: - name = get_option('tenant_name', primary_tenant) - else: - name = get_option('tenant_name', 'test') - unit_name = '%s_unit' % name - sql = 'select * from oceanbase.DBA_OB_UNIT_CONFIGS where name like "{}%" order by unit_config_id desc limit 1'.format(unit_name) - res = cursor.fetchone(sql) - if res is False: - return - if res: - unit_name += '{}'.format(int(res['UNIT_CONFIG_ID']) + 1) - - pool_name = '%s_pool' % name - - sql = "select * from oceanbase.DBA_OB_TENANTS where TENANT_NAME = %s" - tenant_exists = False - res = cursor.fetchone(sql, (name, )) - if res: - if create_if_not_exists: - return plugin_context.return_true() - else: - error('Tenant %s already exists' % name) - return - elif res is False: - return - if not tenant_exists: - stdio.start_loading('Create tenant %s' % name) - zone_list = get_option('zone_list', set()) - zone_obs_num = {} - sql = "select zone, count(*) num from oceanbase.__all_server where status = 'active' group by zone" - res = cursor.fetchall(sql) - if res is False: - error() + if primary_tenant_info: + primary_deploy_name = primary_tenant_info.get('primary_deploy_name') + primary_tenant = primary_tenant_info.get('primary_tenant') + primary_cursor = plugin_context.get_variable('cursors').get(primary_deploy_name) + primary_memory_size = primary_tenant_info['memory_size'] + primary_log_disk_size = primary_tenant_info['log_disk_size'] + primary_params = ['max_cpu', 'min_cpu', 'unit_num', 'memory_size', 'log_disk_size', 'max_iops', 'min_iops', 'iops_weight'] + for param in primary_params: + if get_option(param, None) is None and param in primary_tenant_info: + setattr(options, param, primary_tenant_info[param]) + + mode = get_option('mode', 'mysql').lower() + if not mode in ['mysql', 'oracle']: + error('No such tenant mode: %s.\n--mode must be `mysql` or `oracle`' % mode) return - for row in res: - zone_obs_num[str(row['zone'])] = row['num'] + # options not support + deserted_options = ('max_session_num', 'max_memory', 'min_memory', 'max_disk_size') + for opt in deserted_options: + if get_option(opt, None) is not None: + stdio.warn("option {} is no longer supported".format(opt)) - if not zone_list: - zone_list = zone_obs_num.keys() - if isinstance(zone_list, str): - zones = zone_list.replace(';', ',').split(',') + if primary_tenant_info: + name = get_option('tenant_name', primary_tenant) else: - zones = zone_list - zone_list = "('%s')" % "','".join(zones) - - min_unit_num = min(zone_obs_num.items(), key=lambda x: x[1])[1] - unit_num = get_option('unit_num', min_unit_num) - if unit_num > min_unit_num: - return error('resource pool unit num is bigger than zone server count') - - sql = "select count(*) num from oceanbase.__all_server where status = 'active' and start_service_time > 0" - count = 30 - while count: - 
num = cursor.fetchone(sql) - if num is False: - error() - return - num = num['num'] - if num >= unit_num: - break - count -= 1 - time.sleep(1) - if count == 0: - stdio.error(EC_OBSERVER_CAN_NOT_MIGRATE_IN) + name = get_option('tenant_name', 'test') + unit_name = '%s_unit' % name + sql = 'select * from oceanbase.DBA_OB_UNIT_CONFIGS where name like "{}%" order by unit_config_id desc limit 1'.format(unit_name) + res = cursor.fetchone(sql) + if res is False: return + if res: + unit_name += '{}'.format(int(res['UNIT_CONFIG_ID']) + 1) - sql = "SELECT * FROM oceanbase.GV$OB_SERVERS where zone in %s" % zone_list - servers_stats = cursor.fetchall(sql) - if servers_stats is False: - error() - return - cpu_available = servers_stats[0]['CPU_CAPACITY_MAX'] - servers_stats[0]['CPU_ASSIGNED_MAX'] - mem_available = servers_stats[0]['MEM_CAPACITY'] - servers_stats[0]['MEM_ASSIGNED'] - disk_available = servers_stats[0]['DATA_DISK_CAPACITY'] - servers_stats[0]['DATA_DISK_IN_USE'] - log_disk_available = servers_stats[0]['LOG_DISK_CAPACITY'] - servers_stats[0]['LOG_DISK_ASSIGNED'] - for servers_stat in servers_stats[1:]: - cpu_available = min(servers_stat['CPU_CAPACITY_MAX'] - servers_stat['CPU_ASSIGNED_MAX'], cpu_available) - mem_available = min(servers_stat['MEM_CAPACITY'] - servers_stat['MEM_ASSIGNED'], mem_available) - disk_available = min(servers_stat['DATA_DISK_CAPACITY'] - servers_stat['DATA_DISK_IN_USE'], disk_available) - log_disk_available = min(servers_stat['LOG_DISK_CAPACITY'] - servers_stat['LOG_DISK_ASSIGNED'], log_disk_available) - - MIN_CPU = 1 - MIN_MEMORY = 1073741824 - MIN_LOG_DISK_SIZE = 2147483648 - MIN_IOPS = 1024 - STANDBY_MIN_MEMORY = 1073741824 * 2 - STANDBY_WARN_MEMORY = 1073741824 * 4 - STANDBY_MIN_LOG_DISK_SIZE = 1073741824 * 4 - - if cpu_available < MIN_CPU: - return error('%s: resource not enough: cpu count less than %s' % (zone_list, MIN_CPU)) - if mem_available < MIN_MEMORY: - return error('%s: resource not enough: memory less than %s' % (zone_list, format_size(MIN_MEMORY))) - if log_disk_available < MIN_LOG_DISK_SIZE: - return error('%s: resource not enough: log disk size less than %s' % (zone_list, format_size(MIN_MEMORY))) + pool_name = '%s_pool' % name - if primary_tenant_info: - recreate_cmd = '' - check_available_param = {} - check_available_param['max_cpu'] = [int(cpu_available), ''] - check_available_param['min_cpu'] = [int(cpu_available), ''] - check_available_param['memory_size'] = [mem_available, 'B'] - check_available_param['log_disk_size'] = [disk_available, 'B'] - for param, param_info in check_available_param.items(): - if get_option(param, None) is None and param_info[0] < primary_tenant_info[param]: - recreate_cmd += ' --{}={}{} '.format(param, param_info[0], param_info[1]) - stdio.warn("available {} is less then primary tenant's {} quota, primary tenant's {}:{}{}, current available:{}{}".format(param, param, primary_tenant_info[param], param_info[1], param_info[0], param_info[1])) - - if recreate_cmd: - stdio.error("Resource confirmation: if you insist to take the risk, please recreate the tenant with '{}'".format(recreate_cmd)) + sql = "select * from oceanbase.DBA_OB_TENANTS where TENANT_NAME = %s" + tenant_exists = False + res = cursor.fetchone(sql, (name, )) + if res: + if create_if_not_exists: + continue + else: + error('Tenant %s already exists' % name) return - # cpu options - max_cpu = get_option('max_cpu', cpu_available) - min_cpu = get_option('min_cpu', max_cpu) - if cpu_available < max_cpu: - return error('Resource not enough: cpu (Avail: %s, Need: 
%s)' % (cpu_available, max_cpu)) - if max_cpu < min_cpu: - return error('min_cpu must less then max_cpu') - if min_cpu < MIN_CPU: - return error('min_cpu must greater then %s' % MIN_CPU) - - # memory options - memory_size = get_parsed_option('memory_size', None) - log_disk_size = get_parsed_option('log_disk_size', None) - - if memory_size is None: - memory_size = mem_available - if log_disk_size is None: - log_disk_size = log_disk_available - - if mem_available < memory_size: - return error('resource not enough: memory (Avail: %s, Need: %s)' % (format_size(mem_available), format_size(memory_size))) - if memory_size < MIN_MEMORY: - return error('memory must greater then %s' % format_size(MIN_MEMORY)) - - # log disk size options - if log_disk_size is not None and log_disk_available < log_disk_size: - return error('resource not enough: log disk space (Avail: %s, Need: %s)' % (format_size(disk_available), format_size(log_disk_size))) - - if primary_tenant_info: - if parse_size(primary_memory_size) < STANDBY_MIN_MEMORY: - return error('Primary tenant memory_size:{}B is less than {}B, creating a standby tenant is not supported.'.format(primary_memory_size, STANDBY_MIN_MEMORY)) - if parse_size(primary_memory_size) < STANDBY_WARN_MEMORY: - stdio.warn('Primary tenant memory_size: {}B , suggestion: {}B'.format(primary_memory_size, STANDBY_WARN_MEMORY)) - if parse_size(primary_log_disk_size) < STANDBY_MIN_LOG_DISK_SIZE: - return error('Primary tenant log_disk_size:{}B is less than {}B, creating a standby tenant is not supported.'.format(primary_log_disk_size, STANDBY_MIN_LOG_DISK_SIZE)) - - # iops options - max_iops = get_option('max_iops', None) - min_iops = get_option('min_iops', None) - iops_weight = get_option('iops_weight', None) - if max_iops is not None and max_iops < MIN_IOPS: - return error('max_iops must greater than %d' % MIN_IOPS) - if max_iops is not None and min_iops is not None and max_iops < min_iops: - return error('min_iops must less then max_iops') - - zone_num = len(zones) - charset = get_option('charset', '') - collate = get_option('collate', '') - replica_num = get_option('replica_num', zone_num) - logonly_replica_num = get_option('logonly_replica_num', 0) - tablegroup = get_option('tablegroup', '') - primary_zone = get_option('primary_zone', 'RANDOM') - locality = get_option('locality', '') - variables = get_option('variables', '') - - if replica_num == 0: - replica_num = zone_num - elif replica_num > zone_num: - return error('replica_num cannot be greater than zone num (%s)' % zone_num) - if not primary_zone: - primary_zone = 'RANDOM' - if logonly_replica_num > replica_num: - return error('logonly_replica_num cannot be greater than replica_num (%s)' % replica_num) - - # create resource unit - sql = "create resource unit %s max_cpu %.1f, memory_size %d" % (unit_name, max_cpu, memory_size) - if min_cpu is not None: - sql += ', min_cpu %.1f' % min_cpu - if max_iops is not None: - sql += ', max_iops %d' % max_iops - if min_iops is not None: - sql += ', min_iops %d' % min_iops - if iops_weight is not None: - sql += ', iops_weight %d' % iops_weight - if log_disk_size is not None: - sql += ', log_disk_size %d' % log_disk_size - - res = cursor.execute(sql) - if res is False: - error() - return - - # create resource pool - sql = "create resource pool %s unit='%s', unit_num=%d, zone_list=%s" % (pool_name, unit_name, unit_num, zone_list) - try: - cursor.execute(sql, raise_exception=True) - except Exception as e: - stdio.exception('create resource pool failed, you can try again by using 
SQL "drop resource pool {}" to delete the resource pool, if you are certain that the resource pool is not being used. error info: {}'.format(pool_name, e)) + elif res is False: return + if not tenant_exists: + stdio.start_loading('Create tenant %s' % name) + zone_list = get_option('zone_list', set()) + zone_obs_num = {} + sql = "select zone, count(*) num from oceanbase.__all_server where status = 'active' group by zone" + res = cursor.fetchall(sql) + if res is False: + error() + return - # create tenant - if not primary_tenant_info: - # create normal tenant - sql = "create tenant %s replica_num=%d,zone_list=%s,primary_zone='%s',resource_pool_list=('%s')" - sql = sql % (name, replica_num, zone_list, primary_zone, pool_name) - if charset: - sql += ", charset = '%s'" % charset - if collate: - sql += ", collate = '%s'" % collate - if logonly_replica_num: - sql += ", logonly_replica_num = %d" % logonly_replica_num - if tablegroup: - sql += ", default tablegroup ='%s'" % tablegroup - if locality: - sql += ", locality = '%s'" % locality - - set_mode = "ob_compatibility_mode = '%s'" % mode - if variables: - sql += "set %s, %s" % (variables, set_mode) + for row in res: + zone_obs_num[str(row['zone'])] = row['num'] + if not zone_list: + zone_list = zone_obs_num.keys() + if isinstance(zone_list, str): + zones = zone_list.replace(';', ',').split(',') else: - sql += "set %s" % set_mode + zones = zone_list + zone_list = "('%s')" % "','".join(zones) + + min_unit_num = min(zone_obs_num.items(), key=lambda x: x[1])[1] + unit_num = get_option('unit_num', min_unit_num) + if unit_num > min_unit_num: + return error('resource pool unit num is bigger than zone server count') + + sql = "select count(*) num from oceanbase.__all_server where status = 'active' and start_service_time > 0" + count = 30 + while count: + num = cursor.fetchone(sql) + if num is False: + error() + return + num = num['num'] + if num >= unit_num: + break + count -= 1 + time.sleep(1) + if count == 0: + stdio.error(EC_OBSERVER_CAN_NOT_MIGRATE_IN) + return + + sql = "SELECT * FROM oceanbase.GV$OB_SERVERS where zone in %s" % zone_list + servers_stats = cursor.fetchall(sql) + if servers_stats is False: + error() + return + cpu_available = servers_stats[0]['CPU_CAPACITY_MAX'] - servers_stats[0]['CPU_ASSIGNED_MAX'] + mem_available = servers_stats[0]['MEM_CAPACITY'] - servers_stats[0]['MEM_ASSIGNED'] + disk_available = servers_stats[0]['DATA_DISK_CAPACITY'] - servers_stats[0]['DATA_DISK_IN_USE'] + log_disk_available = servers_stats[0]['LOG_DISK_CAPACITY'] - servers_stats[0]['LOG_DISK_ASSIGNED'] + for servers_stat in servers_stats[1:]: + cpu_available = min(servers_stat['CPU_CAPACITY_MAX'] - servers_stat['CPU_ASSIGNED_MAX'], cpu_available) + mem_available = min(servers_stat['MEM_CAPACITY'] - servers_stat['MEM_ASSIGNED'], mem_available) + disk_available = min(servers_stat['DATA_DISK_CAPACITY'] - servers_stat['DATA_DISK_IN_USE'], disk_available) + log_disk_available = min(servers_stat['LOG_DISK_CAPACITY'] - servers_stat['LOG_DISK_ASSIGNED'], log_disk_available) + + MIN_CPU = 1 + MIN_MEMORY = 1073741824 + MIN_LOG_DISK_SIZE = 2147483648 + MIN_IOPS = 1024 + STANDBY_MIN_MEMORY = 1073741824 * 2 + STANDBY_WARN_MEMORY = 1073741824 * 4 + STANDBY_MIN_LOG_DISK_SIZE = 1073741824 * 4 + + if cpu_available < MIN_CPU: + return error('%s: resource not enough: cpu count less than %s' % (zone_list, MIN_CPU)) + if mem_available < MIN_MEMORY: + return error('%s: resource not enough: memory less than %s' % (zone_list, Capacity(MIN_MEMORY))) + if log_disk_available < 
MIN_LOG_DISK_SIZE: + return error('%s: resource not enough: log disk size less than %s' % (zone_list, Capacity(MIN_LOG_DISK_SIZE))) + + if primary_tenant_info: + recreate_cmd = '' + check_available_param = {} + check_available_param['max_cpu'] = [int(cpu_available), ''] + check_available_param['min_cpu'] = [int(cpu_available), ''] + check_available_param['memory_size'] = [mem_available, 'B'] + check_available_param['log_disk_size'] = [disk_available, 'B'] + for param, param_info in check_available_param.items(): + if get_option(param, None) is None and param_info[0] < primary_tenant_info[param]: + recreate_cmd += ' --{}={}{} '.format(param, param_info[0], param_info[1]) + stdio.warn("available {} is less than primary tenant's {} quota, primary tenant's {}:{}{}, current available:{}{}".format(param, param, primary_tenant_info[param], param_info[1], param_info[0], param_info[1])) + + if recreate_cmd: + stdio.error("Resource confirmation: if you insist on taking the risk, please recreate the tenant with '{}'".format(recreate_cmd)) + return + # cpu options + max_cpu = get_option('max_cpu', cpu_available) + min_cpu = get_option('min_cpu', max_cpu) + if cpu_available < max_cpu: + return error('Resource not enough: cpu (Avail: %s, Need: %s)' % (cpu_available, max_cpu)) + if max_cpu < min_cpu: + return error('min_cpu must be less than max_cpu') + if min_cpu < MIN_CPU: + return error('min_cpu must be greater than %s' % MIN_CPU) + + # memory options + memory_size = get_parsed_option('memory_size', None) + log_disk_size = get_parsed_option('log_disk_size', None) + + if memory_size is None: + memory_size = mem_available + if log_disk_size is None: + log_disk_size = log_disk_available + + if mem_available < memory_size: + return error('resource not enough: memory (Avail: %s, Need: %s)' % (Capacity(mem_available), Capacity(memory_size))) + if memory_size < MIN_MEMORY: + return error('memory must be greater than %s' % Capacity(MIN_MEMORY)) + + # log disk size options + if log_disk_size is not None and log_disk_available < log_disk_size: + return error('resource not enough: log disk space (Avail: %s, Need: %s)' % (Capacity(log_disk_available), Capacity(log_disk_size))) + + if primary_tenant_info: + if Capacity(primary_memory_size).btyes < STANDBY_MIN_MEMORY: + return error('Primary tenant memory_size:{}B is less than {}B, creating a standby tenant is not supported.'.format(primary_memory_size, STANDBY_MIN_MEMORY)) + if Capacity(primary_memory_size).btyes < STANDBY_WARN_MEMORY: + stdio.warn('Primary tenant memory_size: {}B , suggestion: {}B'.format(primary_memory_size, STANDBY_WARN_MEMORY)) + if Capacity(primary_log_disk_size).btyes < STANDBY_MIN_LOG_DISK_SIZE: + return error('Primary tenant log_disk_size:{}B is less than {}B, creating a standby tenant is not supported.'.format(primary_log_disk_size, STANDBY_MIN_LOG_DISK_SIZE)) + + # iops options + max_iops = get_option('max_iops', None) + min_iops = get_option('min_iops', None) + iops_weight = get_option('iops_weight', None) + if max_iops is not None and max_iops < MIN_IOPS: + return error('max_iops must be greater than %d' % MIN_IOPS) + if max_iops is not None and min_iops is not None and max_iops < min_iops: + return error('min_iops must be less than max_iops') + + zone_num = len(zones) + charset = get_option('charset', '') + collate = get_option('collate', '') + replica_num = get_option('replica_num', zone_num) + logonly_replica_num = get_option('logonly_replica_num', 0) + tablegroup = get_option('tablegroup', '') + primary_zone = get_option('primary_zone', 'RANDOM') + locality =
get_option('locality', '') + variables = get_option('variables', "ob_tcp_invited_nodes='%'") + + if replica_num == 0: + replica_num = zone_num + elif replica_num > zone_num: + return error('replica_num cannot be greater than zone num (%s)' % zone_num) + if not primary_zone: + primary_zone = 'RANDOM' + if logonly_replica_num > replica_num: + return error('logonly_replica_num cannot be greater than replica_num (%s)' % replica_num) + + # create resource unit + sql = "create resource unit %s max_cpu %.1f, memory_size %d" % (unit_name, max_cpu, memory_size) + if min_cpu is not None: + sql += ', min_cpu %.1f' % min_cpu + if max_iops is not None: + sql += ', max_iops %d' % max_iops + if min_iops is not None: + sql += ', min_iops %d' % min_iops + if iops_weight is not None: + sql += ', iops_weight %d' % iops_weight + if log_disk_size is not None: + sql += ', log_disk_size %d' % log_disk_size + + res = cursor.execute(sql) + if res is False: + error() + return + + # create resource pool + sql = "create resource pool %s unit='%s', unit_num=%d, zone_list=%s" % (pool_name, unit_name, unit_num, zone_list) try: cursor.execute(sql, raise_exception=True) except Exception as e: - stdio.exception('Create error, error info:{}'.format(e)) + stdio.exception('create resource pool failed, you can try again by using SQL "drop resource pool {}" to delete the resource pool, if you are certain that the resource pool is not being used. error info: {}'.format(pool_name, e)) return - stdio.stop_loading('succeed') - root_password = get_option(name+'_root_password', None) - if root_password: - sql = "alter user root IDENTIFIED BY '%s'" % root_password - stdio.verbose(sql) - if not exec_sql_in_tenant(sql=sql, cursor=cursor, tenant=name, mode=mode) and not create_if_not_exists: - stdio.error('failed to set root@{} password {}'.format(name)) - return - database = get_option('database') - if database: - sql = 'create database {}'.format(database) - if not exec_sql_in_tenant(sql=sql, cursor=cursor, tenant=name, mode=mode, password=root_password if root_password else '') and not create_if_not_exists: - stdio.error('failed to create database {}'.format(database)) - return - - db_username = get_option('db_username') - db_password = get_option('db_password', '') - if db_username: - if mode == "mysql": - sql = """create user if not exists '{username}' IDENTIFIED BY '{password}'; - grant all on *.* to '{username}' WITH GRANT OPTION;""".format( - username=db_username, password=db_password) + + # create tenant + if not primary_tenant_info: + # create normal tenant + sql = "create tenant %s replica_num=%d,zone_list=%s,primary_zone='%s',resource_pool_list=('%s')" + sql = sql % (name, replica_num, zone_list, primary_zone, pool_name) + if charset: + sql += ", charset = '%s'" % charset + if collate: + sql += ", collate = '%s'" % collate + if logonly_replica_num: + sql += ", logonly_replica_num = %d" % logonly_replica_num + if tablegroup: + sql += ", default tablegroup ='%s'" % tablegroup + if locality: + sql += ", locality = '%s'" % locality + + set_mode = "ob_compatibility_mode = '%s'" % mode + if variables: + sql += "set %s, %s" % (variables, set_mode) else: - # todo: fix oracle user create - sql = """create {username} IDENTIFIED BY {password}; - grant all on *.* to {username} WITH GRANT OPTION; - grant dba to {username}; - grant all privileges to {username};""" - if not exec_sql_in_tenant(sql=sql, cursor=cursor, tenant=name, mode=mode): - stdio.error('failed to create user {}'.format(db_username)) + sql += "set %s" % set_mode + try: + 
cursor.execute(sql, raise_exception=True) + except Exception as e: + stdio.exception('Create error, error info:{}'.format(e)) + return + stdio.stop_loading('succeed') + root_password = get_option(name+'_root_password', "") + if root_password: + sql = "alter user root IDENTIFIED BY %s" + stdio.verbose(sql) + if not exec_sql_in_tenant(sql=sql, cursor=cursor, tenant=name, mode=mode, args=[root_password]) and not create_if_not_exists: + stdio.error('failed to set root@{} password {}'.format(name)) + return + database = get_option('database') + if database: + sql = 'create database {}'.format(database) + if not exec_sql_in_tenant(sql=sql, cursor=cursor, tenant=name, mode=mode, password=root_password if root_password else '') and not create_if_not_exists: + stdio.error('failed to create database {}'.format(database)) + return + + db_username = get_option('db_username') + db_password = get_option('db_password', '') + if db_username: + if mode == "mysql": + sql = """create user if not exists '{username}' IDENTIFIED BY %s; + grant all on *.* to '{username}' WITH GRANT OPTION;""".format( + username=db_username) + else: + error("Create user in oracle tenant is not supported") + if not exec_sql_in_tenant(sql=sql, cursor=cursor, tenant=name, mode=mode, args=[db_password]): + stdio.error('failed to create user {}'.format(db_username)) + return + else: + # create standby tenant + # query ip_list + sql = '''select group_concat(host separator ";") as ip_list from (select concat(svr_ip,":",SQL_PORT) as host from oceanbase.cdb_ob_access_point where tenant_name=%s)''' + res = primary_cursor.fetchone(sql, (primary_tenant, )) + if not res: + stdio.error('ip_list query error.') return - else: - # create standby tenant - # query ip_list - sql = '''select group_concat(host separator ";") as ip_list from (select concat(svr_ip,":",SQL_PORT) as host from oceanbase.cdb_ob_access_point where tenant_name=%s)''' - res = primary_cursor.fetchone(sql, (primary_tenant, )) - if not res: - stdio.error('ip_list query error.') - return - sql = '''CREATE STANDBY TENANT {} LOG_RESTORE_SOURCE = 'SERVICE={} USER=standbyro@{} PASSWORD={}' RESOURCE_POOL_LIST=('{}') , primary_zone='{}' '''.format(name, res['ip_list'], primary_tenant, standbyro_password, pool_name, primary_zone) - if locality: - sql += ", locality = '%s'" % locality + sql = '''CREATE STANDBY TENANT {} LOG_RESTORE_SOURCE = 'SERVICE={} USER=standbyro@{} PASSWORD={}' RESOURCE_POOL_LIST=('{}') , primary_zone='{}' '''.format(name, res['ip_list'], primary_tenant, standbyro_password, pool_name, primary_zone) + if locality: + sql += ", locality = '%s'" % locality - try: - cursor.execute(sql, raise_exception=True, exc_level='verbose') - except Exception as e: - stdio.verbose('create standby tenant fail, clean and retry. fail message:{}'.format(e)) - # clean and retry create standby tenant - res = cursor.fetchone("select TENANT_ID from oceanbase.DBA_OB_TENANTS where tenant_name = %s", (name, ), raise_exception=False) - if res is False: - error('Create standby tenant fail. message:{}'.format(e)) - return - if res: - # drop tenant - tenant_id = res['TENANT_ID'] - res = cursor.execute("drop tenant %s FORCE" % name, raise_exception=False) + try: + cursor.execute(sql, raise_exception=True, exc_level='verbose') + except Exception as e: + stdio.verbose('create standby tenant fail, clean and retry. 
fail message:{}'.format(e)) + # clean and retry create standby tenant + res = cursor.fetchone("select TENANT_ID from oceanbase.DBA_OB_TENANTS where tenant_name = %s", (name, ), raise_exception=False) if res is False: error('Create standby tenant fail. message:{}'.format(e)) return - - # wait drop tenant - count = 600 - while count > 0: - res = cursor.fetchone('select count(1) as count from oceanbase.GV$OB_UNITS where TENANT_ID=%s or TENANT_ID=%s', (tenant_id, int(tenant_id)-1), raise_exception=False) + if res: + # drop tenant + tenant_id = res['TENANT_ID'] + res = cursor.execute("drop tenant %s FORCE" % name, raise_exception=False) if res is False: - error('query unit info failed') + error('Create standby tenant fail. message:{}'.format(e)) return - if res['count'] == 0: - break - count -= 1 - time.sleep(1) - if count == 0: - error('Retry create standby tenant failed: drop tenant timeout') - return + # wait drop tenant + count = 600 + while count > 0: + res = cursor.fetchone('select count(1) as count from oceanbase.GV$OB_UNITS where TENANT_ID=%s or TENANT_ID=%s', (tenant_id, int(tenant_id)-1), raise_exception=False) + if res is False: + error('query unit info failed') + return + if res['count'] == 0: + break + count -= 1 + time.sleep(1) + + if count == 0: + error('Retry create standby tenant failed: drop tenant timeout') + return - # create again - try: - cursor.execute(sql, raise_exception=True) - except Exception as e: - retry_message = 'After resolving this issue, you can clean up the environment by manually executing "obd cluster tenant drop {} -t {}", and then wait for a while before re-creating the standby tenant.'.format(standby_deploy_name, name) - error("create standby tenant failed, error: {}".format(e)) - stdio.print(retry_message) + # create again + try: + cursor.execute(sql, raise_exception=True) + except Exception as e: + retry_message = 'After resolving this issue, you can clean up the environment by manually executing "obd cluster tenant drop {} -t {}", and then wait for a while before re-creating the standby tenant.'.format(standby_deploy_name, name) + error("create standby tenant failed, error: {}".format(e)) + stdio.print(retry_message) + return + stdio.stop_loading('succeed') + + # check standby sync status + stdio.start_loading('Check standby sync status') + sql = "SELECT tenant_id, tenant_name, tenant_type, primary_zone, locality, compatibility_mode, status, in_recyclebin, (CASE WHEN LOCKED = 'YES' THEN 1 ELSE 0 END) AS locked, TIMESTAMPDIFF(SECOND, CREATE_TIME, now()) AS exist_seconds, arbitration_service_status, switchover_status, log_mode, sync_scn, recovery_until_scn, tenant_role FROM oceanbase.DBA_OB_TENANTS WHERE TENANT_TYPE IN ('SYS', 'USER') and tenant_name = %s" + res = cursor.fetchone(sql, (name, )) + if not res: + error('check standby sync status failed') return - stdio.stop_loading('succeed') - - # check standby sync status - stdio.start_loading('Check standby sync status') - sql = "SELECT tenant_id, tenant_name, tenant_type, primary_zone, locality, compatibility_mode, status, in_recyclebin, (CASE WHEN LOCKED = 'YES' THEN 1 ELSE 0 END) AS locked, TIMESTAMPDIFF(SECOND, CREATE_TIME, now()) AS exist_seconds, arbitration_service_status, switchover_status, log_mode, sync_scn, recovery_until_scn, tenant_role FROM oceanbase.DBA_OB_TENANTS WHERE TENANT_TYPE IN ('SYS', 'USER') and tenant_name = %s" - res = cursor.fetchone(sql, (name, )) - if not res: - error('check standby sync status failed') - return - - stdio.print_list([res], ['tenant_name', 'log_mode', 
'locality', 'tenant_role', 'create_status'], - lambda x: [x['tenant_name'], x['log_mode'], x['locality'], x['tenant_role'], x['status']], title='standby tenant info') - - if res['status'] != 'NORMAL': - error('standby tenant status is not normal') - return - stdio.stop_loading('succeed') - - stdio.start_loading('Dump standby relation') - if not dump_standbyro_password(standby_deploy_name, name, standbyro_password, cluster_configs.get(standby_deploy_name), stdio): - return - if not dump_standby_relation(relation_tenants, cluster_configs, [[standby_deploy_name, name], [primary_deploy_name, primary_tenant]], stdio): - return - stdio.stop_loading('succeed') - - # check log sync task create - stdio.start_loading('Creating log sync task') - sql = "SELECT tenant_id, REPLACE(`sync_status`, ' ', '_') as sync_status, err_code, comment FROM oceanbase.V$OB_LS_LOG_RESTORE_STATUS WHERE tenant_id = %s group by sync_status " - count = 600 - while count > 0: - res = cursor.fetchall(sql, (res['tenant_id'], )) - if res: - break - count -= 1 - time.sleep(1) - stdio.verbose('Wait log sync create: retry {}'.format(200 - count)) - if count == 0: - stdio.warn('wait log sync create timeout') - - flag = False - for item in res: - if item.get('sync_status') != 'NORMAL': - flag = True - stdio.error('standby tenant log sync error, tenant_id:{}, sync_status:{}, err_code:{},comment:{}'.format(item['tenant_id'], item['sync_status'], item['err_code'], item['comment'])) - if flag: - stdio.stop_loading('failed') - stdio.stop_loading('succeed') stdio.print('You can use the command "obd cluster tenant show {} -g" to view the relationship between the primary and standby tenants.'.format(standby_deploy_name)) return plugin_context.return_true() \ No newline at end of file + stdio.print_list([res], ['tenant_name', 'log_mode', 'locality', 'tenant_role', 'create_status'], + lambda x: [x['tenant_name'], x['log_mode'], x['locality'], x['tenant_role'], x['status']], title='standby tenant info') + if res['status'] != 'NORMAL': + error('standby tenant status is not normal') + return + stdio.stop_loading('succeed') + stdio.start_loading('Dump standby relation') + if not dump_standbyro_password(standby_deploy_name, name, standbyro_password, cluster_configs.get(standby_deploy_name), stdio): + return + if not dump_standby_relation(relation_tenants, cluster_configs, [[standby_deploy_name, name], [primary_deploy_name, primary_tenant]], stdio): + return + stdio.stop_loading('succeed') + + # check log sync task create + stdio.start_loading('Creating log sync task') + sql = "SELECT tenant_id, REPLACE(`sync_status`, ' ', '_') as sync_status, err_code, comment FROM oceanbase.V$OB_LS_LOG_RESTORE_STATUS WHERE tenant_id = %s group by sync_status " + count = 600 + while count > 0: + res = cursor.fetchall(sql, (res['tenant_id'], )) + if res: + break + count -= 1 + time.sleep(1) + stdio.verbose('Wait log sync create: retry {}'.format(600 - count)) + if count == 0: + stdio.warn('wait log sync create timeout') + + flag = False + for item in res: + if item.get('sync_status') != 'NORMAL': + flag = True + stdio.error('standby tenant log sync error, tenant_id:{}, sync_status:{}, err_code:{},comment:{}'.format(item['tenant_id'], item['sync_status'], item['err_code'], item['comment'])) + + if flag: + stdio.stop_loading('failed') + stdio.stop_loading('succeed') + + stdio.print('You can use the command "obd cluster tenant show {} -g" to view the relationship between the primary and standby tenants.'.format(standby_deploy_name)) + + return plugin_context.return_true()
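Worth noting in this file: the password statements now go through driver-side parameter binding (exec_sql_in_tenant forwards an args list to cursor.execute) instead of interpolating the secret into the SQL text. A minimal sketch of the pattern, using a stand-in cursor rather than OBD's real connection:

    class StubCursor(object):
        # Stand-in for the tenant cursor; the real code reaches this via exec_sql_in_tenant.
        def execute(self, sql, args=None, **kwargs):
            print('execute: %s  args: %s' % (sql, args))

    cursor = StubCursor()
    root_password = "pa'ss"  # hypothetical value containing a quote

    # Old style: the quote breaks (or injects into) the statement text.
    # cursor.execute("alter user root IDENTIFIED BY '%s'" % root_password)

    # New style: the driver quotes the bound value safely.
    cursor.execute("alter user root IDENTIFIED BY %s", args=[root_password])
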
diff --git a/plugins/oceanbase/4.2.0.0/generate_config.py b/plugins/oceanbase/4.2.0.0/generate_config.py index dd289e3..2b4be81 100644 --- a/plugins/oceanbase/4.2.0.0/generate_config.py +++ b/plugins/oceanbase/4.2.0.0/generate_config.py @@ -28,35 +28,7 @@ from _errno import EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE, EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED, EC_OBSERVER_GET_MEMINFO_FAIL import _errno as err from tool import ConfigUtil - - -def parse_size(size): - _bytes = 0 - if not isinstance(size, str) or size.isdigit(): - _bytes = int(size) - else: - units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40} - match = re.match(r'(0|[1-9][0-9]*)\s*([B,K,M,G,T])', size.upper()) - _bytes = int(match.group(1)) * units[match.group(2)] - return _bytes - - -def format_size(size, precision=1): - units = ['B', 'K', 'M', 'G'] - units_num = len(units) - 1 - idx = 0 - if precision: - div = 1024.0 - format = '%.' + str(precision) + 'f%s' - limit = 1024 - else: - div = 1024 - limit = 1024 - format = '%d%s' - while idx < units_num and size >= limit: - size /= div - idx += 1 - return format % (size, units[idx]) +from _types import Capacity def get_system_memory(memory_limit, min_pool_memory, generate_config_mini): @@ -171,7 +143,7 @@ def summit_config(): auto_set_min_pool_memory = False system_memory = 0 if user_server_config.get('system_memory'): - system_memory = parse_size(user_server_config.get('system_memory')) + system_memory = Capacity(user_server_config.get('system_memory')).btyes if generate_config_mini and '__min_full_resource_pool_memory' not in user_server_config: auto_set_min_pool_memory = True min_pool_memory = server_config['__min_full_resource_pool_memory'] @@ -192,11 +164,11 @@ def summit_config(): for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout): if k in memory_key_map: key = memory_key_map[k] - server_memory_stats[key] = parse_size(str(v)) + server_memory_stats[key] = Capacity(str(v)).btyes if user_server_config.get('memory_limit_percentage'): if ip in ip_server_memory_info: - total_memory = parse_size(ip_server_memory_info[ip]['total']) + total_memory = Capacity(ip_server_memory_info[ip]['total']).btyes memory_limit = int(total_memory * user_server_config.get('memory_limit_percentage') / 100) elif generate_check: stdio.error(EC_OBSERVER_GET_MEMINFO_FAIL.format(server=server)) @@ -207,7 +179,7 @@ def summit_config(): elif not server_config.get('memory_limit'): if generate_config_mini: memory_limit = MINI_MEMORY_SIZE - update_server_conf(server, 'memory_limit', format_size(memory_limit, 0)) + update_server_conf(server, 'memory_limit', str(Capacity(memory_limit, 0))) update_server_conf(server, 'production_mode', False) if auto_set_min_pool_memory: min_pool_memory = 1073741824 @@ -217,16 +189,16 @@ def summit_config(): server_memory_stats = ip_server_memory_info[ip] if generate_check: if server_memory_stats['available'] < START_NEED_MEMORY: - stdio.error(EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE.format(ip=ip, available=format_size(server_memory_stats['available']), need=format_size(START_NEED_MEMORY))) + stdio.error(EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE.format(ip=ip, available=str(Capacity(server_memory_stats['available'])), need=str(Capacity(START_NEED_MEMORY)))) success = False continue if server_memory_stats['free'] + server_memory_stats['buffers'] + server_memory_stats['cached'] < MIN_MEMORY: - stdio.error(EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED.format(ip=ip, free=format_size(server_memory_stats['free']),
cached=format_size(server_memory_stats['buffers'] + server_memory_stats['cached']), need=format_size(MIN_MEMORY))) + stdio.error(EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED.format(ip=ip, free=str(Capacity(server_memory_stats['free'])), cached=str(Capacity(server_memory_stats['buffers'] + server_memory_stats['cached'])), need=str(Capacity(MIN_MEMORY)))) success = False continue - memory_limit = max(MIN_MEMORY, int(server_memory_stats['available'] * 0.9)) - update_server_conf(server, 'memory_limit', format_size(memory_limit, 0)) + memory_limit = max(MIN_MEMORY, int(int(server_memory_stats['available'] * 0.9))) + update_server_conf(server, 'memory_limit', str(Capacity(memory_limit, 0))) auto_set_memory = True elif generate_check: stdio.error(EC_OBSERVER_GET_MEMINFO_FAIL.format(server=server)) @@ -235,12 +207,12 @@ def summit_config(): else: memory_limit = MIN_MEMORY else: - memory_limit = parse_size(server_config.get('memory_limit')) + memory_limit = Capacity(server_config.get('memory_limit')).btyes if system_memory == 0: auto_set_system_memory = True system_memory = get_system_memory(memory_limit, min_pool_memory, generate_config_mini) - update_server_conf(server, 'system_memory', format_size(system_memory, 0)) + update_server_conf(server, 'system_memory', str(Capacity(system_memory, 0))) # cpu if not server_config.get('cpu_count'): @@ -256,8 +228,8 @@ def summit_config(): stdio.warn('(%s): automatically adjust the cpu_count %s' % (server, MIN_CPU_COUNT)) # disk - datafile_size = parse_size(server_config.get('datafile_size', 0)) - log_disk_size = parse_size(server_config.get('log_disk_size', 0)) + datafile_size = server_config.get('datafile_size', 0) + log_disk_size = server_config.get('log_disk_size', 0) if not server_config.get('datafile_size') or not server_config.get('log_disk_size'): disk = {'/': 0} ret = client.execute_command('df --block-size=1024') @@ -309,21 +281,21 @@ def summit_config(): datafile_size = data_dir_disk['total'] * datafile_disk_percentage / 100 elif generate_config_mini: datafile_size = MINI_DATA_FILE_SIZE - update_server_conf(server, 'datafile_size', format_size(datafile_size, 0)) + update_server_conf(server, 'datafile_size', str(Capacity(datafile_size, 0))) if 'datafile_maxsize' not in user_server_config: - update_server_conf(server, 'datafile_maxsize', format_size(MINI_DATA_FILE_MAX_SIZE, 0)) + update_server_conf(server, 'datafile_maxsize', str(Capacity(MINI_DATA_FILE_MAX_SIZE, 0))) if 'datafile_next' not in user_server_config: - update_server_conf(server, 'datafile_next', format_size(MINI_DATA_FILE_NEXT, 0)) + update_server_conf(server, 'datafile_next', str(Capacity(MINI_DATA_FILE_NEXT, 0))) else: auto_set_datafile_size = True if not log_disk_size: log_disk_percentage = int(user_server_config.get('log_disk_percentage', 0)) if log_disk_percentage: - log_disk_size = clog_dir_disk['total'] * log_disk_percentage / 100 + log_disk_size = int(clog_dir_disk['total'] * log_disk_percentage / 100) elif generate_config_mini: log_disk_size = MINI_LOG_DISK_SIZE - update_server_conf(server, 'log_disk_size', format_size(log_disk_size, 0)) + update_server_conf(server, 'log_disk_size', str(Capacity(log_disk_size, 0))) else: auto_set_log_disk_size = True @@ -332,6 +304,9 @@ def summit_config(): else: log_size = (256 << 20) * int(user_server_config.get('max_syslog_file_count', max_syslog_file_count_default)) * 4 + datafile_maxsize = datafile_next = 0 + DATA_RESERVED = 0.95 + DATA_NEXT = 0.1 if clog_dir_mount == data_dir_mount: min_log_size = log_size if clog_dir_mount == home_path_mount 
else 0 MIN_NEED = min_log_size + SLOG_SIZE @@ -339,34 +314,34 @@ def summit_config(): datafile_size =min_datafile_size = MINI_DATA_FILE_SIZE else: min_datafile_size = datafile_size - MIN_NEED += min_datafile_size + MIN_NEED += Capacity(min_datafile_size).btyes if auto_set_log_disk_size: log_disk_size = min_log_disk_size = (memory_limit - system_memory) * 3 + system_memory else: min_log_disk_size = log_disk_size - MIN_NEED += min_log_disk_size - min_need = min_log_size + min_datafile_size + min_log_disk_size + MIN_NEED += Capacity(min_log_disk_size).btyes + min_need = min_log_size + Capacity(min_datafile_size).btyes + Capacity(min_log_disk_size).btyes disk_free = data_dir_disk['avail'] if MIN_NEED > disk_free: if generate_check: - stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=format_size(disk_free), need=format_size(MIN_NEED))) + stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=str(Capacity(disk_free)), need=str(Capacity(MIN_NEED)))) success = False continue else: if auto_set_memory: memory_limit = MIN_MEMORY - update_server_conf(server, 'memory_limit', format_size(memory_limit, 0)) + update_server_conf(server, 'memory_limit', str(Capacity(memory_limit, 0))) if auto_set_system_memory: system_memory = get_system_memory(memory_limit, min_pool_memory, generate_config_mini) - update_server_conf(server, 'system_memory', format_size(system_memory, 0)) + update_server_conf(server, 'system_memory', str(Capacity(system_memory, 0))) if auto_set_datafile_size: datafile_size = MINI_DATA_FILE_SIZE if auto_set_log_disk_size: log_disk_size = (memory_limit - system_memory) * 3 + system_memory elif min_need > disk_free: if generate_check and not auto_set_memory: - stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=format_size(disk_free), need=format_size(min_need))) + stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=str(Capacity(disk_free)), need=str(Capacity(min_need)))) success = False continue @@ -378,26 +353,33 @@ def summit_config(): if auto_set_log_disk_size is False: disk_free -= min_log_disk_size memory_factor -= 3 - memory_limit = format_size(disk_free / max(1, memory_factor), 0) + memory_limit = str(Capacity(disk_free / max(1, memory_factor), 0)) update_server_conf(server, 'memory_limit', memory_limit) - memory_limit = parse_size(memory_limit) + memory_limit = Capacity(memory_limit).bytes if auto_set_system_memory: system_memory = get_system_memory(memory_limit, min_pool_memory, generate_config_mini) - update_server_conf(server, 'system_memory', format_size(system_memory, 0)) + update_server_conf(server, 'system_memory', str(Capacity(system_memory, 0))) if auto_set_log_disk_size: log_disk_size = (memory_limit - system_memory) * 3 + system_memory if auto_set_datafile_size: - datafile_size = max(disk_free - log_disk_size, memory_limit * 3) + datafile_size = min(disk_free - log_disk_size, memory_limit * 3) + datafile_maxsize = max(disk_free - log_disk_size, memory_limit * 3) + datafile_next = DATA_NEXT * datafile_maxsize else: if auto_set_log_disk_size: log_disk_size = (memory_limit - system_memory) * 3 + system_memory if auto_set_datafile_size: - datafile_size = max(disk_free - log_size - SLOG_SIZE - log_disk_size, memory_limit * 3) + datafile_size = min((disk_free - log_size - SLOG_SIZE - log_disk_size) * DATA_RESERVED, memory_limit * 3) + datafile_maxsize = max((disk_free - log_size - SLOG_SIZE - log_disk_size) * DATA_RESERVED, memory_limit * 3) + 
datafile_next = DATA_NEXT * datafile_maxsize if auto_set_datafile_size: - update_server_conf(server, 'datafile_size', format_size(datafile_size, 0)) + update_server_conf(server, 'datafile_size', str(Capacity(datafile_size, 0))) + if datafile_maxsize > datafile_size: + update_server_conf(server, 'datafile_maxsize', str(Capacity(datafile_maxsize, 0))) + update_server_conf(server, 'datafile_next', str(Capacity(datafile_next, 0))) if auto_set_log_disk_size: - update_server_conf(server, 'log_disk_size', format_size(log_disk_size, 0)) + update_server_conf(server, 'log_disk_size', str(Capacity(log_disk_size, 0))) else: datafile_min_memory_limit = memory_limit if auto_set_datafile_size: @@ -405,18 +387,21 @@ def summit_config(): min_log_size = log_size if data_dir_mount == home_path_mount else 0 disk_free = data_dir_disk['avail'] min_need = min_log_size + datafile_size + SLOG_SIZE + if generate_check and min_need > disk_free: if not auto_set_memory: - stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=format_size(disk_free), need=format_size(min_need))) + stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=str(Capacity(disk_free)), need=str(Capacity(min_need)))) success = False continue datafile_min_memory_limit = (disk_free - min_log_size - SLOG_SIZE) / 3 if datafile_min_memory_limit < min_memory: - stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=format_size(disk_free), need=format_size(min_need))) + stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=str(Capacity(disk_free)), need=str(Capacity(min_need)))) success = False continue - datafile_min_memory_limit = parse_size(format_size(datafile_min_memory_limit, 0)) + datafile_min_memory_limit = Capacity(str(Capacity(datafile_min_memory_limit, 0))).btyes datafile_size = datafile_min_memory_limit * 3 + datafile_maxsize = (disk_free - min_log_size - SLOG_SIZE) * DATA_RESERVED + datafile_next = DATA_NEXT * datafile_maxsize log_disk_min_memory_limit = memory_limit if auto_set_log_disk_size: @@ -426,27 +411,30 @@ def summit_config(): min_need = min_log_size + log_disk_size if generate_check and min_need > disk_free: if not auto_set_memory: - stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=format_size(disk_free), need=format_size(min_need))) + stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=str(Capacity(disk_free)), need=str(Capacity(min_need)))) success = False continue log_disk_min_memory_limit = (disk_free - log_size) / 3 if log_disk_min_memory_limit < min_memory: - stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=format_size(disk_free), need=format_size(min_need))) + stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=str(Capacity(disk_free)), need=str(Capacity(min_need)))) success = False continue - log_disk_min_memory_limit = parse_size(format_size(log_disk_min_memory_limit, 0)) + log_disk_min_memory_limit = Capacity(str(Capacity(log_disk_min_memory_limit, 0))).bytes log_disk_size = log_disk_min_memory_limit * 3 if auto_set_memory: - update_server_conf(server, 'memory_limit', format_size(memory_limit, 0)) + update_server_conf(server, 'memory_limit', str(Capacity(memory_limit, 0))) if auto_set_system_memory: system_memory = get_system_memory(memory_limit, min_pool_memory, generate_config_mini) update_server_conf(server, 'system_memory', system_memory) if 
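To make the new sizing concrete, here is a small walk-through of the shared data/clog mount branch above; all figures are illustrative assumptions, not defaults:

    # Purely illustrative figures for the shared data/clog mount branch above.
    G = 1 << 30
    disk_free, memory_limit, system_memory = 500 * G, 16 * G, 4 * G
    log_size, SLOG_SIZE = 4 * G, 4 * G
    DATA_RESERVED, DATA_NEXT = 0.95, 0.1

    log_disk_size = (memory_limit - system_memory) * 3 + system_memory   # 40G
    usable = (disk_free - log_size - SLOG_SIZE - log_disk_size) * DATA_RESERVED
    datafile_size = min(usable, memory_limit * 3)                        # 48G: starts small
    datafile_maxsize = max(usable, memory_limit * 3)                     # ~429G: growth ceiling
    datafile_next = DATA_NEXT * datafile_maxsize                         # ~43G per extension

The switch from max to min for datafile_size, together with the new datafile_maxsize/datafile_next pair, means the data file now starts at the smaller candidate and is allowed to auto-extend toward the reserved ceiling instead of being fully pre-allocated up front.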
auto_set_datafile_size: - update_server_conf(server, 'datafile_size', format_size(datafile_size, 0)) + update_server_conf(server, 'datafile_size', str(Capacity(datafile_size, 0))) + if datafile_maxsize > datafile_size: + update_server_conf(server, 'datafile_maxsize', str(Capacity(datafile_maxsize, 0))) + update_server_conf(server, 'datafile_next', str(Capacity(datafile_next, 0))) if auto_set_log_disk_size: - update_server_conf(server, 'log_disk_size', format_size(log_disk_size, 0)) + update_server_conf(server, 'log_disk_size', str(Capacity(log_disk_size, 0))) if memory_limit < PRO_MEMORY_MIN: update_server_conf(server, 'production_mode', False) @@ -479,10 +467,10 @@ def summit_config(): server_info = servers_info.get(server) if not server_info: continue - memory_limit = server_info['memory_limit'] - system_memory = server_info['system_memory'] - log_disk_size = server_info['log_disk_size'] - min_pool_memory = server_info['min_pool_memory'] + memory_limit = Capacity(server_info['memory_limit']).btyes + system_memory = Capacity(server_info['system_memory']).btyes + log_disk_size = Capacity(server_info['log_disk_size']).btyes + min_pool_memory = Capacity(server_info['min_pool_memory']).btyes if not sys_log_disk_size: if not sys_memory_size: sys_memory_size = max(min_pool_memory, min(int((memory_limit - system_memory) * 0.25), 16 << 30)) @@ -493,7 +481,7 @@ def summit_config(): if expect_log_disk_size > max_available and generate_check: stdio.error(err.EC_OCP_EXPRESS_META_DB_NOT_ENOUGH_LOG_DISK_AVAILABLE.format(avail=max_available, need=expect_log_disk_size)) success = False - cluster_config.update_global_conf('ocp_meta_tenant_log_disk_size', format_size(expect_log_disk_size, 0), False) + cluster_config.update_global_conf('ocp_meta_tenant_log_disk_size', str(Capacity(expect_log_disk_size, 0)), False) if generate_config_mini and 'ocp_meta_tenant_memory_size' not in global_config and 'memory_size' not in global_config.get('ocp_meta_tenant', {}): update_global_conf('ocp_meta_tenant_memory_size', '1536M') @@ -512,12 +500,12 @@ def summit_config(): if key in generate_configs.get(server, {}): value = generate_configs[server][key] servers.append(server) - values.append(parse_size(value) if is_capacity_key else value) + values.append(Capacity(value).btyes if is_capacity_key else value) if values: if len(values) != server_num and key in generate_global_config: continue value = min(values) - generate_global_config[key] = format_size(value, 0) if is_capacity_key else value + generate_global_config[key] = str(Capacity(value, 0)) if is_capacity_key else value for server in servers: del generate_configs[server][key] diff --git a/plugins/oceanbase/4.2.0.0/list_tenant.py b/plugins/oceanbase/4.2.0.0/list_tenant.py index fe03f49..2d9aa84 100644 --- a/plugins/oceanbase/4.2.0.0/list_tenant.py +++ b/plugins/oceanbase/4.2.0.0/list_tenant.py @@ -20,35 +20,8 @@ from __future__ import absolute_import, division, print_function -import re import datetime - -def parse_size(size): - _bytes = 0 - if isinstance(size, str): - size = size.strip() - if not isinstance(size, str) or size.isdigit(): - _bytes = int(size) - else: - units = {"B": 1, "K": 1 << 10, "M": 1 << 20, "G": 1 << 30, "T": 1 << 40} - match = re.match(r'^([1-9][0-9]*)\s*([B,K,M,G,T])$', size.upper()) - _bytes = int(match.group(1)) * units[match.group(2)] - return _bytes - - -def format_size(size, precision=1): - units = ['B', 'K', 'M', 'G', 'T', 'P'] - idx = 0 - if precision: - div = 1024.0 - format = '%.' 
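The tail of the generator above collapses per-server generated values into the global section. A compressed sketch of that consolidation, reusing the Capacity sketch from earlier (helper name and simplified signature are mine):

    # When every server generated the same key, the minimum wins and is promoted
    # to the global section; capacity keys are compared in bytes, then re-rendered.
    def promote_min(generate_configs, servers, key, is_capacity_key):
        values = [Capacity(generate_configs[s][key]).btyes if is_capacity_key
                  else generate_configs[s][key] for s in servers]
        best = min(values)
        for s in servers:
            del generate_configs[s][key]
        return str(Capacity(best, 0)) if is_capacity_key else best

Taking the minimum is the conservative choice: a global value must be satisfiable by every server that produced one.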
+ str(precision) + 'f%s' - else: - div = 1024 - format = '%d%s' - while idx < 5 and size >= 1024: - size /= 1024.0 - idx += 1 - return format % (size, units[idx]) +from _types import Capacity def list_tenant(plugin_context, cursor, relation_tenants={}, *args, **kwargs): @@ -106,8 +79,8 @@ def get_option(key, default=''): 'min_cpu', 'memory_size', 'max_iops', 'min_iops', 'log_disk_size', 'iops_weight', 'tenant_role'], lambda x: [x['TENANT_NAME'], x['TENANT_TYPE'], x['COMPATIBILITY_MODE'], x['PRIMARY_ZONE'], - x['MAX_CPU'], x['MIN_CPU'], format_size(x['MEMORY_SIZE']), x['MAX_IOPS'], x['MIN_IOPS'], - format_size(x['LOG_DISK_SIZE']), x['IOPS_WEIGHT'], x['TENANT_ROLE']], + x['MAX_CPU'], x['MIN_CPU'], str(Capacity(x['MEMORY_SIZE'])), x['MAX_IOPS'], x['MIN_IOPS'], + str(Capacity(x['LOG_DISK_SIZE'])), x['IOPS_WEIGHT'], x['TENANT_ROLE']], title='tenant base info') else: stdio.stop_loading('fail') diff --git a/plugins/oceanbase/4.2.0.0/parameter.yaml b/plugins/oceanbase/4.2.0.0/parameter.yaml index b9f6545..37653f0 100644 --- a/plugins/oceanbase/4.2.0.0/parameter.yaml +++ b/plugins/oceanbase/4.2.0.0/parameter.yaml @@ -1869,3 +1869,61 @@ need_redeploy: true description_en: The password for obagent monitor user description_local: obagent 监控用户的密码 +- name: ocp_monitor_tenant + require: false + type: DICT + default: + tenant_name: ocp_monitor + max_cpu: 1 + memory_size: 2147483648 + need_redeploy: true + description_en: The tenant specifications for ocp monitor db + description_local: ocp 的监控数据库使用的租户定义 +- name: ocp_monitor_tenant_max_cpu + name_local: OCP 监控数据库租户的CPU数 + essential: true + require: false + type: INT + default: 1 + need_redeploy: true + description_en: The tenant cpu count for ocp monitor db + description_local: ocp 监控数据库使用的CPU数量 +- name: ocp_monitor_tenant_memory_size + name_local: OCP 监控数据库租户内存 + essential: true + require: false + type: CAPACITY_MB + default: 2G + need_redeploy: true + description_en: The tenant memory size for ocp monitor db + description_local: ocp 监控数据库使用的租户内存大小 +- name: ocp_monitor_tenant_log_disk_size + name_local: OCP 监控数据库租户日志磁盘大小 + essential: true + require: false + type: CAPACITY_MB + default: 6656M + need_redeploy: true + description_en: The tenant log disk size for ocp monitor db + description_local: ocp 监控数据库使用的租户日志磁盘大小 +- name: ocp_monitor_db + require: false + type: SAFE_STRING + default: ocp_monitor + need_redeploy: true + description_en: The database name for ocp monitor db + description_local: ocp 的监控数据库使用的数据库名 +- name: ocp_monitor_username + require: false + type: SAFE_STRING + default: monitor + need_redeploy: true + description_en: The user name for ocp meta db + description_local: ocp 的监控数据库使用的用户名 +- name: ocp_monitor_password + require: false + type: STRING + default: oceanbase + need_redeploy: true + description_en: The password for ocp monitor db + description_local: ocp 的监控数据库使用的密码 diff --git a/plugins/oceanbase/4.2.0.0/start.py b/plugins/oceanbase/4.2.0.0/start.py index 08e6df1..2518026 100644 --- a/plugins/oceanbase/4.2.0.0/start.py +++ b/plugins/oceanbase/4.2.0.0/start.py @@ -178,6 +178,7 @@ def start(plugin_context, *args, **kwargs): not_cmd_opt = [ 'home_path', 'obconfig_url', 'root_password', 'proxyro_password', 'redo_dir', 'clog_dir', 'ilog_dir', 'slog_dir', '$_zone_idc', 'production_mode', + 'ocp_monitor_tenant', 'ocp_monitor_username', 'ocp_monitor_password', 'ocp_monitor_db', 'ocp_meta_tenant', 'ocp_meta_username', 'ocp_meta_password', 'ocp_meta_db', 'ocp_agent_monitor_password','ocp_root_password' ] get_value = lambda key: "'%s'" % 
server_config[key] if isinstance(server_config[key], str) else server_config[key] diff --git a/plugins/oceanbase/4.2.0.0/start_check.py b/plugins/oceanbase/4.2.0.0/start_check.py index 5f3d3a1..936960f 100644 --- a/plugins/oceanbase/4.2.0.0/start_check.py +++ b/plugins/oceanbase/4.2.0.0/start_check.py @@ -27,12 +27,14 @@ from math import sqrt import _errno as err +from _types import Capacity stdio = None success = True production_mode = False + def get_port_socket_inode(client, port): port = hex(port)[2:].zfill(4).upper() cmd = "bash -c 'cat /proc/net/{tcp*,udp*}' | awk -F' ' '{if($4==\"0A\") print $2,$4,$10}' | grep ':%s' | awk -F' ' '{print $3}' | uniq" % port @@ -43,26 +45,6 @@ def get_port_socket_inode(client, port): return res.stdout.strip().split('\n') -def parse_size(size): - _bytes = 0 - if not isinstance(size, str) or size.isdigit(): - _bytes = int(size) - else: - units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40} - match = re.match(r'(0|[1-9][0-9]*)\s*([B,K,M,G,T])', size.upper()) - _bytes = int(match.group(1)) * units[match.group(2)] - return _bytes - - -def format_size(size): - units = ['B', 'K', 'M', 'G', 'T', 'P'] - idx = 0 - while idx < 5 and size >= 1024: - size /= 1024.0 - idx += 1 - return '%.1f%s' % (size, units[idx]) - - def time_delta(client): time_st = time.time() * 1000 time_srv = int(client.execute_command('date +%s%N').stdout) / 1000000 @@ -383,12 +365,12 @@ def system_memory_check(): memory_limit = 0 percentage = 0 if server_config.get('memory_limit'): - memory_limit = parse_size(server_config['memory_limit']) + memory_limit = Capacity(server_config['memory_limit']).btyes if production_mode and memory_limit < PRO_MEMORY_MIN: - error('mem', err.EC_OBSERVER_PRODUCTION_MODE_LIMIT.format(server=server, key='memory_limit', limit=format_size(PRO_MEMORY_MIN)), [err.SUB_SET_NO_PRODUCTION_MODE.format()]) + error('mem', err.EC_OBSERVER_PRODUCTION_MODE_LIMIT.format(server=server, key='memory_limit', limit=Capacity(PRO_MEMORY_MIN)), [err.SUB_SET_NO_PRODUCTION_MODE.format()]) memory['num'] += memory_limit elif 'memory_limit_percentage' in server_config: - percentage = int(parse_size(server_config['memory_limit_percentage'])) + percentage = server_config['memory_limit_percentage'] memory['percentage'] += percentage else: percentage = 80 @@ -396,7 +378,7 @@ def system_memory_check(): memory['servers'][server] = { 'num': memory_limit, 'percentage': percentage, - 'system_memory': parse_size(server_config.get('system_memory', 0)) + 'system_memory': Capacity(server_config.get('system_memory', 0)).btyes } data_path = server_config['data_dir'] if server_config.get('data_dir') else os.path.join(server_config['home_path'], 'store') @@ -405,14 +387,14 @@ def system_memory_check(): if not client.execute_command('ls %s/sstable/block_file' % data_path): disk[data_path] = {'server': server} clog_mount[clog_dir] = {'server': server} - if 'datafile_size' in server_config and server_config['datafile_size'] and parse_size(server_config['datafile_size']): + if 'datafile_size' in server_config and server_config['datafile_size'] and server_config['datafile_size']: # if need is string, it means use datafile_size disk[data_path]['need'] = server_config['datafile_size'] elif 'datafile_disk_percentage' in server_config and server_config['datafile_disk_percentage']: # if need is integer, it means use datafile_disk_percentage disk[data_path]['need'] = int(server_config['datafile_disk_percentage']) - if 'log_disk_size' in server_config and server_config['log_disk_size'] and 
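The start.py hunk above adds the ocp_monitor_* keys to not_cmd_opt so the monitor tenant credentials, like the meta tenant ones, never leak onto the observer command line; they stay in the deploy config for bootstrap to consume. A compressed sketch of that filtering (the list is abbreviated and the helper name is mine):

    # Keys in not_cmd_opt stay in the deploy config and are never rendered as
    # observer CLI flags; list and helper name are illustrative.
    not_cmd_opt = {'home_path', 'root_password',
                   'ocp_monitor_tenant', 'ocp_monitor_username',
                   'ocp_monitor_password', 'ocp_monitor_db'}

    def cli_opts(server_config):
        get_value = lambda v: "'%s'" % v if isinstance(v, str) else v
        return {k: get_value(v) for k, v in server_config.items() if k not in not_cmd_opt}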
parse_size(server_config['log_disk_size']): + if 'log_disk_size' in server_config and server_config['log_disk_size'] and server_config['log_disk_size']: # if need is string, it means use log_disk_size clog_mount[clog_dir]['need'] = server_config['log_disk_size'] elif 'log_disk_percentage' in server_config and server_config['log_disk_percentage']: @@ -570,15 +552,15 @@ def system_memory_check(): for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout): if k in memory_key_map: key = memory_key_map[k] - server_memory_stats[key] = parse_size(str(v)) + server_memory_stats[key] = Capacity(str(v)).btyes ip_server_memory_info[ip] = server_memory_stats server_memory_stat = servers_memory[ip] min_start_need = server_num * START_NEED_MEMORY - total_use = server_memory_stat['percentage'] * server_memory_stats['total'] / 100 + server_memory_stat['num'] + total_use = int(server_memory_stat['percentage'] * server_memory_stats['total'] / 100 + server_memory_stat['num']) if min_start_need > server_memory_stats['available']: for server in ip_servers: - error('mem', err.EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE.format(ip=ip, available=format_size(server_memory_stats['available']), need=format_size(min_start_need)), [err.SUG_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE.format(ip=ip)]) + error('mem', err.EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE.format(ip=ip, available=Capacity(server_memory_stats['available']), need=Capacity(min_start_need)), [err.SUG_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE.format(ip=ip)]) elif total_use > server_memory_stats['free'] + server_memory_stats['buffers'] + server_memory_stats['cached']: for server in ip_servers: server_generate_config = generate_configs.get(server, {}) @@ -588,11 +570,11 @@ def system_memory_check(): if key in global_generate_config or key in server_generate_config: suggest.auto_fix = False break - error('mem', err.EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED.format(ip=ip, free=format_size(server_memory_stats['free']), cached=format_size(server_memory_stats['buffers'] + server_memory_stats['cached']), need=format_size(total_use)), [suggest]) + error('mem', err.EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED.format(ip=ip, free=Capacity(server_memory_stats['free']), cached=Capacity(server_memory_stats['buffers'] + server_memory_stats['cached']), need=Capacity(total_use)), [suggest]) elif total_use > server_memory_stats['free']: system_memory_check() for server in ip_servers: - alert('mem', err.EC_OBSERVER_NOT_ENOUGH_MEMORY.format(ip=ip, free=format_size(server_memory_stats['free']), need=format_size(total_use)), [err.SUG_OBSERVER_REDUCE_MEM.format()]) + alert('mem', err.EC_OBSERVER_NOT_ENOUGH_MEMORY.format(ip=ip, free=Capacity(server_memory_stats['free']), need=Capacity(total_use)), [err.SUG_OBSERVER_REDUCE_MEM.format()]) else: system_memory_check() @@ -618,7 +600,7 @@ def system_memory_check(): # slog need 4G disk[mount_path]['need'] += max(disk[mount_path]['total'] - slog_size, 0) * need / 100 else: - disk[mount_path]['need'] += parse_size(need) + disk[mount_path]['need'] += Capacity(need).btyes disk[mount_path]['need'] += slog_size disk[mount_path]['is_data_disk'] = True @@ -638,7 +620,7 @@ def system_memory_check(): log_disk_size = disk[mount_path]['total'] * need / 100 else: # log_disk_size - log_disk_size = parse_size(need) + log_disk_size = Capacity(need).btyes servers_log_disk_size[servers_clog_mount[ip][path]['server']] = log_disk_size disk[mount_path]['need'] += log_disk_size disk[mount_path]['is_clog_disk'] = True @@ -678,7 +660,7 @@ def system_memory_check(): break 
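The memory checks above parse meminfo-style command output with the regex (\w+)\s*:\s*(\d+\s*\w+) and now convert each value through Capacity. A self-contained illustration; the sample output is hypothetical and memory_key_map's exact contents are not shown in this diff, so the mapping below is an assumption:

    import re

    # Reuses the Capacity sketch from earlier.
    output = 'MemTotal: 32 GB\nMemFree: 10 GB\nBuffers: 1 GB\nCached: 6 GB\nMemAvailable: 17 GB'
    memory_key_map = {'MemTotal': 'total', 'MemFree': 'free', 'MemAvailable': 'available',
                      'Buffers': 'buffers', 'Cached': 'cached'}
    server_memory_stats = {}
    for k, v in re.findall(r'(\w+)\s*:\s*(\d+\s*\w+)', output):
        if k in memory_key_map:
            server_memory_stats[memory_key_map[k]] = Capacity(str(v)).btyes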
tmp_suggests.append(suggest) tmp_suggests = sorted(tmp_suggests, key=lambda suggest: suggest.auto_fix, reverse=True) - critical('disk', err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=p, avail=format_size(avail), need=format_size(need)), tmp_suggests + suggests) + critical('disk', err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=p, avail=Capacity(avail), need=Capacity(need)), tmp_suggests + suggests) global_conf = cluster_config.get_global_conf() has_ocp = 'ocp-express' in plugin_context.components @@ -691,7 +673,7 @@ def system_memory_check(): for key in global_conf_with_default: if key.startswith(ocp_meta_tenant_prefix) and original_global_conf.get(key, None): global_conf_with_default['ocp_meta_tenant'][key.replace(ocp_meta_tenant_prefix, '', 1)] = global_conf_with_default[key] - meta_db_memory_size = parse_size(global_conf_with_default['ocp_meta_tenant'].get('memory_size')) + meta_db_memory_size = Capacity(global_conf_with_default['ocp_meta_tenant'].get('memory_size')).btyes servers_sys_memory = {} if meta_db_memory_size: sys_memory_size = None @@ -707,7 +689,7 @@ def system_memory_check(): if system_memory == 0: system_memory = get_system_memory(memory_limit, min_pool_memory) if not sys_memory_size: - sys_memory_size = servers_sys_memory[server] = max(min_pool_memory, min((memory_limit - system_memory) * 0.25, parse_size('16G'))) + sys_memory_size = servers_sys_memory[server] = max(min_pool_memory, min((memory_limit - system_memory) * 0.25, Capacity('16G').btyes)) if meta_db_memory_size + system_memory + sys_memory_size <= memory_limit: break else: @@ -718,7 +700,7 @@ def system_memory_check(): error('ocp meta db', err.EC_OCP_EXPRESS_META_DB_NOT_ENOUGH_MEM.format(), [suggest]) meta_db_log_disk_size = global_conf_with_default['ocp_meta_tenant'].get('log_disk_size') - meta_db_log_disk_size = parse_size(meta_db_log_disk_size) if meta_db_log_disk_size else meta_db_log_disk_size + meta_db_log_disk_size = Capacity(meta_db_log_disk_size).btyes if meta_db_log_disk_size else meta_db_log_disk_size if not meta_db_log_disk_size and meta_db_memory_size: meta_db_log_disk_size = meta_db_memory_size * 3 if meta_db_log_disk_size: diff --git a/plugins/oceanbase/4.2.1.0/bootstrap.py b/plugins/oceanbase/4.2.1.0/bootstrap.py index dafb55a..4545547 100644 --- a/plugins/oceanbase/4.2.1.0/bootstrap.py +++ b/plugins/oceanbase/4.2.1.0/bootstrap.py @@ -26,6 +26,7 @@ from _deploy import InnerConfigItem +## start generating ocp and ocp-express tenant info from this version, ocp releases with ob from 4.2.1 packaged in ocp-all-in-one def bootstrap(plugin_context, *args, **kwargs): cluster_config = plugin_context.cluster_config stdio = plugin_context.stdio @@ -127,25 +128,39 @@ def is_bootstrap(): stdio.verbose(sql) raise_cursor.execute(sql, [value]) - has_ocp = 'ocp-express' in added_components and 'ocp-express' in be_depend - if any([key in global_conf for key in ["ocp_meta_tenant", "ocp_meta_db", "ocp_meta_username", "ocp_meta_password"]]): - has_ocp = True - if has_ocp: - global_conf_with_default = deepcopy(cluster_config.get_global_conf_with_default()) - original_global_conf = cluster_config.get_original_global_conf() - ocp_meta_tenant_prefix = 'ocp_meta_tenant_' + # check the requirements of ocp meta and monitor tenant + global_conf_with_default = deepcopy(cluster_config.get_global_conf_with_default()) + original_global_conf = cluster_config.get_original_global_conf() + + ocp_tenants = [] + tenants_componets_map = { + "meta": ["ocp-express", "ocp-server", "ocp-server-ce"], + "monitor": ["ocp-server", 
"ocp-server-ce"], + } + ocp_tenant_keys = ['tenant', 'db', 'username', 'password'] + for tenant in tenants_componets_map: + components = tenants_componets_map[tenant] + prefix = "ocp_%s_" % tenant + if not any([component in added_components and component in be_depend for component in components]): + for key in ocp_tenant_keys: + config_key = prefix + key + if config_key in global_conf: + break + else: + continue + # set create tenant variable for key in global_conf_with_default: - if key.startswith(ocp_meta_tenant_prefix) and original_global_conf.get(key, None): - global_conf_with_default['ocp_meta_tenant'][key.replace(ocp_meta_tenant_prefix, '', 1)] = global_conf_with_default[key] - tenant_info = global_conf_with_default["ocp_meta_tenant"] + if key.startswith(prefix) and original_global_conf.get(key, None): + global_conf_with_default[prefix + 'tenant'][key.replace(prefix, '', 1)] = global_conf_with_default[key] + tenant_info = global_conf_with_default[prefix + "tenant"] tenant_info["variables"] = "ob_tcp_invited_nodes='%'" tenant_info["create_if_not_exists"] = True - tenant_info["database"] = global_conf_with_default["ocp_meta_db"] - tenant_info["db_username"] = global_conf_with_default["ocp_meta_username"] - tenant_info["db_password"] = global_conf_with_default.get("ocp_meta_password", "") - tenant_info["ocp_root_password"] = global_conf_with_default.get("ocp_root_password", "") - tenant_options = Values(tenant_info) - plugin_context.set_variable("create_tenant_options", tenant_options) + tenant_info["database"] = global_conf_with_default[prefix + "db"] + tenant_info["db_username"] = global_conf_with_default[prefix + "username"] + tenant_info["db_password"] = global_conf_with_default.get(prefix + "password", "") + tenant_info["{0}_root_password".format(tenant_info['tenant_name'])] = global_conf_with_default.get(prefix + "password", "") + ocp_tenants.append(Values(tenant_info)) + plugin_context.set_variable("create_tenant_options", ocp_tenants) # wait for server online all_server_online = False diff --git a/plugins/oceanbase/4.2.1.0/generate_config.py b/plugins/oceanbase/4.2.1.0/generate_config.py index 478d1b7..273afa3 100644 --- a/plugins/oceanbase/4.2.1.0/generate_config.py +++ b/plugins/oceanbase/4.2.1.0/generate_config.py @@ -28,35 +28,7 @@ from _errno import EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE, EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED, EC_OBSERVER_GET_MEMINFO_FAIL import _errno as err from tool import ConfigUtil - - -def parse_size(size): - _bytes = 0 - if not isinstance(size, str) or size.isdigit(): - _bytes = int(size) - else: - units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40} - match = re.match(r'(0|[1-9][0-9]*)\s*([B,K,M,G,T])', size.upper()) - _bytes = int(match.group(1)) * units[match.group(2)] - return _bytes - - -def format_size(size, precision=1): - units = ['B', 'K', 'M', 'G'] - units_num = len(units) - 1 - idx = 0 - if precision: - div = 1024.0 - format = '%.' 
+ str(precision) + 'f%s' - limit = 1024 - else: - div = 1024 - limit = 1024 - format = '%d%s' - while idx < units_num and size >= limit: - size /= div - idx += 1 - return format % (size, units[idx]) +from _types import Capacity def get_system_memory(memory_limit, min_pool_memory, generate_config_mini): @@ -106,6 +78,7 @@ def update_server_conf(server, key, value): generate_configs[server][key] = value def update_global_conf(key, value): generate_configs['global'][key] = value + def summit_config(): generate_global_config = generate_configs['global'] for key in generate_global_config: @@ -171,7 +144,7 @@ def summit_config(): auto_set_min_pool_memory = False system_memory = 0 if user_server_config.get('system_memory'): - system_memory = parse_size(user_server_config.get('system_memory')) + system_memory = Capacity(user_server_config.get('system_memory')).btyes if generate_config_mini and '__min_full_resource_pool_memory' not in user_server_config: auto_set_min_pool_memory = True min_pool_memory = server_config['__min_full_resource_pool_memory'] @@ -192,11 +165,11 @@ def summit_config(): for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout): if k in memory_key_map: key = memory_key_map[k] - server_memory_stats[key] = parse_size(str(v)) + server_memory_stats[key] = Capacity(str(v)).btyes if user_server_config.get('memory_limit_percentage'): if ip in ip_server_memory_info: - total_memory = parse_size(ip_server_memory_info[ip]['total']) + total_memory = Capacity(ip_server_memory_info[ip]['total']).btyes memory_limit = int(total_memory * user_server_config.get('memory_limit_percentage') / 100) elif generate_check: stdio.error(EC_OBSERVER_GET_MEMINFO_FAIL.format(server=server)) @@ -207,7 +180,7 @@ def summit_config(): elif not server_config.get('memory_limit'): if generate_config_mini: memory_limit = MINI_MEMORY_SIZE - update_server_conf(server, 'memory_limit', format_size(memory_limit, 0)) + update_server_conf(server, 'memory_limit', str(Capacity(memory_limit, 0))) update_server_conf(server, 'production_mode', False) if auto_set_min_pool_memory: min_pool_memory = 1073741824 @@ -217,16 +190,26 @@ def summit_config(): server_memory_stats = ip_server_memory_info[ip] if generate_check: if server_memory_stats['available'] < START_NEED_MEMORY: - stdio.error(EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE.format(ip=ip, available=format_size(server_memory_stats['available']), need=format_size(START_NEED_MEMORY))) + stdio.error(EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE.format(ip=ip, available=Capacity(server_memory_stats['available']), need=Capacity(START_NEED_MEMORY))) success = False continue if server_memory_stats['free'] + server_memory_stats['buffers'] + server_memory_stats['cached'] < MIN_MEMORY: - stdio.error(EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED.format(ip=ip, free=format_size(server_memory_stats['free']), cached=format_size(server_memory_stats['buffers'] + server_memory_stats['cached']), need=format_size(MIN_MEMORY))) + stdio.error(EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED.format(ip=ip, free=Capacity(server_memory_stats['free']), cached=Capacity(server_memory_stats['buffers'] + server_memory_stats['cached']), need=Capacity(MIN_MEMORY))) success = False continue - memory_limit = max(MIN_MEMORY, int(server_memory_stats['available'] * 0.9)) - update_server_conf(server, 'memory_limit', format_size(memory_limit, 0)) + + # calculate memory available after reserve for other components + resource = plugin_context.namespace.get_variable("required_resource") + available_memory = server_memory_stats['available'] + 
if resource is not None: + for _, reserved_resource in resource.items(): + if 'memory' in reserved_resource.keys(): + for server_ip in reserved_resource['memory'].keys(): + if server_ip == ip: + available_memory = available_memory - Capacity(reserved_resource['memory'][server_ip]).btyes + memory_limit = max(MIN_MEMORY, int(available_memory * 0.9)) + update_server_conf(server, 'memory_limit', str(Capacity(memory_limit, 0))) auto_set_memory = True elif generate_check: stdio.error(EC_OBSERVER_GET_MEMINFO_FAIL.format(server=server)) @@ -235,12 +218,12 @@ def summit_config(): else: memory_limit = MIN_MEMORY else: - memory_limit = parse_size(server_config.get('memory_limit')) + memory_limit = Capacity(server_config.get('memory_limit')).btyes if system_memory == 0: auto_set_system_memory = True system_memory = get_system_memory(memory_limit, min_pool_memory, generate_config_mini) - update_server_conf(server, 'system_memory', format_size(system_memory, 0)) + update_server_conf(server, 'system_memory', str(Capacity(system_memory, 0))) # cpu if not server_config.get('cpu_count'): @@ -256,8 +239,8 @@ def summit_config(): stdio.warn('(%s): automatically adjust the cpu_count %s' % (server, MIN_CPU_COUNT)) # disk - datafile_size = parse_size(server_config.get('datafile_size', 0)) - log_disk_size = parse_size(server_config.get('log_disk_size', 0)) + datafile_size = Capacity(server_config.get('datafile_size', 0)).btyes + log_disk_size = Capacity(server_config.get('log_disk_size', 0)).btyes if not server_config.get('datafile_size') or not server_config.get('log_disk_size'): disk = {'/': 0} ret = client.execute_command('df --block-size=1024') @@ -309,21 +292,21 @@ def summit_config(): datafile_size = data_dir_disk['total'] * datafile_disk_percentage / 100 elif generate_config_mini: datafile_size = MINI_DATA_FILE_SIZE - update_server_conf(server, 'datafile_size', format_size(datafile_size, 0)) + update_server_conf(server, 'datafile_size', str(Capacity(datafile_size, 0))) if 'datafile_maxsize' not in user_server_config: - update_server_conf(server, 'datafile_maxsize', format_size(MINI_DATA_FILE_MAX_SIZE, 0)) + update_server_conf(server, 'datafile_maxsize', str(Capacity(MINI_DATA_FILE_MAX_SIZE, 0))) if 'datafile_next' not in user_server_config: - update_server_conf(server, 'datafile_next', format_size(MINI_DATA_FILE_NEXT, 0)) + update_server_conf(server, 'datafile_next', str(Capacity(MINI_DATA_FILE_NEXT, 0))) else: auto_set_datafile_size = True if not log_disk_size: log_disk_percentage = int(user_server_config.get('log_disk_percentage', 0)) if log_disk_percentage: - log_disk_size = clog_dir_disk['total'] * log_disk_percentage / 100 + log_disk_size = int(clog_dir_disk['total'] * log_disk_percentage / 100) elif generate_config_mini: log_disk_size = MINI_LOG_DISK_SIZE - update_server_conf(server, 'log_disk_size', format_size(log_disk_size, 0)) + update_server_conf(server, 'log_disk_size', str(Capacity(log_disk_size, 0))) else: auto_set_log_disk_size = True @@ -332,6 +315,9 @@ def summit_config(): else: log_size = (256 << 20) * int(user_server_config.get('max_syslog_file_count', max_syslog_file_count_default)) * 4 + datafile_maxsize = datafile_next = 0 + DATA_RESERVED = 0.95 + DATA_NEXT = 0.1 if clog_dir_mount == data_dir_mount: min_log_size = log_size if clog_dir_mount == home_path_mount else 0 MIN_NEED = min_log_size + SLOG_SIZE @@ -339,34 +325,34 @@ def summit_config(): datafile_size =min_datafile_size = MINI_DATA_FILE_SIZE else: min_datafile_size = datafile_size - MIN_NEED += min_datafile_size + MIN_NEED += 
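New in the 4.2.1.0 generator, the loop above deducts memory that other components have reserved on the same host before deriving memory_limit. required_resource is read from the plugin namespace and is assumed here to map component name to {'memory': {ip: capacity}}, which is what the loop implies; the helper below is an illustrative restatement reusing the Capacity sketch:

    def available_after_reservations(available_memory, required_resource, ip):
        for reserved in (required_resource or {}).values():
            for server_ip, amount in reserved.get('memory', {}).items():
                if server_ip == ip:
                    available_memory -= Capacity(amount).btyes
        return available_memory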
Capacity(min_datafile_size).btyes if auto_set_log_disk_size: log_disk_size = min_log_disk_size = (memory_limit - system_memory) * 3 + system_memory else: min_log_disk_size = log_disk_size - MIN_NEED += min_log_disk_size - min_need = min_log_size + min_datafile_size + min_log_disk_size + MIN_NEED += Capacity(min_log_disk_size).btyes + min_need = min_log_size + Capacity(min_datafile_size).btyes + Capacity(min_log_disk_size).btyes disk_free = data_dir_disk['avail'] if MIN_NEED > disk_free: if generate_check: - stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=format_size(disk_free), need=format_size(MIN_NEED))) + stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=Capacity(disk_free), need=Capacity(MIN_NEED))) success = False continue else: if auto_set_memory: memory_limit = MIN_MEMORY - update_server_conf(server, 'memory_limit', format_size(memory_limit, 0)) + update_server_conf(server, 'memory_limit', str(Capacity(memory_limit, 0))) if auto_set_system_memory: system_memory = get_system_memory(memory_limit, min_pool_memory, generate_config_mini) - update_server_conf(server, 'system_memory', format_size(system_memory, 0)) + update_server_conf(server, 'system_memory', str(Capacity(system_memory, 0))) if auto_set_datafile_size: datafile_size = MINI_DATA_FILE_SIZE if auto_set_log_disk_size: log_disk_size = (memory_limit - system_memory) * 3 + system_memory elif min_need > disk_free: if generate_check and not auto_set_memory: - stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=format_size(disk_free), need=format_size(min_need))) + stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=Capacity(disk_free), need=Capacity(min_need))) success = False continue @@ -378,26 +364,33 @@ def summit_config(): if auto_set_log_disk_size is False: disk_free -= min_log_disk_size memory_factor -= 3 - memory_limit = format_size(disk_free / max(1, memory_factor), 0) + memory_limit = str(Capacity(disk_free / max(1, memory_factor), 0)) update_server_conf(server, 'memory_limit', memory_limit) - memory_limit = parse_size(memory_limit) + memory_limit = Capacity(memory_limit).btyes if auto_set_system_memory: system_memory = get_system_memory(memory_limit, min_pool_memory, generate_config_mini) - update_server_conf(server, 'system_memory', format_size(system_memory, 0)) + update_server_conf(server, 'system_memory', str(Capacity(system_memory, 0))) if auto_set_log_disk_size: log_disk_size = (memory_limit - system_memory) * 3 + system_memory if auto_set_datafile_size: - datafile_size = max(disk_free - log_disk_size, memory_limit * 3) + datafile_size = min(disk_free - log_disk_size, memory_limit * 3) + if datafile_maxsize > datafile_size: + datafile_maxsize = max(disk_free - log_disk_size, memory_limit * 3) + datafile_next = DATA_NEXT * datafile_maxsize else: if auto_set_log_disk_size: log_disk_size = (memory_limit - system_memory) * 3 + system_memory if auto_set_datafile_size: - datafile_size = max(disk_free - log_size - SLOG_SIZE - log_disk_size, memory_limit * 3) + datafile_size = min((disk_free - log_size - SLOG_SIZE - log_disk_size) * DATA_RESERVED, memory_limit * 3) + datafile_maxsize = max((disk_free - log_size - SLOG_SIZE - log_disk_size) * DATA_RESERVED, memory_limit * 3) + datafile_next = DATA_NEXT * datafile_maxsize if auto_set_datafile_size: - update_server_conf(server, 'datafile_size', format_size(datafile_size, 0)) + update_server_conf(server, 'datafile_size', 
str(Capacity(datafile_size, 0))) + update_server_conf(server, 'datafile_maxsize', str(Capacity(datafile_maxsize, 0))) + update_server_conf(server, 'datafile_next', str(Capacity(datafile_next, 0))) if auto_set_log_disk_size: - update_server_conf(server, 'log_disk_size', format_size(log_disk_size, 0)) + update_server_conf(server, 'log_disk_size', str(Capacity(log_disk_size, 0))) else: datafile_min_memory_limit = memory_limit if auto_set_datafile_size: @@ -407,16 +400,18 @@ def summit_config(): min_need = min_log_size + datafile_size + SLOG_SIZE if generate_check and min_need > disk_free: if not auto_set_memory: - stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=format_size(disk_free), need=format_size(min_need))) + stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=Capacity(disk_free), need=Capacity(min_need))) success = False continue datafile_min_memory_limit = (disk_free - min_log_size - SLOG_SIZE) / 3 if datafile_min_memory_limit < min_memory: - stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=format_size(disk_free), need=format_size(min_need))) + stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=Capacity(disk_free), need=Capacity(min_need))) success = False continue - datafile_min_memory_limit = parse_size(format_size(datafile_min_memory_limit, 0)) + datafile_min_memory_limit = Capacity(str(Capacity(datafile_min_memory_limit, 0))).btyes datafile_size = datafile_min_memory_limit * 3 + datafile_maxsize = (disk_free - min_log_size - SLOG_SIZE) * DATA_RESERVED + datafile_next = DATA_NEXT * datafile_maxsize log_disk_min_memory_limit = memory_limit if auto_set_log_disk_size: @@ -426,27 +421,30 @@ def summit_config(): min_need = min_log_size + log_disk_size if generate_check and min_need > disk_free: if not auto_set_memory: - stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=format_size(disk_free), need=format_size(min_need))) + stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=Capacity(disk_free), need=Capacity(min_need))) success = False continue log_disk_min_memory_limit = (disk_free - log_size) / 3 if log_disk_min_memory_limit < min_memory: - stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=format_size(disk_free), need=format_size(min_need))) + stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=Capacity(disk_free), need=Capacity(min_need))) success = False continue - log_disk_min_memory_limit = parse_size(format_size(log_disk_min_memory_limit, 0)) + log_disk_min_memory_limit = Capacity(str(Capacity(log_disk_min_memory_limit, 0))).btyes log_disk_size = log_disk_min_memory_limit * 3 if auto_set_memory: - update_server_conf(server, 'memory_limit', format_size(memory_limit, 0)) + update_server_conf(server, 'memory_limit', str(Capacity(memory_limit, 0))) if auto_set_system_memory: system_memory = get_system_memory(memory_limit, min_pool_memory, generate_config_mini) update_server_conf(server, 'system_memory', system_memory) if auto_set_datafile_size: - update_server_conf(server, 'datafile_size', format_size(datafile_size, 0)) + update_server_conf(server, 'datafile_size', str(Capacity(datafile_size, 0))) + if datafile_maxsize > datafile_size: + update_server_conf(server, 'datafile_maxsize', str(Capacity(datafile_maxsize, 0))) + update_server_conf(server, 'datafile_next', str(Capacity(datafile_next, 0))) if 
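The nested conversion above, Capacity(str(Capacity(x, 0))).btyes, looks redundant but is a deliberate round-trip: it snaps a raw byte count to the whole unit it would be rendered as. Demonstrated with the Capacity sketch from earlier:

    raw = (10 << 30) + 12345                       # 10G plus some slack
    snapped = Capacity(str(Capacity(raw, 0))).btyes
    assert str(Capacity(raw, 0)) == '10G' and snapped == (10 << 30)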
auto_set_log_disk_size: - update_server_conf(server, 'log_disk_size', format_size(log_disk_size, 0)) + update_server_conf(server, 'log_disk_size', str(Capacity(log_disk_size, 0))) if memory_limit < PRO_MEMORY_MIN: update_server_conf(server, 'production_mode', False) @@ -479,10 +477,10 @@ def summit_config(): server_info = servers_info.get(server) if not server_info: continue - memory_limit = server_info['memory_limit'] - system_memory = server_info['system_memory'] - log_disk_size = server_info['log_disk_size'] - min_pool_memory = server_info['min_pool_memory'] + memory_limit = Capacity(server_info['memory_limit']).btyes + system_memory = Capacity(server_info['system_memory']).btyes + log_disk_size = Capacity(server_info['log_disk_size']).btyes + min_pool_memory = Capacity(server_info['min_pool_memory']).btyes if not sys_log_disk_size: if not sys_memory_size: sys_memory_size = max(min_pool_memory, min(int((memory_limit - system_memory) * 0.25), 16 << 30)) @@ -493,7 +491,7 @@ def summit_config(): if expect_log_disk_size > max_available and generate_check: stdio.error(err.EC_OCP_EXPRESS_META_DB_NOT_ENOUGH_LOG_DISK_AVAILABLE.format(avail=max_available, need=expect_log_disk_size)) success = False - cluster_config.update_global_conf('ocp_meta_tenant_log_disk_size', format_size(expect_log_disk_size, 0), False) + cluster_config.update_global_conf('ocp_meta_tenant_log_disk_size', str(Capacity(expect_log_disk_size, 0)), False) if generate_config_mini and 'ocp_meta_tenant_memory_size' not in global_config and 'memory_size' not in global_config.get('ocp_meta_tenant', {}): update_global_conf('ocp_meta_tenant_memory_size', '1536M') @@ -512,12 +510,12 @@ def summit_config(): if key in generate_configs.get(server, {}): value = generate_configs[server][key] servers.append(server) - values.append(parse_size(value) if is_capacity_key else value) + values.append(Capacity(value).btyes if is_capacity_key else value) if values: if len(values) != server_num and key in generate_global_config: continue value = min(values) - generate_global_config[key] = format_size(value, 0) if is_capacity_key else value + generate_global_config[key] = str(Capacity(value, 0)) if is_capacity_key else value for server in servers: del generate_configs[server][key] diff --git a/plugins/oceanbase/4.2.1.0/parameter.yaml b/plugins/oceanbase/4.2.1.0/parameter.yaml index 505cc73..3efe577 100644 --- a/plugins/oceanbase/4.2.1.0/parameter.yaml +++ b/plugins/oceanbase/4.2.1.0/parameter.yaml @@ -1813,11 +1813,21 @@ need_redeploy: false description_en: Production mode switch, default True. 
Adjust the memory_limit and __min_full_resource_pool_memory The lower bound of memory is adjusted to 16G and 2147483648 description_local: 生产模式开关, 默认开启。开启后调整memory limit 和 __min_full_resource_pool_memory 下界调整为 16G 和 2147483648 +- name: ocp_monitor_tenant + require: false + type: DICT + default: + tenant_name: ocp_monitor + max_cpu: 1 + memory_size: 2147483648 + need_redeploy: true + description_en: The tenant specifications for ocp monitor db + description_local: ocp 的监控数据库使用的租户定义 - name: ocp_meta_tenant require: false type: DICT default: - tenant_name: ocp + tenant_name: ocp_meta max_cpu: 1 memory_size: 2147483648 need_redeploy: true @@ -1850,27 +1860,75 @@ need_redeploy: true description_en: The tenant log disk size for ocp meta db description_local: ocp express的元数据库使用的租户日志磁盘大小 +- name: ocp_monitor_tenant_max_cpu + name_local: OCP 监控数据库租户的CPU数 + essential: true + require: false + type: INT + default: 1 + need_redeploy: true + description_en: The tenant cpu count for ocp monitor db + description_local: ocp 监控数据库使用的CPU数量 +- name: ocp_monitor_tenant_memory_size + name_local: OCP 监控数据库租户内存 + essential: true + require: false + type: CAPACITY_MB + default: 2G + need_redeploy: true + description_en: The tenant memory size for ocp monitor db + description_local: ocp 监控数据库使用的租户内存大小 +- name: ocp_monitor_tenant_log_disk_size + name_local: OCP 监控数据库租户日志磁盘大小 + essential: true + require: false + type: CAPACITY_MB + default: 6656M + need_redeploy: true + description_en: The tenant log disk size for ocp monitor db + description_local: ocp 监控数据库使用的租户日志磁盘大小 - name: ocp_meta_db require: false type: SAFE_STRING - default: ocp_express + default: ocp_meta need_redeploy: true description_en: The database name for ocp meta db - description_local: ocp express的元数据库使用的数据库名 + description_local: ocp 的元数据库使用的数据库名 - name: ocp_meta_username require: false type: SAFE_STRING default: meta need_redeploy: true - description_en: The database name for ocp meta db - description_local: ocp express的元数据库使用的数据库名 + description_en: The user name for ocp meta db + description_local: ocp 的元数据库使用的用户名 - name: ocp_meta_password require: false type: STRING default: oceanbase need_redeploy: true - description_en: The database name for ocp meta db - description_local: ocp express的元数据库使用的数据库名 + description_en: The password for ocp meta db + description_local: ocp 的元数据库使用的密码 +- name: ocp_monitor_db + require: false + type: SAFE_STRING + default: ocp_monitor + need_redeploy: true + description_en: The database name for ocp monitor db + description_local: ocp 的监控数据库使用的数据库名 +- name: ocp_monitor_username + require: false + type: SAFE_STRING + default: monitor + need_redeploy: true + description_en: The user name for ocp meta db + description_local: ocp 的监控数据库使用的用户名 +- name: ocp_monitor_password + require: false + type: STRING + default: oceanbase + need_redeploy: true + description_en: The password for ocp monitor db + description_local: ocp 的监控数据库使用的密码 - name: ocp_agent_monitor_password require: false type: STRING diff --git a/plugins/oceanbase/4.2.2.0/bootstrap.py b/plugins/oceanbase/4.2.2.0/bootstrap.py index 54acdd4..9f7bf63 100644 --- a/plugins/oceanbase/4.2.2.0/bootstrap.py +++ b/plugins/oceanbase/4.2.2.0/bootstrap.py @@ -241,24 +241,38 @@ def bootstrap(plugin_context, *args, **kwargs): stdio.verbose(sql) raise_cursor.execute(sql, [value]) - has_ocp = 'ocp-express' in added_components and 'ocp-express' in be_depend - if any([key in global_conf for key in ["ocp_meta_tenant", "ocp_meta_db", "ocp_meta_username", "ocp_meta_password"]]): - 
has_ocp = True - if has_ocp: - global_conf_with_default = deepcopy(cluster_config.get_global_conf_with_default()) - original_global_conf = cluster_config.get_original_global_conf() - ocp_meta_tenant_prefix = 'ocp_meta_tenant_' + # check the requirements of ocp meta and monitor tenant + global_conf_with_default = deepcopy(cluster_config.get_global_conf_with_default()) + original_global_conf = cluster_config.get_original_global_conf() + + ocp_tenants = [] + tenants_componets_map = { + "meta": ["ocp-express", "ocp-server", "ocp-server-ce"], + "monitor": ["ocp-server", "ocp-server-ce"], + } + ocp_tenant_keys = ['tenant', 'db', 'username', 'password'] + for tenant in tenants_componets_map: + components = tenants_componets_map[tenant] + prefix = "ocp_%s_" % tenant + if not any([component in added_components and component in be_depend for component in components]): + for key in ocp_tenant_keys: + config_key = prefix + key + if config_key in global_conf: + break + else: + continue + # set create tenant variable for key in global_conf_with_default: - if key.startswith(ocp_meta_tenant_prefix) and original_global_conf.get(key, None): - global_conf_with_default['ocp_meta_tenant'][key.replace(ocp_meta_tenant_prefix, '', 1)] = global_conf_with_default[key] - tenant_info = global_conf_with_default["ocp_meta_tenant"] + if key.startswith(prefix) and original_global_conf.get(key, None): + global_conf_with_default[prefix + 'tenant'][key.replace(prefix, '', 1)] = global_conf_with_default[key] + tenant_info = global_conf_with_default[prefix + "tenant"] tenant_info["variables"] = "ob_tcp_invited_nodes='%'" tenant_info["create_if_not_exists"] = True - tenant_info["database"] = global_conf_with_default["ocp_meta_db"] - tenant_info["db_username"] = global_conf_with_default["ocp_meta_username"] - tenant_info["db_password"] = global_conf_with_default.get("ocp_meta_password", "") - tenant_info["ocp_root_password"] = global_conf_with_default.get("ocp_root_password", "") - tenant_options = Values(tenant_info) - plugin_context.set_variable("create_tenant_options", tenant_options) + tenant_info["database"] = global_conf_with_default[prefix + "db"] + tenant_info["db_username"] = global_conf_with_default[prefix + "username"] + tenant_info["db_password"] = global_conf_with_default.get(prefix + "password", "") + tenant_info["{0}_root_password".format(tenant_info['tenant_name'])] = global_conf_with_default.get(prefix + "password", "") + ocp_tenants.append(Values(tenant_info)) + plugin_context.set_variable("create_tenant_options", ocp_tenants) return plugin_context.return_true() diff --git a/plugins/oceanbase/4.2.2.0/connect.py b/plugins/oceanbase/4.2.2.0/connect.py index 0cc69c5..fcee8bc 100644 --- a/plugins/oceanbase/4.2.2.0/connect.py +++ b/plugins/oceanbase/4.2.2.0/connect.py @@ -35,6 +35,10 @@ from enum import Enum from os import path from datetime import datetime +from Crypto.Cipher import AES +from Crypto.Random import get_random_bytes +from Crypto.Util.Padding import pad +from const import RSA_KEY_SIZE if sys.version_info.major == 2: import MySQLdb as mysql else: @@ -43,6 +47,8 @@ from _errno import EC_FAIL_TO_CONNECT, EC_SQL_EXECUTE_FAILED from _stdio import SafeStdio + + class OcsResponse(object): def __init__(self, code, data, type): self.code = code @@ -206,6 +212,28 @@ def __getattr__(self, name): class OcsCursor(SafeStdio): + + class Header: + auth: str + ts: str + uri: str + keys: bytes + def __init__(self, auth, ts, uri, keys): + self.auth = auth + self.ts = ts + self.uri = uri + self.keys = keys + + def 
serialize_struct(self): + return json.dumps({ + 'auth': self.auth, + 'ts': self.ts, + 'uri': self.uri, + 'keys': base64.b64encode(self.keys).decode('utf-8') + }) + + + HEADERS = {'content-type': 'application/json'} def __init__(self, ip, port, homepath = None, password = None, stdio=None): @@ -216,6 +244,9 @@ def __init__(self, ip, port, homepath = None, password = None, stdio=None): self.homepath = homepath self.socket_file = 'obshell.' + str(port) + '.sock' self._auth_header = None + self._version = "" + self.aes_key = get_random_bytes(16) + self.aes_iv = get_random_bytes(16) @staticmethod def _encrypt(context, encrypt_key): @@ -223,6 +254,23 @@ def _encrypt(context, encrypt_key): cipher = PKCS1_cipher.new(key) return base64.b64encode(cipher.encrypt(bytes(context.encode('utf8')))).decode('utf8') + @staticmethod + def rsa_encrypt(context, encrypt_key): + key = RSA.import_key(base64.b64decode(encrypt_key)) + cipher = PKCS1_cipher.new(key) + data_to_encrypt = bytes(context.encode('utf8')) + max_chunk_size = int(RSA_KEY_SIZE / 8) - 11 + chunks = [data_to_encrypt[i:i + max_chunk_size] for i in range(0, len(data_to_encrypt), max_chunk_size)] + encrypted_chunks = [cipher.encrypt(chunk) for chunk in chunks] + encrypted = b''.join(encrypted_chunks) + encoded_encrypted_chunks = base64.b64encode(encrypted).decode('utf-8') + return encoded_encrypted_chunks + + @staticmethod + def aes_encrypt(self, data): + cipher = AES.new(self.aes_key, AES.MODE_CBC, self.aes_iv) + return base64.b64encode(cipher.encrypt(pad(bytes(data.encode('utf8')), AES.block_size))).decode('utf8') + @property def auth_header(self): if self._auth_header is None: @@ -231,25 +279,46 @@ def auth_header(self): self._auth_header = self._encrypt(auth_json, encrypt_key) return self._auth_header - def _make_headers(self, headers=None, safe=None): + @property + def version(self): + if self._version != "": + return self._version + status = requests.get(self._make_url('/api/v1/status'), headers=self._make_headers()) + if status.status_code == 200: + self._version = status.json()['data']['version'] + return self._version + else : + self.stdio.warn('get obshell version failed') + return None + + def _make_headers(self, headers=None, safe=None, uri=None): request_headers = copy.deepcopy(self.HEADERS) if safe is True : - request_headers['X-OCS-Auth'] = self.auth_header + # request_headers['X-OCS-Auth'] = self.auth_header + if self.version >= '4.2.3': + header = self.Header(auth=self.password, ts=str(int(datetime.now().timestamp()) + 100000), uri=uri, keys=self.aes_key+self.aes_iv) + request_headers['X-OCS-Header'] = self.rsa_encrypt(header.serialize_struct(), self._get_secrets()) + else: + request_headers['X-OCS-Auth'] = self.auth_header if headers: request_headers.update(headers) return request_headers def _make_url(self, url): - return 'http://{ip}:{port}/{url}'.format(ip=self.ip, port=self.port, url=url) + return 'http://{ip}:{port}{url}'.format(ip=self.ip, port=self.port, url=url) def _request(self, method, url, data=None, headers=None, params=None, safe=None, *args, **kwargs): try: if data is not None: data = json.dumps(data) + else: + data = json.dumps({}) + if safe and self.version >= '4.2.3': + data = self.aes_encrypt(self, data) self.stdio.verbose('send request to obshell: method: {}, url: {}, data: {}, headers: {}, params: {}'.format(method, url, data, headers, params)) - resp = requests.request(method, url, data=data, headers=self._make_headers(headers, safe), params=params, *args, **kwargs) + resp = requests.request(method, 
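The new obshell handshake above encrypts the header with the agent's RSA public key (chunked to RSA_KEY_SIZE/8 - 11 bytes, the PKCS#1 v1.5 ceiling) and the request body with a per-cursor AES key and IV. A self-contained PyCryptodome illustration of the AES leg, written as plain module-level helpers (the original declares aes_encrypt as a staticmethod that still receives self explicitly):

    import base64
    from Crypto.Cipher import AES
    from Crypto.Random import get_random_bytes
    from Crypto.Util.Padding import pad, unpad

    # Per-cursor 16-byte key and IV, CBC mode, PKCS#7 padding, base64 transport.
    key, iv = get_random_bytes(16), get_random_bytes(16)

    def aes_encrypt(data):
        cipher = AES.new(key, AES.MODE_CBC, iv)
        return base64.b64encode(cipher.encrypt(pad(data.encode('utf8'), AES.block_size))).decode('utf8')

    def aes_decrypt(token):  # what the obshell side would do, shown for symmetry
        cipher = AES.new(key, AES.MODE_CBC, iv)
        return unpad(cipher.decrypt(base64.b64decode(token)), AES.block_size).decode('utf8')

    assert aes_decrypt(aes_encrypt('{"op": "start"}')) == '{"op": "start"}'

A fresh cipher object is created per call, which is required for CBC since the object keeps chaining state between encrypt calls.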
+            resp = requests.request(method, self._make_url(url), data=data, headers=self._make_headers(headers, safe, url), params=params, *args, **kwargs)
         except Exception as e:
-            # self.stdio.error('request error: {}'.format(e))
+            self.stdio.error('request error: {}'.format(e))
             return None
         parsed_resp = self._response_parser(resp)
         if parsed_resp.code != 200:
@@ -294,14 +363,14 @@ def _response_parser(self, resp, is_socket=False):
     # get the public key from ocs agent
     def _get_secrets(self):
-        resp = self._request('GET', self._make_url('api/v1/secret'))
+        resp = self._request('GET', '/api/v1/secret')
         return resp.public_key if resp else None

     def request(self, method, url, data=None, headers=None, params=None, *args, **kwargs):
-        return self._request(method, self._make_url(url), data, headers, params, *args, **kwargs)
+        return self._request(method, url, data, headers, params, *args, **kwargs)

     def safe_request(self, method, url, data=None, headers=None, params=None, *args, **kwargs):
-        return self._request(method, self._make_url(url), data, headers, params, safe=True, *args, **kwargs)
+        return self._request(method, url, data, headers, params, safe=True, *args, **kwargs)

     def query_dag_util_succeed(self, _dag):
         dag = _dag
@@ -325,51 +394,51 @@ def query_dag_util_finish(self, _dag):
     # normal route
     def info_request(self):
-        resp = self.request('GET', 'api/v1/info')
+        resp = self.request('GET', '/api/v1/info')
         return resp.info if resp and resp.type == 'InfoDTO' else None

     def status_request(self):
-        resp = self.request('GET', 'api/v1/status')
+        resp = self.request('GET', '/api/v1/status')
         return resp.status if resp and resp.type == 'StatusDTO' else None

     def secret_request(self):
-        return self.request('GET', 'api/v1/secret')
+        return self.request('GET', '/api/v1/secret')

     # ob routes
     def ob_init_request(self):
-        resp = self.safe_request('POST', 'api/v1/ob/init')
+        resp = self.safe_request('POST', '/api/v1/ob/init')
         return self.query_dag_util_finish(resp.dag) if resp else False

     def ob_stop_request(self, type = 'GLOBAL', target = None):
-        resp = self.safe_request('POST', 'api/v1/ob/stop', data = {'scope': {'type': type, 'target': target}, 'force': True})
+        resp = self.safe_request('POST', '/api/v1/ob/stop', data = {'scope': {'type': type, 'target': target}, 'force': True})
         return self.query_dag_util_finish(resp.dag) if resp else False

     def ob_start_request(self, type = 'GLOBAL', target = None):
-        resp = self.safe_request('POST', 'api/v1/ob/start', data = {'scope': {'type': type, 'target': target}})
+        resp = self.safe_request('POST', '/api/v1/ob/start', data = {'scope': {'type': type, 'target': target}})
         return self.query_dag_util_finish(resp.dag) if resp else False

     def ob_info_request(self, data):
-        resp = self.safe_request('POST', 'api/v1/ob/info', data=data)
+        resp = self.safe_request('POST', '/api/v1/ob/info', data=data)
         return resp

     # agent admin routes
     def agent_join_request(self, ip, port, zone):
-        resp = self.safe_request('POST', 'api/v1/agent', data={'agentInfo': {'ip': ip, 'port': port}, 'zoneName': zone})
+        resp = self.safe_request('POST', '/api/v1/agent', data={'agentInfo': {'ip': ip, 'port': port}, 'zoneName': zone})
         return self.query_dag_util_finish(resp.dag) if resp else False

     def agent_remove_request(self, ip, port):
-        resp = self.safe_request('DELETE', 'api/v1/agent', data={'ip': ip, 'port': port})
+        resp = self.safe_request('DELETE', '/api/v1/agent', data={'ip': ip, 'port': port})
         return self.query_dag_util_finish(resp.dag) if resp else False
     def agent_remove_by_socket(self, ssh_client, ip, port):
-        resp = self._curl_socket(ssh_client, 'DELETE', 'api/v1/agent', data={'ip': ip, 'port': port})
+        resp = self._curl_socket(ssh_client, 'DELETE', '/api/v1/agent', data={'ip': ip, 'port': port})
         return self.query_dag_util_finish(resp.dag) if resp else False

     # obcluster routes
     def obcluster_config_request(self, cluster_id, cluster_name, rs_list):
         encrypt_key = self._get_secrets()
         encrypt_password = self._encrypt(self.password, encrypt_key)
-        resp = self.safe_request('POST', 'api/v1/obcluster/config', data={'clusterId': cluster_id, 'clusterName': cluster_name, 'rootPwd': encrypt_password, 'rsList': rs_list})
+        resp = self.safe_request('POST', '/api/v1/obcluster/config', data={'clusterId': cluster_id, 'clusterName': cluster_name, 'rootPwd': encrypt_password, 'rsList': rs_list})
         return self.query_dag_util_finish(resp.dag) if resp else False

     # observer routes
@@ -377,41 +446,47 @@ def observer_put_config_request(self, server_config, agent_list, restart = True):
         # convert every int value in server_config to a string
         for key in server_config:
             server_config[key] = str(server_config[key])
-        resp = self.safe_request('PUT', 'api/v1/observer/config', data={'observerConfig': server_config, 'restart': restart, 'scope': {'type': 'SERVER', 'target': agent_list}})
+        resp = self.safe_request('PUT', '/api/v1/observer/config', data={'observerConfig': server_config, 'restart': restart, 'scope': {'type': 'SERVER', 'target': agent_list}})
         return self.query_dag_util_finish(resp.dag) if resp else False

     # def observer_patch_config_request(self, server_config, servers, restart = False):
-    #     resp = self.safe_request('POST', 'api/v1/observer/config', data={'observerConfig': server_config, 'restart': restart, 'scope': {'type': 'SERVER', 'target': servers}})
+    #     resp = self.safe_request('POST', '/api/v1/observer/config', data={'observerConfig': server_config, 'restart': restart, 'scope': {'type': 'SERVER', 'target': servers}})
     #     return self.query_dag_util_succeed(resp.dag) if resp else False

     def observer_scale_out_request(self, ip, port, zone, server_config):
-        resp = self.safe_request('POST', 'api/v1/ob/scale_out', data={'agentInfo': {'ip': ip, 'port': port}, 'obConfigs': server_config,'zone': zone})
+        resp = self.safe_request('POST', '/api/v1/ob/scale_out', data={'agentInfo': {'ip': ip, 'port': port}, 'obConfigs': server_config,'zone': zone})
         return self.query_dag_util_finish(resp.dag) if resp else False

     # upgrade routes
     def pkg_upload_request(self, data = None):
-        return self.safe_request('POST', 'api/v1/upgrade/pkg/upload', data=data)
+        return self.safe_request('POST', '/api/v1/upgrade/pkg/upload', data=data)

     def params_backup_request(self, data = None):
-        return self.safe_request('POST', 'api/v1/upgrade/params/backup', data=data)
+        return self.safe_request('POST', '/api/v1/upgrade/params/backup', data=data)

     # task routes
     def get_dag_request(self, id):
-        resp = self.safe_request('GET', 'api/v1/task/dag/%s' % id)
+        resp = self.safe_request('GET', '/api/v1/task/dag/%s' % id)
         return resp.dag if resp else None

     def dag_request(self, dag, operator):
-        resp = self.safe_request('POST', 'api/v1/task/dag/%s' % dag.id, data={'operator': operator})
+        resp = self.safe_request('POST', '/api/v1/task/dag/%s' % dag.id, data={'operator': operator})
         if not resp:
             return False
         return self.query_dag_util_finish(dag)
     def get_agent_last_maintenance_dag_request(self):
-        resp = self.request('GET', 'api/v1/task/dag/maintain/agent')
+        if self.version >= '4.2.3':
+            resp = self.safe_request('GET', '/api/v1/task/dag/maintain/agent')
+        else:
+            resp = self.request('GET', '/api/v1/task/dag/maintain/agent')
         return resp.dag if resp else None

     def get_ob_last_maintenance_dag_request(self):
-        resp = self.request('GET', 'api/v1/task/dag/maintain/ob')
+        if self.version >= '4.2.3':
+            resp = self.safe_request('GET', '/api/v1/task/dag/maintain/ob')
+        else:
+            resp = self.request('GET', '/api/v1/task/dag/maintain/ob')
         return resp.dag if resp else None

 def get_ocs_cursor(plugin_context, *args, **kwargs):
@@ -524,15 +599,15 @@ def return_true(**kwargs):
     for key, value in kwargs.items():
         plugin_context.set_variable(key, value)
     return plugin_context.return_true(**kwargs)
-
+    ocs_cursor = get_ocs_cursor(plugin_context, *args, **kwargs)
+    stdio = plugin_context.stdio
     if not ocs_cursor:
         stdio.stop_loading('fail')
         return plugin_context.return_false()
     count = retry_times
     cluster_config = plugin_context.cluster_config
-    stdio = plugin_context.stdio
     if target_server:
         servers = [target_server]
         server_config = cluster_config.get_server_conf(target_server)
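For obshell 4.2.3 and later the cursor switches from the plain X-OCS-Auth header to a hybrid scheme: a fresh AES-128 key and IV travel RSA-encrypted inside X-OCS-Header, and the JSON body travels AES-CBC-encrypted. The sketch below reproduces that flow end to end with pycryptodome; the locally generated key pair stands in for the public key the agent returns from /api/v1/secret, and the 1024-bit size is only an assumption in place of const.RSA_KEY_SIZE:

    # Hybrid encryption sketch (assumed key size; pycryptodome required).
    import base64, json
    from Crypto.Cipher import AES, PKCS1_v1_5
    from Crypto.PublicKey import RSA
    from Crypto.Random import get_random_bytes
    from Crypto.Util.Padding import pad

    RSA_KEY_SIZE = 1024                       # stand-in for const.RSA_KEY_SIZE
    agent_key = RSA.generate(RSA_KEY_SIZE)    # the real key comes from /api/v1/secret
    public_key = base64.b64encode(agent_key.publickey().export_key('DER'))

    def rsa_encrypt(context, encrypt_key):
        cipher = PKCS1_v1_5.new(RSA.import_key(base64.b64decode(encrypt_key)))
        data = context.encode('utf8')
        chunk = RSA_KEY_SIZE // 8 - 11        # PKCS#1 v1.5 padding costs 11 bytes
        parts = [cipher.encrypt(data[i:i + chunk]) for i in range(0, len(data), chunk)]
        return base64.b64encode(b''.join(parts)).decode('utf-8')

    aes_key, aes_iv = get_random_bytes(16), get_random_bytes(16)
    header = json.dumps({'auth': 'root-password', 'ts': '1700000000', 'uri': '/api/v1/ob/info',
                         'keys': base64.b64encode(aes_key + aes_iv).decode('utf-8')})
    body = AES.new(aes_key, AES.MODE_CBC, aes_iv).encrypt(pad(b'{}', AES.block_size))
    print(rsa_encrypt(header, public_key)[:32], base64.b64encode(body).decode('utf-8'))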
diff --git a/plugins/oceanbase/4.2.2.0/parameter.yaml b/plugins/oceanbase/4.2.2.0/parameter.yaml
index 625af5b..fc76c9b 100644
--- a/plugins/oceanbase/4.2.2.0/parameter.yaml
+++ b/plugins/oceanbase/4.2.2.0/parameter.yaml
@@ -1880,4 +1880,62 @@
   default: ''
   need_redeploy: true
   description_en: The password for obagent monitor user
-  description_local: obagent 监控用户的密码
\ No newline at end of file
+  description_local: obagent 监控用户的密码
+- name: ocp_monitor_tenant
+  require: false
+  type: DICT
+  default:
+    tenant_name: ocp_monitor
+    max_cpu: 1
+    memory_size: 2147483648
+  need_redeploy: true
+  description_en: The tenant specifications for ocp monitor db
+  description_local: ocp 的监控数据库使用的租户定义
+- name: ocp_monitor_tenant_max_cpu
+  name_local: OCP 监控数据库租户的CPU数
+  essential: true
+  require: false
+  type: INT
+  default: 1
+  need_redeploy: true
+  description_en: The tenant cpu count for ocp monitor db
+  description_local: ocp 监控数据库使用的CPU数量
+- name: ocp_monitor_tenant_memory_size
+  name_local: OCP 监控数据库租户内存
+  essential: true
+  require: false
+  type: CAPACITY_MB
+  default: 2G
+  need_redeploy: true
+  description_en: The tenant memory size for ocp monitor db
+  description_local: ocp 监控数据库使用的租户内存大小
+- name: ocp_monitor_tenant_log_disk_size
+  name_local: OCP 监控数据库租户日志磁盘大小
+  essential: true
+  require: false
+  type: CAPACITY_MB
+  default: 6656M
+  need_redeploy: true
+  description_en: The tenant log disk size for ocp monitor db
+  description_local: ocp 监控数据库使用的租户日志磁盘大小
+- name: ocp_monitor_db
+  require: false
+  type: SAFE_STRING
+  default: ocp_monitor
+  need_redeploy: true
+  description_en: The database name for ocp monitor db
+  description_local: ocp 的监控数据库使用的数据库名
+- name: ocp_monitor_username
+  require: false
+  type: SAFE_STRING
+  default: monitor
+  need_redeploy: true
+  description_en: The user name for ocp monitor db
+  description_local: ocp 的监控数据库使用的用户名
+- name: ocp_monitor_password
+  require: false
+  type: STRING
+  default: oceanbase
+  need_redeploy: true
+  description_en: The password for ocp monitor db
+  description_local: ocp 的监控数据库使用的密码
diff --git a/plugins/oceanbase/4.2.2.0/start.py b/plugins/oceanbase/4.2.2.0/start.py
index 7344561..a8b5152 100644
--- a/plugins/oceanbase/4.2.2.0/start.py
+++ b/plugins/oceanbase/4.2.2.0/start.py
@@ -181,6 +181,7 @@ def start(plugin_context, start_obshell=True, *args, **kwargs):
     not_cmd_opt = [
         'home_path', 'obconfig_url', 'root_password', 'proxyro_password',
         'redo_dir', 'clog_dir', 'ilog_dir', 'slog_dir', '$_zone_idc', 'production_mode',
+        'ocp_monitor_tenant', 'ocp_monitor_username', 'ocp_monitor_password', 'ocp_monitor_db',
         'ocp_meta_tenant', 'ocp_meta_username', 'ocp_meta_password', 'ocp_meta_db', 'ocp_agent_monitor_password','ocp_root_password','obshell_port'
     ]
     get_value = lambda key: "'%s'" % server_config[key] if isinstance(server_config[key], str) else server_config[key]
diff --git a/plugins/oceanbase/4.2.2.0/start_check.py b/plugins/oceanbase/4.2.2.0/start_check.py
index d37ab50..0b1525e 100644
--- a/plugins/oceanbase/4.2.2.0/start_check.py
+++ b/plugins/oceanbase/4.2.2.0/start_check.py
@@ -29,6 +29,7 @@ from math import sqrt
 import _errno as err
+from _types import Capacity

 stdio = None

@@ -44,25 +45,6 @@ def get_port_socket_inode(client, port):
     stdio.verbose(res.stdout)
     return res.stdout.strip().split('\n')

-def parse_size(size):
-    _bytes = 0
-    if not isinstance(size, str) or size.isdigit():
-        _bytes = int(size)
-    else:
-        units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40}
-        match = re.match(r'(0|[1-9][0-9]*)\s*([B,K,M,G,T])', size.upper())
-        _bytes = int(match.group(1)) * units[match.group(2)]
-    return _bytes
-
-
-def format_size(size):
-    units = ['B', 'K', 'M', 'G', 'T', 'P']
-    idx = 0
-    while idx < 5 and size >= 1024:
-        size /= 1024.0
-        idx += 1
-    return '%.1f%s' % (size, units[idx])
-

 def time_delta(client):
     time_st = time.time() * 1000
@@ -133,6 +115,10 @@ def get_disk_info(all_paths, client, stdio):
     if overview_ret or all(all_path_success.values()):
         return disk_info

+def has_obshell(repository):
+    repository_dir = repository.repository_dir
+    obshell_path = os.path.join(repository_dir, 'bin', 'obshell')
+    return os.path.exists(obshell_path)

 def start_check(plugin_context, init_check_status=False, strict_check=False, work_dir_check=False, work_dir_empty_check=True, generate_configs={}, precheck=False, source_option='start', *args, **kwargs):
     def check_pass(item):
@@ -281,7 +267,10 @@ def system_memory_check():
             if client.execute_command('ls /proc/%s' % remote_pid):
                 stdio.verbose('%s is running, skip' % server)
                 work_dir_check = False
-                port_check = upgrade_opt
+                for repository in plugin_context.repositories:
+                    if repository.name == cluster_config.name:
+                        break
+                port_check = upgrade_opt and not has_obshell(repository)
                 parameter_check = False
                 kernel_check = is_running_opt
@@ -390,12 +379,12 @@ def system_memory_check():
         memory_limit = 0
         percentage = 0
         if server_config.get('memory_limit'):
-            memory_limit = parse_size(server_config['memory_limit'])
+            memory_limit = Capacity(server_config['memory_limit']).btyes
             if production_mode and memory_limit < PRO_MEMORY_MIN:
-                error('mem', err.EC_OBSERVER_PRODUCTION_MODE_LIMIT.format(server=server, key='memory_limit', limit=format_size(PRO_MEMORY_MIN)), [err.SUB_SET_NO_PRODUCTION_MODE.format()])
+                error('mem', err.EC_OBSERVER_PRODUCTION_MODE_LIMIT.format(server=server, key='memory_limit', limit=Capacity(PRO_MEMORY_MIN)), [err.SUB_SET_NO_PRODUCTION_MODE.format()])
             memory['num'] += memory_limit
         elif 'memory_limit_percentage' in server_config:
-            percentage = int(parse_size(server_config['memory_limit_percentage']))
+            percentage = server_config['memory_limit_percentage']
             memory['percentage'] += percentage
         else:
             percentage = 80
             memory['percentage'] += percentage
         memory['servers'][server] = {
             'num': memory_limit,
             'percentage': percentage,
-            'system_memory': parse_size(server_config.get('system_memory', 0))
+            'system_memory': Capacity(server_config.get('system_memory', 0)).btyes
         }
         data_path = server_config['data_dir'] if server_config.get('data_dir') else os.path.join(server_config['home_path'], 'store')
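The deleted parse_size/format_size pair is replaced throughout by _types.Capacity: .btyes (the property really is spelled that way in _types.py) yields the byte count, and str() formats it back into a human-readable unit. A rough stand-in showing the contract the checks above rely on, not the real class:

    # Illustrative stand-in for _types.Capacity, not the real implementation.
    class Capacity:
        UNITS = {'B': 1, 'K': 1 << 10, 'M': 1 << 20, 'G': 1 << 30, 'T': 1 << 40}

        def __init__(self, value, precision=1):
            self.precision = precision
            if isinstance(value, str) and value[-1].upper() in self.UNITS:
                self.btyes = int(float(value[:-1]) * self.UNITS[value[-1].upper()])
            else:
                self.btyes = int(value)        # plain number of bytes

        def __str__(self):
            size, units, idx = float(self.btyes), 'BKMGT', 0
            while idx < len(units) - 1 and size >= 1024:
                size /= 1024.0
                idx += 1
            return '%.*f%s' % (self.precision, size, units[idx])

    assert Capacity('16G').btyes == 16 << 30
    print(Capacity(5 << 29))                   # -> 2.5G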
@@ -412,14 +401,14 @@ def system_memory_check():
             if not client.execute_command('ls %s/sstable/block_file' % data_path):
                 disk[data_path] = {'server': server}
                 clog_mount[clog_dir] = {'server': server}
-                if 'datafile_size' in server_config and server_config['datafile_size'] and parse_size(server_config['datafile_size']):
+                if 'datafile_size' in server_config and server_config['datafile_size']:
                     # if need is string, it means use datafile_size
                     disk[data_path]['need'] = server_config['datafile_size']
                 elif 'datafile_disk_percentage' in server_config and server_config['datafile_disk_percentage']:
                     # if need is integer, it means use datafile_disk_percentage
                     disk[data_path]['need'] = int(server_config['datafile_disk_percentage'])

-                if 'log_disk_size' in server_config and server_config['log_disk_size'] and parse_size(server_config['log_disk_size']):
+                if 'log_disk_size' in server_config and server_config['log_disk_size']:
                     # if need is string, it means use log_disk_size
                     clog_mount[clog_dir]['need'] = server_config['log_disk_size']
                 elif 'log_disk_percentage' in server_config and server_config['log_disk_percentage']:
@@ -580,15 +569,15 @@ def system_memory_check():
             for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout):
                 if k in memory_key_map:
                     key = memory_key_map[k]
-                    server_memory_stats[key] = parse_size(str(v))
+                    server_memory_stats[key] = Capacity(str(v)).btyes

             ip_server_memory_info[ip] = server_memory_stats
             server_memory_stat = servers_memory[ip]
             min_start_need = server_num * START_NEED_MEMORY
-            total_use = server_memory_stat['percentage'] * server_memory_stats['total'] / 100 + server_memory_stat['num']
+            total_use = int(server_memory_stat['percentage'] * server_memory_stats['total'] / 100 + server_memory_stat['num'])

             if min_start_need > server_memory_stats['available']:
                 for server in ip_servers:
-                    error('mem', err.EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE.format(ip=ip, available=format_size(server_memory_stats['available']), need=format_size(min_start_need)), [err.SUG_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE.format(ip=ip)])
+                    error('mem', err.EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE.format(ip=ip, available=Capacity(server_memory_stats['available']), need=Capacity(min_start_need)), [err.SUG_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE.format(ip=ip)])
             elif total_use > server_memory_stats['free'] + server_memory_stats['buffers'] + server_memory_stats['cached']:
                 for server in ip_servers:
                     server_generate_config = generate_configs.get(server, {})
@@ -598,11 +587,11 @@ def system_memory_check():
                     if key in global_generate_config or key in server_generate_config:
                         suggest.auto_fix = False
                         break
-                    error('mem', err.EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED.format(ip=ip, free=format_size(server_memory_stats['free']), cached=format_size(server_memory_stats['buffers'] + server_memory_stats['cached']), need=format_size(total_use)), [suggest])
+                    error('mem', err.EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED.format(ip=ip, free=Capacity(server_memory_stats['free']), cached=Capacity(server_memory_stats['buffers'] + server_memory_stats['cached']), need=Capacity(total_use)), [suggest])
             elif total_use > server_memory_stats['free']:
                 system_memory_check()
                 for server in ip_servers:
-                    alert('mem', err.EC_OBSERVER_NOT_ENOUGH_MEMORY.format(ip=ip, free=format_size(server_memory_stats['free']), need=format_size(total_use)), [err.SUG_OBSERVER_REDUCE_MEM.format()])
+                    alert('mem', err.EC_OBSERVER_NOT_ENOUGH_MEMORY.format(ip=ip, free=Capacity(server_memory_stats['free']), need=Capacity(total_use)), [err.SUG_OBSERVER_REDUCE_MEM.format()])
             else:
                 system_memory_check()

@@ -628,7 +617,7 @@ def system_memory_check():
                     # slog need 4G
                     disk[mount_path]['need'] += max(disk[mount_path]['total'] - slog_size, 0) * need / 100
                 else:
-                    disk[mount_path]['need'] += parse_size(need)
+                    disk[mount_path]['need'] += Capacity(need).btyes

                 disk[mount_path]['need'] += slog_size
                 disk[mount_path]['is_data_disk'] = True
@@ -648,7 +637,7 @@ def system_memory_check():
                     log_disk_size = disk[mount_path]['total'] * need / 100
                 else:
                     # log_disk_size
-                    log_disk_size = parse_size(need)
+                    log_disk_size = Capacity(need).btyes
                 servers_log_disk_size[servers_clog_mount[ip][path]['server']] = log_disk_size
                 disk[mount_path]['need'] += log_disk_size
                 disk[mount_path]['is_clog_disk'] = True
@@ -688,7 +677,7 @@ def system_memory_check():
                         break
                     tmp_suggests.append(suggest)
                 tmp_suggests = sorted(tmp_suggests, key=lambda suggest: suggest.auto_fix, reverse=True)
-                critical('disk', err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=p, avail=format_size(avail), need=format_size(need)), tmp_suggests + suggests)
+                critical('disk', err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=p, avail=Capacity(avail), need=Capacity(need)), tmp_suggests + suggests)

     global_conf = cluster_config.get_global_conf()
     has_ocp = 'ocp-express' in plugin_context.components
@@ -701,7 +690,7 @@ def system_memory_check():
         for key in global_conf_with_default:
             if key.startswith(ocp_meta_tenant_prefix) and original_global_conf.get(key, None):
                 global_conf_with_default['ocp_meta_tenant'][key.replace(ocp_meta_tenant_prefix, '', 1)] = global_conf_with_default[key]
-        meta_db_memory_size = parse_size(global_conf_with_default['ocp_meta_tenant'].get('memory_size'))
+        meta_db_memory_size = Capacity(global_conf_with_default['ocp_meta_tenant'].get('memory_size')).btyes
        servers_sys_memory = {}
         if meta_db_memory_size:
             sys_memory_size = None
@@ -717,7 +706,7 @@ def system_memory_check():
                 if system_memory == 0:
                     system_memory = get_system_memory(memory_limit, min_pool_memory)
                 if not sys_memory_size:
-                    sys_memory_size = servers_sys_memory[server] = max(min_pool_memory, min((memory_limit - system_memory) * 0.25, parse_size('16G')))
+                    sys_memory_size = servers_sys_memory[server] = max(min_pool_memory, min((memory_limit - system_memory) * 0.25, Capacity('16G').btyes))
                 if meta_db_memory_size + system_memory + sys_memory_size <= memory_limit:
                     break
             else:
@@ -728,7 +717,7 @@ def system_memory_check():
                 error('ocp meta db', err.EC_OCP_EXPRESS_META_DB_NOT_ENOUGH_MEM.format(), [suggest])

         meta_db_log_disk_size = global_conf_with_default['ocp_meta_tenant'].get('log_disk_size')
-        meta_db_log_disk_size = parse_size(meta_db_log_disk_size) if meta_db_log_disk_size else meta_db_log_disk_size
+        meta_db_log_disk_size = Capacity(meta_db_log_disk_size).btyes if meta_db_log_disk_size else meta_db_log_disk_size
         if not meta_db_log_disk_size and meta_db_memory_size:
             meta_db_log_disk_size = meta_db_memory_size * 3
         if meta_db_log_disk_size:
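The tenant sizing above boils down to a little arithmetic; a worked example with hypothetical numbers makes the bounds concrete (the sys share is a quarter of what remains after system_memory, clamped between min_pool_memory and 16G, and the meta tenant's log disk defaults to three times its memory):

    # Worked example of the sizing heuristic above; all figures hypothetical.
    G = 1 << 30
    memory_limit = 32 * G
    system_memory = 8 * G            # what get_system_memory() might return
    min_pool_memory = 2 * G
    meta_db_memory_size = 4 * G

    sys_memory_size = max(min_pool_memory, min((memory_limit - system_memory) * 0.25, 16 * G))
    assert sys_memory_size == 6 * G
    assert meta_db_memory_size + system_memory + sys_memory_size <= memory_limit

    meta_db_log_disk_size = meta_db_memory_size * 3   # default when unset
    assert meta_db_log_disk_size == 12 * G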
diff --git a/plugins/oceanbase/4.3.0.0/create_tenant.py b/plugins/oceanbase/4.3.0.0/create_tenant.py
new file mode 100644
index 0000000..b0a72ee
--- /dev/null
+++ b/plugins/oceanbase/4.3.0.0/create_tenant.py
@@ -0,0 +1,521 @@
+# coding: utf-8
+# OceanBase Deploy.
+# Copyright (C) 2021 OceanBase
+#
+# This file is part of OceanBase Deploy.
+#
+# OceanBase Deploy is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# OceanBase Deploy is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
+
+
+from __future__ import absolute_import, division, print_function
+
+import time
+from collections import defaultdict
+from copy import deepcopy
+
+from _errno import EC_OBSERVER_CAN_NOT_MIGRATE_IN
+from _types import Capacity
+
+
+tenant_cursor_cache = defaultdict(dict)
+
+
+def exec_sql_in_tenant(sql, cursor, tenant, mode, user='', password='', print_exception=True, retries=20, args=[]):
+    if not user:
+        user = 'SYS' if mode == 'oracle' else 'root'
+    # find tenant ip, port
+    global tenant_cursor
+    if cursor in tenant_cursor_cache and tenant in tenant_cursor_cache[cursor] and user in tenant_cursor_cache[cursor][tenant]:
+        tenant_cursor = tenant_cursor_cache[cursor][tenant][user]
+    else:
+        query_sql = "select a.SVR_IP,c.SQL_PORT from oceanbase.DBA_OB_UNITS as a, oceanbase.DBA_OB_TENANTS as b, oceanbase.DBA_OB_SERVERS as c where a.TENANT_ID=b.TENANT_ID and a.SVR_IP=c.SVR_IP and a.svr_port=c.SVR_PORT and TENANT_NAME=%s"
+        tenant_server_ports = cursor.fetchall(query_sql, (tenant, ), raise_exception=False, exc_level='verbose')
+        for tenant_server_port in tenant_server_ports:
+            tenant_ip = tenant_server_port['SVR_IP']
+            tenant_port = tenant_server_port['SQL_PORT']
+            tenant_cursor = cursor.new_cursor(tenant=tenant, user=user, password=password, ip=tenant_ip, port=tenant_port, print_exception=print_exception)
+            if tenant_cursor:
+                if tenant not in tenant_cursor_cache[cursor]:
+                    tenant_cursor_cache[cursor][tenant] = {}
+                tenant_cursor_cache[cursor][tenant][user] = tenant_cursor
+                break
+    if not tenant_cursor and retries:
+        time.sleep(1)
+        return exec_sql_in_tenant(sql, cursor, tenant, mode, user, password, print_exception=print_exception, retries=retries-1, args=args)
+    return tenant_cursor.execute(sql, args=args, raise_exception=False, exc_level='verbose') if tenant_cursor else False
+
+
+def dump_standby_relation(relation_tenants, cluster_configs, dump_relation_tenants, stdio):
+    # find all relation tenants
+    deploy_name_tenants = deepcopy(relation_tenants)
+    deploy_name_tenants.extend(dump_relation_tenants)
+    for deploy_name_tenant_tup in deploy_name_tenants:
+        relation_deploy_name = deploy_name_tenant_tup[0]
+        relation_tenant_name = deploy_name_tenant_tup[1]
+        for deploy_name_tenant_inner in deploy_name_tenants:
+            if (relation_deploy_name, relation_tenant_name) != tuple(deploy_name_tenant_inner):
+                _dump_standby_relation(relation_deploy_name, relation_tenant_name, deploy_name_tenant_inner, cluster_configs.get(relation_deploy_name), stdio)
+    for cluster_config in cluster_configs.values():
+        cluster_config.update_component_attr('standby_relation', cluster_config.get_component_attr('standby_relation'), save=True)
+    return True
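exec_sql_in_tenant caches one connection per (cluster cursor, tenant, user) and retries once a second while the tenant's unit is still coming up. The shape of that pattern, reduced to a helper; connect() here is a hypothetical stand-in for cursor.new_cursor():

    # Reduced sketch of the cache-and-retry pattern; connect() is hypothetical.
    import time
    from collections import defaultdict

    tenant_cursor_cache = defaultdict(dict)

    def get_tenant_cursor(cursor, tenant, user, connect, retries=20):
        cached = tenant_cursor_cache[cursor].get(tenant, {}).get(user)
        if cached:
            return cached
        while retries:
            conn = connect(tenant, user)
            if conn:
                tenant_cursor_cache[cursor].setdefault(tenant, {})[user] = conn
                return conn
            retries -= 1
            time.sleep(1)     # the tenant unit may still be starting
        return None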
+
+def _dump_standby_relation(deploy_name, tenant_name, dump_relation_tenant, cluster_config, stdio):
+    stdio.verbose('dump standby relation, deploy_name:{}, tenant_name:{}, dump_relation_tenant:{}'.format(deploy_name, tenant_name, dump_relation_tenant))
+    if not cluster_config:
+        stdio.verbose('dump_standby_relation: No such deploy: %s.' % deploy_name)
+        return False
+    relation_dict = cluster_config.get_component_attr('standby_relation')
+    if relation_dict:
+        relation_tenants = relation_dict.get(tenant_name, [])
+        if not relation_tenants:
+            relation_dict[tenant_name] = [dump_relation_tenant]
+        elif tuple(dump_relation_tenant) not in [tuple(t) for t in relation_tenants]:
+            relation_tenants.append(dump_relation_tenant)
+    else:
+        relation_dict = {tenant_name: [dump_relation_tenant]}
+    cluster_config.update_component_attr('standby_relation', relation_dict, save=False)
+    return True
+
+
+def dump_standbyro_password(deploy_name, tenant_name, standbyro_password, cluster_config, stdio):
+    if not cluster_config:
+        stdio.error('No such deploy: %s.' % deploy_name)
+        return False
+    standbyro_password_dict = cluster_config.get_component_attr('standbyro_password')
+    if standbyro_password_dict:
+        standbyro_password_dict[tenant_name] = standbyro_password
+    else:
+        standbyro_password_dict = {tenant_name: standbyro_password}
+    cluster_config.update_component_attr('standbyro_password', standbyro_password_dict, save=True)
+    return True
+
+
+def create_tenant(plugin_context, cursor = None, create_tenant_options=[], relation_tenants={}, cluster_configs={}, primary_tenant_info={}, standbyro_password='', *args, **kwargs):
+    def get_option(key, default=''):
+        value = getattr(options, key, default)
+        if not value:
+            value = default
+        return value
+
+    def get_parsed_option(key, default=''):
+        value = get_option(key=key, default=default)
+        if value is None:
+            return value
+        try:
+            parsed_value = Capacity(value).btyes
+        except:
+            stdio.exception("")
+            raise Exception("Invalid option {}: {}".format(key, value))
+        return parsed_value
+
+    def error(msg='', *arg, **kwargs):
+        msg and stdio.error(msg, *arg, **kwargs)
+        stdio.stop_loading('fail')
+
+    stdio = plugin_context.stdio
+    multi_options = create_tenant_options if create_tenant_options else [plugin_context.options]
+    cursors = []
+    for options in multi_options:
+        create_if_not_exists = get_option('create_if_not_exists', False)
+        standby_deploy_name = plugin_context.cluster_config.deploy_name
+        cursor = plugin_context.get_return('connect').get_return('cursor') if not cursor else cursor
+        cursor = cursor if cursor else plugin_context.get_variable('cursors').get(standby_deploy_name)
+        global tenant_cursor
+        tenant_cursor = None
+
+        if primary_tenant_info:
+            primary_deploy_name = primary_tenant_info.get('primary_deploy_name')
+            primary_tenant = primary_tenant_info.get('primary_tenant')
+            primary_cursor = plugin_context.get_variable('cursors').get(primary_deploy_name)
+            primary_memory_size = primary_tenant_info['memory_size']
+            primary_log_disk_size = primary_tenant_info['log_disk_size']
+            primary_params = ['max_cpu', 'min_cpu', 'unit_num', 'memory_size', 'log_disk_size', 'max_iops', 'min_iops', 'iops_weight']
+            for param in primary_params:
+                if get_option(param, None) is None and param in primary_tenant_info:
+                    setattr(options, param, primary_tenant_info[param])
+
+        mode = get_option('mode', 'mysql').lower()
+        if not mode in ['mysql', 'oracle']:
+            error('No such tenant mode: %s.\n--mode must be `mysql` or `oracle`' % mode)
+            return
+
+        # options that are no longer supported
+        deserted_options = ('max_session_num', 'max_memory', 'min_memory', 'max_disk_size')
+        for opt in deserted_options:
+            if get_option(opt, None) is not None:
+                stdio.warn("option {} is no longer supported".format(opt))
+        if primary_tenant_info:
+            name = get_option('tenant_name', primary_tenant)
+        else:
+            name = get_option('tenant_name', 'test')
+        unit_name = '%s_unit' % name
+        sql = 'select * from oceanbase.DBA_OB_UNIT_CONFIGS where name like "{}%" order by unit_config_id desc limit 1'.format(unit_name)
+        res = cursor.fetchone(sql)
+        if res is False:
+            return
+        if res:
+            unit_name += '{}'.format(int(res['UNIT_CONFIG_ID']) + 1)
+
+        pool_name = '%s_pool' % name
+
+        sql = "select * from oceanbase.DBA_OB_TENANTS where TENANT_NAME = %s"
+        tenant_exists = False
+        res = cursor.fetchone(sql, (name, ))
+        if res:
+            if create_if_not_exists:
+                continue
+            else:
+                error('Tenant %s already exists' % name)
+                return
+        elif res is False:
+            return
+        if not tenant_exists:
+            stdio.start_loading('Create tenant %s' % name)
+            zone_list = get_option('zone_list', set())
+            zone_obs_num = {}
+            sql = "select zone, count(*) num from oceanbase.__all_server where status = 'active' group by zone"
+            res = cursor.fetchall(sql)
+            if res is False:
+                error()
+                return
+
+            for row in res:
+                zone_obs_num[str(row['zone'])] = row['num']
+            if not zone_list:
+                zone_list = zone_obs_num.keys()
+            if isinstance(zone_list, str):
+                zones = zone_list.replace(';', ',').split(',')
+            else:
+                zones = zone_list
+            zone_list = "('%s')" % "','".join(zones)
+
+            min_unit_num = min(zone_obs_num.items(), key=lambda x: x[1])[1]
+            unit_num = get_option('unit_num', min_unit_num)
+            if unit_num > min_unit_num:
+                return error('resource pool unit num is bigger than zone server count')
+
+            sql = "select count(*) num from oceanbase.__all_server where status = 'active' and start_service_time > 0"
+            count = 30
+            while count:
+                num = cursor.fetchone(sql)
+                if num is False:
+                    error()
+                    return
+                num = num['num']
+                if num >= unit_num:
+                    break
+                count -= 1
+                time.sleep(1)
+            if count == 0:
+                stdio.error(EC_OBSERVER_CAN_NOT_MIGRATE_IN)
+                return
+
+            sql = "SELECT * FROM oceanbase.GV$OB_SERVERS where zone in %s" % zone_list
+            servers_stats = cursor.fetchall(sql)
+            if servers_stats is False:
+                error()
+                return
+            cpu_available = servers_stats[0]['CPU_CAPACITY_MAX'] - servers_stats[0]['CPU_ASSIGNED_MAX']
+            mem_available = servers_stats[0]['MEM_CAPACITY'] - servers_stats[0]['MEM_ASSIGNED']
+            disk_available = servers_stats[0]['DATA_DISK_CAPACITY'] - servers_stats[0]['DATA_DISK_IN_USE']
+            log_disk_available = servers_stats[0]['LOG_DISK_CAPACITY'] - servers_stats[0]['LOG_DISK_ASSIGNED']
+            for servers_stat in servers_stats[1:]:
+                cpu_available = min(servers_stat['CPU_CAPACITY_MAX'] - servers_stat['CPU_ASSIGNED_MAX'], cpu_available)
+                mem_available = min(servers_stat['MEM_CAPACITY'] - servers_stat['MEM_ASSIGNED'], mem_available)
+                disk_available = min(servers_stat['DATA_DISK_CAPACITY'] - servers_stat['DATA_DISK_IN_USE'], disk_available)
+                log_disk_available = min(servers_stat['LOG_DISK_CAPACITY'] - servers_stat['LOG_DISK_ASSIGNED'], log_disk_available)
+
+            MIN_CPU = 1
+            MIN_MEMORY = 1073741824
+            MIN_LOG_DISK_SIZE = 2147483648
+            MIN_IOPS = 1024
+            STANDBY_MIN_MEMORY = 1073741824 * 2
+            STANDBY_WARN_MEMORY = 1073741824 * 4
+            STANDBY_MIN_LOG_DISK_SIZE = 1073741824 * 4
+
+            if cpu_available < MIN_CPU:
+                return error('%s: resource not enough: cpu count less than %s' % (zone_list, MIN_CPU))
+            if mem_available < MIN_MEMORY:
+                return error('%s: resource not enough: memory less than %s' % (zone_list, Capacity(MIN_MEMORY)))
+            if log_disk_available < MIN_LOG_DISK_SIZE:
+                return error('%s: resource not enough: log disk size less than %s' % (zone_list, Capacity(MIN_LOG_DISK_SIZE)))
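Each availability figure above is the minimum headroom across every row GV$OB_SERVERS returns for the zone list, so the unit spec is guaranteed to fit on the most loaded server. In miniature, with made-up rows:

    # Availability = per-resource minimum across servers; rows are made up.
    servers_stats = [
        {'CPU_CAPACITY_MAX': 16, 'CPU_ASSIGNED_MAX': 6, 'MEM_CAPACITY': 64 << 30,
         'MEM_ASSIGNED': 24 << 30, 'LOG_DISK_CAPACITY': 100 << 30, 'LOG_DISK_ASSIGNED': 30 << 30},
        {'CPU_CAPACITY_MAX': 16, 'CPU_ASSIGNED_MAX': 12, 'MEM_CAPACITY': 64 << 30,
         'MEM_ASSIGNED': 40 << 30, 'LOG_DISK_CAPACITY': 100 << 30, 'LOG_DISK_ASSIGNED': 80 << 30},
    ]
    cpu_available = min(s['CPU_CAPACITY_MAX'] - s['CPU_ASSIGNED_MAX'] for s in servers_stats)
    mem_available = min(s['MEM_CAPACITY'] - s['MEM_ASSIGNED'] for s in servers_stats)
    log_disk_available = min(s['LOG_DISK_CAPACITY'] - s['LOG_DISK_ASSIGNED'] for s in servers_stats)
    print(cpu_available, mem_available >> 30, log_disk_available >> 30)   # 4 24 20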
+            if primary_tenant_info:
+                recreate_cmd = ''
+                check_available_param = {}
+                check_available_param['max_cpu'] = [int(cpu_available), '']
+                check_available_param['min_cpu'] = [int(cpu_available), '']
+                check_available_param['memory_size'] = [mem_available, 'B']
+                check_available_param['log_disk_size'] = [disk_available, 'B']
+                for param, param_info in check_available_param.items():
+                    if get_option(param, None) is None and param_info[0] < primary_tenant_info[param]:
+                        recreate_cmd += ' --{}={}{} '.format(param, param_info[0], param_info[1])
+                        stdio.warn("available {} is less than primary tenant's quota, primary tenant's {}: {}{}, current available: {}{}".format(param, param, primary_tenant_info[param], param_info[1], param_info[0], param_info[1]))
+
+                if recreate_cmd:
+                    stdio.error("Resource confirmation: if you insist to take the risk, please recreate the tenant with '{}'".format(recreate_cmd))
+                    return
+            # cpu options
+            max_cpu = get_option('max_cpu', cpu_available)
+            min_cpu = get_option('min_cpu', max_cpu)
+            if cpu_available < max_cpu:
+                return error('Resource not enough: cpu (Avail: %s, Need: %s)' % (cpu_available, max_cpu))
+            if max_cpu < min_cpu:
+                return error('min_cpu must be less than max_cpu')
+            if min_cpu < MIN_CPU:
+                return error('min_cpu must be greater than %s' % MIN_CPU)
+
+            # memory options
+            memory_size = get_parsed_option('memory_size', None)
+            log_disk_size = get_parsed_option('log_disk_size', None)
+
+            if memory_size is None:
+                memory_size = mem_available
+            if log_disk_size is None:
+                log_disk_size = log_disk_available
+
+            if mem_available < memory_size:
+                return error('resource not enough: memory (Avail: %s, Need: %s)' % (Capacity(mem_available), Capacity(memory_size)))
+            if memory_size < MIN_MEMORY:
+                return error('memory must be greater than %s' % Capacity(MIN_MEMORY))
+
+            # log disk size options
+            if log_disk_size is not None and log_disk_available < log_disk_size:
+                return error('resource not enough: log disk space (Avail: %s, Need: %s)' % (Capacity(log_disk_available), Capacity(log_disk_size)))
+
+            if primary_tenant_info:
+                if Capacity(primary_memory_size).btyes < STANDBY_MIN_MEMORY:
+                    return error('Primary tenant memory_size:{}B is less than {}B, creating a standby tenant is not supported.'.format(primary_memory_size, STANDBY_MIN_MEMORY))
+                if Capacity(primary_memory_size).btyes < STANDBY_WARN_MEMORY:
+                    stdio.warn('Primary tenant memory_size: {}B, suggestion: {}B'.format(primary_memory_size, STANDBY_WARN_MEMORY))
+                if Capacity(primary_log_disk_size).btyes < STANDBY_MIN_LOG_DISK_SIZE:
+                    return error('Primary tenant log_disk_size:{}B is less than {}B, creating a standby tenant is not supported.'.format(primary_log_disk_size, STANDBY_MIN_LOG_DISK_SIZE))
+
+            # iops options
+            max_iops = get_option('max_iops', None)
+            min_iops = get_option('min_iops', None)
+            iops_weight = get_option('iops_weight', None)
+            if max_iops is not None and max_iops < MIN_IOPS:
+                return error('max_iops must be greater than %d' % MIN_IOPS)
+            if max_iops is not None and min_iops is not None and max_iops < min_iops:
+                return error('min_iops must be less than max_iops')
+
+            zone_num = len(zones)
+            charset = get_option('charset', '')
+            collate = get_option('collate', '')
+            replica_num = get_option('replica_num', zone_num)
+            logonly_replica_num = get_option('logonly_replica_num', 0)
+            tablegroup = get_option('tablegroup', '')
+            primary_zone = get_option('primary_zone', 'RANDOM')
+            locality = get_option('locality', '')
+            variables = get_option('variables', "ob_tcp_invited_nodes='%'")
+
+            if replica_num == 0:
+                replica_num = zone_num
+            elif replica_num > zone_num:
+                return error('replica_num cannot be greater than zone num (%s)' % zone_num)
+            if not primary_zone:
+                primary_zone = 'RANDOM'
+            if logonly_replica_num > replica_num:
+                return error('logonly_replica_num cannot be greater than replica_num (%s)' % replica_num)
+
+            # create resource unit
+            sql = "create resource unit %s max_cpu %.1f, memory_size %d" % (unit_name, max_cpu, memory_size)
+            if min_cpu is not None:
+                sql += ', min_cpu %.1f' % min_cpu
+            if max_iops is not None:
+                sql += ', max_iops %d' % max_iops
+            if min_iops is not None:
+                sql += ', min_iops %d' % min_iops
+            if iops_weight is not None:
+                sql += ', iops_weight %d' % iops_weight
+            if log_disk_size is not None:
+                sql += ', log_disk_size %d' % log_disk_size
+
+            res = cursor.execute(sql)
+            if res is False:
+                error()
+                return
+
+            # create resource pool
+            sql = "create resource pool %s unit='%s', unit_num=%d, zone_list=%s" % (pool_name, unit_name, unit_num, zone_list)
+            try:
+                cursor.execute(sql, raise_exception=True)
+            except Exception as e:
+                stdio.exception('create resource pool failed, you can try again by using SQL "drop resource pool {}" to delete the resource pool, if you are certain that the resource pool is not being used. error info: {}'.format(pool_name, e))
+                return
+
+            # create tenant
+            if not primary_tenant_info:
+                # create normal tenant
+                sql = "create tenant %s replica_num=%d,zone_list=%s,primary_zone='%s',resource_pool_list=('%s')"
+                sql = sql % (name, replica_num, zone_list, primary_zone, pool_name)
+                if charset:
+                    sql += ", charset = '%s'" % charset
+                if collate:
+                    sql += ", collate = '%s'" % collate
+                if logonly_replica_num:
+                    sql += ", logonly_replica_num = %d" % logonly_replica_num
+                if tablegroup:
+                    sql += ", default tablegroup ='%s'" % tablegroup
+                if locality:
+                    sql += ", locality = '%s'" % locality
+
+                set_mode = "ob_compatibility_mode = '%s'" % mode
+                if variables:
+                    sql += " set %s, %s" % (variables, set_mode)
+                else:
+                    sql += " set %s" % set_mode
+                try:
+                    cursor.execute(sql, raise_exception=True)
+                except Exception as e:
+                    stdio.exception('Create error, error info: {}'.format(e))
+                    return
+                stdio.stop_loading('succeed')
+                root_password = get_option(name + '_root_password', "")
+                if root_password:
+                    sql = "alter user root IDENTIFIED BY %s"
+                    stdio.verbose(sql)
+                    if not exec_sql_in_tenant(sql=sql, cursor=cursor, tenant=name, mode=mode, args=[root_password]) and not create_if_not_exists:
+                        stdio.error('failed to set password for root@{}'.format(name))
+                        return
+                database = get_option('database')
+                if database:
+                    sql = 'create database {}'.format(database)
+                    if not exec_sql_in_tenant(sql=sql, cursor=cursor, tenant=name, mode=mode, password=root_password if root_password else '') and not create_if_not_exists:
+                        stdio.error('failed to create database {}'.format(database))
+                        return
+
+                db_username = get_option('db_username')
+                db_password = get_option('db_password', '')
+                if db_username:
+                    if mode == "mysql":
+                        sql = """create user if not exists '{username}' IDENTIFIED BY %s;
+grant all on *.* to '{username}' WITH GRANT OPTION;""".format(username=db_username)
+                    else:
+                        error("Create user in oracle tenant is not supported")
+                    if not exec_sql_in_tenant(sql=sql, cursor=cursor, tenant=name, mode=mode, args=[db_password]):
+                        stdio.error('failed to create user {}'.format(db_username))
+                        return
+                exec_sql_in_tenant(sql='show databases;', cursor=cursor, tenant=name, mode=mode, password=root_password if root_password else '')
+                cursors.append(tenant_cursor)
+            else:
+                # create standby tenant
+                # query ip_list
+                sql = '''select group_concat(host separator ";") as ip_list from (select concat(svr_ip,":",SQL_PORT) as host from oceanbase.cdb_ob_access_point where tenant_name=%s)'''
+                res = primary_cursor.fetchone(sql, (primary_tenant, ))
+                if not res:
+                    stdio.error('ip_list query error.')
+                    return
+
+                sql = '''CREATE STANDBY TENANT {} LOG_RESTORE_SOURCE = 'SERVICE={} USER=standbyro@{} PASSWORD={}' RESOURCE_POOL_LIST=('{}'), primary_zone='{}' '''.format(name, res['ip_list'], primary_tenant, standbyro_password, pool_name, primary_zone)
+                if locality:
+                    sql += ", locality = '%s'" % locality
+
+                try:
+                    cursor.execute(sql, raise_exception=True, exc_level='verbose')
+                except Exception as e:
+                    stdio.verbose('create standby tenant failed, clean and retry. failure message: {}'.format(e))
+                    # clean and retry create standby tenant
+                    res = cursor.fetchone("select TENANT_ID from oceanbase.DBA_OB_TENANTS where tenant_name = %s", (name, ), raise_exception=False)
+                    if res is False:
+                        error('Create standby tenant failed. message: {}'.format(e))
+                        return
+                    if res:
+                        # drop tenant
+                        tenant_id = res['TENANT_ID']
+                        res = cursor.execute("drop tenant %s FORCE" % name, raise_exception=False)
+                        if res is False:
+                            error('Create standby tenant failed. message: {}'.format(e))
+                            return
+
+                        # wait for the tenant to be dropped
+                        count = 600
+                        while count > 0:
+                            res = cursor.fetchone('select count(1) as count from oceanbase.GV$OB_UNITS where TENANT_ID=%s or TENANT_ID=%s', (tenant_id, int(tenant_id)-1), raise_exception=False)
+                            if res is False:
+                                error('query unit info failed')
+                                return
+                            if res['count'] == 0:
+                                break
+                            count -= 1
+                            time.sleep(1)
+
+                        if count == 0:
+                            error('Retry create standby tenant failed: drop tenant timeout')
+                            return
+
+                    # create again
+                    try:
+                        cursor.execute(sql, raise_exception=True)
+                    except Exception as e:
+                        retry_message = 'After resolving this issue, you can clean up the environment by manually executing "obd cluster tenant drop {} -t {}", and then wait for a while before re-creating the standby tenant.'.format(standby_deploy_name, name)
+                        error("create standby tenant failed, error: {}".format(e))
+                        stdio.print(retry_message)
+                        return
+                stdio.stop_loading('succeed')
+
+                # check standby sync status
+                stdio.start_loading('Check standby sync status')
+                sql = "SELECT tenant_id, tenant_name, tenant_type, primary_zone, locality, compatibility_mode, status, in_recyclebin, (CASE WHEN LOCKED = 'YES' THEN 1 ELSE 0 END) AS locked, TIMESTAMPDIFF(SECOND, CREATE_TIME, now()) AS exist_seconds, arbitration_service_status, switchover_status, log_mode, sync_scn, recovery_until_scn, tenant_role FROM oceanbase.DBA_OB_TENANTS WHERE TENANT_TYPE IN ('SYS', 'USER') and tenant_name = %s"
+                res = cursor.fetchone(sql, (name, ))
+                if not res:
+                    error('check standby sync status failed')
+                    return
+
+                stdio.print_list([res], ['tenant_name', 'log_mode', 'locality', 'tenant_role', 'create_status'],
+                    lambda x: [x['tenant_name'], x['log_mode'], x['locality'], x['tenant_role'], x['status']], title='standby tenant info')
+
+                if res['status'] != 'NORMAL':
+                    error('standby tenant status is not normal')
+                    return
+                stdio.stop_loading('succeed')
+
+                stdio.start_loading('Dump standby relation')
+                if not dump_standbyro_password(standby_deploy_name, name, standbyro_password, cluster_configs.get(standby_deploy_name), stdio):
+                    return
+                if not dump_standby_relation(relation_tenants, cluster_configs, [[standby_deploy_name, name], [primary_deploy_name, primary_tenant]], stdio):
+                    return
+                stdio.stop_loading('succeed')
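The clean-and-retry path above leans on the same polling idiom several times: issue a query, sleep a second, give up after a bounded count. Factored into a hypothetical helper it would look like this:

    # Generic form of the bounded polling used above; fetch is hypothetical.
    import time

    def wait_until(fetch, done, count=600, interval=1):
        while count > 0:
            row = fetch()
            if row is False:
                return None            # the query itself failed
            if done(row):
                return row
            count -= 1
            time.sleep(interval)
        return None                    # timed out

    # e.g. waiting for the dropped tenant's units to disappear:
    # wait_until(lambda: cursor.fetchone(unit_count_sql), lambda r: r['count'] == 0)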
+                # check that the log sync task is created
+                stdio.start_loading('Creating log sync task')
+                sql = "SELECT tenant_id, REPLACE(`sync_status`, ' ', '_') as sync_status, err_code, comment FROM oceanbase.V$OB_LS_LOG_RESTORE_STATUS WHERE tenant_id = %s group by sync_status "
+                count = 600
+                while count > 0:
+                    res = cursor.fetchall(sql, (res['tenant_id'], ))
+                    if res:
+                        break
+                    count -= 1
+                    time.sleep(1)
+                    stdio.verbose('Wait log sync create: retry {}'.format(600 - count))
+                if count == 0:
+                    stdio.warn('wait log sync create timeout')
+
+                flag = False
+                for item in res:
+                    if item.get('sync_status') != 'NORMAL':
+                        flag = True
+                        stdio.error('standby tenant log sync error, tenant_id:{}, sync_status:{}, err_code:{}, comment:{}'.format(item['tenant_id'], item['sync_status'], item['err_code'], item['comment']))
+
+                if flag:
+                    stdio.stop_loading('failed')
+                stdio.stop_loading('succeed')
+
+                stdio.print('You can use the command "obd cluster tenant show {} -g" to view the relationship between the primary and standby tenants.'.format(standby_deploy_name))
+
+    return plugin_context.return_true(tenant_cursor=cursors)
diff --git a/plugins/oceanbase/4.3.0.0/parameter.yaml b/plugins/oceanbase/4.3.0.0/parameter.yaml
new file mode 100644
index 0000000..33ad413
--- /dev/null
+++ b/plugins/oceanbase/4.3.0.0/parameter.yaml
@@ -0,0 +1,1948 @@
+- name: home_path
+  name_local: 工作目录
+  require: true
+  essential: true
+  type: PATH
+  min_value: NULL
+  max_value: NULL
+  need_redeploy: true
+  description_en: the directory for the work data file
+  description_local: OceanBase工作目录
+- name: cluster_id
+  name_local: 集群ID
+  require: true
+  essential: true
+  type: INT
+  default: 1
+  min_value: 1
+  max_value: 4294901759
+  modify_limit: modify
+  need_redeploy: true
+  description_en: ID of the cluster
+  description_local: 本OceanBase集群ID
+- name: data_dir
+  name_local: 数据目录
+  essential: true
+  type: PATH
+  min_value: NULL
+  max_value: NULL
+  need_redeploy: true
+  description_en: the directory for the data file
+  description_local: 存储sstable等数据的目录
+- name: redo_dir
+  name_local: 日志目录
+  essential: true
+  type: PATH
+  min_value: NULL
+  max_value: NULL
+  need_redeploy: true
+  description_en: the directory for the redo file
+  description_local: 存储clog, iclog, slog数据的目录
+- name: clog_dir
+  type: PATH
+  min_value: NULL
+  max_value: NULL
+  need_redeploy: true
+  description_en: the directory for the clog file
+  description_local: 存储clog数据的目录, clog 应该与 ilog 同盘
+- name: slog_dir
+  type: PATH
+  min_value: NULL
+  max_value: NULL
+  need_redeploy: true
+  description_en: the directory for the slog file
+  description_local: 存储slog数据的目录. 4.0版本开始不支持配置该项
+- name: ilog_dir
+  type: PATH
+  min_value: NULL
+  max_value: NULL
+  need_redeploy: true
+  description_en: the directory for the ilog file
+  description_local: 存储ilog数据的目录
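The new parameter.yaml is data, not code: each entry declares a parameter's type, default, bounds and localized descriptions, and OBD's config layer materializes defaults from it. A minimal reader for the schema used above (the loader itself is ours, not OBD's):

    # Minimal reader for the parameter schema above; not OBD's own loader.
    import yaml

    def essential_defaults(path):
        with open(path) as f:
            params = yaml.safe_load(f)
        return {p['name']: p.get('default') for p in params if p.get('essential')}

    # essential_defaults('plugins/oceanbase/4.3.0.0/parameter.yaml')
    # -> {'home_path': None, 'cluster_id': 1, 'rpc_port': 2882, ...}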
+- name: rpc_port
+  name_local: 内部通信端口
+  require: true
+  essential: true
+  type: INT
+  default: 2882
+  min_value: 1025
+  max_value: 65535
+  modify_limit: modify
+  need_restart: true
+  description_en: the port number for RPC protocol.
+  description_local: 集群内部通信的端口号
+- name: mysql_port
+  name_local: 服务端口
+  require: true
+  essential: true
+  type: INT
+  default: 2881
+  min_value: 1025
+  max_value: 65535
+  modify_limit: modify
+  need_restart: true
+  description_en: port number for mysql connection
+  description_local: SQL服务协议端口号
+- name: obshell_port
+  name_local: obshell 服务端口
+  require: true
+  essential: true
+  type: INT
+  default: 2886
+  min_value: 1025
+  max_value: 65535
+  modify_limit: modify
+  need_redeploy: true
+  description_en: The port for obshell agent
+  description_local: obshell agent 的端口号
+- name: zone
+  require: true
+  type: SAFE_STRING
+  default: zone1
+  min_value: NULL
+  max_value: NULL
+  section: OBSERVER
+  need_redeploy: true
+  description_en: specifies the zone name
+  description_local: 节点所在的zone的名字。
+- name: sys_cpu_limit_trigger
+  require: false
+  type: INT
+  default: 80
+  min_value: 50
+  max_value: NULL
+  section: OBSERVER
+  need_restart: false
+  description_en: when the cpu usage percentage exceeds the trigger, will limit the sys cpu usage
+  description_local: 当CPU利用率超过该阈值的时候,将暂停系统后台任务的执行
+- name: memory_limit_percentage
+  require: false
+  type: INT
+  default: 80
+  min_value: 10
+  max_value: 90
+  modify_limit: decrease
+  section: OBSERVER
+  need_restart: false
+  description_en: memory limit percentage of the total physical memory
+  description_local: 系统总可用内存大小占总内存大小的百分比
+- name: sys_bkgd_migration_retry_num
+  require: false
+  type: INT
+  default: 3
+  min_value: 3
+  max_value: 100
+  section: OBSERVER
+  need_restart: false
+  description_en: retry num limit during migration.
+  description_local: 副本迁移失败时最多重试次数
+- name: tableapi_transport_compress_func
+  require: false
+  type: SAFE_STRING
+  default: none
+  min_value: NULL
+  max_value: NULL
+  section: OBSERVER
+  need_restart: false
+  description_en: compressor used for tableAPI query result.
+  description_local: tableAPI查询结果传输使用的压缩算法
+- name: disk_io_thread_count
+  require: false
+  type: INT
+  default: 8
+  min_value: 2
+  max_value: 32
+  section: OBSERVER
+  need_restart: false
+  description_en: The number of io threads on each disk.
+  description_local: 磁盘IO线程数。必须为偶数。
+- name: location_cache_refresh_min_interval
+  require: false
+  type: TIME
+  default: 100ms
+  min_value: 0s
+  max_value: NULL
+  section: LOCATION_CACHE
+  need_restart: false
+  description_en: the time interval in which no request for location cache renewal will be executed.
+  description_local: 位置缓存刷新请求的最小间隔,防止产生过多刷新请求造成系统压力过大
+- name: trace_log_slow_query_watermark
+  type: TIME
+  default: 1s
+  min_value: 1ms
+  max_value: NULL
+  section: OBSERVER
+  need_restart: false
+  description_en: the threshold of execution time (in milliseconds) of a query beyond which it is considered to be a slow query.
+  description_local: 执行时间超过该阈值的查询会被认为是慢查询,慢查询的追踪日志会被打印到系统日志中
+- name: max_string_print_length
+  require: false
+  type: INT
+  default: 500
+  min_value: 0
+  max_value: NULL
+  section: OBSERVER
+  need_restart: false
+  description_en: truncate very long string when printing to log file
+  description_local: 打印系统日志时,单行日志最大长度
+- name: row_compaction_update_limit
+  require: false
+  type: INT
+  default: 6
+  min_value: 1
+  max_value: 6400
+  section: TRANS
+  need_restart: false
+  description_en: maximum update count before trigger row compaction
+  description_local: 触发内存中行内数据合并的修改次数
+- name: enable_rereplication
+  require: false
+  type: BOOL
+  default: true
+  min_value: NULL
+  max_value: NULL
+  section: LOAD_BALANCE
+  need_restart: false
+  description_en: specifies whether the partition auto-replication is turned on.
+  description_local: 自动补副本开关
+- name: rootservice_async_task_thread_count
+  require: false
+  type: INT
+  default: 4
+  min_value: 1
+  max_value: 10
+  section: ROOT_SERVICE
+  need_restart: false
+  description_en: maximum of threads allowed for executing asynchronous task at rootserver.
+  description_local: RootService内部异步任务使用的线程池大小
+- name: major_compact_trigger
+  require: false
+  type: INT
+  default: 5
+  min_value: 0
+  max_value: 65535
+  section: TENANT
+  need_restart: false
+  description_en: major_compact_trigger alias to minor_freeze_times
+  description_local: 多少次小合并触发一次全局合并。值为0时,表示关闭小合并
+- name: default_compress
+  require: false
+  type: SAFE_STRING
+  default: archive
+  min_value: NULL
+  max_value: NULL
+  section: OBSERVER
+  need_restart: false
+  description_en: default compress function name for create new table
+  description_local: Oracle模式下,建表时使用的默认压缩策略
+- name: ssl_client_authentication
+  require: false
+  type: BOOL
+  default: false
+  min_value: NULL
+  max_value: NULL
+  section: OBSERVER
+  need_restart: true
+  description_en: enable server supports SSL connection, takes effect only after server restart with all ca/cert/key file.
+  description_local: 是否开启SSL连接功能
+- name: datafile_size
+  name_local: 数据文件大小
+  require: false
+  essential: true
+  type: CAPACITY_MB
+  default: 0
+  min_value: 0M
+  max_value: NULL
+  modify_limit: decrease
+  section: SSTABLE
+  need_restart: false
+  description_en: size of the data file. Please enter a capacity, such as 20G
+  description_local: 数据文件大小。请输入带容量带单位的整数,如20G
+- name: datafile_maxsize
+  name_local: 数据文件最大空间
+  require: false
+  essential: true
+  type: CAPACITY_MB
+  default: 0
+  min_value: 0M
+  max_value: NULL
+  modify_limit: decrease
+  section: SSTABLE
+  need_restart: false
+  description_en: the auto extend max size. Please enter a capacity, such as 20G
+  description_local: 数据文件最大空间。请输入带容量带单位的整数,如20G
+- name: datafile_next
+  name_local: 数据文件自增步长
+  require: false
+  essential: true
+  type: CAPACITY_MB
+  default: 0
+  min_value: 0M
+  max_value: NULL
+  modify_limit: decrease
+  section: SSTABLE
+  need_restart: false
+  description_en: the auto extend step. Please enter a capacity, such as 2G
+  description_local: 数据文件自增步长。请输入带容量带单位的整数,如2G
+- name: log_disk_percentage
+  require: false
+  type: INT
+  default: 0
+  min_value: 0
+  max_value: 99
+  description_en: the percentage of disk space used by the clog files.
+  description_local: Redo 日志占用其所在磁盘总空间的百分比。
+- name: log_disk_size
+  name_local: Redo 日志大小
+  require: false
+  essential: true
+  type: CAPACITY_MB
+  default: 0
+  min_value: 0M
+  max_value: NULL
+  description_en: the size of disk space used by the clog files. Please enter a capacity, such as 20G
+  description_local: Redo 日志磁盘的大小。请输入带容量带单位的整数,如24G
+- name: merge_stat_sampling_ratio
+  require: false
+  type: INT
+  default: 100
+  min_value: 0
+  max_value: 100
+  section: OBSERVER
+  need_restart: false
+  description_en: column stats sampling ratio daily merge.
+  description_local: 合并时候数据列统计信息的采样率
+- name: cache_wash_threshold
+  require: false
+  type: CAPACITY_MB
+  default: 4GB
+  min_value: 0B
+  max_value: NULL
+  section: OBSERVER
+  need_restart: false
+  description_en: size of remaining memory at which cache eviction will be triggered.
+  description_local: 触发缓存清理的容量阈值
+- name: user_iort_up_percentage
+  require: false
+  type: INT
+  default: 100
+  min_value: 0
+  max_value: NULL
+  section: OBSERVER
+  need_restart: false
+  description_en: variable to control sys io, the percentage of use io rt can raise
+  description_local: 用户磁盘IO时延超过该阈值后,系统后台IO任务将被限流
+- name: high_priority_net_thread_count
+  require: false
+  type: INT
+  default: 0
+  min_value: 0
+  max_value: 100
+  section: OBSERVER
+  need_restart: true
+  description_en: the number of rpc I/O threads for high priority messages, 0 means set off
+  description_local: 高优先级网络线程数,值0表示关闭
+- name: max_kept_major_version_number
+  require: false
+  type: INT
+  default: 2
+  min_value: 1
+  max_value: 16
+  section: DAILY_MERGE
+  need_restart: false
+  description_en: the maximum number of kept major versions
+  description_local: 数据保留多少个冻结版本
+- name: enable_sys_unit_standalone
+  require: false
+  type: BOOL
+  default: false
+  min_value: NULL
+  max_value: NULL
+  section: LOAD_BALANCE
+  need_restart: false
+  description_en: specifies whether sys unit standalone deployment is turned on.
+  description_local: 系统租户UNIT是否独占节点
+- name: freeze_trigger_percentage
+  require: false
+  type: INT
+  default: 50
+  min_value: 1
+  max_value: 99
+  section: TENANT
+  need_restart: false
+  description_en: the threshold of the size of the mem store when freeze will be triggered.
+  description_local: 触发全局冻结的租户使用内存阈值。另见enable_global_freeze_trigger。
+- name: enable_major_freeze
+  require: false
+  type: BOOL
+  default: true
+  min_value: NULL
+  max_value: NULL
+  section: ROOT_SERVICE
+  need_restart: false
+  description_en: specifies whether major_freeze function is turned on.
+  description_local: 自动全局冻结开关
+- name: balancer_tolerance_percentage
+  require: false
+  type: INT
+  default: 10
+  min_value: 1
+  max_value: 99
+  section: LOAD_BALANCE
+  need_restart: false
+  description_en: specifies the tolerance (in percentage) of the unbalance of the disk space utilization among all units.
+  description_local: 租户内多个UNIT间磁盘不均衡程度的宽容度,在均值+-宽容度范围之内的不均衡不会触发执行均衡动作
+- name: server_cpu_quota_min
+  require: false
+  type: DOUBLE
+  default: 2.5
+  min_value: 0
+  max_value: 16
+  section: TENANT
+  need_restart: true
+  description_en: the number of minimal vCPUs allocated to the server tenant (a special internal tenant that exists on every observer)
+  description_local: 系统可以使用的最小CPU配额,将会预留
+- name: memory_reserved
+  require: false
+  type: CAPACITY_MB
+  default: 500M
+  min_value: 10M
+  max_value: NULL
+  section: SSTABLE
+  need_restart: false
+  description_en: the size of the system memory reserved for emergency internal use.
+  description_local: 系统预留内存大小
+- name: server_cpu_quota_max
+  require: false
+  type: DOUBLE
+  default: 5
+  min_value: 0
+  max_value: 16
+  section: TENANT
+  need_restart: true
+  description_en: the number of maximal vCPUs allocated to the server tenant
+  description_local: 系统可以使用的最大CPU配额
+- name: rootservice_ready_check_interval
+  require: false
+  type: TIME
+  default: 3s
+  min_value: 100000us
+  max_value: 1m
+  section: ROOT_SERVICE
+  need_restart: false
+  description_en: the interval between the schedule of the task that checks on the status of the ZONE during restarting.
+  description_local: RootService启动后等待和检查集群状态的时间间隔
+- name: debug_sync_timeout
+  require: false
+  type: TIME
+  default: 0
+  min_value: 0
+  max_value: NULL
+  section: OBSERVER
+  need_restart: false
+  description_en: Enable the debug sync facility and optionally specify a default wait timeout in micro seconds. A zero value keeps the facility disabled
+  description_local: 打开debug sync调试开关,并设置其超时时间;值为0时,则关闭。
+- name: syslog_level
+  require: false
+  type: SAFE_STRING
+  default: INFO
+  min_value: NULL
+  max_value: NULL
+  section: OBSERVER
+  need_restart: false
+  description_en: specifies the current level of logging.
+  description_local: 系统日志级别
+- name: resource_hard_limit
+  require: false
+  type: INT
+  default: 100
+  min_value: 1
+  max_value: 10000
+  section: LOAD_BALANCE
+  need_restart: false
+  description_en: Used along with resource_soft_limit in unit allocation. If server utilization is less than resource_soft_limit, a policy of best fit will be used for unit allocation; otherwise, a least load policy will be employed. Ultimately, system utilization should not be larger than resource_hard_limit.
+  description_local: CPU和内存等资源进行分配的时候,资源总量是实际数量乘以该百分比的值
+- name: leak_mod_to_check
+  require: false
+  type: SAFE_STRING
+  default: NONE
+  min_value: NULL
+  max_value: NULL
+  section: OBSERVER
+  need_restart: false
+  description_en: the name of the module under memory leak checks
+  description_local: 内存泄露检查,用于内部调试目的
+- name: balancer_task_timeout
+  require: false
+  type: TIME
+  default: 20m
+  min_value: 1s
+  max_value: NULL
+  section: LOAD_BALANCE
+  need_restart: false
+  description_en: the time to execute the load-balancing task before it is terminated.
+  description_local: 负载均衡等后台任务的超时时间
+- name: enable_upgrade_mode
+  require: false
+  type: BOOL
+  default: false
+  min_value: NULL
+  max_value: NULL
+  section: OBSERVER
+  need_restart: false
+  description_en: specifies whether upgrade mode is turned on. If turned on, daily merger and balancer will be disabled.
+  description_local: 升级模式开关。在升级模式中,会暂停部分系统后台功能。
+- name: multiblock_read_size
+  require: false
+  type: CAPACITY_MB
+  default: 128K
+  min_value: 0K
+  max_value: 2M
+  section: SSTABLE
+  need_restart: false
+  description_en: multiple block batch read size in one read io request.
+  description_local: 读取数据时IO聚合大小
+- name: migration_disable_time
+  require: false
+  type: TIME
+  default: 3600s
+  min_value: 1s
+  max_value: NULL
+  section: ROOT_SERVICE
+  need_restart: false
+  description_en: the duration in which the observer stays in the block_migrate_in status, which means no partition is allowed to migrate into the server.
+ description_local: 因磁盘满等原因导致某个节点数据迁入失败时,暂停迁入时长 +- name: tablet_size + require: false + type: CAPACITY_MB + default: 128M + min_value: NULL + max_value: NULL + section: OBSERVER + need_restart: false + description_en: default tablet size, has to be a multiple of 2M + description_local: 分区内部并行处理(合并、查询等)时每个分片的大小 +- name: dead_socket_detection_timeout + require: false + type: TIME + default: 10s + min_value: 0s + max_value: 2h + section: OBSERVER + need_restart: false + description_en: specify a tcp_user_timeout for RFC5482. A zero value makes the option disabled + description_local: 失效socket检测超时时间 +- name: server_check_interval + require: false + type: TIME + default: 30s + min_value: 1s + max_value: NULL + section: ROOT_SERVICE + need_restart: false + description_en: the time interval between schedules of a task that examines the __all_server table. + description_local: server表一致性检查的时间间隔 +- name: lease_time + require: false + type: TIME + default: 10s + min_value: 1s + max_value: 5m + section: ROOT_SERVICE + need_restart: false + description_en: Lease for current heartbeat. If the root server does not receive any heartbeat from an observer in lease_time seconds, that observer is considered to be offline. + description_local: RootService与其他服务节点之间的租约时长。一般请勿修改。 +- name: rootservice_async_task_queue_size + require: false + type: INT + default: 16384 + min_value: 8 + max_value: 131072 + section: ROOT_SERVICE + need_restart: false + description_en: the size of the queue for all asynchronous tasks at rootserver. + description_local: RootService内部异步任务队列的容量 +- name: location_refresh_thread_count + require: false + type: INT + default: 4 + min_value: 2 + max_value: 64 + section: LOCATION_CACHE + need_restart: false + description_en: the number of threads that fetch the partition location information from the root service. + description_local: 用于位置缓存刷新的线程数 +- name: minor_compact_trigger + require: false + type: INT + default: 2 + min_value: 0 + max_value: 16 + section: TENANT + need_restart: false + description_en: minor_compact_trigger + description_local: 触发小合并的迷你合并次数 +- name: major_freeze_duty_time + type: MOMENT + default: Disable + min_value: 00:00 + max_value: 23:59 + section: DAILY_MERGE + need_restart: false + description_en: the start time of system daily merge procedure. + description_local: 每日定时冻结和合并的触发时刻 +- name: ignore_replay_checksum_error + require: false + type: BOOL + default: false + min_value: NULL + max_value: NULL + section: TRANS + need_restart: false + description_en: specifies whether error raised from the memtable replay checksum validation can be ignored. + description_local: 是否忽略回放事务日志时发生的校验和错误 +- name: user_block_cache_priority + require: false + type: INT + default: 1 + min_value: 1 + max_value: NULL + section: CACHE + need_restart: false + description_en: user block cache priority + description_local: 数据块缓存在缓存系统中的优先级 +- name: syslog_io_bandwidth_limit + require: false + type: CAPACITY_MB + default: 30MB + min_value: NULL + max_value: NULL + section: OBSERVER + need_restart: false + description_en: Syslog IO bandwidth limitation, exceeding syslog would be truncated. Use 0 to disable ERROR log. + description_local: 系统日志所能占用的磁盘IO带宽上限,超过带宽的系统日志将被丢弃 +- name: workers_per_cpu_quota + require: false + type: INT + default: 10 + min_value: 2 + max_value: 20 + section: TENANT + need_restart: false + description_en: the ratio (integer) between the number of system allocated workers vs the maximum number of threads that can be scheduled concurrently.
+ description_local: 每个CPU配额分配多少个工作线程 +- name: enable_record_trace_id + require: false + type: BOOL + default: true + min_value: NULL + max_value: NULL + section: OBSERVER + need_restart: false + description_en: specifies whether recording the app trace id is turned on. + description_local: 是否记录应用端设置的追踪ID +- name: config_additional_dir + require: false + type: PATH_LIST + default: etc2;etc3 + min_value: NULL + max_value: NULL + section: OBSERVER + need_restart: false + description_en: additional directories of configure file + description_local: 本地存储配置文件的多个目录,为了冗余存储多份配置文件 +- name: enable_syslog_recycle + require: false + essential: true + type: BOOL + default: false + min_value: NULL + max_value: NULL + section: OBSERVER + need_restart: false + description_en: specifies whether log file recycling is turned on + description_local: 是否自动回收系统日志 +- name: max_syslog_file_count + require: false + essential: true + type: INT + default: 0 + min_value: 0 + max_value: NULL + section: OBSERVER + need_restart: false + description_en: specifies the maximum number of the log files that can co-exist before the log file recycling kicks in. Each log file can occupy at most 256MB disk space. When this value is set to 0, no log file will be removed. + description_local: 系统日志自动回收复用时,最多保留多少个。值0表示不自动清理。 +- name: px_task_size + require: false + type: CAPACITY_MB + default: 2M + min_value: 2M + max_value: NULL + section: OBSERVER + need_restart: false + description_en: min task access size of px task + description_local: SQL并行查询引擎每个任务处理的数据量大小 +- name: replica_safe_remove_time + require: false + type: TIME + default: 2h + min_value: 1m + max_value: NULL + section: ROOT_SERVICE + need_restart: false + description_en: the time interval for which a removed replica has remained unmodified, beyond which the replica is considered safe to remove + description_local: 已删除副本可以被清理的安全保留时间 +- name: builtin_db_data_verify_cycle + require: false + type: INT + default: 20 + min_value: 0 + max_value: 360 + section: OBSERVER + need_restart: false + description_en: check cycle of db data. + description_local: 数据坏块自检周期,单位为天。值0表示不检查。 +- name: system_cpu_quota + require: false + type: DOUBLE + default: 10 + min_value: 0 + max_value: 16 + section: TENANT + need_restart: false + description_en: the number of vCPUs allocated to the server tenant + description_local: 系统后台任务可使用CPU配额 +- name: enable_sys_table_ddl + require: false + type: BOOL + default: false + min_value: NULL + max_value: NULL + section: ROOT_SERVICE + need_restart: false + description_en: specifies whether a system table is allowed to be created manually. + description_local: 是否允许新建和修改系统表。主要在系统升级过程中使用。 +- name: merge_thread_count + require: false + type: INT + default: 0 + min_value: 0 + max_value: 256 + section: OBSERVER + need_restart: false + description_en: worker thread num for compaction + description_local: 用于合并的线程数 +- name: net_thread_count + require: false + type: INT + default: 0 + min_value: 0 + max_value: 128 + section: OBSERVER + need_restart: true + description_en: the number of rpc/mysql I/O threads for Libeasy. + description_local: 网络IO线程数 +- name: max_stale_time_for_weak_consistency + require: false + type: TIME + default: 5s + min_value: 5s + max_value: NULL + section: OBSERVER + need_restart: false + description_en: the max data stale time that observer can provide service when its parent is invalid.
+ description_local: 弱一致性读允许读到多旧的数据 +- name: backup_log_archive_option + require: false + type: SAFE_STRING + default: OPTIONAL + min_value: NULL + max_value: NULL + section: OBSERVER + need_restart: false + description_en: backup log archive option, support MANDATORY/OPTIONAL, COMPRESSION + description_local: 日志备份的参数 +- name: backup_concurrency + require: false + type: INT + default: 0 + min_value: 0 + max_value: 100 + section: OBSERVER + need_restart: false + description_en: backup concurrency limit. + description_local: observer备份基线的并发度 +- name: balancer_log_interval + require: false + type: TIME + default: 1m + min_value: 1s + max_value: NULL + section: LOAD_BALANCE + need_restart: false + description_en: the time interval between logging the load-balancing tasks statistics. + description_local: 负载均衡等后台任务线程打印统计日志的间隔时间 +- name: restore_concurrency + require: false + type: INT + default: 0 + min_value: 0 + max_value: 512 + section: OBSERVER + need_restart: false + description_en: the current work thread num of restore macro block. + description_local: 从备份恢复租户数据时最大并发度 +- name: micro_block_merge_verify_level + require: false + type: INT + default: 2 + min_value: 0 + max_value: 3 + section: OBSERVER + need_restart: false + description_en: specify what kind of verification should be done when merging micro block. 0, no verification will be done; 1, verify encoding algorithm, encoded micro block will be read to ensure data is correct; 2, verify encoding and compression algorithm, besides encoding verification, compressed block will be decompressed to ensure data is correct; 3, verify encoding, compression algorithm and lost write protect + + description_local: 控制合并时宏块的校验级别 +- name: bf_cache_miss_count_threshold + require: false + type: INT + default: 100 + min_value: 0 + max_value: NULL + section: CACHE + need_restart: false + description_en: bf cache miss count threshold, 0 means disable bf cache + description_local: 用于控制bloomfilter cache的触发次数,当宏块未命中次数达到这个值时,给创建bloomfilter缓存。0表示关闭。 +- name: weak_read_version_refresh_interval + require: false + type: TIME + default: 50ms + min_value: 0ms + max_value: NULL + section: OBSERVER + need_restart: false + description_en: the time interval to refresh cluster weak read version + description_local: 弱一致性读版本号的刷新周期,影响弱一致性读数据的延时;值为0时,表示不再刷新弱一致性读版本号,不提供单调读功能 +- name: large_query_worker_percentage + require: false + type: DOUBLE + default: 30 + min_value: 0 + max_value: 100 + section: TENANT + need_restart: false + description_en: the percentage of the workers reserved to serve large query request. + description_local: 预留给大查询处理的工作线程百分比 +- name: clog_transport_compress_all + require: false + type: BOOL + default: false + min_value: NULL + max_value: NULL + section: TRANS + need_restart: false + description_en: If this option is set to true, use compression for clog transport. 
The default is false (no compression) + description_local: 事务日志传输时是否压缩 +- name: flush_log_at_trx_commit + require: false + type: INT + default: 1 + min_value: 0 + max_value: 2 + section: TRANS + need_restart: false + description_en: 0 means commit transactions without waiting clog write to buffer cache, 1 means commit transactions after clog flush to disk, 2 means commit transactions after clog write to buffer cache + description_local: 事务提交时写事务日志策略。0表示不等待日志写入缓冲区,1表示等待日志写入磁盘,2表示等待日志写入缓冲区而不等落盘 +- name: global_major_freeze_residual_memory + require: false + type: INT + default: 40 + min_value: 1 + max_value: 99 + section: OBSERVER + need_restart: false + description_en: post global major freeze when observer memstore free memory (plus memory held by frozen memstore and blockcache) reaches this limit. The limit is calculated as memory_limit * (1 - system_memory_percentage/100) * global_major_freeze_residual_memory/100 + description_local: 当剩余内存小于这个百分比时,触发全局冻结 +- name: enable_sql_audit + require: false + type: BOOL + default: true + min_value: NULL + max_value: NULL + section: OBSERVER + need_restart: false + description_en: specifies whether SQL audit is turned on. + description_local: SQL审计功能开关 +- name: merger_switch_leader_duration_time + require: false + type: TIME + default: 3m + min_value: 0s + max_value: 30m + section: ROOT_SERVICE + need_restart: false + description_en: switch leader duration time for daily merge. + description_local: 合并时,批量切主的时间间隔 +- name: enable_record_trace_log + require: false + type: BOOL + default: false + min_value: NULL + max_value: NULL + section: OBSERVER + need_restart: false + description_en: specifies whether to always record the trace log. + description_local: 是否记录追踪日志 +- name: sys_bkgd_migration_change_member_list_timeout + require: false + type: TIME + default: 1h + min_value: 0s + max_value: 24h + section: OBSERVER + need_restart: false + description_en: the timeout for migration change member list retry. + description_local: 副本迁移时变更Paxos成员组操作的超时时间 +- name: rootservice_list + require: false + type: SAFE_STRING_LIST + default: + min_value: NULL + max_value: NULL + section: OBSERVER + need_restart: false + description_en: a list of servers which contains rootservice + description_local: RootService及其副本所在的机器列表 +- name: enable_syslog_wf + require: false + type: BOOL + default: true + min_value: NULL + max_value: NULL + section: OBSERVER + need_restart: false + description_en: specifies whether any log message with a log level higher than WARN would be printed into a separate file with a suffix of wf + description_local: 是否把WARN以上级别的系统日志打印到一个单独的日志文件中 +- name: global_index_build_single_replica_timeout + require: false + type: TIME + default: 48h + min_value: 1h + max_value: NULL + section: ROOT_SERVICE + need_restart: false + description_en: build single replica task timeout when rootservice schedule to build global index.
+ description_local: 建全局索引时,每个副本构建的超时时间 +- name: memstore_limit_percentage + require: false + type: INT + default: 50 + min_value: 1 + max_value: 99 + section: TENANT + need_restart: false + description_en: used in calculating the value of MEMSTORE_LIMIT + description_local: 租户用于memstore的内存占其总可用内存的百分比 +- name: minor_deferred_gc_time + require: false + type: TIME + default: 0s + min_value: 0s + max_value: 24h + section: OBSERVER + need_restart: false + description_en: sstable deferred gc time after merge + description_local: 合并之后SSTable延迟回收间隔 +- name: data_disk_usage_limit_percentage + require: false + type: INT + default: 90 + min_value: 50 + max_value: 100 + section: OBSERVER + need_restart: false + description_en: the safe use percentage of data disk + description_local: 数据文件最大可以写入的百分比,超过这个阈值后,禁止数据迁入 +- name: enable_perf_event + require: false + type: BOOL + default: true + min_value: NULL + max_value: NULL + section: OBSERVER + need_restart: false + description_en: specifies whether to enable perf event feature. + description_local: perf event调试特性开关 +- name: obconfig_url + require: false + type: STRING + default: + min_value: NULL + max_value: NULL + section: OBSERVER + need_restart: false + description_en: URL for OBConfig service + description_local: OBConfig服务的URL地址 +- name: cpu_quota_concurrency + require: false + type: DOUBLE + default: 4 + min_value: 1 + max_value: 10 + section: TENANT + need_restart: false + description_en: max allowed concurrency for 1 CPU quota + description_local: 租户每个CPU配额允许的最大并发数 +- name: zone_merge_order + require: false + type: SAFE_STRING + default: + min_value: NULL + max_value: NULL + section: DAILY_MERGE + need_restart: false + description_en: the order of zone start merge in daily merge + description_local: 轮转合并的时候,多个Zone的顺序。不指定的时候,由系统自动决定。 +- name: backup_recovery_window + require: false + type: TIME + default: 0 + min_value: 0 + max_value: NULL + section: OBSERVER + need_restart: false + description_en: backup expired day limit, 0 means not expired + description_local: 恢复窗口大小 +- name: default_row_format + require: false + type: SAFE_STRING + default: compact + min_value: NULL + max_value: NULL + section: OBSERVER + need_restart: false + description_en: default row format in mysql mode + description_local: MySQL模式下,建表时使用的默认行格式 +- name: stack_size + require: false + type: CAPACITY_MB + default: 1M + min_value: 512K + max_value: 20M + section: OBSERVER + need_restart: true + description_en: the size of routine execution stack + description_local: 程序函数调用栈大小 +- name: balancer_idle_time + require: false + type: TIME + default: 5m + min_value: 10s + max_value: NULL + section: LOAD_BALANCE + need_restart: false + description_en: the time interval between the schedules of the partition load-balancing task. + description_local: 负载均衡等后台任务线程空闲时的唤醒间隔时间 +- name: memory_limit + name_local: 最大运行内存 + require: false + essential: true + type: CAPACITY_MB + default: 0 + min_value: NULL + max_value: NULL + modify_limit: decrease + section: OBSERVER + need_restart: false + description_en: the size of the memory reserved for internal use(for testing purpose). 
Please enter a capacity, such as 8G + description_local: 可用总内存大小。请输入带容量单位的整数,如8G +- name: system_memory + name_local: 集群系统内存 + essential: true + type: CAPACITY_MB + default: 30G + min_value: 0M + max_value: NULL + section: OBSERVER + need_restart: false + description_en: the memory reserved for internal use which cannot be allocated to any outer-tenant, and should be determined to guarantee every server functions normally. Please enter a capacity, such as 2G + description_local: 系统预留内存大小,不能分配给普通租户使用。请输入带容量单位的整数,如2G +- name: __min_full_resource_pool_memory + require: true + type: INT + default: 2147483648 + min_value: 1073741824 + max_value: NULL + need_restart: false + description_en: the minimum memory limit of the resource pool + description_local: 资源池最小内存限制 +- name: virtual_table_location_cache_expire_time + require: false + type: TIME + default: 8s + min_value: 1s + max_value: NULL + section: LOCATION_CACHE + need_restart: false + description_en: expiration time for virtual table location info in partition location cache. + description_local: 虚拟表的位置信息缓存过期时间 +- name: ssl_external_kms_info + require: false + type: SAFE_STRING + default: + min_value: NULL + max_value: NULL + section: OBSERVER + need_restart: false + description_en: when using the external key management center for ssl, this parameter will store some key management information + description_local: 配置ssl使用的主密钥管理服务 +- name: enable_sql_operator_dump + require: false + type: BOOL + default: true + min_value: NULL + max_value: NULL + section: OBSERVER + need_restart: false + description_en: specifies whether sql operators (sort/hash join/material/window function/interm result/...) are allowed to write to disk + description_local: 是否允许SQL处理过程的中间结果写入磁盘以释放内存 +- name: enable_rich_error_msg + require: false + type: BOOL + default: false + min_value: NULL + max_value: NULL + section: OBSERVER + need_restart: false + description_en: specifies whether to add ip:port, time and trace id to user error messages. + description_local: 是否在客户端消息中添加服务器地址、时间、追踪ID等调试信息 +- name: log_archive_concurrency + require: false + type: INT + default: 0 + min_value: 0 + max_value: NULL + section: OBSERVER + need_restart: false + description_en: concurrency for log_archive_sender and log_archive_spiter + description_local: 日志归档并发度 +- name: server_balance_disk_tolerance_percent + require: false + type: INT + default: 1 + min_value: 1 + max_value: 100 + section: LOAD_BALANCE + need_restart: false + description_en: specifies the tolerance (in percentage) of the unbalance of the disk space utilization among all servers. The average disk space utilization is calculated by dividing the total space by the number of servers. server balancer will start a rebalancing task when the deviation between the average usage and some server load is greater than this tolerance + description_local: 节点负载均衡策略中,磁盘资源不均衡的容忍度 +- name: user_tab_col_stat_cache_priority + require: false + type: INT + default: 1 + min_value: 1 + max_value: NULL + section: CACHE + need_restart: false + description_en: user tab col stat cache priority + description_local: 统计数据缓存在缓存系统中的优先级 +- name: recyclebin_object_expire_time + require: false + type: TIME + default: 0s + min_value: 0s + max_value: NULL + section: ROOT_SERVICE + need_restart: false + description_en: recyclebin object expire time, default 0 that means auto purge recyclebin off.
+ description_local: 回收站对象的有效期,超过有效的对象将被回收;0表示关闭回收功能; +- name: minor_warm_up_duration_time + require: false + type: TIME + default: 30s + min_value: 0s + max_value: 60m + section: OBSERVER + need_restart: false + description_en: warm up duration time for minor freeze. + description_local: 小合并产生新转储文件的预热时间 +- name: migrate_concurrency + require: false + type: INT + default: 10 + min_value: 0 + max_value: 64 + section: OBSERVER + need_restart: false + description_en: set concurrency of migration, set upper limit to migrate_concurrency and set lower limit to migrate_concurrency/2 + description_local: 控制内部数据迁移的并发度 +- name: redundancy_level + require: false + type: SAFE_STRING + default: NORMAL + min_value: NULL + max_value: NULL + section: SSTABLE + need_restart: false + description_en: EXTERNAL, use external redundancy; NORMAL, tolerate one disk failure; HIGH, tolerate two disk failures if disk count is enough + description_local: OB内置本地磁盘RAID特性。暂勿使用 +- name: trx_2pc_retry_interval + require: false + type: TIME + default: 100ms + min_value: 1ms + max_value: 5000ms + section: TRANS + need_restart: false + description_en: the time interval between the retries in case of failure during a transaction's two-phase commit phase + description_local: 两阶段提交失败时候自动重试的间隔 +- name: cpu_count + name_local: 系统CPU总数 + require: false + essential: true + type: INT + default: 0 + min_value: 0 + max_value: NULL + section: OBSERVER + need_restart: true + description_en: the number of CPUs in the system. If this parameter is set to zero, the number will be set according to sysconf; otherwise, this parameter is used. + description_local: 系统CPU总数,如果设置为0,将自动检测 +- name: devname + name_local: 网卡名 + essential: true + type: SAFE_STRING + min_value: NULL + max_value: NULL + need_restart: true + description_en: name of network adapter + description_local: 非必填, 服务进程绑定的网卡设备名, 默认通过配置的ip设置local_ip, 如果预检查失败可通过配置此项来指定网卡 +- name: local_ip + name_local: 本机ip + type: SAFE_STRING + min_value: NULL + max_value: NULL + need_restart: true + description_en: local ip address + description_local: 本机ip地址 +- name: appname + require: false + type: SAFE_STRING + default: obcluster + min_value: NULL + max_value: NULL + section: OBSERVER + need_redeploy: true + description_en: Name of the cluster + description_local: 本OceanBase集群名 +- name: use_large_pages + require: false + type: SAFE_STRING + default: false + min_value: NULL + max_value: NULL + section: OBSERVER + need_restart: true + description_en: used to manage the database's use of large pages, values are false, true, only + description_local: 控制内存大页的行为,"true"表示在操作系统开启内存大页并且有空闲大页时,数据库总是申请内存大页,否则申请普通内存页, "false"表示数据库不使用大页, "only"表示数据库总是分配大页 +- name: dtl_buffer_size + require: false + type: CAPACITY_MB + default: 64K + min_value: 4K + max_value: 2M + section: OBSERVER + need_restart: false + description_en: buffer size for DTL + description_local: SQL数据传输模块使用的缓存大小 +- name: server_balance_critical_disk_waterlevel + require: false + type: INT + default: 80 + min_value: 0 + max_value: 100 + section: LOAD_BALANCE + need_restart: false + description_en: disk water level to determine server balance strategy + description_local: 磁盘水位线超过该阈值时,负载均衡策略将倾向于优先考虑磁盘均衡 +- name: location_fetch_concurrency + require: false + type: INT + default: 20 + min_value: 1 + max_value: 1000 + section: LOCATION_CACHE + need_restart: false + description_en: the maximum number of the tasks which fetch the partition location information concurrently.
+ description_local: 位置缓存信息刷新的最大并发度 +- name: enable_async_syslog + require: false + type: BOOL + default: true + min_value: NULL + max_value: NULL + section: OBSERVER + need_restart: false + description_en: specifies whether use async syslog + description_local: 是否启用系统日志异步写 +- name: clog_sync_time_warn_threshold + require: false + type: TIME + default: 1s + min_value: 1ms + max_value: 10000ms + section: TRANS + need_restart: false + description_en: the time given to the commit log synchronization between a leader and its followers before a warning message is printed in the log file. + description_local: 事务日志同步耗时告警阈值,同步耗时超过该值产生WARN日志 +- name: location_cache_cpu_quota + require: false + type: DOUBLE + default: 5 + min_value: 0 + max_value: 10 + section: TENANT + need_restart: false + description_en: the number of vCPUs allocated for the requests regarding location info of the core tables. + description_local: 位置缓存模块使用的CPU配额 +- name: bf_cache_priority + require: false + type: INT + default: 1 + min_value: 1 + max_value: NULL + section: CACHE + need_restart: false + description_en: bloomfilter cache priority + description_local: 布隆过滤器占用缓存的优先级 +- name: merger_check_interval + require: false + type: TIME + default: 10m + min_value: 10s + max_value: 60m + section: DAILY_MERGE + need_restart: false + description_en: the time interval between the schedules of the task that checks on the progress of MERGE for each zone. + description_local: 合并状态检查线程的调度间隔 +- name: enable_rootservice_standalone + require: false + type: BOOL + default: false + min_value: NULL + max_value: NULL + section: ROOT_SERVICE + need_restart: false + description_en: specifies whether the SYS tenant is allowed to occupy an observer exclusively, thus running in the standalone mode. + description_local: 是否让系统租户和RootService独占observer节点 +- name: px_workers_per_cpu_quota + require: false + type: INT + default: 10 + min_value: 0 + max_value: 20 + section: TENANT + need_restart: false + description_en: the ratio between the number of system allocated px workers vs the maximum number of threads that can be scheduled concurrently. + description_local: 并行执行工作线程数的比例 +- name: large_query_threshold + require: false + type: TIME + default: 100ms + min_value: 1ms + max_value: NULL + section: TENANT + need_restart: false + description_en: threshold for execution time beyond which a request may be paused and rescheduled as large request + description_local: 一个查询执行时间超过该阈值会被判断为大查询,执行大查询调度策略 +- name: sys_bkgd_net_percentage + require: false + type: INT + default: 60 + min_value: 0 + max_value: 100 + section: OBSERVER + need_restart: false + description_en: the net percentage of sys background net. + description_local: 后台系统任务可占用网络带宽百分比 +- name: fuse_row_cache_priority + require: false + type: INT + default: 1 + min_value: 1 + max_value: NULL + section: CACHE + need_restart: false + description_en: fuse row cache priority + description_local: 融合行缓存在缓存系统中的优先级 +- name: rpc_timeout + require: false + type: TIME + default: 2s + min_value: NULL + max_value: NULL + section: RPC + need_restart: false + description_en: the time during which a RPC request is permitted to execute before it is terminated + description_local: 集群内部请求的超时时间 +- name: tenant_task_queue_size + require: false + type: INT + default: 65536 + min_value: 1024 + max_value: NULL + section: OBSERVER + need_restart: false + description_en: the size of the task queue for each tenant. 
+ description_local: 每个租户的请求队列大小 +- name: resource_soft_limit + require: false + type: INT + default: 50 + min_value: 1 + max_value: 10000 + section: LOAD_BALANCE + need_restart: false + description_en: Used along with resource_hard_limit in unit allocation. If server utilization is less than resource_soft_limit, a policy of best fit will be used for unit allocation; otherwise, a least load policy will be employed. Ultimately, system utilization should not be larger than resource_hard_limit. + description_local: 当所有节点的资源水位低于该阈值时,不执行负载均衡 +- name: plan_cache_evict_interval + require: false + type: TIME + default: 1s + min_value: 0s + max_value: NULL + section: TENANT + need_restart: false + description_en: time interval for periodic plan cache eviction. + description_local: 执行计划缓存的淘汰间隔 +- name: server_balance_cpu_mem_tolerance_percent + require: false + type: INT + default: 5 + min_value: 1 + max_value: 100 + section: LOAD_BALANCE + need_restart: false + description_en: specifies the tolerance (in percentage) of the unbalance of the cpu/memory utilization among all servers. The average cpu/memory utilization is calculated by dividing the total cpu/memory by the number of servers. server balancer will start a rebalancing task when the deviation between the average usage and some server load is greater than this tolerance + description_local: 节点负载均衡策略中,CPU和内存资源不均衡的容忍度 +- name: autoinc_cache_refresh_interval + require: false + type: TIME + default: 3600s + min_value: 100ms + max_value: NULL + section: OBSERVER + need_restart: false + description_en: auto-increment service cache refresh sync_value in this interval + description_local: 自动刷新自增列值的时间间隔 +- name: all_server_list + require: false + type: SAFE_STRING + default: + min_value: NULL + max_value: NULL + section: LOCATION_CACHE + need_restart: false + description_en: all server addr in cluster + description_local: 集群中所有机器的列表,不建议人工修改 +- name: enable_rebalance + require: false + type: BOOL + default: true + min_value: NULL + max_value: NULL + section: LOAD_BALANCE + need_restart: false + description_en: specifies whether the partition load-balancing is turned on. + description_local: 自动负载均衡开关 +- name: internal_sql_execute_timeout + require: false + type: TIME + default: 30s + min_value: 1000us + max_value: 10m + section: OBSERVER + need_restart: false + description_en: the number of microseconds an internal DML request is permitted to execute before it is terminated. + description_local: 系统内部SQL请求的超时时间 +- name: user_row_cache_priority + require: false + type: INT + default: 1 + min_value: 1 + max_value: NULL + section: CACHE + need_restart: false + description_en: user row cache priority + description_local: 基线数据行缓存在缓存系统中的优先级 +- name: server_permanent_offline_time + require: false + type: TIME + default: 3600s + min_value: 20s + max_value: NULL + section: ROOT_SERVICE + need_restart: false + description_en: the time interval between any two heartbeats beyond which a server is considered to be permanently offline. + description_local: 节点心跳中断多久后认为其被“永久下线”,“永久下线”的节点上的数据副本需要被自动补足 +- name: schema_history_expire_time + require: false + type: TIME + default: 7d + min_value: 1m + max_value: 30d + section: OBSERVER + need_restart: false + description_en: the hour of expire time for schema history + description_local: 元数据历史数据过期时间 +- name: datafile_disk_percentage + require: false + type: INT + min_value: 0 + max_value: 99 + modify_limit: decrease + section: SSTABLE + need_restart: false + description_en: the percentage of disk space used by the data files.
+ description_local: data_dir所在磁盘将被OceanBase系统初始化用于存储数据,本配置项表示占用该磁盘总空间的百分比 +- name: default_compress_func + require: false + type: SAFE_STRING + default: zstd_1.3.8 + min_value: NULL + max_value: NULL + section: OBSERVER + need_restart: false + description_en: default compress function name for create new table + description_local: MySQL模式下,建表时使用的默认压缩算法 +- name: memory_chunk_cache_size + require: false + type: CAPACITY_MB + default: 0M + min_value: 0M + max_value: NULL + section: OBSERVER + need_restart: false + description_en: the maximum size of memory cached by memory chunk cache. + description_local: 内存分配器缓存的内存块容量。值为0的时候表示系统自适应。 +- name: ob_event_history_recycle_interval + require: false + type: TIME + default: 7d + min_value: 1d + max_value: 180d + section: ROOT_SERVICE + need_restart: false + description_en: the time to recycle event history. + description_local: OB事件表中事件条目的保存期限 +- name: enable_ddl + require: false + type: BOOL + default: true + min_value: NULL + max_value: NULL + section: ROOT_SERVICE + need_restart: false + description_en: specifies whether DDL operation is turned on. + description_local: 是否允许执行DDL +- name: balance_blacklist_failure_threshold + require: false + type: INT + default: 5 + min_value: 0 + max_value: 1000 + section: LOAD_BALANCE + need_restart: false + description_en: the number of failures after which a balance task is put into the blacklist + description_local: 副本迁移等后台任务连续失败超过该阈值后,将被放入黑名单 +- name: wait_leader_batch_count + require: false + type: INT + default: 1024 + min_value: 128 + max_value: 5000 + section: ROOT_SERVICE + need_restart: false + description_en: leader batch count every time the leader coordinator waits. + description_local: RootService发送切主命令的批次大小 +- name: proxyro_password + require: false + type: STRING + default: '' + min_value: NULL + max_value: NULL + need_restart: false + description_en: password of observer proxyro user + description_local: proxyro用户的密码 +- name: root_password + require: false + type: STRING + default: '' + min_value: NULL + max_value: NULL + need_restart: false + description_en: password of observer root user + description_local: sys租户root用户的密码 +# todo: update when the documentation is ready +- name: sql_login_thread_count + require: false + type: INT + default: 0 + min_value: 0 + max_value: 32 + section: OBSERVER + need_restart: false + need_redeploy: false + description_en: 'the number of threads for sql login request. Range: [0, 32] in integer, 0 stands for using the default thread count defined in TG. The default thread count for login request in TG is normal: 6, mini-mode: 2' + description_local: '' +- name: tcp_keepcnt + require: false + type: INT + default: 10 + min_value: 1 + max_value: NULL + section: OBSERVER + need_restart: false + need_redeploy: false + description_en: 'The maximum number of keepalive probes TCP should send before dropping the connection. Takes effect for newly established connections. Range: [1,+∞)' + description_local: 关闭一个非活跃连接之前的最大重试次数。 +- name: tcp_keepintvl + require: false + type: TIME + default: 6s + min_value: 1s + max_value: NULL + section: OBSERVER + need_restart: false + need_redeploy: false + description_en: 'The time (in seconds) between individual keepalive probes. Takes effect for newly established connections.
Range: [1s, +∞)' + description_local: 开启客户端连接的探活机制后,前后两次探测之间的时间间隔,单位为秒。 +- name: tcp_keepidle + require: false + type: TIME + default: 7200s + min_value: 1s + max_value: NULL + section: OBSERVER + need_restart: false + need_redeploy: false + description_en: 'The time (in seconds) the connection needs to remain idle before TCP starts sending keepalive probe. Takes effect for newly established connections. Range: [1s, +∞)' + description_local: 客户端连接上服务器后,如果没有数据发送,多久后会发送 Keepalive 探测分组,单位为秒。 +- name: enable_tcp_keepalive + require: false + type: BOOL + default: true + min_value: NULL + max_value: NULL + section: OBSERVER + need_restart: false + need_redeploy: false + description_en: enable TCP keepalive for the TCP connection of sql protocol. Takes effect for newly established connections. + description_local: 开启或关闭客户端连接的探活机制。 +- name: ob_ratelimit_stat_period + require: false + type: TIME + default: 1s + min_value: 100ms + max_value: NULL + section: OBSERVER + need_restart: false + need_redeploy: false + description_en: "the time interval to update observer's maximum bandwidth to a certain region." + description_local: OBServer 计算和更新最大带宽的时间间隔。 +- name: enable_ob_ratelimit + require: false + type: BOOL + default: false + min_value: NULL + max_value: NULL + section: OBSERVER + need_restart: false + need_redeploy: false + description_en: enable ratelimit between regions for RPC connection. + description_local: 开启或关闭 Region 之间 RPC 连接的限流。 +- name: schema_history_recycle_interval + require: false + type: TIME + default: 10m + min_value: 0s + max_value: NULL + section: LOAD_BALANCE + need_restart: false + need_redeploy: false + description_en: 'the time interval between the schedules of schema history recycle task. Range: [0s, +∞)' + description_local: 系统内部执行 schema 多版本记录回收任务的时间间隔。 +- name: backup_data_file_size + require: false + type: CAPACITY_MB + default: 4G + min_value: 512M + max_value: 4G + section: OBSERVER + need_restart: false + need_redeploy: false + description_en: 'backup data file size. Range: [512M, 4G] in integer' + description_local: 备份数据文件的容量。 +- name: data_storage_error_tolerance_time + require: false + type: TIME + default: 300s + min_value: 10s + max_value: 7200s + section: OBSERVER + need_restart: false + need_redeploy: false + description_en: time to tolerate disk read failure, after that, the disk status will be set error. Range [10s,7200s]. The default value is 300s + description_local: 数据盘状态设为 ERROR 状态的容忍时间。 +- name: data_storage_warning_tolerance_time + require: false + type: TIME + default: 30s + min_value: 10s + max_value: 300s + section: OBSERVER + need_restart: false + need_redeploy: false + description_en: time to tolerate disk read failure, after that, the disk status will be set warning. Range [10s,300s]. The default value is 30s + description_local: 数据盘状态设为 WARNING 状态的容忍时间。 +- name: index_block_cache_priority + require: false + type: INT + default: 10 + min_value: 1 + max_value: NULL + section: CACHE + need_restart: false + need_redeploy: false + description_en: index cache priority. Range: [1, +∞) + description_local: Tablet 映射缓存优先级。 +- name: opt_tab_stat_cache_priority + require: false + type: INT + default: 1 + min_value: 1 + max_value: NULL + section: CACHE + need_restart: false + need_redeploy: false + description_en: tab stat cache priority.
Range: [1, +∞) + description_local: 统计信息缓存优先级。 +- name: tablet_ls_cache_priority + require: false + type: INT + default: 1000 + min_value: 1 + max_value: NULL + section: CACHE + need_restart: false + need_redeploy: false + description_en: tablet ls cache priority. Range: [1, +∞) + description_local: 元数据索引微块缓存优先级。 +- name: location_cache_refresh_sql_timeout + require: false + type: TIME + default: 1s + min_value: 1ms + max_value: NULL + section: LOCATION_CACHE + need_restart: false + need_redeploy: false + description_en: 'The timeout used for refreshing location cache by SQL. Range: [1ms, +∞)' + description_local: 通过 SQL 刷新 location_cache 的超时时间。 +- name: location_cache_refresh_rpc_timeout + require: false + type: TIME + default: 500ms + min_value: 1ms + max_value: NULL + section: LOCATION_CACHE + need_restart: false + need_redeploy: false + description_en: 'The timeout used for refreshing location cache by RPC. Range: [1ms, +∞)' + description_local: 通过 RPC 刷新 location_cache 的超时时间。 +- name: tablet_meta_table_check_interval + require: false + type: TIME + default: 30m + min_value: 1m + max_value: NULL + section: ROOT_SERVICE + need_restart: false + need_redeploy: false + description_en: 'the time interval that observer compares tablet meta table with local ls replica info and make adjustments to ensure the correctness of tablet meta table. Range: [1m,+∞)' + description_local: DBA_OB_TABLET_REPLICAS/CDB_OB_TABLET_REPLICAS 视图的后台巡检线程的检查间隔。 +- name: ls_meta_table_check_interval + require: false + type: TIME + default: 1s + min_value: 1ms + max_value: NULL + section: ROOT_SERVICE + need_restart: false + need_redeploy: false + description_en: 'the time interval that observer compares ls meta table with local ls replica info and make adjustments to ensure the correctness of ls meta table. Range: [1ms,+∞)' + description_local: DBA_OB_LS_LOCATIONS/CDB_OB_LS_LOCATIONS 视图的后台巡检线程的检查间隔。 +- name: tablet_meta_table_scan_batch_count + require: false + type: INT + default: 999 + min_value: 1 + max_value: 65536 + section: ROOT_SERVICE + need_restart: false + need_redeploy: false + description_en: the number of tablet replica info that will be read by each request on the tablet-related system tables during procedures such as load-balancing, daily merge, election and etc. Range:(0,65536] + description_local: Tablet meta table 迭代器使用过程中在内存里缓存的 Tablet 数量。 +- name: rdma_io_thread_count + require: false + type: INT + default: 0 + min_value: 0 + max_value: 8 + section: OBSERVER + need_restart: true + need_redeploy: false + description_en: 'the number of RDMA I/O threads for Libeasy. Range: [0, 8] in integer, 0 stands for RDMA being disabled.' + description_local: Libeasy 的 RDMA I/O 线程数。 +- name: production_mode + require: true + type: BOOL + default: true + min_value: NULL + max_value: NULL + section: OBSERVER + need_restart: false + need_redeploy: false + description_en: Production mode switch, on by default.
When enabled, the lower bounds of memory_limit and __min_full_resource_pool_memory are adjusted to 16G and 2147483648 respectively. + description_local: 生产模式开关, 默认开启。开启后,memory_limit 和 __min_full_resource_pool_memory 的下界分别调整为 16G 和 2147483648 +- name: ocp_meta_tenant + require: false + type: DICT + default: + tenant_name: ocp + max_cpu: 1 + memory_size: 2147483648 + need_redeploy: true + description_en: The tenant specifications for ocp meta db + description_local: ocp express的元数据库使用的租户规格 +- name: ocp_meta_tenant_max_cpu + name_local: OCP express元数据库租户的CPU数 + essential: true + require: false + type: INT + default: 1 + need_redeploy: true + description_en: The tenant cpu count for ocp meta db + description_local: ocp express的元数据库使用的CPU数量 +- name: ocp_meta_tenant_memory_size + name_local: OCP express元数据库租户内存 + essential: true + require: false + type: CAPACITY_MB + default: 2G + need_redeploy: true + description_en: The tenant memory size for ocp meta db + description_local: ocp express的元数据库使用的租户内存大小 +- name: ocp_meta_tenant_log_disk_size + name_local: OCP express元数据库租户日志磁盘大小 + essential: true + require: false + type: CAPACITY_MB + default: 6656M + need_redeploy: true + description_en: The tenant log disk size for ocp meta db + description_local: ocp express的元数据库使用的租户日志磁盘大小 +- name: ocp_meta_db + require: false + type: SAFE_STRING + default: ocp_express + need_redeploy: true + description_en: The database name for ocp meta db + description_local: ocp express的元数据库使用的数据库名 +- name: ocp_meta_username + require: false + type: SAFE_STRING + default: meta + need_redeploy: true + description_en: The user name for ocp meta db + description_local: ocp express的元数据库使用的用户名 +- name: ocp_meta_password + require: false + type: STRING + default: oceanbase + need_redeploy: true + description_en: The password for ocp meta db + description_local: ocp express的元数据库使用的密码 +- name: ocp_agent_monitor_password + require: false + type: STRING + default: '' + need_redeploy: true + description_en: The password for obagent monitor user + description_local: obagent 监控用户的密码 +- name: scenario + require: true + type: STRING + default: 'express_oltp' + need_restart: true + description_en: The currently defined five typical production environment scenarios, such as express_oltp, complex_oltp, olap, htap, kv. + description_local: 当前定义5种典型生产环境的场景:如express_oltp, complex_oltp, olap, htap, kv.
+- name: ocp_monitor_tenant + require: false + type: DICT + default: + tenant_name: ocp_monitor + max_cpu: 1 + memory_size: 2147483648 + need_redeploy: true + description_en: The tenant specifications for ocp monitor db + description_local: ocp 的监控数据库使用的租户定义 +- name: ocp_monitor_tenant_max_cpu + name_local: OCP 监控数据库租户的CPU数 + essential: true + require: false + type: INT + default: 1 + need_redeploy: true + description_en: The tenant cpu count for ocp monitor db + description_local: ocp 监控数据库使用的CPU数量 +- name: ocp_monitor_tenant_memory_size + name_local: OCP 监控数据库租户内存 + essential: true + require: false + type: CAPACITY_MB + default: 2G + need_redeploy: true + description_en: The tenant memory size for ocp monitor db + description_local: ocp 监控数据库使用的租户内存大小 +- name: ocp_monitor_tenant_log_disk_size + name_local: OCP 监控数据库租户日志磁盘大小 + essential: true + require: false + type: CAPACITY_MB + default: 6656M + need_redeploy: true + description_en: The tenant log disk size for ocp monitor db + description_local: ocp 监控数据库使用的租户日志磁盘大小 +- name: ocp_monitor_db + require: false + type: SAFE_STRING + default: ocp_monitor + need_redeploy: true + description_en: The database name for ocp monitor db + description_local: ocp 的监控数据库使用的数据库名 +- name: ocp_monitor_username + require: false + type: SAFE_STRING + default: monitor + need_redeploy: true + description_en: The user name for ocp monitor db + description_local: ocp 的监控数据库使用的用户名 +- name: ocp_monitor_password + require: false + type: STRING + default: oceanbase + need_redeploy: true + description_en: The password for ocp monitor db + description_local: ocp 的监控数据库使用的密码 \ No newline at end of file diff --git a/plugins/oceanbase/4.3.0.0/start.py b/plugins/oceanbase/4.3.0.0/start.py new file mode 100644 index 0000000..c26a9eb --- /dev/null +++ b/plugins/oceanbase/4.3.0.0/start.py @@ -0,0 +1,320 @@ +# coding: utf-8 +# OceanBase Deploy. +# Copyright (C) 2021 OceanBase +# +# This file is part of OceanBase Deploy. +# +# OceanBase Deploy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# OceanBase Deploy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>. + + +from __future__ import absolute_import, division, print_function + +import json +import os.path +import time +import requests +from copy import deepcopy + +from _errno import EC_OBSERVER_FAIL_TO_START, EC_OBSERVER_FAIL_TO_START_WITH_ERR, EC_OBSERVER_FAILED_TO_REGISTER, \ + EC_OBSERVER_FAILED_TO_REGISTER_WITH_DETAILS, EC_OBSERVER_FAIL_TO_START_OCS + +from collections import OrderedDict + +from tool import NetUtil, ConfigUtil, FileUtil + + +def config_url(ocp_config_server, appname, cid): + if ocp_config_server[-1] == '?': + link_char = '' + elif ocp_config_server.find('?') == -1: + link_char = '?'
+ else: + link_char = '&' + cfg_url = '%s%sAction=ObRootServiceInfo&ObCluster=%s' % (ocp_config_server, link_char, appname) + proxy_cfg_url = '%s%sAction=GetObProxyConfig&ObRegionGroup=%s' % (ocp_config_server, link_char, appname) + # Command that clears the URL content for the cluster + cleanup_config_url_content = '%s%sAction=DeleteObRootServiceInfoByClusterName&ClusterName=%s' % (ocp_config_server, link_char, appname) + # Command that register the cluster information to the Config URL + register_to_config_url = '%s%sAction=ObRootServiceRegister&ObCluster=%s&ObClusterId=%s' % (ocp_config_server, link_char, appname, cid) + return cfg_url, cleanup_config_url_content, register_to_config_url + + +def init_config_server(ocp_config_server, appname, cid, force_delete, stdio): + def post(url): + stdio.verbose('post %s' % url) + response = requests.post(url) + if response.status_code != 200: + raise Exception('%s status code %s' % (url, response.status_code)) + return json.loads(response.text)['Code'] + + cfg_url, cleanup_config_url_content, register_to_config_url = config_url(ocp_config_server, appname, cid) + ret = post(register_to_config_url) + if ret != 200: + if not force_delete: + raise Exception('%s may have been registered in %s' % (appname, ocp_config_server)) + ret = post(cleanup_config_url_content) + if ret != 200: + raise Exception('failed to clean up the config url content, return code %s' % ret) + if post(register_to_config_url) != 200: + return False + return cfg_url + + +class EnvVariables(object): + + def __init__(self, environments, client): + self.environments = environments + self.client = client + self.env_done = {} + + def __enter__(self): + for env_key, env_value in self.environments.items(): + self.env_done[env_key] = self.client.get_env(env_key) + self.client.add_env(env_key, env_value, True) + + def __exit__(self, *args, **kwargs): + for env_key, env_value in self.env_done.items(): + if env_value is not None: + self.client.add_env(env_key, env_value, True) + else: + self.client.del_env(env_key) + + +def start(plugin_context, start_obshell=True, *args, **kwargs): + cluster_config = plugin_context.cluster_config + options = plugin_context.options + clients = plugin_context.clients + repositories = plugin_context.repositories + stdio = plugin_context.stdio + clusters_cmd = {} + root_servers = {} + global_config = cluster_config.get_global_conf() + appname = global_config['appname'] if 'appname' in global_config else None + cluster_id = global_config['cluster_id'] if 'cluster_id' in global_config else None + obconfig_url = global_config['obconfig_url'] if 'obconfig_url' in global_config else None + cfg_url = '' + if obconfig_url: + if not appname or not cluster_id: + stdio.error('need appname and cluster_id') + return + try: + cfg_url = init_config_server(obconfig_url, appname, cluster_id, getattr(options, 'force_delete', False), stdio) + if not cfg_url: + stdio.warn(EC_OBSERVER_FAILED_TO_REGISTER_WITH_DETAILS.format(appname, obconfig_url)) + except: + stdio.warn(EC_OBSERVER_FAILED_TO_REGISTER.format()) + elif 'ob-configserver' in cluster_config.depends and appname: + obc_cluster_config = cluster_config.get_depend_config('ob-configserver') + vip_address = obc_cluster_config.get('vip_address') + if vip_address: + obc_ip = vip_address + obc_port = obc_cluster_config.get('vip_port') + else: + server = cluster_config.get_depend_servers('ob-configserver')[0] + client = clients[server] + obc_ip = NetUtil.get_host_ip() if client.is_localhost() else server.ip + obc_port = 
obc_cluster_config.get('listen_port') + cfg_url = "http://{0}:{1}/services?Action=ObRootServiceInfo&ObCluster={2}".format(obc_ip, obc_port, appname) + + if cluster_config.added_servers: + scale_out = True + need_bootstrap = False + else: + scale_out = False + need_bootstrap = True + stdio.start_loading('Start observer') + for server in cluster_config.original_servers: + config = cluster_config.get_server_conf(server) + zone = config['zone'] + if zone not in root_servers: + root_servers[zone] = '%s:%s:%s' % (server.ip, config['rpc_port'], config['mysql_port']) + rs_list_opt = '-r \'%s\'' % ';'.join([root_servers[zone] for zone in root_servers]) + + path = '' + for repository in repositories: + if repository.name == cluster_config.name: + path = repository.repository_dir + break + optimize_data = {} + default_parameters_json = f'{path}/etc/default_parameter.json' + if os.path.exists(default_parameters_json): + with FileUtil.open(default_parameters_json, 'rb') as f: + optimize_data = json.load(f) + else: + stdio.verbose('default_parameter.json not found, no optimize ') + + def _optimize(): + for default_parameters in optimize_data: + if default_parameters['scenario'] == scenario: + if 'parameters' in default_parameters: + for parameter in default_parameters['parameters']['cluster']: + stdio.verbose('update %s to %s because of scenario' % (parameter['name'], parameter['value'])) + if not parameter['name'] in server_config: + server_config[parameter['name']] = parameter['value'] + + for server in cluster_config.servers: + client = clients[server] + server_config = cluster_config.get_server_conf(server) + home_path = server_config['home_path'] + scenario = server_config['scenario'] + + if not server_config.get('data_dir'): + server_config['data_dir'] = '%s/store' % home_path + + if not server_config.get('local_ip') and not server_config.get('devname'): + server_config['local_ip'] = server.ip + + if client.execute_command('ls %s/clog/tenant_1/' % server_config['data_dir']).stdout.strip(): + need_bootstrap = False + + remote_pid_path = '%s/run/observer.pid' % home_path + remote_pid = client.execute_command('cat %s' % remote_pid_path).stdout.strip() + if remote_pid: + if client.execute_command('ls /proc/%s' % remote_pid): + continue + + stdio.verbose('%s start command construction' % server) + if getattr(options, 'without_parameter', False) and client.execute_command('ls %s/etc/observer.config.bin' % home_path): + use_parameter = False + else: + use_parameter = True + cmd = [] + if use_parameter: + not_opt_str = OrderedDict({ + 'mysql_port': '-p', + 'rpc_port': '-P', + 'zone': '-z', + 'nodaemon': '-N', + 'appname': '-n', + 'cluster_id': '-c', + 'data_dir': '-d', + 'devname': '-i', + 'syslog_level': '-l', + 'ipv6': '-6', + 'mode': '-m', + 'scn': '-f', + 'local_ip': '-I' + }) + not_cmd_opt = [ + 'home_path', 'obconfig_url', 'root_password', 'proxyro_password', 'scenario', + 'redo_dir', 'clog_dir', 'ilog_dir', 'slog_dir', '$_zone_idc', 'production_mode', + 'ocp_monitor_tenant', 'ocp_monitor_username', 'ocp_monitor_password', 'ocp_monitor_db', + 'ocp_meta_tenant', 'ocp_meta_username', 'ocp_meta_password', 'ocp_meta_db', 'ocp_agent_monitor_password', 'ocp_root_password', 'obshell_port' + ] + optimize_scenario = ['express_oltp', 'complex_oltp', 'olap', 'htap', 'kv'] + if scenario in optimize_scenario: + _optimize() + get_value = lambda key: "'%s'" % server_config[key] if isinstance(server_config[key], str) else server_config[key] + opt_str = [] + for key in server_config: + if key not in not_cmd_opt and 
key not in not_opt_str and not key.startswith('ocp_meta_tenant_'): + value = get_value(key) + opt_str.append('%s=%s' % (key, value)) + if cfg_url: + opt_str.append('obconfig_url=\'%s\'' % cfg_url) + else: + cmd.append(rs_list_opt) + for key in not_opt_str: + if key in server_config: + value = get_value(key) + cmd.append('%s %s' % (not_opt_str[key], value)) + cmd.append('-o %s' % ','.join(opt_str)) + else: + cmd.append('-p %s' % server_config['mysql_port']) + + clusters_cmd[server] = 'cd %s; %s/bin/observer %s' % (home_path, home_path, ' '.join(cmd)) + for server in clusters_cmd: + environments = deepcopy(cluster_config.get_environments()) + client = clients[server] + server_config = cluster_config.get_server_conf(server) + stdio.verbose('starting %s observer', server) + if 'LD_LIBRARY_PATH' not in environments: + environments['LD_LIBRARY_PATH'] = '%s/lib:' % server_config['home_path'] + with EnvVariables(environments, client): + ret = client.execute_command(clusters_cmd[server]) + if not ret: + stdio.stop_loading('fail') + stdio.error(EC_OBSERVER_FAIL_TO_START_WITH_ERR.format(server=server, stderr=ret.stderr)) + return + stdio.stop_loading('succeed') + + start_obshell = start_obshell and not need_bootstrap and not scale_out + stdio.verbose('start_obshell: %s' % start_obshell) + if start_obshell: + for server in cluster_config.servers: + client = clients[server] + server_config = cluster_config.get_server_conf(server) + home_path = server_config['home_path'] + obshell_pid_path = '%s/run/obshell.pid' % home_path + obshell_pid = client.execute_command('cat %s' % obshell_pid_path).stdout.strip() + if obshell_pid and client.execute_command('ls /proc/%s' % obshell_pid): + stdio.verbose('%s obshell[pid: %s] started', server, obshell_pid) + else: + # start obshell + server_config = cluster_config.get_server_conf(server) + password = server_config.get('root_password', '') + client.add_env('OB_ROOT_PASSWORD', password if client._is_local else ConfigUtil.passwd_format(password)) + cmd = 'cd %s; %s/bin/obshell admin start --ip %s --port %s' % (server_config['home_path'], server_config['home_path'], server.ip, server_config['obshell_port']) + stdio.verbose('start obshell: %s' % cmd) + if not client.execute_command(cmd): + stdio.error('%s obshell failed', server) + return + + if not scale_out: + stdio.start_loading('observer program health check') + time.sleep(3) + failed = [] + for server in cluster_config.servers: + client = clients[server] + server_config = cluster_config.get_server_conf(server) + home_path = server_config['home_path'] + remote_pid_path = '%s/run/observer.pid' % home_path + stdio.verbose('%s program health check' % server) + remote_pid = client.execute_command('cat %s' % remote_pid_path).stdout.strip() + if remote_pid and client.execute_command('ls /proc/%s' % remote_pid): + stdio.verbose('%s observer[pid: %s] started', server, remote_pid) + else: + failed.append(EC_OBSERVER_FAIL_TO_START.format(server=server)) + if failed: + stdio.stop_loading('fail') + for msg in failed: + stdio.warn(msg) + return plugin_context.return_false() + else: + stdio.stop_loading('succeed') + + if start_obshell: + # check obshell health + failed = [] + stdio.start_loading('obshell program health check') + for server in cluster_config.servers: + client = clients[server] + server_config = cluster_config.get_server_conf(server) + home_path = server_config['home_path'] + obshell_pid_path = '%s/run/obshell.pid' % home_path + obshell_pid = client.execute_command('cat %s' % obshell_pid_path).stdout.strip() + if 
obshell_pid and client.execute_command('ls /proc/%s' % obshell_pid): + stdio.verbose('%s obshell[pid: %s] started', server, obshell_pid) + else: + failed.append(EC_OBSERVER_FAIL_TO_START_OCS.format(server=server)) # TODO: add an obshell-specific error? + if failed: + stdio.stop_loading('fail') + for msg in failed: + stdio.warn(msg) + return plugin_context.return_false() + else: + stdio.stop_loading('succeed') + + stdio.verbose('need_bootstrap: %s' % need_bootstrap) + return plugin_context.return_true(need_bootstrap=need_bootstrap) diff --git a/plugins/oceanbase/4.3.0.0/tenant_optimize.py b/plugins/oceanbase/4.3.0.0/tenant_optimize.py new file mode 100644 index 0000000..23c110e --- /dev/null +++ b/plugins/oceanbase/4.3.0.0/tenant_optimize.py @@ -0,0 +1,72 @@ +# coding: utf-8 +# OceanBase Deploy. +# Copyright (C) 2021 OceanBase +# +# This file is part of OceanBase Deploy. +# +# OceanBase Deploy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# OceanBase Deploy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>. + +import json +import os + +from tool import FileUtil + + +def tenant_optimize(plugin_context, tenant_cursor=None, *args, **kwargs): + cluster_config = plugin_context.cluster_config + clients = plugin_context.clients + stdio = plugin_context.stdio + repositories = plugin_context.repositories + tenant_cursor = plugin_context.get_return('create_tenant').get_return('tenant_cursor') if not tenant_cursor else tenant_cursor + + def _optimize(json_files): + for file in json_files: + if os.path.exists(file): + with FileUtil.open(file, 'rb') as f: + data = json.load(f) + for _ in data: + if _['scenario'] == scenario: + if 'variables' in _: + for tenant_system_variable in _['variables']['tenant']: + sql = f"SET GLOBAL {tenant_system_variable['name']} = {tenant_system_variable['value']};" + for cursor in tenant_cursor: + cursor.execute(sql) + if 'parameters' in _: + for tenant_default_parameter in _['parameters']['tenant']: + sql = f"ALTER SYSTEM SET {tenant_default_parameter['name']} = '{tenant_default_parameter['value']}';" + for cursor in tenant_cursor: + cursor.execute(sql) + return True + + if not tenant_cursor: + stdio.error('tenant cursor is None') + return plugin_context.return_false() + + path = '' + for repository in repositories: + if repository.name == cluster_config.name: + path = repository.repository_dir + break + + for server in cluster_config.servers: + client = clients[server] + server_config = cluster_config.get_server_conf(server) + scenario = server_config['scenario'] + system_variable_json = f'{path}/etc/default_system_variable.json' + default_parameters_json = f'{path}/etc/default_parameter.json' + + stdio.start_loading(f'optimize tenant with scenario: {scenario}') + if _optimize([system_variable_json, default_parameters_json]): + stdio.stop_loading('succeed') + return plugin_context.return_true() diff --git a/plugins/ocp-express/1.0.1/bootstrap.py b/plugins/ocp-express/1.0.1/bootstrap.py index 5066e2d..30fb259 100644 --- a/plugins/ocp-express/1.0.1/bootstrap.py +++ b/plugins/ocp-express/1.0.1/bootstrap.py @@
-23,7 +23,9 @@ import os -def bootstrap(plugin_context, cursor = None, start_env=None, *args, **kwargs): +def bootstrap(plugin_context, start_env=None, *args, **kwargs): + if not start_env: + raise Exception("start env is needed") clients = plugin_context.clients for server in start_env: client = clients[server] diff --git a/plugins/ocp-express/1.0.1/start.py b/plugins/ocp-express/1.0.1/start.py index 71be7f8..034a159 100644 --- a/plugins/ocp-express/1.0.1/start.py +++ b/plugins/ocp-express/1.0.1/start.py @@ -36,7 +36,8 @@ from Crypto.Signature import PKCS1_v1_5 as PKCS1_signature from Crypto.Cipher import PKCS1_OAEP as PKCS1_cipher -from _errno import EC_FAIL_TO_CONNECT, EC_SQL_EXECUTE_FAILED +from _errno import EC_SQL_EXECUTE_FAILED +from _types import Capacity, CapacityWithB PRI_KEY_FILE = '.ocp-express' @@ -50,35 +51,6 @@ from _stdio import SafeStdio -def parse_size(size): - _bytes = 0 - if not isinstance(size, str) or size.isdigit(): - _bytes = int(size) - else: - units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40} - match = re.match(r'(0|[1-9][0-9]*)\s*([B,K,M,G,T])', size.upper()) - _bytes = int(match.group(1)) * units[match.group(2)] - return _bytes - - -def format_size(size, precision=1): - units = ['B', 'K', 'M', 'G'] - units_num = len(units) - 1 - idx = 0 - if precision: - div = 1024.0 - format = '%.' + str(precision) + 'f%s' - limit = 1024 - else: - div = 1024 - limit = 1024 - format = '%d%s' - while idx < units_num and size >= limit: - size /= div - idx += 1 - return format % (size, units[idx]) - - class Cursor(SafeStdio): def __init__(self, ip, port, user='root', tenant='sys', password='', database=None, stdio=None): @@ -316,6 +288,7 @@ def start(plugin_context, start_env=None, *args, **kwargs): options = plugin_context.options clients = plugin_context.clients stdio = plugin_context.stdio + added_components = cluster_config.get_deploy_added_components() if not start_env: start_env = prepare_parameters(cluster_config, stdio) @@ -380,12 +353,16 @@ def start(plugin_context, start_env=None, *args, **kwargs): ob_cursor = Cursor(ip=server_ip, port=server_port, user=jdbc_username, password=jdbc_password, database=database, stdio=stdio) jdbc_url = 'jdbc:oceanbase://{}:{}/{}'.format(server_ip, server_port, database) connected = True - if not bootstrap_flag: - if not ob_cursor.execute("update config_properties set `value`=NULL, default_value=NULL where `key`='ocp.version' or `key`='ocp.version.full'"): - stdio.warn("failed to update 'ocp.version' and 'ocp.version.full' to NULL in config_properties table") - if not ob_cursor.execute("update user set need_change_password=true where id='100' "): - stdio.warn("failed to update 'need_change_password' to true in user table") - + if 'ocp_express' in added_components: + if ob_cursor.execute("select * from config_properties limit 1", exc_level='verbose'): + if not ob_cursor.execute("update config_properties set `value`=NULL, default_value=NULL where `key`='ocp.version' or `key`='ocp.version.full'", exc_level='verbose'): + stdio.verbose("failed to update 'ocp.version' and 'ocp.version.full' to NULL in config_properties table") + if ob_cursor.execute("select * from iam_user limit 1", exc_level='verbose'): + if not ob_cursor.execute("update iam_user set need_change_password=true where id='100'", exc_level='verbose'): + stdio.verbose("failed to update 'need_change_password' to true in iam_user table") + if ob_cursor.execute("select * from user limit 1", exc_level='verbose'): + if not ob_cursor.execute("update user set 
need_change_password=true where id='100'", exc_level='verbose'): + stdio.verbose("failed to update 'need_change_password' to true in user table") break except: time.sleep(1) @@ -401,8 +378,9 @@ def start(plugin_context, start_env=None, *args, **kwargs): else: public_key_str = "" memory_size = server_config['memory_size'] - jvm_memory_option = "-Xms{0} -Xmx{0}".format(format_size(parse_size(memory_size) * 0.5, 0).lower()) + jvm_memory_option = "-Xms{0} -Xmx{0}".format(str(Capacity(Capacity(memory_size).btyes * 0.5, 0)).lower()) java_bin = server_config['java_bin'] + client.add_env('PATH', '%s/jre/bin:' % server_config['home_path']) cmd = '{java_bin} -jar {jvm_memory_option} -DJDBC_URL={jdbc_url} -DJDBC_USERNAME={jdbc_username}' \ ' -DPUBLIC_KEY={public_key} {home_path}/lib/ocp-express-server.jar --port={port}'.format( java_bin=java_bin, @@ -422,6 +400,9 @@ def start(plugin_context, start_env=None, *args, **kwargs): cmd += ' --bootstrap --progress-log={}'.format(os.path.join(log_dir, 'bootstrap.log')) for key in server_config: if key not in exclude_keys and key in config_mapper: + if key == 'logging_file_total_size_cap': + cmd += ' --with-property=ocp.logging.file.total.size.cap:{}'.format(CapacityWithB(server_config[key])) + continue cmd += ' --with-property={}:{}'.format(config_mapper[key], server_config[key]) elif not bootstrap_flag: cmd += ' --bootstrap --progress-log={}'.format(os.path.join(log_dir, 'bootstrap.log')) @@ -460,7 +441,7 @@ def start(plugin_context, start_env=None, *args, **kwargs): stdio.start_loading("ocp-express program health check") failed = [] servers = cluster_config.servers - count = 200 + count = 300 while servers and count: count -= 1 tmp_servers = [] @@ -500,6 +481,7 @@ def start(plugin_context, start_env=None, *args, **kwargs): return plugin_context.return_false() else: stdio.stop_loading('succeed') + plugin_context.set_variable('start_env', start_env) plugin_context.return_true(need_bootstrap=True) return False diff --git a/plugins/ocp-express/1.0.1/start_check.py b/plugins/ocp-express/1.0.1/start_check.py index 8c50a52..9854d3c 100644 --- a/plugins/ocp-express/1.0.1/start_check.py +++ b/plugins/ocp-express/1.0.1/start_check.py @@ -26,6 +26,7 @@ from copy import deepcopy from _rpm import Version import _errno as err +from _types import Capacity success = True @@ -53,35 +54,6 @@ def password_check(passwd): return True if re.match(pattern, passwd) else False -def parse_size(size): - _bytes = 0 - if not isinstance(size, str) or size.isdigit(): - _bytes = int(size) - else: - units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40} - match = re.match(r'(0|[1-9][0-9]*)\s*([B,K,M,G,T])', size.upper()) - _bytes = int(match.group(1)) * units[match.group(2)] - return _bytes - - -def format_size(size, precision=1): - units = ['B', 'K', 'M', 'G'] - units_num = len(units) - 1 - idx = 0 - if precision: - div = 1024.0 - format = '%.' 
+ str(precision) + 'f%s' - limit = 1024 - else: - div = 1024 - limit = 1024 - format = '%d%s' - while idx < units_num and size >= limit: - size /= div - idx += 1 - return format % (size, units[idx]) - - def get_mount_path(disk, _path): _mount_path = '/' for p in disk: @@ -234,7 +206,8 @@ def prepare_parameters(cluster_config, stdio): return env -def start_check(plugin_context, init_check_status=False, work_dir_check=False, work_dir_empty_check=True, strict_check=False, precheck=False, *args, **kwargs): +def start_check(plugin_context, init_check_status=False, work_dir_check=False, work_dir_empty_check=True, strict_check=False, precheck=False, + java_check=True, *args, **kwargs): def check_pass(item): status = check_status[server] if status[item].status == err.CheckStatus.WAIT: @@ -423,23 +396,25 @@ def critical(item, error, suggests=[]): check_pass('port') # java version check - for server in cluster_config.servers: - client = clients[server] - server_config = env[server] - java_bin = server_config['java_bin'] - ret = client.execute_command('{} -version'.format(java_bin)) - if not ret: - critical('java', err.EC_OCP_EXPRESS_JAVA_NOT_FOUND.format(server=server), [err.SUG_OCP_EXPRESS_INSTALL_JAVA_WITH_VERSION.format(version='1.8.0')]) - continue - version_pattern = r'version\s+\"(\d+\.\d+.\d+)' - found = re.search(version_pattern, ret.stdout) or re.search(version_pattern, ret.stderr) - if not found: - error('java', err.EC_OCP_EXPRESS_JAVA_VERSION_ERROR.format(server=server, version='1.8.0'), [err.SUG_OCP_EXPRESS_INSTALL_JAVA_WITH_VERSION.format(version='1.8.0'),]) - continue - java_major_version = found.group(1) - if Version(java_major_version) != Version('1.8.0'): - critical('java', err.EC_OCP_EXPRESS_JAVA_VERSION_ERROR.format(server=server, version='1.8.0'), [err.SUG_OCP_EXPRESS_INSTALL_JAVA_WITH_VERSION.format(version='1.8.0'),]) - continue + if java_check: + for server in cluster_config.servers: + client = clients[server] + server_config = env[server] + java_bin = server_config['java_bin'] + client.add_env('PATH', '%s/jre/bin:' % server_config['home_path']) + ret = client.execute_command('{} -version'.format(java_bin)) + if not ret: + critical('java', err.EC_OCP_EXPRESS_JAVA_NOT_FOUND.format(server=server), [err.SUG_OCP_EXPRESS_INSTALL_JAVA_WITH_VERSION.format(version='1.8.0')]) + continue + version_pattern = r'version\s+\"(\d+\.\d+.\d+)' + found = re.search(version_pattern, ret.stdout) or re.search(version_pattern, ret.stderr) + if not found: + error('java', err.EC_OCP_EXPRESS_JAVA_VERSION_ERROR.format(server=server, version='1.8.0'), [err.SUG_OCP_EXPRESS_INSTALL_JAVA_WITH_VERSION.format(version='1.8.0'),]) + continue + java_major_version = found.group(1) + if Version(java_major_version) != Version('1.8.0'): + critical('java', err.EC_OCP_EXPRESS_JAVA_VERSION_ERROR.format(server=server, version='1.8.0'), [err.SUG_OCP_EXPRESS_INSTALL_JAVA_WITH_VERSION.format(version='1.8.0'),]) + continue servers_memory = {} servers_disk = {} @@ -449,12 +424,12 @@ def critical(item, error, suggests=[]): for server in cluster_config.servers: client = clients[server] server_config = env[server] - memory_size = parse_size(server_config['memory_size']) + memory_size = Capacity(server_config['memory_size']).btyes if server_config.get('log_dir'): log_dir = server_config['log_dir'] else: log_dir = os.path.join(server_config['home_path'], 'log') - need_size = parse_size(server_config['logging_file_total_size_cap']) + need_size = Capacity(server_config['logging_file_total_size_cap']).btyes ip = server.ip if ip not 
in servers_client: servers_client[ip] = client @@ -496,17 +471,17 @@ def critical(item, error, suggests=[]): for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout): if k in memory_key_map: key = memory_key_map[k] - server_memory_stats[key] = parse_size(str(v)) + server_memory_stats[key] = Capacity(str(v)).btyes mem_suggests = [err.SUG_OCP_EXPRESS_REDUCE_MEM.format()] if memory_needed * 0.5 > server_memory_stats['available']: for server in ip_servers[ip]: - error('mem', err.EC_OCP_EXPRESS_NOT_ENOUGH_MEMORY_AVAILABLE.format(ip=ip, available=format_size(server_memory_stats['available']), need=format_size(memory_needed)), suggests=mem_suggests) + error('mem', err.EC_OCP_EXPRESS_NOT_ENOUGH_MEMORY_AVAILABLE.format(ip=ip, available=Capacity(server_memory_stats['available']), need=Capacity(memory_needed)), suggests=mem_suggests) elif memory_needed > server_memory_stats['free'] + server_memory_stats['buffers'] + server_memory_stats['cached']: for server in ip_servers[ip]: - error('mem', err.EC_OCP_EXPRESS_NOT_ENOUGH_MEMORY_CACHED.format(ip=ip, free=format_size(server_memory_stats['free']), cached=format_size(server_memory_stats['buffers'] + server_memory_stats['cached']), need=format_size(memory_needed)), suggests=mem_suggests) + error('mem', err.EC_OCP_EXPRESS_NOT_ENOUGH_MEMORY_CACHED.format(ip=ip, free=Capacity(server_memory_stats['free']), cached=Capacity(server_memory_stats['buffers'] + server_memory_stats['cached']), need=Capacity(memory_needed)), suggests=mem_suggests) elif memory_needed > server_memory_stats['free']: for server in ip_servers[ip]: - alert('mem', err.EC_OCP_EXPRESS_NOT_ENOUGH_MEMORY.format(ip=ip, free=format_size(server_memory_stats['free']), need=format_size(memory_needed)), suggests=mem_suggests) + alert('mem', err.EC_OCP_EXPRESS_NOT_ENOUGH_MEMORY.format(ip=ip, free=Capacity(server_memory_stats['free']), need=Capacity(memory_needed)), suggests=mem_suggests) # disk check for ip in servers_disk: client = servers_client[ip] @@ -517,7 +492,7 @@ def critical(item, error, suggests=[]): mount_path = get_mount_path(disk_info, path) if disk_needed > disk_info[mount_path]['avail']: for server in ip_servers[ip]: - error('disk', err.EC_OCP_EXPRESS_NOT_ENOUGH_DISK.format(ip=ip, disk=mount_path, need=format_size(disk_needed), avail=format_size(disk_info[mount_path]['avail'])), suggests=[err.SUG_OCP_EXPRESS_REDUCE_DISK.format()]) + error('disk', err.EC_OCP_EXPRESS_NOT_ENOUGH_DISK.format(ip=ip, disk=mount_path, need=Capacity(disk_needed), avail=Capacity(disk_info[mount_path]['avail'])), suggests=[err.SUG_OCP_EXPRESS_REDUCE_DISK.format()]) else: stdio.warn(err.WC_OCP_EXPRESS_FAILED_TO_GET_DISK_INFO.format(ip)) @@ -526,7 +501,7 @@ def critical(item, error, suggests=[]): server_config = env[server] admin_passwd = server_config.get('admin_passwd') if not admin_passwd or not password_check(admin_passwd): - error('admin_passwd', err.EC_OCP_EXPRESS_ADMIN_PASSWD_ERROR.format(ip=server.ip, current=admin_passwd), suggests=[err.SUG_OCP_EXPRESS_EDIT_ADMIN_PASSWD_ERROR.format()]) + error('admin_passwd', err.EC_COMPONENT_PASSWD_ERROR.format(ip=server.ip, component='ocp-express', key='admin_passwd', rule='Must be 8 to 32 characters in length, containing at least 3 types from digits, lowercase letters, uppercase letters and the following special characters: ~!@#%^&*_-+=|(){{}}[]:;,.?/'), suggests=[err.SUG_OCP_EXPRESS_EDIT_ADMIN_PASSWD.format()]) plugin_context.set_variable('start_env', env) diff --git a/plugins/ocp-express/1.0/destroy.py b/plugins/ocp-express/1.0/destroy.py index ffd77ef..9ad83d2 
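The start_check memory probe above collects /proc/meminfo-style output and converts each value with Capacity. A self-contained sketch of that parsing; memory_key_map is assumed from context (it is defined elsewhere in start_check.py), and to_bytes is a minimal stand-in for Capacity(...).btyes from _types, which this patch does not show:

```python
import re

MEMINFO = """MemTotal:       16316412 kB
MemFree:         1223384 kB
MemAvailable:    8923412 kB
Buffers:          734272 kB
Cached:          6347120 kB"""

UNITS = {"B": 1, "K": 1 << 10, "M": 1 << 20, "G": 1 << 30, "T": 1 << 40}

def to_bytes(size):
    # stand-in for Capacity(size).btyes
    match = re.match(r'(0|[1-9][0-9]*)\s*([BKMGT])', size.upper())
    return int(match.group(1)) * UNITS[match.group(2)]

# assumed mapping, mirroring the keys the check reads
memory_key_map = {'MemTotal': 'total', 'MemFree': 'free', 'MemAvailable': 'available',
                  'Buffers': 'buffers', 'Cached': 'cached'}

server_memory_stats = {}
for k, v in re.findall(r'(\w+)\s*:\s*(\d+\s*\w+)', MEMINFO):  # same pattern as the plugin
    if k in memory_key_map:
        server_memory_stats[memory_key_map[k]] = to_bytes(str(v))
print(server_memory_stats)
```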
100644 --- a/plugins/ocp-express/1.0/destroy.py +++ b/plugins/ocp-express/1.0/destroy.py @@ -20,12 +20,81 @@ from __future__ import absolute_import, division, print_function +import re +from copy import deepcopy + import _errno as err +from tool import Cursor global_ret = True + +def get_missing_required_parameters(parameters): + results = [] + for key in ["jdbc_url", "jdbc_password", "jdbc_username"]: + if parameters.get(key) is None: + results.append(key) + return results + + +def prepare_parameters(cluster_config, stdio): + # depends config + env = {} + depend_observer = False + depend_info = {} + ob_servers_conf = {} + root_servers = [] + for comp in ["oceanbase", "oceanbase-ce"]: + ob_zones = {} + if comp in cluster_config.depends: + depend_observer = True + observer_globals = cluster_config.get_depend_config(comp) + ocp_meta_keys = [ + "ocp_meta_tenant", "ocp_meta_db", "ocp_meta_username", "ocp_meta_password" + ] + for key in ocp_meta_keys: + value = observer_globals.get(key) + if value is not None: + depend_info[key] = value + ob_servers = cluster_config.get_depend_servers(comp) + + connect_infos = [] + for ob_server in ob_servers: + ob_servers_conf[ob_server] = ob_server_conf = cluster_config.get_depend_config(comp, ob_server) + connect_infos.append([ob_server.ip, ob_server_conf['mysql_port']]) + zone = ob_server_conf['zone'] + if zone not in ob_zones: + ob_zones[zone] = ob_server + depend_info['connect_infos'] = connect_infos + root_servers = ob_zones.values() + break + + for server in cluster_config.servers: + server_config = deepcopy(cluster_config.get_server_conf_with_default(server)) + original_server_config = cluster_config.get_original_server_conf(server) + missed_keys = get_missing_required_parameters(original_server_config) + if missed_keys: + if 'jdbc_url' in missed_keys and depend_observer: + server_config['connect_infos'] = depend_info.get('connect_infos') + server_config['ocp_meta_db'] = depend_info.get('ocp_meta_db') + if 'jdbc_username' in missed_keys and depend_observer: + server_config['jdbc_username'] = "{}@{}".format(depend_info['ocp_meta_username'], + depend_info.get('ocp_meta_tenant', {}).get("tenant_name")) + if 'jdbc_password' in missed_keys and depend_observer: + server_config['jdbc_password'] = depend_info['ocp_meta_password'] + env[server] = server_config + return env + + def destroy(plugin_context, *args, **kwargs): + def clean_database(cursor, database): + ret = cursor.execute("drop database {0}".format(database)) + if not ret: + global global_ret + global_ret = False + cursor.execute("create database if not exists {0}".format(database)) + def clean(path): client = clients[server] ret = client.execute_command('rm -fr %s' % path, timeout=-1) @@ -40,7 +109,11 @@ def clean(path): clients = plugin_context.clients stdio = plugin_context.stdio global global_ret - stdio.start_loading('ocp-express work dir cleaning') + + removed_components = cluster_config.get_deploy_removed_components() + clean_data = (not cluster_config.depends or len(removed_components) > 0 and len(removed_components.intersection({"oceanbase", "oceanbase-ce"})) == 0) and stdio.confirm("Would you like to clean meta data") + + stdio.start_loading('ocp-express cleaning') for server in cluster_config.servers: server_config = cluster_config.get_server_conf(server) stdio.verbose('%s work path cleaning', server) @@ -49,6 +122,44 @@ def clean(path): log_dir = server_config.get('log_dir') if log_dir: clean(log_dir) + + if clean_data: + stdio.verbose("clean metadb") + env = 
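The metadb cleanup in this destroy plugin (continued just below) recovers host, port, and database from jdbc_url with re.match(r"^jdbc:\S+://(\S+?)(|:\d+)/(\S+)", ...), and the 4.2.2 start plugin later in this patch uses the same pattern. A quick illustration of what the three groups capture, against a made-up URL:

```python
import re

url = "jdbc:oceanbase://127.0.0.1:2881/ocp_express"  # illustrative URL
matched = re.match(r"^jdbc:\S+://(\S+?)(|:\d+)/(\S+)", url)
host = matched.group(1)        # '127.0.0.1'
port = matched.group(2)[1:]    # '2881'; group 2 keeps the leading ':', hence the [1:]
database = matched.group(3)    # 'ocp_express'
print(host, port, database)
```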
prepare_parameters(cluster_config, stdio) + for server in cluster_config.servers: + server_config = env[server] + jdbc_host, jdbc_port = "", 0 + if 'jdbc_url' in server_config: + matched = re.match(r"^jdbc:\S+://(\S+?)(|:\d+)/(\S+)", server_config['jdbc_url']) + if matched: + jdbc_host = matched.group(1) + jdbc_port = matched.group(2)[1:] + connect_infos = [[jdbc_host, jdbc_port]] + database = matched.group(3) + else: + stdio.error("failed to parse jdbc_url") + else: + connect_infos = server_config.get('connect_infos', '') + database = server_config.get('ocp_meta_db', '') + + connected = False + for connect_info in connect_infos: + try: + meta_cursor = Cursor(connect_info[0], connect_info[1], user=server_config['jdbc_username'], password=server_config['jdbc_password'], stdio=stdio) + connected = True + break + except: + continue + if connected: + try: + clean_database(meta_cursor, database) + except Exception: + stdio.error("failed to clean meta data") + global_ret = False + else: + stdio.error("failed to connect to ocp meta tenant") + global_ret = False + if global_ret: stdio.stop_loading('succeed') plugin_context.return_true() diff --git a/plugins/ocp-express/1.0/file_map.yaml b/plugins/ocp-express/1.0/file_map.yaml index 41844a1..8d1c5c6 100644 --- a/plugins/ocp-express/1.0/file_map.yaml +++ b/plugins/ocp-express/1.0/file_map.yaml @@ -1,6 +1,7 @@ - src_path: ./home/admin/ocp-express/lib/ocp-express-server.jar target_path: lib/ocp-express-server.jar - type: file + type: jar + require: openjdk-jre - src_path: ./home/admin/ocp-express/conf target_path: conf type: dir \ No newline at end of file diff --git a/plugins/ocp-express/1.0/requirement.yaml b/plugins/ocp-express/1.0/requirement.yaml new file mode 100644 index 0000000..1ecb7de --- /dev/null +++ b/plugins/ocp-express/1.0/requirement.yaml @@ -0,0 +1,3 @@ +openjdk-jre: + min_version: 1.8.0_161 + max_version: 1.8.1 diff --git a/plugins/ocp-express/1.0/start.py b/plugins/ocp-express/1.0/start.py index 5384eb1..814eed4 100644 --- a/plugins/ocp-express/1.0/start.py +++ b/plugins/ocp-express/1.0/start.py @@ -35,6 +35,9 @@ from Crypto.Signature import PKCS1_v1_5 as PKCS1_signature from Crypto.Cipher import PKCS1_OAEP as PKCS1_cipher +from _errno import EC_SQL_EXECUTE_FAILED + + PRI_KEY_FILE = '.ocp-express' PUB_KEY_FILE = '.ocp-express.pub' @@ -44,35 +47,7 @@ else: import pymysql as mysql from _stdio import SafeStdio - - -def parse_size(size): - _bytes = 0 - if not isinstance(size, str) or size.isdigit(): - _bytes = int(size) - else: - units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40} - match = re.match(r'(0|[1-9][0-9]*)\s*([B,K,M,G,T])', size.upper()) - _bytes = int(match.group(1)) * units[match.group(2)] - return _bytes - - -def format_size(size, precision=1): - units = ['B', 'K', 'M', 'G'] - units_num = len(units) - 1 - idx = 0 - if precision: - div = 1024.0 - format = '%.' + str(precision) + 'f%s' - limit = 1024 - else: - div = 1024 - limit = 1024 - format = '%d%s' - while idx < units_num and size >= limit: - size /= div - idx += 1 - return format % (size, units[idx]) +from _types import Capacity, CapacityWithB class Cursor(SafeStdio): @@ -107,6 +82,22 @@ def _connect(self): self.db = mysql.connect(host=self.ip, user=self.user, port=int(self.port), password=str(self.password), database=self.database, cursorclass=mysql.cursors.DictCursor) self.cursor = self.db.cursor() + + def execute(self, sql, args=None, execute_func=None, raise_exception=False, exc_level='error', stdio=None): + try: + stdio.verbose('execute sql: %s. 
args: %s' % (sql, args)) + self.cursor.execute(sql, args) + if not execute_func: + return self.cursor + return getattr(self.cursor, execute_func)() + except Exception as e: + getattr(stdio, exc_level)(EC_SQL_EXECUTE_FAILED.format(sql=sql)) + if raise_exception is None: + raise_exception = self._raise_exception + if raise_exception: + stdio.exception('') + raise e + return False def generate_key(client, key_dir, stdio): @@ -297,6 +288,7 @@ def start(plugin_context, start_env=None, *args, **kwargs): options = plugin_context.options clients = plugin_context.clients stdio = plugin_context.stdio + added_components = plugin_context.cluster_config.get_deploy_added_components() if not start_env: start_env = prepare_parameters(cluster_config, stdio) @@ -330,11 +322,11 @@ def start(plugin_context, start_env=None, *args, **kwargs): port = server_config['port'] pid_path = os.path.join(home_path, 'run/ocp-express.pid') pids = client.execute_command("cat %s" % pid_path).stdout.strip() - bootstrap_flag = os.path.join(home_path, '.bootstrapped') + bootstrap_flag = client.execute_command('ls %s'%os.path.join(home_path, '.bootstrapped')) if pids and all([client.execute_command('ls /proc/%s' % pid) for pid in pids.split('\n')]): server_pid[server] = pids continue - if getattr(options, 'without_parameter', False) and client.execute_command('ls %s' % bootstrap_flag): + if getattr(options, 'without_parameter', False) and bootstrap_flag: use_parameter = False else: use_parameter = True @@ -359,9 +351,16 @@ def start(plugin_context, start_env=None, *args, **kwargs): server_ip = connect_info[0] server_port = connect_info[-1] try: - Cursor(ip=server_ip, port=server_port, user=jdbc_username, password=jdbc_password, database=database, stdio=stdio) + ob_cursor = Cursor(ip=server_ip, port=server_port, user=jdbc_username, password=jdbc_password, database=database, stdio=stdio) jdbc_url = 'jdbc:oceanbase://{}:{}/{}'.format(server_ip, server_port, database) connected = True + if 'ocp-express' in added_components: + if ob_cursor.execute("select * from config_properties limit 1", exc_level='verbose'): + if not ob_cursor.execute("update config_properties set `value`=NULL, default_value=NULL where `key`='ocp.version' or `key`='ocp.version.full'", exc_level='verbose'): + stdio.verbose("failed to update 'ocp.version' and 'ocp.version.full' to NULL in config_properties table") + if ob_cursor.execute("select * from iam_user limit 1", exc_level='verbose'): + if not ob_cursor.execute("update iam_user set need_change_password=true where id='100'", exc_level='verbose'): + stdio.verbose("failed to update 'need_change_password' to true in iam_user table") break except: time.sleep(1) @@ -378,12 +377,13 @@ def start(plugin_context, start_env=None, *args, **kwargs): else: public_key_str = "" memory_size = server_config['memory_size'] - jvm_memory_option = "-Xms{0} -Xmx{0}".format(format_size(parse_size(memory_size) * 0.5, 0).lower()) + jvm_memory_option = "-Xms{0} -Xmx{0}".format(str(Capacity(Capacity(memory_size).btyes * 0.5, 0)).lower()) extra_options = { "ocp.iam.encrypted-system-password": system_password } extra_options_str = ' '.join(["-D{}={}".format(k, v) for k, v in extra_options.items()]) java_bin = server_config['java_bin'] + client.add_env('PATH', '%s/jre/bin:' % server_config['home_path']) cmd = '{java_bin} -jar {jvm_memory_option} -DJDBC_URL={jdbc_url} -DJDBC_USERNAME={jdbc_username} -DJDBC_PASSWORD={jdbc_password} ' \ '-DPUBLIC_KEY={public_key} {extra_options_str} {home_path}/lib/ocp-express-server.jar 
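The jvm_memory_option line above sizes the JVM heap to half of memory_size by round-tripping through Capacity. A rough standalone equivalent; to_bytes and fmt approximate Capacity(...).btyes and the zero-precision str(Capacity(...)) formatting, both of which live in _types outside this patch:

```python
import re

UNITS = {"B": 1, "K": 1 << 10, "M": 1 << 20, "G": 1 << 30, "T": 1 << 40}

def to_bytes(size):
    # stand-in for Capacity(size).btyes; bare integers are taken as bytes
    match = re.match(r'(0|[1-9][0-9]*)\s*([BKMGT])?', str(size).upper())
    return int(match.group(1)) * UNITS[match.group(2) or 'B']

def fmt(num_bytes):
    # stand-in for str(Capacity(n, 0)): integer value plus the largest fitting unit
    units = ['B', 'K', 'M', 'G']
    idx = 0
    while idx < len(units) - 1 and num_bytes >= 1024:
        num_bytes //= 1024
        idx += 1
    return '%d%s' % (num_bytes, units[idx])

memory_size = '1G'  # illustrative value
jvm_memory_option = "-Xms{0} -Xmx{0}".format(fmt(to_bytes(memory_size) * 0.5).lower())
print(jvm_memory_option)  # -Xms512m -Xmx512m
```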
--port={port}'.format( java_bin=java_bin, @@ -405,7 +405,12 @@ def start(plugin_context, start_env=None, *args, **kwargs): cmd += ' --bootstrap --progress-log={}'.format(os.path.join(log_dir, 'bootstrap.log')) for key in server_config: if key not in exclude_keys and key in config_mapper: + if key == 'logging_file_total_size_cap': + cmd += ' --with-property=ocp.logging.file.total.size.cap:{}'.format(CapacityWithB(server_config[key])) + continue cmd += ' --with-property={}:{}'.format(config_mapper[key], server_config[key]) + elif not bootstrap_flag: + cmd += ' --bootstrap --progress-log={}'.format(os.path.join(log_dir, 'bootstrap.log')) client.execute_command("cd {}; bash -c '{} > /dev/null 2>&1 &'".format(home_path, cmd)) ret = client.execute_command("ps -aux | grep '%s' | grep -v grep | awk '{print $2}' " % cmd) if ret: @@ -424,7 +429,7 @@ def start(plugin_context, start_env=None, *args, **kwargs): stdio.start_loading("ocp-express program health check") failed = [] servers = cluster_config.servers - count = 200 + count = 300 while servers and count: count -= 1 tmp_servers = [] @@ -464,6 +469,7 @@ def start(plugin_context, start_env=None, *args, **kwargs): return plugin_context.return_false() else: stdio.stop_loading('succeed') + plugin_context.set_variable('start_env', start_env) plugin_context.return_true(need_bootstrap=True) return False diff --git a/plugins/ocp-express/1.0/start_check.py b/plugins/ocp-express/1.0/start_check.py index ed884b3..3468440 100644 --- a/plugins/ocp-express/1.0/start_check.py +++ b/plugins/ocp-express/1.0/start_check.py @@ -26,6 +26,7 @@ from copy import deepcopy from _rpm import Version import _errno as err +from _types import Capacity success = True @@ -54,35 +55,6 @@ def password_check(passwd): return True -def parse_size(size): - _bytes = 0 - if not isinstance(size, str) or size.isdigit(): - _bytes = int(size) - else: - units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40} - match = re.match(r'(0|[1-9][0-9]*)\s*([B,K,M,G,T])', size.upper()) - _bytes = int(match.group(1)) * units[match.group(2)] - return _bytes - - -def format_size(size, precision=1): - units = ['B', 'K', 'M', 'G'] - units_num = len(units) - 1 - idx = 0 - if precision: - div = 1024.0 - format = '%.' 
+ str(precision) + 'f%s' - limit = 1024 - else: - div = 1024 - limit = 1024 - format = '%d%s' - while idx < units_num and size >= limit: - size /= div - idx += 1 - return format % (size, units[idx]) - - def get_mount_path(disk, _path): _mount_path = '/' for p in disk: @@ -235,7 +207,8 @@ def prepare_parameters(cluster_config, stdio): return env -def start_check(plugin_context, init_check_status=False, work_dir_check=False, work_dir_empty_check=True, strict_check=False, precheck=False, *args, **kwargs): +def start_check(plugin_context, init_check_status=False, work_dir_check=False, work_dir_empty_check=True, strict_check=False, precheck=False, + java_check=True, *args, **kwargs): def check_pass(item): status = check_status[server] if status[item].status == err.CheckStatus.WAIT: @@ -423,23 +396,25 @@ def critical(item, error, suggests=[]): check_pass('port') # java version check - for server in cluster_config.servers: - client = clients[server] - server_config = env[server] - java_bin = server_config['java_bin'] - ret = client.execute_command('{} -version'.format(java_bin)) - if not ret: - critical('java', err.EC_OCP_EXPRESS_JAVA_NOT_FOUND.format(server=server), [err.SUG_OCP_EXPRESS_INSTALL_JAVA_WITH_VERSION.format(version='1.8.0')]) - continue - version_pattern = r'version\s+\"(\d+\.\d+.\d+)' - found = re.search(version_pattern, ret.stdout) or re.search(version_pattern, ret.stderr) - if not found: - error('java', err.EC_OCP_EXPRESS_JAVA_VERSION_ERROR.format(server=server, version='1.8.0'), [err.SUG_OCP_EXPRESS_INSTALL_JAVA_WITH_VERSION.format(version='1.8.0'),]) - continue - java_major_version = found.group(1) - if Version(java_major_version) != Version('1.8.0'): - critical('java', err.EC_OCP_EXPRESS_JAVA_VERSION_ERROR.format(server=server, version='1.8.0'), [err.SUG_OCP_EXPRESS_INSTALL_JAVA_WITH_VERSION.format(version='1.8.0'),]) - continue + if java_check: + for server in cluster_config.servers: + client = clients[server] + server_config = env[server] + java_bin = server_config['java_bin'] + client.add_env('PATH', '%s/jre/bin:' % server_config['home_path']) + ret = client.execute_command('{} -version'.format(java_bin)) + if not ret: + critical('java', err.EC_OCP_EXPRESS_JAVA_NOT_FOUND.format(server=server), [err.SUG_OCP_EXPRESS_INSTALL_JAVA_WITH_VERSION.format(version='1.8.0')]) + continue + version_pattern = r'version\s+\"(\d+\.\d+.\d+)' + found = re.search(version_pattern, ret.stdout) or re.search(version_pattern, ret.stderr) + if not found: + error('java', err.EC_OCP_EXPRESS_JAVA_VERSION_ERROR.format(server=server, version='1.8.0'), [err.SUG_OCP_EXPRESS_INSTALL_JAVA_WITH_VERSION.format(version='1.8.0'),]) + continue + java_major_version = found.group(1) + if Version(java_major_version) != Version('1.8.0'): + critical('java', err.EC_OCP_EXPRESS_JAVA_VERSION_ERROR.format(server=server, version='1.8.0'), [err.SUG_OCP_EXPRESS_INSTALL_JAVA_WITH_VERSION.format(version='1.8.0'),]) + continue servers_memory = {} servers_disk = {} @@ -449,12 +424,12 @@ def critical(item, error, suggests=[]): for server in cluster_config.servers: client = clients[server] server_config = env[server] - memory_size = parse_size(server_config['memory_size']) + memory_size = Capacity(server_config['memory_size']).btyes if server_config.get('log_dir'): log_dir = server_config['log_dir'] else: log_dir = os.path.join(server_config['home_path'], 'log') - need_size = parse_size(server_config['logging_file_total_size_cap']) + need_size = Capacity(server_config['logging_file_total_size_cap']).btyes ip = server.ip if ip not 
in servers_client: servers_client[ip] = client @@ -496,17 +471,17 @@ def critical(item, error, suggests=[]): for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout): if k in memory_key_map: key = memory_key_map[k] - server_memory_stats[key] = parse_size(str(v)) + server_memory_stats[key] = Capacity(str(v)).btyes mem_suggests = [err.SUG_OCP_EXPRESS_REDUCE_MEM.format()] if memory_needed * 0.5 > server_memory_stats['available']: for server in ip_servers[ip]: - error('mem', err.EC_OCP_EXPRESS_NOT_ENOUGH_MEMORY_AVAILABLE.format(ip=ip, available=format_size(server_memory_stats['available']), need=format_size(memory_needed)), suggests=mem_suggests) + error('mem', err.EC_OCP_EXPRESS_NOT_ENOUGH_MEMORY_AVAILABLE.format(ip=ip, available=Capacity(server_memory_stats['available']), need=Capacity(memory_needed)), suggests=mem_suggests) elif memory_needed > server_memory_stats['free'] + server_memory_stats['buffers'] + server_memory_stats['cached']: for server in ip_servers[ip]: - error('mem', err.EC_OCP_EXPRESS_NOT_ENOUGH_MEMORY_CACHED.format(ip=ip, free=format_size(server_memory_stats['free']), cached=format_size(server_memory_stats['buffers'] + server_memory_stats['cached']), need=format_size(memory_needed)), suggests=mem_suggests) + error('mem', err.EC_OCP_EXPRESS_NOT_ENOUGH_MEMORY_CACHED.format(ip=ip, free=Capacity(server_memory_stats['free']), cached=Capacity(server_memory_stats['buffers'] + server_memory_stats['cached']), need=Capacity(memory_needed)), suggests=mem_suggests) elif memory_needed > server_memory_stats['free']: for server in ip_servers[ip]: - alert('mem', err.EC_OCP_EXPRESS_NOT_ENOUGH_MEMORY.format(ip=ip, free=format_size(server_memory_stats['free']), need=format_size(memory_needed)), suggests=mem_suggests) + alert('mem', err.EC_OCP_EXPRESS_NOT_ENOUGH_MEMORY.format(ip=ip, free=Capacity(server_memory_stats['free']), need=Capacity(memory_needed)), suggests=mem_suggests) # disk check for ip in servers_disk: client = servers_client[ip] @@ -517,7 +492,7 @@ def critical(item, error, suggests=[]): mount_path = get_mount_path(disk_info, path) if disk_needed > disk_info[mount_path]['avail']: for server in ip_servers[ip]: - error('disk', err.EC_OCP_EXPRESS_NOT_ENOUGH_DISK.format(ip=ip, disk=mount_path, need=format_size(disk_needed), avail=format_size(disk_info[mount_path]['avail'])), suggests=[err.SUG_OCP_EXPRESS_REDUCE_DISK.format()]) + error('disk', err.EC_OCP_EXPRESS_NOT_ENOUGH_DISK.format(ip=ip, disk=mount_path, need=Capacity(disk_needed), avail=Capacity(disk_info[mount_path]['avail'])), suggests=[err.SUG_OCP_EXPRESS_REDUCE_DISK.format()]) else: stdio.warn(err.WC_OCP_EXPRESS_FAILED_TO_GET_DISK_INFO.format(ip)) plugin_context.set_variable('start_env', env) diff --git a/plugins/ocp-express/4.2.1/start.py b/plugins/ocp-express/4.2.1/start.py index de5f8a8..bdf5857 100644 --- a/plugins/ocp-express/4.2.1/start.py +++ b/plugins/ocp-express/4.2.1/start.py @@ -28,6 +28,7 @@ import sys from copy import deepcopy +from _types import Capacity, CapacityWithB from tool import FileUtil, YamlLoader, ConfigUtil from Crypto import Random @@ -36,6 +37,9 @@ from Crypto.Signature import PKCS1_v1_5 as PKCS1_signature from Crypto.Cipher import PKCS1_OAEP as PKCS1_cipher +from _errno import EC_SQL_EXECUTE_FAILED + + PRI_KEY_FILE = '.ocp-express' PUB_KEY_FILE = '.ocp-express.pub' @@ -47,35 +51,6 @@ from _stdio import SafeStdio -def parse_size(size): - _bytes = 0 - if not isinstance(size, str) or size.isdigit(): - _bytes = int(size) - else: - units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40} - 
match = re.match(r'(0|[1-9][0-9]*)\s*([B,K,M,G,T])', size.upper()) - _bytes = int(match.group(1)) * units[match.group(2)] - return _bytes - - -def format_size(size, precision=1): - units = ['B', 'K', 'M', 'G'] - units_num = len(units) - 1 - idx = 0 - if precision: - div = 1024.0 - format = '%.' + str(precision) + 'f%s' - limit = 1024 - else: - div = 1024 - limit = 1024 - format = '%d%s' - while idx < units_num and size >= limit: - size /= div - idx += 1 - return format % (size, units[idx]) - - class Cursor(SafeStdio): def __init__(self, ip, port, user='root', tenant='sys', password='', database=None, stdio=None): @@ -109,6 +84,21 @@ def _connect(self): cursorclass=mysql.cursors.DictCursor) self.cursor = self.db.cursor() + def execute(self, sql, args=None, execute_func=None, raise_exception=False, exc_level='error', stdio=None): + try: + stdio.verbose('execute sql: %s. args: %s' % (sql, args)) + self.cursor.execute(sql, args) + if not execute_func: + return self.cursor + return getattr(self.cursor, execute_func)() + except Exception as e: + getattr(stdio, exc_level)(EC_SQL_EXECUTE_FAILED.format(sql=sql)) + if raise_exception is None: + raise_exception = self._raise_exception + if raise_exception: + stdio.exception('') + raise e + return False def generate_key(client, key_dir, stdio): rsa = RSA.generate(1024) @@ -298,6 +288,7 @@ def start(plugin_context, start_env=None, *args, **kwargs): options = plugin_context.options clients = plugin_context.clients stdio = plugin_context.stdio + added_components = cluster_config.get_deploy_added_components() if not start_env: start_env = prepare_parameters(cluster_config, stdio) @@ -359,9 +350,16 @@ def start(plugin_context, start_env=None, *args, **kwargs): server_ip = connect_info[0] server_port = connect_info[-1] try: - Cursor(ip=server_ip, port=server_port, user=jdbc_username, password=jdbc_password, database=database, stdio=stdio) + ob_cursor = Cursor(ip=server_ip, port=server_port, user=jdbc_username, password=jdbc_password, database=database, stdio=stdio) jdbc_url = 'jdbc:oceanbase://{}:{}/{}'.format(server_ip, server_port, database) connected = True + if 'ocp-express' in added_components: + if ob_cursor.execute("select * from config_properties limit 1", exc_level='verbose'): + if not ob_cursor.execute("update config_properties set `value`=NULL, default_value=NULL where `key`='ocp.version' or `key`='ocp.version.full'", exc_level='verbose'): + stdio.verbose("failed to update 'ocp.version' and 'ocp.version.full' to NULL in config_properties table") + if ob_cursor.execute("select * from user limit 1", exc_level='verbose'): + if not ob_cursor.execute("update user set need_change_password=true where id='100'", exc_level='verbose'): + stdio.verbose("failed to update 'need_change_password' to true in user table") break except: time.sleep(1) @@ -377,8 +375,9 @@ def start(plugin_context, start_env=None, *args, **kwargs): else: public_key_str = "" memory_size = server_config['memory_size'] - jvm_memory_option = "-Xms{0} -Xmx{0}".format(format_size(parse_size(memory_size) * 0.5, 0).lower()) + jvm_memory_option = "-Xms{0} -Xmx{0}".format(str(Capacity(Capacity(memory_size).btyes * 0.5, 0)).lower()) java_bin = server_config['java_bin'] + client.add_env('PATH', '%s/jre/bin:' % server_config['home_path']) cmd = '{java_bin} -jar {jvm_memory_option} -DJDBC_URL={jdbc_url} -DJDBC_USERNAME={jdbc_username}' \ ' -DPUBLIC_KEY={public_key} {home_path}/lib/ocp-express-server.jar --port={port}'.format( java_bin=java_bin, @@ -398,8 +397,12 @@ def start(plugin_context, 
start_env=None, *args, **kwargs): cmd += ' --bootstrap --progress-log={}'.format(os.path.join(log_dir, 'bootstrap.log')) for key in server_config: if key not in exclude_keys and key in config_mapper: + if key == 'logging_file_total_size_cap': + cmd += ' --with-property=ocp.logging.file.total.size.cap:{}'.format(CapacityWithB(server_config[key])) + continue cmd += ' --with-property={}:{}'.format(config_mapper[key], server_config[key]) - + elif not bootstrap_flag: + cmd += ' --bootstrap --progress-log={}'.format(os.path.join(log_dir, 'bootstrap.log')) data = { "cluster": { "name": server_config["cluster_name"], @@ -436,7 +439,7 @@ def start(plugin_context, start_env=None, *args, **kwargs): stdio.start_loading("ocp-express program health check") failed = [] servers = cluster_config.servers - count = 200 + count = 300 while servers and count: count -= 1 tmp_servers = [] @@ -476,6 +479,7 @@ def start(plugin_context, start_env=None, *args, **kwargs): return plugin_context.return_false() else: stdio.stop_loading('succeed') + plugin_context.set_variable('start_env', start_env) plugin_context.return_true(need_bootstrap=True) return False diff --git a/plugins/ocp-express/4.2.1/start_check.py b/plugins/ocp-express/4.2.1/start_check.py index d671b4d..0d2914a 100644 --- a/plugins/ocp-express/4.2.1/start_check.py +++ b/plugins/ocp-express/4.2.1/start_check.py @@ -26,6 +26,7 @@ from copy import deepcopy from _rpm import Version import _errno as err +from _types import Capacity success = True @@ -53,35 +54,6 @@ def password_check(passwd): return True if re.match(pattern, passwd) else False -def parse_size(size): - _bytes = 0 - if not isinstance(size, str) or size.isdigit(): - _bytes = int(size) - else: - units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40} - match = re.match(r'(0|[1-9][0-9]*)\s*([B,K,M,G,T])', size.upper()) - _bytes = int(match.group(1)) * units[match.group(2)] - return _bytes - - -def format_size(size, precision=1): - units = ['B', 'K', 'M', 'G'] - units_num = len(units) - 1 - idx = 0 - if precision: - div = 1024.0 - format = '%.' 
+ str(precision) + 'f%s' - limit = 1024 - else: - div = 1024 - limit = 1024 - format = '%d%s' - while idx < units_num and size >= limit: - size /= div - idx += 1 - return format % (size, units[idx]) - - def get_mount_path(disk, _path): _mount_path = '/' for p in disk: @@ -234,7 +206,8 @@ def prepare_parameters(cluster_config, stdio): return env -def start_check(plugin_context, init_check_status=False, work_dir_check=False, work_dir_empty_check=True, strict_check=False, precheck=False, *args, **kwargs): +def start_check(plugin_context, init_check_status=False, work_dir_check=False, work_dir_empty_check=True, strict_check=False, precheck=False, + java_check=True, *args, **kwargs): def check_pass(item): status = check_status[server] if status[item].status == err.CheckStatus.WAIT: @@ -423,23 +396,25 @@ def critical(item, error, suggests=[]): check_pass('port') # java version check - for server in cluster_config.servers: - client = clients[server] - server_config = env[server] - java_bin = server_config['java_bin'] - ret = client.execute_command('{} -version'.format(java_bin)) - if not ret: - critical('java', err.EC_OCP_EXPRESS_JAVA_NOT_FOUND.format(server=server), [err.SUG_OCP_EXPRESS_INSTALL_JAVA_WITH_VERSION.format(version='1.8.0')]) - continue - version_pattern = r'version\s+\"(\d+\.\d+.\d+)' - found = re.search(version_pattern, ret.stdout) or re.search(version_pattern, ret.stderr) - if not found: - error('java', err.EC_OCP_EXPRESS_JAVA_VERSION_ERROR.format(server=server, version='1.8.0'), [err.SUG_OCP_EXPRESS_INSTALL_JAVA_WITH_VERSION.format(version='1.8.0'),]) - continue - java_major_version = found.group(1) - if Version(java_major_version) != Version('1.8.0'): - critical('java', err.EC_OCP_EXPRESS_JAVA_VERSION_ERROR.format(server=server, version='1.8.0'), [err.SUG_OCP_EXPRESS_INSTALL_JAVA_WITH_VERSION.format(version='1.8.0'),]) - continue + if java_check: + for server in cluster_config.servers: + client = clients[server] + server_config = env[server] + java_bin = server_config['java_bin'] + client.add_env('PATH', '%s/jre/bin:' % server_config['home_path']) + ret = client.execute_command('{} -version'.format(java_bin)) + if not ret: + critical('java', err.EC_OCP_EXPRESS_JAVA_NOT_FOUND.format(server=server), [err.SUG_OCP_EXPRESS_INSTALL_JAVA_WITH_VERSION.format(version='1.8.0')]) + continue + version_pattern = r'version\s+\"(\d+\.\d+.\d+)' + found = re.search(version_pattern, ret.stdout) or re.search(version_pattern, ret.stderr) + if not found: + error('java', err.EC_OCP_EXPRESS_JAVA_VERSION_ERROR.format(server=server, version='1.8.0'), [err.SUG_OCP_EXPRESS_INSTALL_JAVA_WITH_VERSION.format(version='1.8.0'),]) + continue + java_major_version = found.group(1) + if Version(java_major_version) != Version('1.8.0'): + critical('java', err.EC_OCP_EXPRESS_JAVA_VERSION_ERROR.format(server=server, version='1.8.0'), [err.SUG_OCP_EXPRESS_INSTALL_JAVA_WITH_VERSION.format(version='1.8.0'),]) + continue servers_memory = {} servers_disk = {} @@ -449,12 +424,12 @@ def critical(item, error, suggests=[]): for server in cluster_config.servers: client = clients[server] server_config = env[server] - memory_size = parse_size(server_config['memory_size']) + memory_size = Capacity(server_config['memory_size']).btyes if server_config.get('log_dir'): log_dir = server_config['log_dir'] else: log_dir = os.path.join(server_config['home_path'], 'log') - need_size = parse_size(server_config['logging_file_total_size_cap']) + need_size = Capacity(server_config['logging_file_total_size_cap']).btyes ip = server.ip if ip not 
in servers_client: servers_client[ip] = client @@ -496,17 +471,17 @@ def critical(item, error, suggests=[]): for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout): if k in memory_key_map: key = memory_key_map[k] - server_memory_stats[key] = parse_size(str(v)) + server_memory_stats[key] = Capacity(str(v)).btyes mem_suggests = [err.SUG_OCP_EXPRESS_REDUCE_MEM.format()] if memory_needed * 0.5 > server_memory_stats['available']: for server in ip_servers[ip]: - error('mem', err.EC_OCP_EXPRESS_NOT_ENOUGH_MEMORY_AVAILABLE.format(ip=ip, available=format_size(server_memory_stats['available']), need=format_size(memory_needed)), suggests=mem_suggests) + error('mem', err.EC_OCP_EXPRESS_NOT_ENOUGH_MEMORY_AVAILABLE.format(ip=ip, available=Capacity(server_memory_stats['available']), need=Capacity(memory_needed)), suggests=mem_suggests) elif memory_needed > server_memory_stats['free'] + server_memory_stats['buffers'] + server_memory_stats['cached']: for server in ip_servers[ip]: - error('mem', err.EC_OCP_EXPRESS_NOT_ENOUGH_MEMORY_CACHED.format(ip=ip, free=format_size(server_memory_stats['free']), cached=format_size(server_memory_stats['buffers'] + server_memory_stats['cached']), need=format_size(memory_needed)), suggests=mem_suggests) + error('mem', err.EC_OCP_EXPRESS_NOT_ENOUGH_MEMORY_CACHED.format(ip=ip, free=Capacity(server_memory_stats['free']), cached=Capacity(server_memory_stats['buffers'] + server_memory_stats['cached']), need=Capacity(memory_needed)), suggests=mem_suggests) elif memory_needed > server_memory_stats['free']: for server in ip_servers[ip]: - alert('mem', err.EC_OCP_EXPRESS_NOT_ENOUGH_MEMORY.format(ip=ip, free=format_size(server_memory_stats['free']), need=format_size(memory_needed)), suggests=mem_suggests) + alert('mem', err.EC_OCP_EXPRESS_NOT_ENOUGH_MEMORY.format(ip=ip, free=Capacity(server_memory_stats['free']), need=Capacity(memory_needed)), suggests=mem_suggests) # disk check for ip in servers_disk: client = servers_client[ip] @@ -517,7 +492,7 @@ def critical(item, error, suggests=[]): mount_path = get_mount_path(disk_info, path) if disk_needed > disk_info[mount_path]['avail']: for server in ip_servers[ip]: - error('disk', err.EC_OCP_EXPRESS_NOT_ENOUGH_DISK.format(ip=ip, disk=mount_path, need=format_size(disk_needed), avail=format_size(disk_info[mount_path]['avail'])), suggests=[err.SUG_OCP_EXPRESS_REDUCE_DISK.format()]) + error('disk', err.EC_OCP_EXPRESS_NOT_ENOUGH_DISK.format(ip=ip, disk=mount_path, need=Capacity(disk_needed), avail=Capacity(disk_info[mount_path]['avail'])), suggests=[err.SUG_OCP_EXPRESS_REDUCE_DISK.format()]) else: stdio.warn(err.WC_OCP_EXPRESS_FAILED_TO_GET_DISK_INFO.format(ip)) @@ -526,7 +501,7 @@ def critical(item, error, suggests=[]): server_config = env[server] admin_passwd = server_config.get('admin_passwd') if not admin_passwd or not password_check(admin_passwd): - error('admin_passwd', err.EC_OCP_EXPRESS_ADMIN_PASSWD_ERROR.format(ip=server.ip, current=admin_passwd), suggests=[err.SUG_OCP_EXPRESS_EDIT_ADMIN_PASSWD_ERROR.format()]) + error('admin_passwd', err.EC_COMPONENT_PASSWD_ERROR.format(ip=server.ip, component='ocp-express', key='admin_passwd', rule='Must be 8 to 32 characters in length, containing at least 3 types from digits, lowercase letters, uppercase letters and the following special characters: ~!@#%^&*_-+=|(){{}}[]:;,.?/'), suggests=[err.SUG_OCP_EXPRESS_EDIT_ADMIN_PASSWD.format()]) plugin_context.set_variable('start_env', env) diff --git a/plugins/ocp-express/4.2.2/start.py b/plugins/ocp-express/4.2.2/start.py new file mode 100644 
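The reworded admin_passwd error above now spells out the policy that password_check enforces. For clarity, an equivalent readable checker for the stated rule; the plugin itself validates with a single regex that this hunk does not show, so this is an illustration of the policy rather than the exact implementation:

```python
import re

SPECIALS = r'~!@#%^&*_\-+=|(){}\[\]:;,.?/'  # the special characters listed in the error

def password_ok(passwd):
    if not 8 <= len(passwd) <= 32:
        return False
    if re.search(r'[^0-9a-zA-Z%s]' % SPECIALS, passwd):
        return False  # contains a character outside the allowed sets
    classes = (re.search(r'[0-9]', passwd), re.search(r'[a-z]', passwd),
               re.search(r'[A-Z]', passwd), re.search(r'[%s]' % SPECIALS, passwd))
    return sum(1 for c in classes if c) >= 3  # at least 3 of the 4 classes

print(password_ok('oceanbase'))   # False: one character class only
print(password_ok('0ceanBase!'))  # True: digit + lower + upper + special
```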
index 0000000..c236551 --- /dev/null +++ b/plugins/ocp-express/4.2.2/start.py @@ -0,0 +1,469 @@ +# coding: utf-8 +# OceanBase Deploy. +# Copyright (C) 2021 OceanBase +# +# This file is part of OceanBase Deploy. +# +# OceanBase Deploy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# OceanBase Deploy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>. + + +from __future__ import absolute_import, division, print_function + +import json +import os +import re +import time +import base64 +import sys +from copy import deepcopy + +from tool import FileUtil, YamlLoader, ConfigUtil + +from Crypto import Random +from Crypto.Hash import SHA +from Crypto.PublicKey import RSA +from Crypto.Signature import PKCS1_v1_5 as PKCS1_signature +from Crypto.Cipher import PKCS1_OAEP as PKCS1_cipher +from _types import Capacity, CapacityWithB + +PRI_KEY_FILE = '.ocp-express' +PUB_KEY_FILE = '.ocp-express.pub' + + +if sys.version_info.major == 2: + import MySQLdb as mysql +else: + import pymysql as mysql +from _stdio import SafeStdio + + +class Cursor(SafeStdio): + + def __init__(self, ip, port, user='root', tenant='sys', password='', database=None, stdio=None): + self.stdio = stdio + self.ip = ip + self.port = port + self._user = user + self.tenant = tenant + self.password = password + self.database = database + self.cursor = None + self.db = None + self._connect() + + @property + def user(self): + if "@" in self._user: + return self._user + if self.tenant: + return "{}@{}".format(self._user, self.tenant) + else: + return self._user + + def _connect(self): + self.stdio.verbose('connect %s -P%s -u%s -p%s' % (self.ip, self.port, self.user, self.password)) + if sys.version_info.major == 2: + self.db = mysql.connect(host=self.ip, user=self.user, port=int(self.port), passwd=str(self.password), database=self.database) + self.cursor = self.db.cursor(cursorclass=mysql.cursors.DictCursor) + else: + self.db = mysql.connect(host=self.ip, user=self.user, port=int(self.port), password=str(self.password), database=self.database, + cursorclass=mysql.cursors.DictCursor) + self.cursor = self.db.cursor() + + +def generate_key(client, key_dir, stdio): + rsa = RSA.generate(1024) + private_key = rsa + public_key = rsa.publickey() + client.write_file(private_key.exportKey(pkcs=8), os.path.join(key_dir, PRI_KEY_FILE), mode='wb', stdio=stdio) + client.write_file(public_key.exportKey(pkcs=8), os.path.join(key_dir, PUB_KEY_FILE), mode='wb', stdio=stdio) + return private_key, public_key + + +def get_key(client, key_dir, stdio): + private_key_file = os.path.join(key_dir, PRI_KEY_FILE) + ret = client.execute_command("cat {}".format(private_key_file)) + if not ret: + return generate_key(client, key_dir, stdio) + private_key = RSA.importKey(ret.stdout.strip()) + public_key_file = os.path.join(key_dir, PUB_KEY_FILE) + ret = client.execute_command("cat {}".format(public_key_file)) + if not ret: + return generate_key(client, key_dir, stdio) + public_key = RSA.importKey(ret.stdout.strip()) + return private_key, public_key + + +def get_plain_public_key(public_key): + if
isinstance(public_key, RSA.RsaKey): + public_key = public_key.exportKey(pkcs=8).decode() + elif isinstance(public_key, bytes): + public_key = public_key.decode() + public_key = public_key.replace("-----BEGIN PRIVATE KEY-----", "").replace("-----END PRIVATE KEY-----", "").replace("-----BEGIN PUBLIC KEY-----", "").replace("-----END PUBLIC KEY-----", "").replace("\n", "") + return public_key + + +def rsa_private_sign(passwd, private_key): + signer = PKCS1_cipher.new(private_key) + sign = signer.encrypt(passwd.encode("utf-8")) + # digest = SHA.new() + # digest.update(passwd.encode("utf8")) + # sign = signer.sign(digest) + signature = base64.b64encode(sign) + signature = signature.decode('utf-8') + return signature + + +def get_port_socket_inode(client, port, stdio): + port = hex(port)[2:].zfill(4).upper() + cmd = "bash -c 'cat /proc/net/{tcp*,udp*}' | awk -F' ' '{print $2,$10}' | grep '00000000:%s' | awk -F' ' '{print $2}' | uniq" % port + res = client.execute_command(cmd) + if not res or not res.stdout.strip(): + return False + stdio.verbose(res.stdout) + return res.stdout.strip().split('\n') + + +def confirm_port(client, pid, port, stdio): + socket_inodes = get_port_socket_inode(client, port, stdio) + if not socket_inodes: + return False + ret = client.execute_command("ls -l /proc/%s/fd/ |grep -E 'socket:\[(%s)\]'" % (pid, '|'.join(socket_inodes))) + if ret and ret.stdout.strip(): + return True + return False + + +def get_missing_required_parameters(parameters): + results = [] + for key in ["jdbc_url", "jdbc_password", "jdbc_username", "cluster_name", "ob_cluster_id", "root_sys_password", + "server_addresses", "agent_username", "agent_password", "ocp_root_password"]: + if parameters.get(key) is None: + results.append(key) + return results + + +def prepare_parameters(cluster_config, stdio): + # depends config + env = {} + depend_observer = False + depend_info = {} + ob_servers_conf = {} + root_servers = [] + for comp in ["oceanbase", "oceanbase-ce"]: + ob_zones = {} + if comp in cluster_config.depends: + depend_observer = True + observer_globals = cluster_config.get_depend_config(comp) + ocp_meta_keys = [ + "ocp_meta_tenant", "ocp_meta_db", "ocp_meta_username", "ocp_meta_password", "appname", "cluster_id", "root_password", "ocp_root_password" + ] + for key in ocp_meta_keys: + value = observer_globals.get(key) + if value is not None: + depend_info[key] = value + ob_servers = cluster_config.get_depend_servers(comp) + + connect_infos = [] + for ob_server in ob_servers: + ob_servers_conf[ob_server] = ob_server_conf = cluster_config.get_depend_config(comp, ob_server) + connect_infos.append([ob_server.ip, ob_server_conf['mysql_port']]) + zone = ob_server_conf['zone'] + if zone not in ob_zones: + ob_zones[zone] = ob_server + depend_info['connect_infos'] = connect_infos + root_servers = ob_zones.values() + break + for comp in ['obproxy', 'obproxy-ce']: + if comp in cluster_config.depends: + obproxy_servers = cluster_config.get_depend_servers(comp) + obproxy_server = obproxy_servers[0] + obproxy_server_config = cluster_config.get_depend_config(comp, obproxy_server) + depend_info['server_ip'] = obproxy_server.ip + depend_info['mysql_port'] = obproxy_server_config['listen_port'] + break + if 'obagent' in cluster_config.depends: + obagent_servers = cluster_config.get_depend_servers('obagent') + server_addresses = [] + for obagent_server in obagent_servers: + obagent_server_config_without_default = cluster_config.get_depend_config('obagent', obagent_server, with_default=False) + obagent_server_config = 
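get_port_socket_inode above encodes the port as zero-padded upper-case hex because that is how /proc/net/tcp prints local addresses, and confirm_port then ties a socket inode back to a pid through /proc/<pid>/fd. A pure-Python rendition of the same column parsing, run against one illustrative /proc/net/tcp line:

```python
sample_proc_net_tcp = (
    "  0: 00000000:1F90 00000000:0000 0A 00000000:00000000 "
    "00:00000000 00000000  1000 0 42424 1 ffff880000000000 100 0 0 10 0"
)

def hex_port(port):
    return hex(port)[2:].zfill(4).upper()  # 8080 -> '1F90'

def socket_inodes_for_port(tcp_text, port):
    # column 2 is local_address, column 10 is the socket inode,
    # matching the awk '{print $2,$10}' in the plugin
    inodes = []
    for line in tcp_text.splitlines():
        cols = line.split()
        if len(cols) > 9 and cols[1] == '00000000:' + hex_port(port):
            inodes.append(cols[9])
    return inodes

print(socket_inodes_for_port(sample_proc_net_tcp, 8080))  # ['42424']
```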
cluster_config.get_depend_config('obagent', obagent_server) + username = obagent_server_config['http_basic_auth_user'] + password = obagent_server_config['http_basic_auth_password'] + if 'obagent_username' not in depend_info: + depend_info['obagent_username'] = username + elif depend_info['obagent_username'] != username: + stdio.error('The http basic auth of obagent is inconsistent') + return + if 'obagent_password' not in depend_info: + depend_info['obagent_password'] = password + elif depend_info['obagent_password'] != password: + stdio.error('The http basic auth of obagent is inconsistent') + return + if obagent_server_config_without_default.get('sql_port'): + sql_port = obagent_server_config['sql_port'] + elif ob_servers_conf.get(obagent_server) and ob_servers_conf[obagent_server].get('mysql_port'): + sql_port = ob_servers_conf[obagent_server]['mysql_port'] + else: + continue + if obagent_server_config_without_default.get('rpc_port'): + svr_port = obagent_server_config['rpc_port'] + elif ob_servers_conf.get(obagent_server) and ob_servers_conf[obagent_server].get('rpc_port'): + svr_port = ob_servers_conf[obagent_server]['rpc_port'] + else: + continue + server_addresses.append({ + "address": obagent_server.ip, + "svrPort": svr_port, + "sqlPort": sql_port, + "withRootServer": obagent_server in root_servers, + "agentMgrPort": obagent_server_config.get('mgragent_http_port', 0), + "agentMonPort": obagent_server_config.get('monagent_http_port', 0) + }) + depend_info['server_addresses'] = server_addresses + + for server in cluster_config.servers: + server_config = deepcopy(cluster_config.get_server_conf_with_default(server)) + original_server_config = cluster_config.get_original_server_conf(server) + missed_keys = get_missing_required_parameters(original_server_config) + if missed_keys: + if 'jdbc_url' in missed_keys and depend_observer: + if depend_info.get('server_ip'): + server_config['jdbc_url'] = 'jdbc:oceanbase://{}:{}/{}'.format(depend_info['server_ip'], depend_info['mysql_port'], depend_info['ocp_meta_db']) + else: + server_config['connect_infos'] = depend_info.get('connect_infos') + server_config['ocp_meta_db'] = depend_info.get('ocp_meta_db') + server_config['jdbc_url'] = '' + if 'jdbc_username' in missed_keys and depend_observer: + server_config['jdbc_username'] = "{}@{}".format(depend_info['ocp_meta_username'], + depend_info.get('ocp_meta_tenant', {}).get("tenant_name")) + depends_key_maps = { + "jdbc_password": "ocp_meta_password", + "cluster_name": "appname", + "ob_cluster_id": "cluster_id", + "root_sys_password": "root_password", + "agent_username": "obagent_username", + "agent_password": "obagent_password", + "server_addresses": "server_addresses", + "ocp_root_password": "ocp_root_password", + "ocp_meta_tenant": "ocp_meta_tenant" + } + for key in depends_key_maps: + if key in missed_keys: + if depend_info.get(depends_key_maps[key]) is not None: + server_config[key] = depend_info[depends_key_maps[key]] + env[server] = server_config + return env + + +def start(plugin_context, start_env=None, *args, **kwargs): + cluster_config = plugin_context.cluster_config + options = plugin_context.options + clients = plugin_context.clients + stdio = plugin_context.stdio + + if not start_env: + start_env = prepare_parameters(cluster_config, stdio) + if not start_env: + return plugin_context.return_false() + + + + exclude_keys = ["home_path", "port", "jdbc_url", "jdbc_username", "jdbc_password", "cluster_name", "ob_cluster_id", + "root_sys_password", "server_addresses", "agent_username", 
"agent_password", "memory_size", "ocp_root_password", "ocp_meta_tenant"] + + repository_dir = None + for repository in plugin_context.repositories: + if repository.name == cluster_config.name: + repository_dir = repository.repository_dir + break + with FileUtil.open(os.path.join(repository_dir, 'conf/ocp-express-config-mapper.yaml')) as f: + data = YamlLoader(stdio=stdio).load(f) + config_mapper = data.get('config_mapper', {}) + server_pid = {} + success = True + stdio.start_loading("Start ocp-express") + for server in cluster_config.servers: + client = clients[server] + server_config = start_env[server] + home_path = server_config['home_path'] + jdbc_url = server_config['jdbc_url'] + jdbc_username = server_config['jdbc_username'] + jdbc_password = server_config['jdbc_password'] + port = server_config['port'] + pid_path = os.path.join(home_path, 'run/ocp-express.pid') + pids = client.execute_command("cat %s" % pid_path).stdout.strip() + bootstrap_flag = client.execute_command('ls %s' % os.path.join(home_path, '.bootstrapped')) + if pids and all([client.execute_command('ls /proc/%s' % pid) for pid in pids.split('\n')]): + server_pid[server] = pids + continue + if getattr(options, 'without_parameter', False) and client.execute_command('ls %s' % bootstrap_flag): + use_parameter = False + else: + use_parameter = True + # check meta db connect before start + if jdbc_url: + matched = re.match(r"^jdbc:\S+://(\S+?)(|:\d+)/(\S+)", jdbc_url) + if not matched: + stdio.error("Invalid jdbc url: %s" % jdbc_url) + return + ip = matched.group(1) + sql_port = matched.group(2)[1:] + database = matched.group(3) + connect_infos = [[ip, sql_port]] + else: + connect_infos = server_config.get('connect_infos', '') + database = server_config.get('ocp_meta_db', '') + connected = False + retries = 300 + while not connected and retries: + for connect_info in connect_infos: + retries -= 1 + server_ip = connect_info[0] + server_port = connect_info[-1] + try: + Cursor(ip=server_ip, port=server_port, user=jdbc_username, password=jdbc_password, database=database, stdio=stdio) + jdbc_url = 'jdbc:oceanbase://{}:{}/{}'.format(server_ip, server_port, database) + connected = True + break + except: + time.sleep(1) + if not connected: + success = False + stdio.error("{}: failed to connect meta db".format(server)) + continue + + if server_config.get('encrypt_password', False): + private_key, public_key = get_key(client, os.path.join(home_path, 'conf'), stdio) + public_key_str = get_plain_public_key(public_key) + jdbc_password = rsa_private_sign(jdbc_password, private_key) + else: + public_key_str = "" + memory_size = server_config['memory_size'] + jvm_memory_option = "-Xms{0} -Xmx{0}".format(str(Capacity(Capacity(memory_size).btyes * 0.5, 0)).lower()) + java_bin = server_config['java_bin'] + client.add_env('PATH', '%s/jre/bin:' % server_config['home_path']) + cmd = '{java_bin} -jar {jvm_memory_option} -DJDBC_URL={jdbc_url} -DJDBC_USERNAME={jdbc_username}' \ + ' -DPUBLIC_KEY={public_key} {home_path}/lib/ocp-express-server.jar --port={port}'.format( + java_bin=java_bin, + home_path=home_path, + port=port, + jdbc_url=jdbc_url, + jdbc_username=jdbc_username, + public_key=public_key_str, + jvm_memory_option=jvm_memory_option + ) + if "log_dir" not in server_config: + log_dir = os.path.join(home_path, 'log') + else: + log_dir = server_config["log_dir"] + server_config["logging_file_name"] = os.path.join(log_dir, 'ocp-express.log') + if use_parameter: + cmd += ' --bootstrap --progress-log={}'.format(os.path.join(log_dir, 
'bootstrap.log')) + for key in server_config: + if key not in exclude_keys and key in config_mapper: + if key == 'logging_file_total_size_cap': + cmd += ' --with-property=ocp.logging.file.total.size.cap:{}'.format(CapacityWithB(server_config[key])) + continue + cmd += ' --with-property={}:{}'.format(config_mapper[key], server_config[key]) + + obCredentials = [] + if server_config.get('ocp_root_password'): + obCredentials = [{ + "tenantName": server_config["ocp_meta_tenant"]["tenant_name"], + "username": "root", + "password": server_config['ocp_root_password'], + }] + data = { + "cluster": { + "name": server_config["cluster_name"], + "obClusterId": server_config["ob_cluster_id"], + "rootSysPassword": server_config["root_sys_password"], + "serverAddresses": server_config["server_addresses"], + "obCredentials": obCredentials if not client.execute_command('ls %s' % bootstrap_flag) else [] + }, + "agentUsername": server_config["agent_username"], + "agentPassword": server_config["agent_password"], + "agentAuthType": "OCP_DIGEST" + } + + admin_passwd = cluster_config.get_global_conf_with_default().get("admin_passwd", '') + + client.add_env('OCP_EXPRESS_INIT_PROPERTIES', json.dumps(data) if client._is_local else ConfigUtil.passwd_format(json.dumps(data)), rewrite=True) + client.add_env('OCP_EXPRESS_ADMIN_PASSWD', admin_passwd if client._is_local else ConfigUtil.passwd_format(admin_passwd), rewrite=True) + client.add_env('JDBC_PASSWORD', jdbc_password if client._is_local else ConfigUtil.passwd_format(jdbc_password), rewrite=True) + + client.execute_command("cd {}; bash -c '{} > /dev/null 2>&1 &'".format(home_path, cmd)) + ret = client.execute_command("ps -aux | grep '%s' | grep -v grep | awk '{print $2}' " % cmd) + if ret: + server_pid[server] = ret.stdout.strip() + if not server_pid[server]: + stdio.error("failed to start {} ocp express".format(server)) + success = False + continue + client.write_file(server_pid[server], os.path.join(home_path, 'run/ocp-express.pid')) + if success: + stdio.stop_loading('succeed') + else: + stdio.stop_loading('fail') + return plugin_context.return_false() + + stdio.start_loading("ocp-express program health check") + failed = [] + servers = cluster_config.servers + count = 200 + while servers and count: + count -= 1 + tmp_servers = [] + for server in servers: + server_config = cluster_config.get_server_conf(server) + client = clients[server] + stdio.verbose('%s program health check' % server) + pids_stat = {} + for pid in server_pid[server].split("\n"): + pids_stat[pid] = None + if not client.execute_command('ls /proc/{}'.format(pid)): + pids_stat[pid] = False + continue + confirm = confirm_port(client, pid, int(server_config["port"]), stdio) + if confirm: + pids_stat[pid] = True + break + if any(pids_stat.values()): + for pid in pids_stat: + if pids_stat[pid]: + stdio.verbose('%s ocp-express[pid: %s] started', server, pid) + continue + if all([stat is False for stat in pids_stat.values()]): + failed.append('failed to start {} ocp-express'.format(server)) + elif count: + tmp_servers.append(server) + stdio.verbose('failed to start %s ocp-express, remaining retries: %d' % (server, count)) + else: + failed.append('failed to start {} ocp-express'.format(server)) + servers = tmp_servers + if servers and count: + time.sleep(3) + if failed: + stdio.stop_loading('failed') + for msg in failed: + stdio.error(msg) + return plugin_context.return_false() + else: + stdio.stop_loading('succeed') + plugin_context.set_variable('start_env', start_env) + 
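The health-check loop above combines three signals per server: a pid that vanished from `/proc` is a definite failure, a pid that owns the listen port is a definite success, and anything in between burns a retry. Its shape, with the `/proc` probes stubbed out as callables (a reduction for review, not the plugin's code):

```python
import time

def health_check(pids, is_alive, owns_port, retries=200, interval=0.01):
    while retries:
        retries -= 1
        stats = {pid: None for pid in pids}
        for pid in pids:
            if not is_alive(pid):
                stats[pid] = False
            elif owns_port(pid):
                stats[pid] = True
                break
        if any(stats.values()):
            return True                 # one confirmed listener is enough
        if all(s is False for s in stats.values()):
            return False                # every candidate process is gone
        time.sleep(interval)
    return False

print(health_check([1], lambda p: True, lambda p: True))  # True
```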
plugin_context.return_true(need_bootstrap=True) + + return False + diff --git a/plugins/ocp-express/4.2.2/start_check.py b/plugins/ocp-express/4.2.2/start_check.py new file mode 100644 index 0000000..308200d --- /dev/null +++ b/plugins/ocp-express/4.2.2/start_check.py @@ -0,0 +1,545 @@ +# coding: utf-8 +# OceanBase Deploy. +# Copyright (C) 2021 OceanBase +# +# This file is part of OceanBase Deploy. +# +# OceanBase Deploy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# OceanBase Deploy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>. + + +from __future__ import absolute_import, division, print_function + +import re +import os + +from copy import deepcopy +from _rpm import Version +import _errno as err + +success = True + + +def get_missing_required_parameters(parameters): + results = [] + for key in ["jdbc_url", "jdbc_password", "jdbc_username", "cluster_name", "ob_cluster_id", "root_sys_password", + "server_addresses", "agent_username", "agent_password", "ocp_root_password", "ocp_meta_tenant"]: + if parameters.get(key) is None: + results.append(key) + return results + + +def get_port_socket_inode(client, port): + port = hex(port)[2:].zfill(4).upper() + cmd = "bash -c 'cat /proc/net/{udp*,tcp*}' | awk -F' ' '{if($4==\"0A\") print $2,$4,$10}' | grep ':%s' | awk -F' ' '{print $3}' | uniq" % port + res = client.execute_command(cmd) + if not res or not res.stdout.strip(): + return False + return res.stdout.strip().split('\n') + + +def password_check(passwd): + pattern = r'''^(?=(.*[a-z]){2,})(?=(.*[A-Z]){2,})(?=(.*\d){2,})(?=(.*[~!@#%^&*_\-+=|(){}\[\]:;,.?/]){2,})[A-Za-z\d~!@#%^&*_\-+=|(){}\[\]:;,.?/]{8,32}$''' + return True if re.match(pattern, passwd) else False + + +def parse_size(size): + _bytes = 0 + if not isinstance(size, str) or size.isdigit(): + _bytes = int(size) + else: + units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40} + match = re.match(r'(0|[1-9][0-9]*)\s*([B,K,M,G,T])', size.upper()) + _bytes = int(match.group(1)) * units[match.group(2)] + return _bytes + + +def format_size(size, precision=1): + units = ['B', 'K', 'M', 'G'] + units_num = len(units) - 1 + idx = 0 + if precision: + div = 1024.0 + format = '%.'
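`password_check` encodes the rule quoted later in the `admin_passwd` error message: at least two characters from each of four classes, total length 8 to 32, drawn only from the allowed alphabet. The stacked lookaheads make that compact but opaque, so here is the pattern exercised directly:

```python
import re

pattern = r'^(?=(.*[a-z]){2,})(?=(.*[A-Z]){2,})(?=(.*\d){2,})(?=(.*[~!@#%^&*_\-+=|(){}\[\]:;,.?/]){2,})[A-Za-z\d~!@#%^&*_\-+=|(){}\[\]:;,.?/]{8,32}$'
print(bool(re.match(pattern, 'aA1~aA1~')))   # True: two of each class, length 8
print(bool(re.match(pattern, 'Passw0rd!')))  # False: one uppercase, digit, special
```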
+ str(precision) + 'f%s' + limit = 1024 + else: + div = 1024 + limit = 1024 + format = '%d%s' + while idx < units_num and size >= limit: + size /= div + idx += 1 + return format % (size, units[idx]) + + +def get_mount_path(disk, _path): + _mount_path = '/' + for p in disk: + if p in _path: + if len(p) > len(_mount_path): + _mount_path = p + return _mount_path + + +def get_disk_info_by_path(path, client, stdio): + disk_info = {} + ret = client.execute_command('df --block-size=1024 {}'.format(path)) + if ret: + for total, used, avail, puse, path in re.findall(r'(\d+)\s+(\d+)\s+(\d+)\s+(\d+%)\s+(.+)', ret.stdout): + disk_info[path] = {'total': int(total) << 10, 'avail': int(avail) << 10, 'need': 0} + stdio.verbose('get disk info for path {}, total: {} avail: {}'.format(path, disk_info[path]['total'], disk_info[path]['avail'])) + return disk_info + + +def get_disk_info(all_paths, client, stdio): + overview_ret = True + disk_info = get_disk_info_by_path('', client, stdio) + if not disk_info: + overview_ret = False + disk_info = get_disk_info_by_path('/', client, stdio) + if not disk_info: + disk_info['/'] = {'total': 0, 'avail': 0, 'need': 0} + all_path_success = {} + for path in all_paths: + all_path_success[path] = False + cur_path = path + while cur_path not in disk_info: + disk_info_for_current_path = get_disk_info_by_path(cur_path, client, stdio) + if disk_info_for_current_path: + disk_info.update(disk_info_for_current_path) + all_path_success[path] = True + break + else: + cur_path = os.path.dirname(cur_path) + if overview_ret or all(all_path_success.values()): + return disk_info + + +def prepare_parameters(cluster_config, stdio): + # depends config + env = {} + depend_observer = False + depend_info = {} + ob_servers_conf = {} + root_servers = [] + for comp in ["oceanbase", "oceanbase-ce"]: + ob_zones = {} + if comp in cluster_config.depends: + depend_observer = True + observer_globals = cluster_config.get_depend_config(comp) + ocp_meta_keys = [ + "ocp_meta_tenant", "ocp_meta_db", "ocp_meta_username", "ocp_meta_password", "appname", "cluster_id", "root_password", "ocp_root_password" + ] + for key in ocp_meta_keys: + value = observer_globals.get(key) + if value is not None: + depend_info[key] = value + ob_servers = cluster_config.get_depend_servers(comp) + connect_infos = [] + for ob_server in ob_servers: + ob_servers_conf[ob_server] = ob_server_conf = cluster_config.get_depend_config(comp, ob_server) + connect_infos.append([ob_server.ip, ob_server_conf['mysql_port']]) + zone = ob_server_conf['zone'] + if zone not in ob_zones: + ob_zones[zone] = ob_server + depend_info['connect_infos'] = connect_infos + root_servers = ob_zones.values() + break + for comp in ['obproxy', 'obproxy-ce']: + if comp in cluster_config.depends: + obproxy_servers = cluster_config.get_depend_servers(comp) + obproxy_server = obproxy_servers[0] + obproxy_server_config = cluster_config.get_depend_config(comp, obproxy_server) + depend_info['server_ip'] = obproxy_server.ip + depend_info['mysql_port'] = obproxy_server_config['listen_port'] + break + if 'obagent' in cluster_config.depends: + obagent_servers = cluster_config.get_depend_servers('obagent') + server_addresses = [] + for obagent_server in obagent_servers: + obagent_server_config_without_default = cluster_config.get_depend_config('obagent', obagent_server, with_default=False) + obagent_server_config = cluster_config.get_depend_config('obagent', obagent_server) + username = obagent_server_config['http_basic_auth_user'] + password = 
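`get_mount_path` picks, among the mount points df reported, the longest one occurring inside the target path. Note it is a substring test rather than a path-prefix test, so it assumes df only ever hands back genuine ancestors of the queried path:

```python
def get_mount_path(disk, _path):
    _mount_path = '/'
    for p in disk:
        if p in _path and len(p) > len(_mount_path):
            _mount_path = p
    return _mount_path

disks = {'/': {}, '/data': {}, '/data/log': {}}
print(get_mount_path(disks, '/data/log/ocp-express/log'))  # /data/log
```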
obagent_server_config['http_basic_auth_password'] + if 'obagent_username' not in depend_info: + depend_info['obagent_username'] = username + elif depend_info['obagent_username'] != username: + stdio.error('The http basic auth of obagent is inconsistent') + return + if 'obagent_password' not in depend_info: + depend_info['obagent_password'] = password + elif depend_info['obagent_password'] != password: + stdio.error('The http basic auth of obagent is inconsistent') + return + if obagent_server_config_without_default.get('sql_port'): + sql_port = obagent_server_config['sql_port'] + elif ob_servers_conf.get(obagent_server) and ob_servers_conf[obagent_server].get('mysql_port'): + sql_port = ob_servers_conf[obagent_server]['mysql_port'] + else: + continue + if obagent_server_config_without_default.get('rpc_port'): + svr_port = obagent_server_config['rpc_port'] + elif ob_servers_conf.get(obagent_server) and ob_servers_conf[obagent_server].get('rpc_port'): + svr_port = ob_servers_conf[obagent_server]['rpc_port'] + else: + continue + server_addresses.append({ + "address": obagent_server.ip, + "svrPort": svr_port, + "sqlPort": sql_port, + "withRootServer": obagent_server in root_servers, + "agentMgrPort": obagent_server_config.get('mgragent_http_port', 0), + "agentMonPort": obagent_server_config.get('monagent_http_port', 0) + }) + depend_info['server_addresses'] = server_addresses + + for server in cluster_config.servers: + server_config = deepcopy(cluster_config.get_server_conf_with_default(server)) + original_server_config = cluster_config.get_original_server_conf(server) + missed_keys = get_missing_required_parameters(original_server_config) + if missed_keys: + if 'jdbc_url' in missed_keys and depend_observer: + if depend_info.get('server_ip'): + server_config['jdbc_url'] = 'jdbc:oceanbase://{}:{}/{}'.format(depend_info['server_ip'], depend_info['mysql_port'], depend_info['ocp_meta_db']) + else: + server_config['connect_infos'] = depend_info.get('connect_infos') + server_config['ocp_meta_db'] = depend_info.get('ocp_meta_db') + server_config['jdbc_url'] = '' + if 'jdbc_username' in missed_keys and depend_observer: + server_config['jdbc_username'] = "{}@{}".format(depend_info['ocp_meta_username'], depend_info.get('ocp_meta_tenant', {}).get("tenant_name")) + depends_key_maps = { + "jdbc_password": "ocp_meta_password", + "cluster_name": "appname", + "ob_cluster_id": "cluster_id", + "root_sys_password": "root_password", + "agent_username": "obagent_username", + "agent_password": "obagent_password", + "server_addresses": "server_addresses", + "ocp_root_password": "ocp_root_password", + "ocp_meta_tenant": "ocp_meta_tenant" + } + for key in depends_key_maps: + if key in missed_keys: + if depend_info.get(depends_key_maps[key]) is not None: + server_config[key] = depend_info[depends_key_maps[key]] + env[server] = server_config + return env + + +def start_check(plugin_context, init_check_status=False, work_dir_check=False, work_dir_empty_check=True, strict_check=False, precheck=False, + java_check=True, *args, **kwargs): + def check_pass(item): + status = check_status[server] + if status[item].status == err.CheckStatus.WAIT: + status[item].status = err.CheckStatus.PASS + def check_fail(item, error, suggests=[]): + status = check_status[server][item] + if status.status == err.CheckStatus.WAIT: + status.error = error + status.suggests = suggests + status.status = err.CheckStatus.FAIL + def wait_2_pass(): + status = check_status[server] + for item in status: + check_pass(item) + def alert(item, error, 
suggests=[]): + global success + if strict_check: + success = False + check_fail(item, error, suggests) + stdio.error(error) + else: + stdio.warn(error) + def error(item, _error, suggests=[]): + global success + if plugin_context.dev_mode: + stdio.warn(_error) + else: + success = False + check_fail(item, _error, suggests) + stdio.error(_error) + def critical(item, error, suggests=[]): + global success + success = False + check_fail(item, error, suggests) + stdio.error(error) + + cluster_config = plugin_context.cluster_config + option = plugin_context.options + clients = plugin_context.clients + stdio = plugin_context.stdio + global success + success = True + + check_status = {} + plugin_context.set_variable('start_check_status', check_status) + for server in cluster_config.servers: + check_status[server] = { + 'port': err.CheckStatus(), + 'java': err.CheckStatus(), + 'disk': err.CheckStatus(), + 'mem': err.CheckStatus(), + 'oceanbase version': err.CheckStatus(), + 'obagent version': err.CheckStatus(), + 'admin_passwd': err.CheckStatus(), + } + if work_dir_check: + check_status[server]['dir'] = err.CheckStatus() + if init_check_status: + return plugin_context.return_true(start_check_status=check_status) + + stdio.start_loading('Check before start ocp-express') + env = prepare_parameters(cluster_config, stdio) + if not env: + return plugin_context.return_false() + versions_check = { + "oceanbase version": { + 'comps': ['oceanbase', 'oceanbase-ce'], + 'min_version': Version('4.0') + }, + "obagent version": { + 'comps': ['obagent'], + 'min_version': Version('4.2.1') + } + } + repo_versions = {} + for repository in plugin_context.repositories: + repo_versions[repository.name] = repository.version + + for check_item in versions_check: + for comp in versions_check[check_item]['comps']: + if comp not in cluster_config.depends: + continue + depend_comp_version = repo_versions.get(comp) + if depend_comp_version is None: + stdio.verbose('failed to get {} version, skip version check'.format(comp)) + continue + min_version = versions_check[check_item]['min_version'] + if depend_comp_version < min_version: + critical(check_item, err.EC_OCP_EXPRESS_DEPENDS_COMP_VERSION.format(ocp_express_version=cluster_config.version, comp=comp, comp_version=min_version)) + + server_port = {} + servers_dirs = {} + servers_check_dirs = {} + for server in cluster_config.servers: + client = clients[server] + server_config = env[server] + missed_keys = get_missing_required_parameters(server_config) + if missed_keys: + stdio.error(err.EC_NEED_CONFIG.format(server=server, component=cluster_config.name, miss_keys=missed_keys)) + success = False + continue + home_path = server_config['home_path'] + if not precheck: + remote_pid_path = '%s/run/ocp-express.pid' % home_path + remote_pid = client.execute_command('cat %s' % remote_pid_path).stdout.strip() + if remote_pid: + if client.execute_command('ls /proc/%s' % remote_pid): + stdio.verbose('%s is running, skip' % server) + wait_2_pass() + continue + + if work_dir_check: + ip = server.ip + stdio.verbose('%s dir check' % server) + if ip not in servers_dirs: + servers_dirs[ip] = {} + servers_check_dirs[ip] = {} + dirs = servers_dirs[ip] + check_dirs = servers_check_dirs[ip] + original_server_conf = cluster_config.get_server_conf(server) + + keys = ['home_path', 'log_dir'] + for key in keys: + path = server_config.get(key) + suggests = [err.SUG_CONFIG_CONFLICT_DIR.format(key=key, server=server)] + if path in dirs and dirs[path]: + critical('dir', 
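The four nested helpers at the top of `start_check` implement one severity policy: `alert` only fails the run under strict mode, `error` is softened to a warning in dev mode, and `critical` always fails. Compressed into a single function for review, with plain booleans standing in for `strict_check` and `plugin_context.dev_mode`:

```python
def outcome(level, strict_check=False, dev_mode=False):
    if level == 'alert':
        return 'fail' if strict_check else 'warn'
    if level == 'error':
        return 'warn' if dev_mode else 'fail'
    return 'fail'   # critical: unconditional

for level in ('alert', 'error', 'critical'):
    print(level, outcome(level), outcome(level, strict_check=True, dev_mode=True))
```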
err.EC_CONFIG_CONFLICT_DIR.format(server1=server, path=path, server2=dirs[path]['server'], key=dirs[path]['key']), suggests) + dirs[path] = { + 'server': server, + 'key': key, + } + if key not in original_server_conf: + continue + empty_check = work_dir_empty_check + while True: + if path in check_dirs: + if check_dirs[path] != True: + critical('dir', check_dirs[path], suggests) + break + + if client.execute_command('bash -c "[ -a %s ]"' % path): + is_dir = client.execute_command('[ -d {} ]'.format(path)) + has_write_permission = client.execute_command('[ -w {} ]'.format(path)) + if is_dir and has_write_permission: + if empty_check: + ret = client.execute_command('ls %s' % path) + if not ret or ret.stdout.strip(): + check_dirs[path] = err.EC_FAIL_TO_INIT_PATH.format(server=server, key=key, msg=err.InitDirFailedErrorMessage.NOT_EMPTY.format(path=path)) + else: + check_dirs[path] = True + else: + check_dirs[path] = True + else: + if not is_dir: + check_dirs[path] = err.EC_FAIL_TO_INIT_PATH.format(server=server, key=key, msg=err.InitDirFailedErrorMessage.NOT_DIR.format(path=path)) + else: + check_dirs[path] = err.EC_FAIL_TO_INIT_PATH.format(server=server, key=key, msg=err.InitDirFailedErrorMessage.PERMISSION_DENIED.format(path=path)) + else: + path = os.path.dirname(path) + empty_check = False + + port = server_config['port'] + ip = server.ip + if ip not in server_port: + server_port[ip] = {} + ports = server_port[ip] + if port in server_port[ip]: + critical( + 'port', + err.EC_CONFIG_CONFLICT_PORT.format(server1=server, port=port, server2=ports[port]['server'], + key=ports[port]['key']), + [err.SUG_PORT_CONFLICTS.format()] + ) + continue + ports[port] = { + 'server': server, + 'key': 'port' + } + if get_port_socket_inode(client, port): + critical( + 'port', + err.EC_CONFLICT_PORT.format(server=ip, port=port), + [err.SUG_USE_OTHER_PORT.format()] + ) + continue + check_pass('port') + + # java version check + if java_check: + for server in cluster_config.servers: + client = clients[server] + server_config = env[server] + java_bin = server_config['java_bin'] + client.add_env('PATH', '%s/jre/bin:' % server_config['home_path']) + ret = client.execute_command('{} -version'.format(java_bin)) + if not ret: + critical('java', err.EC_OCP_EXPRESS_JAVA_NOT_FOUND.format(server=server), [err.SUG_OCP_EXPRESS_INSTALL_JAVA_WITH_VERSION.format(version='1.8.0')]) + continue + version_pattern = r'version\s+\"(\d+\.\d+.\d+)' + found = re.search(version_pattern, ret.stdout) or re.search(version_pattern, ret.stderr) + if not found: + error('java', err.EC_OCP_EXPRESS_JAVA_VERSION_ERROR.format(server=server, version='1.8.0'), [err.SUG_OCP_EXPRESS_INSTALL_JAVA_WITH_VERSION.format(version='1.8.0'),]) + continue + java_major_version = found.group(1) + if Version(java_major_version) != Version('1.8.0'): + critical('java', err.EC_OCP_EXPRESS_JAVA_VERSION_ERROR.format(server=server, version='1.8.0'), [err.SUG_OCP_EXPRESS_INSTALL_JAVA_WITH_VERSION.format(version='1.8.0'),]) + continue + + servers_memory = {} + servers_disk = {} + servers_client = {} + ip_servers = {} + + for server in cluster_config.servers: + client = clients[server] + server_config = env[server] + memory_size = parse_size(server_config['memory_size']) + if server_config.get('log_dir'): + log_dir = server_config['log_dir'] + else: + log_dir = os.path.join(server_config['home_path'], 'log') + need_size = parse_size(server_config['logging_file_total_size_cap']) + ip = server.ip + if ip not in servers_client: + servers_client[ip] = client + if ip not in 
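When a configured directory does not exist yet, the dir check above retries on `os.path.dirname(path)` with the emptiness requirement dropped, climbing until something on disk can actually be tested. The traversal on its own:

```python
import os

def nearest_existing(path):
    # walk up until a component of the path exists; that ancestor is what
    # the writability check can meaningfully run against
    while path != '/' and not os.path.exists(path):
        path = os.path.dirname(path)
    return path

print(nearest_existing('/tmp/definitely/not/created/yet'))  # /tmp
```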
servers_memory: + servers_memory[ip] = { + 'need': memory_size, + 'server_num': 1 + } + else: + servers_memory[ip]['need'] += memory_size + servers_memory[ip]['server_num'] += 1 + if ip not in servers_disk: + servers_disk[ip] = {} + if log_dir not in servers_disk[ip]: + servers_disk[ip][log_dir] = need_size + else: + servers_disk[ip][log_dir] += need_size + if ip not in ip_servers: + ip_servers[ip] = [server] + else: + ip_servers[ip].append(server) + # memory check + for ip in servers_memory: + client = servers_client[ip] + memory_needed = servers_memory[ip]['need'] + ret = client.execute_command('cat /proc/meminfo') + if ret: + server_memory_stats = {} + memory_key_map = { + 'MemTotal': 'total', + 'MemFree': 'free', + 'MemAvailable': 'available', + 'Buffers': 'buffers', + 'Cached': 'cached' + } + for key in memory_key_map: + server_memory_stats[memory_key_map[key]] = 0 + + for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout): + if k in memory_key_map: + key = memory_key_map[k] + server_memory_stats[key] = parse_size(str(v)) + mem_suggests = [err.SUG_OCP_EXPRESS_REDUCE_MEM.format()] + if memory_needed * 0.5 > server_memory_stats['available']: + for server in ip_servers[ip]: + error('mem', err.EC_OCP_EXPRESS_NOT_ENOUGH_MEMORY_AVAILABLE.format(ip=ip, available=format_size(server_memory_stats['available']), need=format_size(memory_needed)), suggests=mem_suggests) + elif memory_needed > server_memory_stats['free'] + server_memory_stats['buffers'] + server_memory_stats['cached']: + for server in ip_servers[ip]: + error('mem', err.EC_OCP_EXPRESS_NOT_ENOUGH_MEMORY_CACHED.format(ip=ip, free=format_size(server_memory_stats['free']), cached=format_size(server_memory_stats['buffers'] + server_memory_stats['cached']), need=format_size(memory_needed)), suggests=mem_suggests) + elif memory_needed > server_memory_stats['free']: + for server in ip_servers[ip]: + alert('mem', err.EC_OCP_EXPRESS_NOT_ENOUGH_MEMORY.format(ip=ip, free=format_size(server_memory_stats['free']), need=format_size(memory_needed)), suggests=mem_suggests) + # disk check + for ip in servers_disk: + client = servers_client[ip] + disk_info = get_disk_info(all_paths=servers_disk[ip], client=client, stdio=stdio) + if disk_info: + for path in servers_disk[ip]: + disk_needed = servers_disk[ip][path] + mount_path = get_mount_path(disk_info, path) + if disk_needed > disk_info[mount_path]['avail']: + for server in ip_servers[ip]: + error('disk', err.EC_OCP_EXPRESS_NOT_ENOUGH_DISK.format(ip=ip, disk=mount_path, need=format_size(disk_needed), avail=format_size(disk_info[mount_path]['avail'])), suggests=[err.SUG_OCP_EXPRESS_REDUCE_DISK.format()]) + else: + stdio.warn(err.WC_OCP_EXPRESS_FAILED_TO_GET_DISK_INFO.format(ip)) + + # admin_passwd check + for server in cluster_config.servers: + server_config = env[server] + admin_passwd = server_config.get('admin_passwd') + if not admin_passwd or not password_check(admin_passwd): + error('admin_passwd', err.EC_COMPONENT_PASSWD_ERROR.format(ip=server.ip, component='ocp-express', key='admin_passwd', rule='Must be 8 to 32 characters in length, containing at least 3 types from digits, lowercase letters, uppercase letters and the following special characters: ~!@#%^&*_-+=|(){{}}[]:;,.?/'), suggests=[err.SUG_OCP_EXPRESS_EDIT_ADMIN_PASSWD.format()]) + + plugin_context.set_variable('start_env', env) + + for server in cluster_config.servers: + wait_2_pass() + + if success: + stdio.stop_loading('succeed') + plugin_context.return_true() + else: + stdio.stop_loading('fail') diff --git 
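The memory check scrapes `/proc/meminfo` and applies three thresholds in order: half the demand against MemAvailable is an error, the full demand against free+buffers+cached is an error, and the full demand against MemFree alone is only an alert. The scrape itself, run over a canned sample and simplified to kB (which meminfo always reports; the plugin funnels each value through `parse_size` instead):

```python
import re

sample = """MemTotal:       16384 kB
MemFree:         2048 kB
MemAvailable:    8192 kB
Buffers:          512 kB
Cached:          1024 kB
"""
stats = {k: int(v) << 10 for k, v in re.findall(r'(\w+)\s*:\s*(\d+)\s*kB', sample)}
print(stats['MemAvailable'])  # 8388608
```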
a/plugins/ocp-server/4.2.1/bootstrap.py b/plugins/ocp-server/4.2.1/bootstrap.py index 3b88250..dccb653 100644 --- a/plugins/ocp-server/4.2.1/bootstrap.py +++ b/plugins/ocp-server/4.2.1/bootstrap.py @@ -20,6 +20,16 @@ from __future__ import absolute_import, division, print_function +import os -def bootstrap(plugin_context, *args, **kwargs): - return True + +def bootstrap(plugin_context, start_env=None, *args, **kwargs): + if not start_env: + raise Exception("start env is needed") + clients = plugin_context.clients + for server in start_env: + server_config = start_env[server] + bootstrap_flag = os.path.join(server_config['home_path'], '.bootstrapped') + client = clients[server] + client.execute_command('touch %s' % bootstrap_flag) + return plugin_context.return_true() diff --git a/plugins/ocp-server/4.2.1/connect.py b/plugins/ocp-server/4.2.1/connect.py index 232f9b1..96299ef 100644 --- a/plugins/ocp-server/4.2.1/connect.py +++ b/plugins/ocp-server/4.2.1/connect.py @@ -39,16 +39,17 @@ def __init__(self, code, content): def __bool__(self): return self.code == 200 - def __init__(self, ip, port, username=None, password=None): + def __init__(self, ip, port, username=None, password=None, component_name=None, base_url=None): + self.auth = None self.ip = ip self.port = port self.username = username self.password = password - self.url_prefix = "http://{ip}:{port}/".format(ip=self.ip, port=self.port) + self.url_prefix = "http://{ip}:{port}".format(ip=self.ip, port=self.port) if not base_url else base_url.strip('/') + self.component_name = component_name if self.username: self.auth = HTTPBasicAuth(username=username, password=password) - else: - self.auth = None + def status(self, stdio=None): ocp_status_ok = False @@ -58,7 +59,7 @@ def status(self, stdio=None): while time.time() - now < check_wait_time: stdio.verbose("query ocp to check...") count += 1 - resp = self._request('GET', 'api/v2/time', stdio=stdio) + resp = self._request('GET', '/api/v2/time', stdio=stdio) try: if resp.code == 200 or count >= 10: ocp_status_ok = True @@ -74,29 +75,59 @@ def status(self, stdio=None): return True def info(self, stdio=None): - resp = self._request('GET', 'api/v2/info', stdio=stdio) + resp = self._request('GET', '/api/v2/info', stdio=stdio) + if resp.code == 200: + return resp.content + + def take_over_precheck(self, data, stdio=None): + resp = self._request('POST', '/api/v2/ob/clusters/takeOverPreCheck', data=data, stdio=stdio) + if resp.code == 200: + return resp.content + + def get_host_types(self, stdio=None): + resp = self._request('GET', '/api/v2/compute/hostTypes', stdio=stdio) if resp.code == 200: return resp.content - def task_over_precheck(self, data, stdio=None): - resp = self._request('POST', 'api/v2/ob/clusters/takeOverPreCheck', data=data, stdio=stdio) + def create_host_type(self, data, stdio=None): + resp = self._request('POST', '/api/v2/compute/hostTypes', data=data, stdio=stdio) if resp.code == 200: return resp.content + else: + msg = resp.content + if 'error' in resp.content and 'message' in resp.content['error']: + msg = resp.content['error']['message'] + raise Exception("failed to create host type: %s" % msg) - def compute_host_types(self, data, stdio=None): - resp = self._request('POST', 'api/v2/compute/hostTypes', data=data, stdio=stdio) + def list_credentials(self, stdio=None): + resp = self._request('GET', '/api/v2/profiles/me/credentials', stdio=stdio) if resp.code == 200: return resp.content + else: + msg = resp.content + if 'error' in resp.content and 'message' in 
resp.content['error']: + msg = resp.content['error']['message'] + raise Exception("failed to query credentials: %s" % msg) - def profiles_credentials(self, data, stdio=None): - resp = self._request('POST', 'api/v2/profiles/me/credentials', data=data, stdio=stdio) + def create_credential(self, data, stdio=None): + resp = self._request('POST', '/api/v2/profiles/me/credentials', data=data, stdio=stdio) if resp.code == 200: return resp.content + else: + msg = resp.content + if 'error' in resp.content and 'message' in resp.content['error']: + msg = resp.content['error']['message'] + raise Exception("failed to create credential: %s" % msg) - def task_over(self, data, stdio=None): - resp = self._request('POST', 'api/v2/ob/clusters/takeOver', data=data, stdio=stdio) + def take_over(self, data, stdio=None): + resp = self._request('POST', '/api/v2/ob/clusters/takeOver', data=data, stdio=stdio) if resp.code == 200: return resp.content + else: + msg = resp.content + if 'error' in resp.content and 'message' in resp.content['error']: + msg = resp.content['error']['message'] + raise Exception("failed to do take over: %s" % msg) def _request(self, method, api, data=None, retry=5, stdio=None): url = self.url_prefix + api @@ -116,7 +147,7 @@ def _request(self, method, api, data=None, retry=5, stdio=None): return_code = 500 content = str(e) if return_code != 200: - stdio.verbose("request ocp-server failed: %s" % content) + stdio.verbose("request %s failed: %s" % (self.component_name, content)) try: content = json.loads(content.decode()) except: @@ -131,20 +162,27 @@ def return_true(**kwargs): return plugin_context.return_true(**kwargs) cluster_config = plugin_context.cluster_config + options = plugin_context.options stdio = plugin_context.stdio - if target_server: + address = getattr(options, 'address', '') + user = getattr(options, 'user', '') + password = getattr(options, 'password', '') + servers = cluster_config.servers + if address: + stdio.start_loading('Connect to {}'.format(address)) + elif target_server: servers = [target_server] - stdio.start_loading('Connect to ocp-server ({})'.format(target_server)) + stdio.start_loading('Connect to {} ({})'.format(cluster_config.name, target_server)) else: servers = cluster_config.servers - stdio.start_loading('Connect to ocp-server') + stdio.start_loading('Connect to %s' % cluster_config.name) cursors = {} for server in servers: config = cluster_config.get_server_conf(server) - username = 'admin' - password = config['admin_password'] - stdio.verbose('connect ocp-server ({}:{} by user {})'.format(server.ip, config['port'], username)) - cursor = OcpServerCursor(ip=server.ip, port=config['port'], username=username, password=password) + username = 'admin' if not address else user + password = config['admin_password'] if not address else password + stdio.verbose('connect {} ({}:{} by user {})'.format(cluster_config.name, server.ip, config['port'], username)) + cursor = OcpServerCursor(ip=server.ip, port=config['port'], username=username, password=password, component_name=cluster_config.name, base_url=address) if cursor.status(stdio=stdio): cursors[server] = cursor if not cursors: diff --git a/plugins/ocp-server/4.2.1/destroy.py b/plugins/ocp-server/4.2.1/destroy.py index 0483b25..93ccc8c 100644 --- a/plugins/ocp-server/4.2.1/destroy.py +++ b/plugins/ocp-server/4.2.1/destroy.py @@ -20,12 +20,22 @@ from __future__ import absolute_import, division, print_function +import re import _errno as err +from tool import Cursor global_ret = True def destroy(plugin_context, 
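Every new REST helper in connect.py unwraps failures the same way, preferring the server's `error.message` over the raw body before raising. That idiom in isolation, with `resp_content` standing in for `resp.content`:

```python
def error_message(resp_content):
    msg = resp_content
    if 'error' in resp_content and 'message' in resp_content['error']:
        msg = resp_content['error']['message']
    return msg

print(error_message({'error': {'message': 'cluster already taken over'}}))
# cluster already taken over
```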
*args, **kwargs): + + def clean_database(cursor, database): + ret = cursor.execute("drop database {0}".format(database)) + if not ret: + global global_ret + global_ret = False + cursor.execute("create database if not exists {0}".format(database)) + def clean(path): client = clients[server] ret = client.execute_command('sudo rm -fr %s/*' % path, timeout=-1) @@ -40,7 +50,10 @@ def clean(path): clients = plugin_context.clients stdio = plugin_context.stdio global global_ret - stdio.start_loading('ocp-server work dir cleaning') + removed_components = cluster_config.get_deploy_removed_components() + clean_data = (not cluster_config.depends or len(removed_components) > 0 and len(removed_components.intersection({"oceanbase", "oceanbase-ce"})) == 0) and stdio.confirm("Would you like to clean meta data") + + stdio.start_loading('ocp-server cleaning') for server in cluster_config.servers: server_config = cluster_config.get_server_conf(server) stdio.verbose('%s work path cleaning', server) @@ -51,13 +64,28 @@ def clean(path): path = server_config.get(key) if path: clean(path) - if global_ret: - # if ocp depends on oceanbase, then clean tenant info - if 'oceanbase-ce' in cluster_config.depends or 'oceanbase' in cluster_config.depends: - cluster_config.update_component_attr("meta_tenant", "", save=True) - cluster_config.update_component_attr("monitor_tenant", "", save=True) + + if clean_data: + jdbc_host, jdbc_port = "", 0 + matched = re.match(r"^jdbc:\S+://(\S+?)(|:\d+)/(\S+)", cluster_config.get_global_conf_with_default()['jdbc_url']) + if matched: + jdbc_host = matched.group(1) + jdbc_port = matched.group(2)[1:] else: - stdio.warn('OCP successfully destroyed, please check and delete the tenant manually') + stdio.error("failed to parse jdbc_url") + global_conf = cluster_config.get_global_conf_with_default() + stdio.verbose("clean metadb") + try: + meta_cursor = Cursor(jdbc_host, jdbc_port, user=global_conf['ocp_meta_username'], tenant=global_conf['ocp_meta_tenant']['tenant_name'], password=global_conf['ocp_meta_password'], stdio=stdio) + clean_database(meta_cursor, global_conf['ocp_meta_db']) + stdio.verbose("clean monitordb") + monitor_cursor = Cursor(jdbc_host, jdbc_port, user=global_conf['ocp_monitor_username'], tenant=global_conf['ocp_monitor_tenant']['tenant_name'], password=global_conf['ocp_monitor_password'], stdio=stdio) + clean_database(monitor_cursor, global_conf['ocp_monitor_db']) + except Exception: + stdio.error("failed to clean meta and monitor data") + global_ret = False + + if global_ret: stdio.stop_loading('succeed') return plugin_context.return_true() else: diff --git a/plugins/ocp-server/4.2.1/display.py b/plugins/ocp-server/4.2.1/display.py index 5733c4e..dc723eb 100644 --- a/plugins/ocp-server/4.2.1/display.py +++ b/plugins/ocp-server/4.2.1/display.py @@ -120,7 +120,7 @@ def display(plugin_context, cursor, *args, **kwargs): 'url': url, 'status': 'active' if api_cursor and api_cursor.status(stdio) else 'inactive' }) - stdio.print_list(results, ['url', 'username', 'password', 'status'], lambda x: [x['url'], 'admin', server_config['admin_password'], x['status']], title='ocp-server') + stdio.print_list(results, ['url', 'username', 'password', 'status'], lambda x: [x['url'], 'admin', server_config['admin_password'], x['status']], title='%s' % cluster_config.name) active_result = [r for r in results if r['status'] == 'active'] info_dict = active_result[0] if len(active_result) > 0 else None if info_dict is not None: diff --git a/plugins/ocp-server/4.2.1/file_map.yaml 
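The `clean_data` guard in destroy.py leans on operator precedence (`and` binds tighter than `or`): metadata is dropped only when OCP's metadb survives the operation, meaning either there are no depends at all, or components are being removed without touching oceanbase/oceanbase-ce, and in both cases only after the user confirms. A truth-table sketch of the first half of that condition:

```python
def should_clean(depends, removed):
    return (not depends
            or (len(removed) > 0
                and len(removed & {"oceanbase", "oceanbase-ce"}) == 0))

print(should_clean([], set()))                           # True: standalone metadb
print(should_clean(['oceanbase-ce'], {'ocp-server'}))    # True: ob survives removal
print(should_clean(['oceanbase-ce'], {'oceanbase-ce'}))  # False: metadb goes with ob
```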
b/plugins/ocp-server/4.2.1/file_map.yaml index c2e8fc3..fb656d8 100644 --- a/plugins/ocp-server/4.2.1/file_map.yaml +++ b/plugins/ocp-server/4.2.1/file_map.yaml @@ -1,4 +1,5 @@ - src_path: ./home/admin/ocp-server/lib/ocp-server-ce-$version-$release_simple.jar target_path: lib/ocp-server.jar - type: file + type: jar + require: openjdk-jre install_method: cp diff --git a/plugins/ocp-server/4.2.1/generate_config.py b/plugins/ocp-server/4.2.1/generate_config.py index 6cd6327..d2ec372 100644 --- a/plugins/ocp-server/4.2.1/generate_config.py +++ b/plugins/ocp-server/4.2.1/generate_config.py @@ -19,11 +19,14 @@ from __future__ import absolute_import, division, print_function +from collections import defaultdict +from tool import ConfigUtil -def generate_config(plugin_context, auto_depend=False, generate_config_mini=False, return_generate_keys=False, *args, **kwargs): + +def generate_config(plugin_context, auto_depend=False, generate_config_mini=False, return_generate_keys=False, *args, **kwargs): if return_generate_keys: - return plugin_context.return_true(generate_keys=['memory_size', 'log_dir', 'logging_file_max_history']) + return plugin_context.return_true(generate_keys=['memory_size', 'log_dir', 'logging_file_max_history', 'admin_password']) cluster_config = plugin_context.cluster_config stdio = plugin_context.stdio @@ -31,7 +34,8 @@ def generate_config(plugin_context, auto_depend=False, generate_config_mini=Fal generate_configs = {'global': {}} plugin_context.set_variable('generate_configs', generate_configs) stdio.start_loading('Generate ocp server configuration') - min_memory_size = '752M' + min_memory_size = '2G' + generate_random_password(cluster_config) if auto_depend: for comps in depend_comps: @@ -40,25 +44,25 @@ def generate_config(plugin_context, auto_depend=False, generate_config_mini=Fal break global_config = cluster_config.get_global_conf() if generate_config_mini: - if 'memory_size' not in global_config: - cluster_config.update_global_conf('memory_size', min_memory_size) + stdio.error('Deploying ocp-server is not supported in demo mode.') + return plugin_context.return_false() - auto_set_memory = False if 'memory_size' not in global_config: - for server in cluster_config.servers: - server_config = cluster_config.get_server_conf(server) - if 'memory_size' not in server_config: - auto_set_memory = True - if auto_set_memory: - observer_num = 0 - for comp in ['oceanbase', 'oceanbase-ce']: - if comp in cluster_config.depends: - observer_num = len(cluster_config.get_depend_servers(comp)) - if not observer_num: - stdio.warn('The component oceanbase/oceanbase-ce is not in the depends, the memory size cannot be calculated, and a fixed value of {} is used'.format(min_memory_size)) - cluster_config.update_global_conf('memory_size', min_memory_size) - else: - cluster_config.update_global_conf('memory_size', '%dM' % (512 + (observer_num + 3) * 60)) + cluster_config.update_global_conf('memory_size', min_memory_size) + # write required memory into resource namespace + resource = plugin_context.namespace.get_variable("required_resource") + if resource is None: + resource = defaultdict(lambda: defaultdict(dict)) + plugin_context.namespace.set_variable("required_resource", resource) + for server in cluster_config.servers: + resource[cluster_config.name]['memory'][server.ip] = cluster_config.get_global_conf_with_default()['memory_size'] stdio.stop_loading('succeed') - return plugin_context.return_true() \ No newline at end of file + return plugin_context.return_true() + + +def 
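generate_config now publishes its memory demand to other components through `plugin_context.namespace`, using a two-level defaultdict so `resource[component]['memory'][ip]` can be assigned without pre-creating the intermediate levels:

```python
from collections import defaultdict

resource = defaultdict(lambda: defaultdict(dict))
resource['ocp-server']['memory']['10.0.0.1'] = '2G'   # no KeyError on the way down
print(resource['ocp-server']['memory'])               # {'10.0.0.1': '2G'}
```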
generate_random_password(cluster_config): + add_components = cluster_config.get_deploy_added_components() + global_config = cluster_config.get_original_global_conf() + if cluster_config.name in add_components and 'admin_password' not in global_config: + cluster_config.update_global_conf('admin_password', ConfigUtil.get_random_pwd_by_rule(punctuation_length=2, punctuation_chars='~^*{}[]_-+'), False) \ No newline at end of file diff --git a/plugins/ocp-server/4.2.1/init.py b/plugins/ocp-server/4.2.1/init.py index 313e390..df81d30 100644 --- a/plugins/ocp-server/4.2.1/init.py +++ b/plugins/ocp-server/4.2.1/init.py @@ -25,6 +25,7 @@ import _errno as err from const import CONST_OBD_HOME +from ssh import LocalClient OBD_INSTALL_PRE = os.environ.get('OBD_INSTALL_PRE', '/') @@ -55,6 +56,16 @@ def _ocp_lib(client, home_path, soft_dir='', stdio=None): client.put_file(rpm, os.path.join(home_path, 'ocp-server/lib/', name)) if soft_dir: client.put_file(rpm, os.path.join(soft_dir, name)) + max_ob_pkg = LocalClient.execute_command('find %s/mirror/ -type f -name "oceanbase-*.rpm" -exec readlink -f {} \; | grep -v "oceanbase.*libs" | grep -v "oceanbase.*utils" | sort -V | tail -n 1' % OBD_HOME, stdio=stdio).stdout.strip() + max_odp_pkg = LocalClient.execute_command('find %s/mirror/ -type f -name "obproxy-*.rpm" -exec readlink -f {} \; | sort -V | tail -n 1' % OBD_HOME, stdio=stdio).stdout.strip() + name = os.path.basename(max_ob_pkg) + client.put_file(max_ob_pkg, os.path.join(home_path, 'ocp-server/lib/', name)) + if soft_dir: + client.put_file(max_ob_pkg, os.path.join(soft_dir, name)) + name = os.path.basename(max_odp_pkg) + client.put_file(max_odp_pkg, os.path.join(home_path, 'ocp-server/lib/', name)) + if soft_dir: + client.put_file(max_odp_pkg, os.path.join(soft_dir, name)) def init(plugin_context, upgrade=False, *args, **kwargs): @@ -75,8 +86,9 @@ def init(plugin_context, upgrade=False, *args, **kwargs): plugin_context.return_true() return - stdio.start_loading('Initializes ocp-server work home') + stdio.start_loading('Initializes %s work home' % cluster_config.name) servers_dirs = {} + cp_lib = 0 for server in cluster_config.servers: server_config = cluster_config.get_server_conf(server) client = clients[server] @@ -150,7 +162,9 @@ def init(plugin_context, upgrade=False, *args, **kwargs): continue link_path = os.path.join(home_path, 'log') client.execute_command("if [ ! '%s' -ef '%s' ]; then ln -sf %s %s; fi" % (log_dir, link_path, log_dir, link_path)) - _ocp_lib(client, home_path, soft_dir, stdio) + if cp_lib < 1: + _ocp_lib(client, home_path, soft_dir, stdio) + cp_lib += 1 if launch_user: res_home = client.execute_command("sudo chown -R %s %s" % (launch_user, home_path)) res_log = client.execute_command("sudo chown -R %s %s" % (launch_user, log_dir)) diff --git a/plugins/ocp-server/4.2.1/parameter.yaml b/plugins/ocp-server/4.2.1/parameter.yaml index 702f657..1f353ab 100644 --- a/plugins/ocp-server/4.2.1/parameter.yaml +++ b/plugins/ocp-server/4.2.1/parameter.yaml @@ -11,7 +11,7 @@ require: true essential: true type: INT - default: 8180 + default: 8080 need_restart: true description_en: the port of ocp server. 
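`_ocp_lib` in init.py now also ships the newest local oceanbase and obproxy rpms, selected with `sort -V | tail -n 1` (GNU version sort). An equivalent pure-Python key for the common `name-version-release` filename pattern, illustrative rather than the plugin's code (true `sort -V` has extra tie-breaking rules for mixed alphanumerics):

```python
import re

def version_key(path):
    return [int(x) for x in re.findall(r'\d+', path)]

rpms = ['oceanbase-ce-4.2.1-1.rpm', 'oceanbase-ce-4.10.0-1.rpm', 'oceanbase-ce-4.9.2-5.rpm']
print(max(rpms, key=version_key))   # oceanbase-ce-4.10.0-1.rpm
```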
description_local: OCP server使用的端口 @@ -316,14 +316,14 @@ require: false type: OB_USER default: meta - need_redeploy: true + need_restart: true description_en: The user name for ocp meta db description_local: ocp server的元数据库使用的用户名 - name: ocp_meta_password require: true type: STRING default: oceanbase - need_redeploy: true + need_restart: true description_en: The password for ocp meta db description_local: ocp server的元数据库使用的密码 - name: ocp_meta_db @@ -337,13 +337,14 @@ require: false type: OB_USER default: monitor_user + need_restart: true description_en: The username for obagent monitor user description_local: obagent 监控用户的用户名 - name: ocp_monitor_password require: false type: STRING default: oceanbase - need_redeploy: true + need_restart: true description_en: The password for obagent monitor password description_local: obagent 监控用户的密码 - name: ocp_monitor_db @@ -358,6 +359,5 @@ type: WEB_URL default: '' need_restart: true - need_redeploy: true description_en: The url for ocp server description_local: ocp server的连接串 diff --git a/plugins/ocp-server/4.2.1/requirement.yaml b/plugins/ocp-server/4.2.1/requirement.yaml new file mode 100644 index 0000000..1ecb7de --- /dev/null +++ b/plugins/ocp-server/4.2.1/requirement.yaml @@ -0,0 +1,3 @@ +openjdk-jre: + min_version: 1.8.0_161 + max_version: 1.8.1 diff --git a/plugins/ocp-server/4.2.1/restart.py b/plugins/ocp-server/4.2.1/restart.py index 19e2a80..d164b20 100644 --- a/plugins/ocp-server/4.2.1/restart.py +++ b/plugins/ocp-server/4.2.1/restart.py @@ -84,7 +84,7 @@ def call_plugin(self, plugin, **kwargs): def connect(self, cluster_config): if self.cursors is None: - self.sub_io.start_loading('Connect to ocp express') + self.sub_io.start_loading('Connect to %s' % cluster_config.name) ret = self.call_plugin(self.connect_plugin, cluster_config=cluster_config) if not ret: self.sub_io.stop_loading('fail') @@ -126,7 +126,7 @@ def restart(self): if not self.call_plugin(self.start_check_plugin, clients=clients, cluster_config=cluster_config): self.stdio.stop_loading('stop_loading', 'fail') return False - if not self.call_plugin(self.start_plugin, clients=clients, cluster_config=cluster_config, local_home_path=self.local_home_path, need_bootstrap=need_bootstrap, repository_dir_map=self.repository_dir_map): + if not self.call_plugin(self.start_plugin, source_option='restart', clients=clients, cluster_config=cluster_config, local_home_path=self.local_home_path, need_bootstrap=need_bootstrap, repository_dir_map=self.repository_dir_map): self.rollback() self.stdio.stop_loading('stop_loading', 'fail') return False diff --git a/plugins/ocp-server/4.2.1/scale_in_check.py b/plugins/ocp-server/4.2.1/scale_in_check.py index b2d9e06..a0d222e 100644 --- a/plugins/ocp-server/4.2.1/scale_in_check.py +++ b/plugins/ocp-server/4.2.1/scale_in_check.py @@ -24,5 +24,5 @@ def scale_in_check(plugin_context, *args, **kwargs): cluster_config = plugin_context.cluster_config stdio = plugin_context.stdio - stdio.error("Not support component `%s`".format(cluster_config.name)) + stdio.error("Not support component {}".format(cluster_config.name)) return plugin_context.return_false() diff --git a/plugins/ocp-server/4.2.1/start.py b/plugins/ocp-server/4.2.1/start.py index 97626e0..790c20f 100644 --- a/plugins/ocp-server/4.2.1/start.py +++ b/plugins/ocp-server/4.2.1/start.py @@ -29,11 +29,8 @@ from const import CONST_OBD_HOME from optparse import Values -from tool import Cursor, FileUtil, YamlLoader -from _rpm import Version -from _plugin import PluginManager -from _errno import 
EC_OBSERVER_CAN_NOT_MIGRATE_IN -from _deploy import DeployStatus +from tool import Cursor +from _types import Capacity, CapacityWithB OBD_INSTALL_PRE = os.environ.get('OBD_INSTALL_PRE', '/') @@ -93,34 +90,6 @@ } -def parse_size(size): - _bytes = 0 - if isinstance(size, str): - size = size.strip() - if not isinstance(size, str) or size.isdigit(): - _bytes = int(size) - else: - units = {"B": 1, "K": 1 << 10, "M": 1 << 20, "G": 1 << 30, "T": 1 << 40} - match = re.match(r'^(0|[1-9][0-9]*)\s*([B,K,M,G,T])$', size.upper()) - _bytes = int(match.group(1)) * units[match.group(2)] - return _bytes - - -def format_size(size, precision=1): - units = ['B', 'K', 'M', 'G', 'T', 'P'] - idx = 0 - if precision: - div = 1024.0 - format = '%.' + str(precision) + 'f%s' - else: - div = 1024 - format = '%d%s' - while idx < 5 and size >= 1024: - size /= 1024.0 - idx += 1 - return format % (size, units[idx]) - - def exec_sql_in_tenant(sql, cursor, tenant, password, mode, retries=10, args=None): user = 'SYS' if mode == 'oracle' else 'root' tenant_cursor = cursor.new_cursor(tenant=tenant, user=user, password=password) @@ -169,7 +138,6 @@ def get_ocp_depend_config(cluster_config, stdio): depend_observer = False depend_info = {} ob_servers_conf = {} - root_servers = [] for comp in ["oceanbase", "oceanbase-ce"]: ob_zones = {} if comp in cluster_config.depends: @@ -180,7 +148,14 @@ def get_ocp_depend_config(cluster_config, stdio): if 'server_ip' not in depend_info: depend_info['server_ip'] = ob_server.ip depend_info['mysql_port'] = ob_server_conf['mysql_port'] - depend_info['root_password'] = ob_server_conf['root_password'] + depend_info['meta_tenant'] = ob_server_conf['ocp_meta_tenant']['tenant_name'] + depend_info['meta_user'] = ob_server_conf['ocp_meta_username'] + depend_info['meta_password'] = ob_server_conf['ocp_meta_password'] + depend_info['meta_db'] = ob_server_conf['ocp_meta_db'] + depend_info['monitor_tenant'] = ob_server_conf['ocp_monitor_tenant']['tenant_name'] + depend_info['monitor_user'] = ob_server_conf['ocp_monitor_username'] + depend_info['monitor_password'] = ob_server_conf['ocp_monitor_password'] + depend_info['monitor_db'] = ob_server_conf['ocp_monitor_db'] zone = ob_server_conf['zone'] if zone not in ob_zones: ob_zones[zone] = ob_server @@ -195,23 +170,26 @@ def get_ocp_depend_config(cluster_config, stdio): break for server in cluster_config.servers: - server_config = deepcopy(cluster_config.get_server_conf_with_default(server)) - original_server_config = cluster_config.get_original_server_conf(server) + default_server_config = deepcopy(cluster_config.get_server_conf_with_default(server)) + server_config = deepcopy(cluster_config.get_server_conf(server)) + original_server_config = cluster_config.get_original_server_conf_with_global(server) missed_keys = get_missing_required_parameters(original_server_config) if missed_keys: if 'jdbc_url' in missed_keys and depend_observer: - server_config['jdbc_url'] = 'jdbc:oceanbase://{}:{}/{}'.format(depend_info['server_ip'], - depend_info['mysql_port'], - server_config['ocp_meta_db']) - server_config['jdbc_username'] = '%s@%s' % ( - server_config['ocp_meta_username'], server_config['ocp_meta_tenant']['tenant_name']) - server_config['jdbc_password'] = server_config['ocp_meta_password'] - server_config['root_password'] = depend_info.get('root_password', '') - env[server] = server_config + default_server_config['jdbc_url'] = 'jdbc:oceanbase://{}:{}/{}'.format(depend_info['server_ip'], depend_info['mysql_port'], depend_info['meta_db'] if not 
original_server_config.get('ocp_meta_db', None) else original_server_config['ocp_meta_db']) if not original_server_config.get('jdbc_url', None) else original_server_config['jdbc_url'] + default_server_config['ocp_meta_username'] = depend_info['meta_user'] if not original_server_config.get('ocp_meta_username', None) else original_server_config['ocp_meta_username'] + default_server_config['ocp_meta_tenant']['tenant_name'] = depend_info['meta_tenant'] if not original_server_config.get('ocp_meta_tenant', None) else original_server_config['ocp_meta_tenant']['tenant_name'] + default_server_config['ocp_meta_password'] = depend_info['meta_password'] if not original_server_config.get('ocp_meta_password', None) else original_server_config['ocp_meta_password'] + default_server_config['ocp_meta_db'] = depend_info['meta_db'] if not original_server_config.get('ocp_meta_db', None) else original_server_config['ocp_meta_db'] + default_server_config['ocp_monitor_username'] = depend_info['monitor_user'] if not original_server_config.get('ocp_monitor_username', None) else original_server_config['ocp_monitor_username'] + default_server_config['ocp_monitor_tenant']['tenant_name'] = depend_info['monitor_tenant'] if not original_server_config.get('ocp_monitor_tenant', None) else original_server_config['ocp_monitor_tenant']['tenant_name'] + default_server_config['ocp_monitor_password'] = depend_info['monitor_password'] if not original_server_config.get('ocp_monitor_password', None) else original_server_config['ocp_monitor_password'] + default_server_config['ocp_monitor_db'] = depend_info['monitor_db'] if not original_server_config.get('ocp_monitor_db', None) else original_server_config['ocp_monitor_db'] + env[server] = default_server_config return env -def start(plugin_context, start_env=None, cursor='', sys_cursor1='', without_parameter=False, *args, **kwargs): +def start(plugin_context, start_env=None, source_option='start', without_parameter=False, *args, **kwargs): def get_option(key, default=''): value = getattr(options, key, default) if not value: @@ -223,7 +201,7 @@ def get_parsed_option(key, default=''): if value is None: return value try: - parsed_value = parse_size(value) + parsed_value = Capacity(value).btyes except: stdio.exception("") raise Exception("Invalid option {}: {}".format(key, value)) @@ -233,121 +211,55 @@ def error(*arg, **kwargs): stdio.error(*arg, **kwargs) stdio.stop_loading('fail') - def _ocp_lib(client, home_path, soft_dir='', stdio=None): - stdio.verbose('cp rpm & pos') - OBD_HOME = os.path.join(os.environ.get(CONST_OBD_HOME, os.getenv('HOME')), '.obd') - for rpm in glob(os.path.join(OBD_HOME, 'mirror/local/*ocp-agent-*.rpm')): - name = os.path.basename(rpm) - client.put_file(rpm, os.path.join(home_path, 'ocp-server/lib/', name)) - if soft_dir: - client.put_file(rpm, os.path.join(soft_dir, name)) - def start_cluster(times=0): - jdbc_host = jdbc_port = jdbc_url = jdbc_username = jdbc_password = jdbc_public_key = cursor = monitor_user = monitor_tenant = monitor_memory_size = monitor_max_cpu = monitor_password = monitor_db = tenant_plugin = '' - for server in cluster_config.servers: - server_config = start_env[server] - # check meta db connect before start - jdbc_url = server_config['jdbc_url'] - jdbc_username = server_config['jdbc_username'] - jdbc_password = server_config['jdbc_password'] - root_password = server_config.get('root_password', '') - cursor = get_option('metadb_cursor', '') - cursor = kwargs.get('metadb_cursor', '') if cursor == '' else cursor - matched = 
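Every assignment in that hunk repeats one pattern: keep the user's explicit value when present (falsy counts as absent), otherwise take the depended-on component's value. Factored out here purely for readability; the plugin inlines the ternary each time:

```python
def pick(original, key, depend_value):
    # mirrors: depend_value if not original.get(key, None) else original[key]
    return depend_value if not original.get(key) else original[key]

conf = {'ocp_meta_username': 'meta', 'ocp_meta_password': ''}
print(pick(conf, 'ocp_meta_username', 'root'))       # meta
print(pick(conf, 'ocp_meta_password', 'oceanbase'))  # oceanbase: '' treated as unset
```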
re.match(r"^jdbc:\S+://(\S+?)(|:\d+)/(\S+)", jdbc_url) - stdio.verbose('metadb connect check') - if matched: - jdbc_host = matched.group(1) - jdbc_port = matched.group(2)[1:] - jdbc_database = matched.group(3) - password = root_password if root_password else jdbc_password - retries = 10 - while not cursor and retries and not cluster_config.get_component_attr("meta_tenant"): - try: - retries -= 1 - time.sleep(2) - cursor = Cursor(ip=jdbc_host, port=jdbc_port, user='root@sys', password=password, stdio=stdio) - except: - pass + jdbc_host = jdbc_port = jdbc_url = jdbc_username = jdbc_password = jdbc_public_key = meta_user = meta_tenant = meta_password = monitor_user = monitor_tenant = monitor_password = monitor_db = '' + server_config = start_env[cluster_config.servers[0]] + # check meta db connect before start + jdbc_url = server_config['jdbc_url'] + jdbc_username = "{0}@{1}".format(server_config['ocp_meta_username'], server_config['ocp_meta_tenant']['tenant_name']) + jdbc_password = server_config['ocp_meta_password'] + meta_user = server_config['ocp_meta_username'] + meta_tenant = server_config['ocp_meta_tenant']['tenant_name'] + meta_password = server_config['ocp_meta_password'] + monitor_user = server_config['ocp_monitor_username'] + monitor_tenant = server_config['ocp_monitor_tenant']['tenant_name'] + monitor_password = server_config['ocp_monitor_password'] + monitor_db = server_config['ocp_monitor_db'] + + matched = re.match(r"^jdbc:\S+://(\S+?)(|:\d+)/(\S+)", jdbc_url) + stdio.verbose('metadb connect check') + if matched: + jdbc_host = matched.group(1) + jdbc_port = matched.group(2)[1:] + else: + stdio.error("jdbc_url is not valid") + return False global_config = cluster_config.get_global_conf() site_url = global_config.get('ocp_site_url', '') soft_dir = global_config.get('soft_dir', '') - meta_user = global_config.get('ocp_meta_username', 'root') - meta_tenant = global_config.get('ocp_meta_tenant')['tenant_name'] - meta_max_cpu = global_config['ocp_meta_tenant'].get('max_cpu', 2) - meta_memory_size = global_config['ocp_meta_tenant'].get('memory_size', '2G') - meta_password = global_config.get('ocp_meta_password', '') - meta_db = global_config.get('ocp_meta_db', 'meta_database') - if global_config.get('ocp_monitor_tenant'): - monitor_user = global_config.get('ocp_monitor_username', 'root') - monitor_tenant = global_config['ocp_monitor_tenant']['tenant_name'] - monitor_max_cpu = global_config['ocp_monitor_tenant'].get('max_cpu', 2) - monitor_memory_size = global_config['ocp_monitor_tenant'].get('memory_size', '4G') - monitor_password = global_config.get('ocp_monitor_password', '') - monitor_db = global_config.get('ocp_monitor_db', 'monitor_database') - if not times and deploy_status == DeployStatus.STATUS_DEPLOYED and not cluster_config.get_component_attr("meta_tenant"): - setattr(options, 'tenant_name', meta_tenant) - setattr(options, 'max_cpu', meta_max_cpu) - setattr(options, 'memory_size', parse_size(meta_memory_size)) - setattr(options, 'database', meta_db) - setattr(options, 'db_username', meta_user) - setattr(options, 'db_password', '') - setattr(options, 'create_if_not_exists', True) - setattr(options, "variables", "ob_tcp_invited_nodes='%'") - sql = 'select ob_version() as ob_version;' - res = cursor.fetchone(sql) - if not res: - error('fail to get ob version') - version = Version(res['ob_version']) - stdio.verbose('meta version: %s' % version) - stdio.verbose('Search create_tenant plugin for oceanbase-ce-%s' % version) - tenant_plugin = 
PluginManager(kwargs.get('local_home_path')).get_best_py_script_plugin('create_tenant', 'oceanbase-ce', version) - stdio.verbose('Found for %s oceanbase-ce-%s' % (tenant_plugin, version)) - if not tenant_plugin(namespace, namespaces, deploy_name, deploy_status, repositories, components, clients, cluster_config, cmds, options, stdio, cursor=cursor): - return plugin_context.return_false() - cluster_config.update_component_attr("meta_tenant", meta_tenant, save=True) - meta_cursor = Cursor(jdbc_host, jdbc_port, meta_user, meta_tenant, '', stdio) - if meta_user != 'root': - sql = f"""ALTER USER root IDENTIFIED BY %s""" - meta_cursor.execute(sql, args=[meta_password], raise_exception=False, exc_level='verbose') - sql = f"""ALTER USER {meta_user} IDENTIFIED BY %s""" + + meta_cursor = Cursor(jdbc_host, jdbc_port, meta_user, meta_tenant, meta_password, stdio) + if meta_user != 'root': + sql = f"""ALTER USER root IDENTIFIED BY %s""" meta_cursor.execute(sql, args=[meta_password], raise_exception=False, exc_level='verbose') - meta_cursor = Cursor(jdbc_host, jdbc_port, meta_user, meta_tenant, str(meta_password), stdio) - plugin_context.set_variable('meta_cursor', meta_cursor) - - if not times and deploy_status == DeployStatus.STATUS_DEPLOYED and not cluster_config.get_component_attr("monitor_tenant"): - setattr(options, 'tenant_name', monitor_tenant) - setattr(options, 'max_cpu', monitor_max_cpu) - setattr(options, 'memory_size', parse_size(monitor_memory_size)) - setattr(options, 'database', monitor_db) - setattr(options, 'db_username', monitor_user) - setattr(options, 'db_password', '') - setattr(options, "variables", "ob_tcp_invited_nodes='%'") - if not tenant_plugin(namespace, namespaces, deploy_name, deploy_status, repositories, components, clients, cluster_config, cmds, options, stdio, cursor=cursor): - return plugin_context.return_false() - cluster_config.update_component_attr("monitor_tenant", monitor_tenant, save=True) - monitor_cursor = Cursor(jdbc_host, jdbc_port, monitor_user, monitor_tenant, '', stdio) - if monitor_user != 'root': - sql = f"""ALTER USER root IDENTIFIED BY %s""" - monitor_cursor.execute(sql, args=[monitor_password], raise_exception=False, exc_level='verbose') - sql = f"""ALTER USER {monitor_user} IDENTIFIED BY %s""" - monitor_cursor.execute(sql, args=[monitor_password], raise_exception=False, exc_level='verbose') + plugin_context.set_variable('meta_cursor', meta_cursor) - if meta_tenant not in jdbc_username: - jdbc_username = meta_user + '@' + meta_tenant - jdbc_url = jdbc_url.rsplit('/', 1)[0] + '/' + meta_db - jdbc_password = meta_password + monitor_cursor = Cursor(jdbc_host, jdbc_port, monitor_user, monitor_tenant, monitor_password, stdio) + if monitor_user != 'root': + sql = f"""ALTER USER root IDENTIFIED BY %s""" + monitor_cursor.execute(sql, args=[monitor_password], raise_exception=False, exc_level='verbose') + plugin_context.set_variable('monitor_cursor', monitor_cursor) server_pid = {} success = True node_num = 1 - stdio.start_loading("Start ocp-server") + stdio.start_loading("Start %s" % cluster_config.name) for server in cluster_config.servers: client = clients[server] server_config = start_env[server] home_path = server_config['home_path'] launch_user = server_config.get('launch_user', None) - _ocp_lib(client, home_path, soft_dir, stdio) system_password = server_config["system_password"] port = server_config['port'] pid_path = os.path.join(home_path, 'run/ocp-server.pid') @@ -362,12 +274,13 @@ def start_cluster(times=0): jvm_memory_option = "-Xms{0} 
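A standalone sketch of the jdbc_url parsing that the rewritten start_cluster above relies on; it uses the same regex as the plugin, and the example URL is hypothetical, not taken from the patch:

import re

# Same pattern as the plugin; group(2) captures the ':port' separator too,
# hence the [1:] slice below.
JDBC_PATTERN = r"^jdbc:\S+://(\S+?)(|:\d+)/(\S+)"

def parse_jdbc_url(jdbc_url):
    matched = re.match(JDBC_PATTERN, jdbc_url)
    if not matched:
        raise ValueError("jdbc_url is not valid: %s" % jdbc_url)
    return matched.group(1), matched.group(2)[1:], matched.group(3)

# parse_jdbc_url("jdbc:oceanbase://10.0.0.1:2883/meta_database")
# -> ('10.0.0.1', '2883', 'meta_database')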
@@ -362,12 +274,13 @@ def start_cluster(times=0):
             jvm_memory_option = "-Xms{0} -Xmx{1}".format(memory_xms, memory_xmx)
         else:
             memory_size = server_config.get('memory_size', '1G')
-            jvm_memory_option = "-Xms{0} -Xmx{0}".format(format_size(parse_size(memory_size), 0).lower())
+            jvm_memory_option = "-Xms{0} -Xmx{0}".format(str(Capacity(memory_size)).lower())
         extra_options = {
             "ocp.iam.encrypted-system-password": system_password
         }
         extra_options_str = ' '.join(["-D{}={}".format(k, v) for k, v in extra_options.items()])
         java_bin = server_config['java_bin']
+        client.add_env('PATH', '%s/jre/bin:' % server_config['home_path'])
         cmd = f'{java_bin} -Dfile.encoding=UTF-8 -jar {jvm_memory_option} {extra_options_str} {home_path}/lib/ocp-server.jar --bootstrap'
         jar_cmd = copy.deepcopy(cmd)
         if "log_dir" not in server_config:
@@ -392,6 +305,9 @@ def start_cluster(times=0):
                   f' --with-property=ocp.monitordb.password:\'{monitor_password}\'' \
                   f' --with-property=ocp.monitordb.database:{monitor_db}'
             if key not in EXCLUDE_KEYS and key in CONFIG_MAPPER:
+                if key == 'logging_file_total_size_cap':
+                    cmd += ' --with-property=ocp.logging.file.total.size.cap:{}'.format(CapacityWithB(server_config[key]))
+                    continue
                 cmd += ' --with-property={}:{}'.format(CONFIG_MAPPER[key], server_config[key])
         if site_url:
             cmd += ' --with-property=ocp.site.url:{}'.format(site_url)
@@ -431,7 +347,7 @@ def start_cluster(times=0):
                 stdio.stop_loading('fail')
                 return plugin_context.return_false()

-        stdio.start_loading("ocp-server program health check")
+        stdio.start_loading("%s program health check" % cluster_config.name)
         failed = []
         servers = server_pid.keys()
         count = 40
@@ -458,15 +374,15 @@ def start_cluster(times=0):
                 if any(pids_stat.values()):
                     for pid in pids_stat:
                         if pids_stat[pid]:
-                            stdio.verbose('%s ocp-server[pid: %s] started', server, pid)
+                            stdio.verbose('%s %s[pid: %s] started', server, cluster_config.name, pid)
                     continue
                 if all([stat is False for stat in pids_stat.values()]):
-                    failed.append('failed to start {} ocp-server'.format(server))
+                    failed.append('failed to start {} {}'.format(server, cluster_config.name))
                 elif count:
                     tmp_servers.append(server)
-                    stdio.verbose('failed to start %s ocp-server, remaining retries: %d' % (server, count))
+                    stdio.verbose('failed to start %s %s, remaining retries: %d' % (server, cluster_config.name, count))
                 else:
-                    failed.append('failed to start {} ocp-server'.format(server))
+                    failed.append('failed to start {} {}'.format(server, cluster_config.name))
             servers = tmp_servers
             if servers and count:
                 time.sleep(15)
@@ -496,15 +412,15 @@ def stop_cluster():
             if pid and client.execute_command('sudo ' + cmd if launch_user else cmd):
                 cmd = 'ls /proc/{}/fd'.format(pid)
                 if client.execute_command('sudo ' + cmd if launch_user else cmd):
-                    stdio.verbose('{} ocp-server[pid: {}] stopping...'.format(server, pid))
+                    stdio.verbose('{} {}[pid: {}] stopping...'.format(server, cluster_config.name, pid))
                     cmd = 'kill -9 {}'.format(pid)
                     client.execute_command('sudo ' + cmd if launch_user else cmd)
                     return True
                 else:
-                    stdio.verbose('failed to stop ocp-server[pid:{}] in {}, permission deny'.format(pid, server))
+                    stdio.verbose('failed to stop {}[pid:{}] in {}, permission deny'.format(cluster_config.name, pid, server))
                     success = False
             else:
-                stdio.verbose('{} ocp-server is not running'.format(server))
+                stdio.verbose('{} {} is not running'.format(server, cluster_config.name))
         if not success:
             stdio.stop_loading('fail')
             return plugin_context.return_true()
@@ -539,13 +455,15 @@ def stop_cluster():
     if not without_parameter and not get_option('without_parameter', ''):
         if not start_cluster():
-            stdio.error('start ocp-server failed')
+            stdio.error('start %s failed' % cluster_config.name)
             return plugin_context.return_false()
         if not stop_cluster():
-            stdio.error('stop ocp-server failed')
+            stdio.error('stop %s failed' % cluster_config.name)
             return plugin_context.return_false()
     if not start_cluster(1):
-        stdio.error('start ocp-server failed')
+        stdio.error('start %s failed' % cluster_config.name)
         return plugin_context.return_false()
     time.sleep(20)
-    return plugin_context.return_true()
+    plugin_context.set_variable('start_env', start_env)
+
+    return plugin_context.return_true(need_bootstrap=True)
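Both this file and the start_check diff below replace local parse_size/format_size helpers with the shared _types.Capacity class. A rough sketch of the interface the patch leans on — names (including the btyes spelling) are taken from the diff, but the implementation here is an illustrative approximation, not OBD's:

import re

class Capacity(object):
    UNITS = {'B': 1, 'K': 1 << 10, 'M': 1 << 20, 'G': 1 << 30, 'T': 1 << 40}

    def __init__(self, value, precision=0):
        if isinstance(value, str) and not value.isdigit():
            m = re.match(r'^(\d+)\s*([BKMGT])', value.upper())
            self._bytes = int(m.group(1)) * self.UNITS[m.group(2)]
        else:
            self._bytes = int(value)
        self.precision = precision

    @property
    def btyes(self):  # spelling as used throughout the diff
        return self._bytes

    def __str__(self):
        # render back to the largest whole unit, e.g. 2147483648 -> '2G'
        size, idx, units = float(self._bytes), 0, ['B', 'K', 'M', 'G', 'T']
        while idx < len(units) - 1 and size >= 1024:
            size /= 1024.0
            idx += 1
        return '%.*f%s' % (self.precision, size, units[idx])

# Capacity('2G').btyes == 2147483648; str(Capacity('2G')).lower() == '2g',
# which is the form the -Xms/-Xmx option builder above expects.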
diff --git a/plugins/ocp-server/4.2.1/start_check.py b/plugins/ocp-server/4.2.1/start_check.py
index cc1fe30..2dd8439 100644
--- a/plugins/ocp-server/4.2.1/start_check.py
+++ b/plugins/ocp-server/4.2.1/start_check.py
@@ -26,19 +26,18 @@ import datetime

 from copy import deepcopy

+from _deploy import DeployStatus
 from _rpm import Version
 import _errno as err
-from ssh import SshConfig, SshClient
 from tool import Cursor
-from const import CONST_OBD_HOME
-from _deploy import DeployStatus
+from _types import Capacity


 success = True


 def get_missing_required_parameters(parameters):
     results = []
-    for key in ["jdbc_url", "jdbc_password", "jdbc_username"]:
+    for key in ["jdbc_url"]:
         if parameters.get(key) is None:
             results.append(key)
     return results
@@ -59,35 +58,6 @@ def password_check(passwd):
     return True


-def parse_size(size):
-    _bytes = 0
-    if not isinstance(size, str) or size.isdigit():
-        _bytes = int(size)
-    else:
-        units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40}
-        match = re.match(r'(0|[1-9][0-9]*)\s*([B,K,M,G,T])', size.upper())
-        _bytes = int(match.group(1)) * units[match.group(2)]
-    return _bytes
-
-
-def format_size(size, precision=1):
-    units = ['B', 'K', 'M', 'G']
-    units_num = len(units) - 1
-    idx = 0
-    if precision:
-        div = 1024.0
-        format = '%.' + str(precision) + 'f%s'
-        limit = 1024
-    else:
-        div = 1024
-        limit = 1024
-        format = '%d%s'
-    while idx < units_num and size >= limit:
-        size /= div
-        idx += 1
-    return format % (size, units[idx])
-
-
 def get_mount_path(disk, _path):
     _mount_path = '/'
     for p in disk:
@@ -147,7 +117,14 @@ def get_ocp_depend_config(cluster_config, stdio):
                 if 'server_ip' not in depend_info:
                     depend_info['server_ip'] = ob_server.ip
                     depend_info['mysql_port'] = ob_server_conf['mysql_port']
-                depend_info['root_password'] = ob_server_conf['root_password']
+                depend_info['meta_tenant'] = ob_server_conf['ocp_meta_tenant']['tenant_name']
+                depend_info['meta_user'] = ob_server_conf['ocp_meta_username']
+                depend_info['meta_password'] = ob_server_conf['ocp_meta_password']
+                depend_info['meta_db'] = ob_server_conf['ocp_meta_db']
+                depend_info['monitor_tenant'] = ob_server_conf['ocp_monitor_tenant']['tenant_name']
+                depend_info['monitor_user'] = ob_server_conf['ocp_monitor_username']
+                depend_info['monitor_password'] = ob_server_conf['ocp_monitor_password']
+                depend_info['monitor_db'] = ob_server_conf['ocp_monitor_db']
                 zone = ob_server_conf['zone']
                 if zone not in ob_zones:
                     ob_zones[zone] = ob_server
@@ -162,27 +139,30 @@ def get_ocp_depend_config(cluster_config, stdio):
             break

     for server in cluster_config.servers:
-        server_config = deepcopy(cluster_config.get_server_conf_with_default(server))
-        original_server_config = cluster_config.get_original_server_conf(server)
+        default_server_config = deepcopy(cluster_config.get_server_conf_with_default(server))
+        server_config = deepcopy(cluster_config.get_server_conf(server))
+        original_server_config = cluster_config.get_original_server_conf_with_global(server)
         missed_keys = get_missing_required_parameters(original_server_config)
         if missed_keys:
             if 'jdbc_url' in missed_keys and depend_observer:
-                server_config['jdbc_url'] = 'jdbc:oceanbase://{}:{}/{}'.format(depend_info['server_ip'],
-                                                                               depend_info['mysql_port'],
-                                                                               server_config['ocp_meta_db'])
-                server_config['jdbc_username'] = '%s@%s' % (
-                    server_config['ocp_meta_username'], server_config['ocp_meta_tenant']['tenant_name'])
-                server_config['jdbc_password'] = server_config['ocp_meta_password']
-                server_config['root_password'] = depend_info['root_password']
-        env[server] = server_config
+                default_server_config['jdbc_url'] = 'jdbc:oceanbase://{}:{}/{}'.format(depend_info['server_ip'], depend_info['mysql_port'], depend_info['meta_db'] if not original_server_config.get('ocp_meta_db', None) else original_server_config['ocp_meta_db']) if not original_server_config.get('jdbc_url', None) else original_server_config['jdbc_url']
+                default_server_config['ocp_meta_username'] = depend_info['meta_user'] if not original_server_config.get('ocp_meta_username', None) else original_server_config['ocp_meta_username']
+                default_server_config['ocp_meta_tenant']['tenant_name'] = depend_info['meta_tenant'] if not original_server_config.get('ocp_meta_tenant', None) else original_server_config['ocp_meta_tenant']['tenant_name']
+                default_server_config['ocp_meta_password'] = depend_info['meta_password'] if not original_server_config.get('ocp_meta_password', None) else original_server_config['ocp_meta_password']
+                default_server_config['ocp_meta_db'] = depend_info['meta_db'] if not original_server_config.get('ocp_meta_db', None) else original_server_config['ocp_meta_db']
+                default_server_config['ocp_monitor_username'] = depend_info['monitor_user'] if not original_server_config.get('ocp_monitor_username', None) else original_server_config['ocp_monitor_username']
+                default_server_config['ocp_monitor_tenant']['tenant_name'] = depend_info['monitor_tenant'] if not original_server_config.get('ocp_monitor_tenant', None) else original_server_config['ocp_monitor_tenant']['tenant_name']
+                default_server_config['ocp_monitor_password'] = depend_info['monitor_password'] if not original_server_config.get('ocp_monitor_password', None) else original_server_config['ocp_monitor_password']
+                default_server_config['ocp_monitor_db'] = depend_info['monitor_db'] if not original_server_config.get('ocp_monitor_db', None) else original_server_config['ocp_monitor_db']
+        env[server] = default_server_config
     return env

-
 def execute_cmd(server_config, cmd):
     return cmd if not server_config.get('launch_user', None) else 'sudo ' + cmd


-def start_check(plugin_context, init_check_status=False, work_dir_check=False, work_dir_empty_check=True, strict_check=False, precheck=False, source_option="start", *args, **kwargs):
+def start_check(plugin_context, init_check_status=False, work_dir_check=False, work_dir_empty_check=True, strict_check=False, precheck=False,
+                source_option="start", java_check=True, *args, **kwargs):

     def check_pass(item):
         status = check_status[server]
@@ -219,6 +199,11 @@ def critical(item, error, suggests=[]):
         success = False
         check_fail(item, error, suggests)
         stdio.error(error)
+    def get_option(key, default=''):
+        value = getattr(options, key, default)
+        if not value:
+            value = default
+        return value

     cluster_config = plugin_context.cluster_config
     options = plugin_context.options
@@ -242,14 +227,15 @@ def critical(item, error, suggests=[]):
             'launch user': err.CheckStatus(),
             'sudo nopasswd': err.CheckStatus(),
             'tenant': err.CheckStatus(),
-            'clockdiff': err.CheckStatus()
+            'clockdiff': err.CheckStatus(),
+            'admin_password': err.CheckStatus()
         }
         if work_dir_check:
             check_status[server]['dir'] = err.CheckStatus()

     if init_check_status:
         return plugin_context.return_true(start_check_status=check_status)

-    stdio.start_loading('Check before start ocp-server')
+    stdio.start_loading('Check before start %s' % cluster_config.name)
     env = get_ocp_depend_config(cluster_config, stdio)
     if not env:
         return plugin_context.return_false()
@@ -301,12 +287,9 @@ def critical(item, error, suggests=[]):
                 wait_2_pass()
                 continue

-        if not cluster_config.depends:
+        if not cluster_config.depends and not precheck:
             # check meta db connect before start
             jdbc_url = server_config['jdbc_url']
-            jdbc_username = server_config['jdbc_username']
-            jdbc_password = server_config['jdbc_password']
-            root_password = server_config.get('root_password', '')
             matched = re.match(r"^jdbc:\S+://(\S+?)(|:\d+)/(\S+)", jdbc_url)
             cursor = getattr(options, 'metadb_cursor', '')
             cursor = kwargs.get('metadb_cursor', '') if cursor == '' else cursor
@@ -315,17 +298,17 @@ def critical(item, error, suggests=[]):
                 jdbc_host = matched.group(1)
                 jdbc_port = matched.group(2)[1:]
                 jdbc_database = matched.group(3)
-                password = root_password if root_password else jdbc_password
                 connected = False
                 retries = 10
                 while not connected and retries:
                     retries -= 1
                     try:
-                        cursor = Cursor(ip=jdbc_host, port=jdbc_port, user=jdbc_username, password=jdbc_password,
+                        cursor = Cursor(ip=jdbc_host, port=jdbc_port, user="{0}@{1}".format(server_config['ocp_meta_username'], server_config['ocp_meta_tenant']['tenant_name']), password=server_config['ocp_meta_password'],
                                         stdio=stdio)
                         connected = True
+                        stdio.verbose('check cursor passed')
                     except:
-                        jdbc_username = 'root'
+                        stdio.verbose('check cursor failed')
                         time.sleep(1)
                 if not connected:
                     success = False
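The metadb probe above is a bounded retry loop: up to ten one-second attempts at building a tenant-scoped Cursor before the check is declared failed. The same shape, reduced to a reusable sketch (connect is any callable; this is an illustration, not an OBD helper):

import time

def wait_for(connect, attempts=10, interval=1):
    # Try connect() until it succeeds or attempts are exhausted; None means failure.
    for _ in range(attempts):
        try:
            return connect()
        except Exception:
            time.sleep(interval)
    return None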
@@ -345,16 +328,6 @@ def critical(item, error, suggests=[]):
             if not abs((now - ob_time).total_seconds()) < 180:
                 critical('time check', err.EC_OCP_SERVER_TIME_SHIFT.format(server=server))

-        # tenant check
-        if plugin_context in {DeployStatus.STATUS_DEPLOYED, DeployStatus.STATUS_CONFIGURED}:
-            sql = "select * from oceanbase.DBA_OB_TENANTS where TENANT_NAME = %s"
-            meta_tenant = server_config.get('ocp_meta_tenant')['tenant_name']
-            if not cluster_config.get_component_attr("meta_tenant") and cursor.fetchone(sql, [meta_tenant]):
-                error('tenant', err.EC_OCP_SERVER_TENANT_ALREADY_EXISTS.format(tenant_name=meta_tenant))
-            monitor_tenant = server_config.get('ocp_monitor_tenant')['tenant_name']
-            if not cluster_config.get_component_attr("monitor_tenant") and cursor.fetchone(sql, [monitor_tenant]):
-                error('tenant', err.EC_OCP_SERVER_TENANT_ALREADY_EXISTS.format(tenant_name=monitor_tenant))
-
         # user check
         stdio.verbose('user check ')
         ocp_user = server_config.get('launch_user', '')
@@ -445,23 +418,25 @@ def critical(item, error, suggests=[]):
         try:
             # java version check
-            stdio.verbose('java check ')
-            java_bin = server_config.get('java_bin', '/usr/bin/java')
-            ret = client.execute_command(execute_cmd(server_config, '{} -version'.format(java_bin)))
-            stdio.verbose('java version %s' % ret)
-            if not ret:
-                critical('java', err.EC_OCP_SERVER_JAVA_NOT_FOUND.format(server=server), [err.SUG_OCP_SERVER_INSTALL_JAVA_WITH_VERSION.format(version='1.8.0')])
-            version_pattern = r'version\s+\"(\d+\.\d+\.\d+)(\_\d+)'
-            found = re.search(version_pattern, ret.stdout) or re.search(version_pattern, ret.stderr)
-            if not found:
-                error('java', err.EC_OCP_SERVER_JAVA_VERSION_ERROR.format(server=server, version='1.8.0'), [err.SUG_OCP_SERVER_INSTALL_JAVA_WITH_VERSION.format(version='1.8.0'),])
-            else:
-                java_major_version = found.group(1)
-                stdio.verbose('java_major_version %s' % java_major_version)
-                java_update_version = found.group(2)[1:]
-                stdio.verbose('java_update_version %s' % java_update_version)
-                if Version(java_major_version) != Version('1.8.0') or int(java_update_version) < 161:
-                    critical('java', err.EC_OCP_SERVER_JAVA_VERSION_ERROR.format(server=server, version='1.8.0'), [err.SUG_OCP_SERVER_INSTALL_JAVA_WITH_VERSION.format(version='1.8.0'),])
+            if java_check:
+                stdio.verbose('java check ')
+                java_bin = server_config.get('java_bin', '/usr/bin/java')
+                client.add_env('PATH', '%s/jre/bin:' % server_config['home_path'])
+                ret = client.execute_command(execute_cmd(server_config, '{} -version'.format(java_bin)))
+                stdio.verbose('java version %s' % ret)
+                if not ret:
+                    critical('java', err.EC_OCP_SERVER_JAVA_NOT_FOUND.format(server=server), [err.SUG_OCP_SERVER_INSTALL_JAVA_WITH_VERSION.format(version='1.8.0')])
+                version_pattern = r'version\s+\"(\d+\.\d+\.\d+)(\_\d+)'
+                found = re.search(version_pattern, ret.stdout) or re.search(version_pattern, ret.stderr)
+                if not found:
+                    error('java', err.EC_OCP_SERVER_JAVA_VERSION_ERROR.format(server=server, version='1.8.0'), [err.SUG_OCP_SERVER_INSTALL_JAVA_WITH_VERSION.format(version='1.8.0'),])
+                else:
+                    java_major_version = found.group(1)
+                    stdio.verbose('java_major_version %s' % java_major_version)
+                    java_update_version = found.group(2)[1:]
+                    stdio.verbose('java_update_version %s' % java_update_version)
+                    if Version(java_major_version) != Version('1.8.0') or int(java_update_version) < 161:
+                        critical('java', err.EC_OCP_SERVER_JAVA_VERSION_ERROR.format(server=server, version='1.8.0'), [err.SUG_OCP_SERVER_INSTALL_JAVA_WITH_VERSION.format(version='1.8.0'),])
         except Exception as e:
             stdio.error(e)
             error('java', err.EC_OCP_SERVER_JAVA_VERSION_ERROR.format(server=server, version='1.8.0'),
@@ -470,18 +445,20 @@ def critical(item, error, suggests=[]):
         try:
             # clockdiff status check
             stdio.verbose('clockdiff check ')
-            clockdiff_bin = 'which clockdiff'
-            if client.execute_command(clockdiff_bin):
+            clockdiff_cmd = 'clockdiff -o 127.0.0.1'
+            if client.execute_command(clockdiff_cmd):
                 check_pass('clockdiff')
             else:
                 if not client.execute_command('sudo -n true'):
                     critical('clockdiff', err.EC_OCP_SERVER_CLOCKDIFF_NOT_EXISTS.format(server=server))
-                ret = client.execute_command('sudo ' + clockdiff_bin)
+                ret = client.execute_command('sudo ' + clockdiff_cmd)
                 if not ret:
                     critical('clockdiff', err.EC_OCP_SERVER_CLOCKDIFF_NOT_EXISTS.format(server=server))
-                client.execute_command('which clockdiff | xargs sudo chmod u+s')
-                client.execute_command("which clockdiff | xargs sudo setcap 'cap_net_raw+ep'")
+                clockdiff_bin = 'type -P clockdiff'
+                res = client.execute_command(clockdiff_bin).stdout
+                client.execute_command('sudo chmod u+s %s' % res)
+                client.execute_command("sudo setcap 'cap_net_raw+ep' %s" % res)
         except Exception as e:
             stdio.error(e)
             critical('clockdiff', err.EC_OCP_SERVER_CLOCKDIFF_NOT_EXISTS.format(server=server))
@@ -492,12 +469,12 @@ def critical(item, error, suggests=[]):
         ip_servers = {}
         MIN_MEMORY_VALUE = 1073741824

-        memory_size = parse_size(server_config.get('memory_size', '1G'))
+        memory_size = Capacity(server_config.get('memory_size', '1G')).btyes
         if server_config.get('log_dir'):
             log_dir = server_config['log_dir']
         else:
             log_dir = os.path.join(server_config['home_path'], 'log')
-        need_size = parse_size(server_config.get('logging_file_total_size_cap', '1G'))
+        need_size = Capacity(server_config.get('logging_file_total_size_cap', '1G')).btyes
         ip = server.ip
         if ip not in servers_client:
             servers_client[ip] = client
@@ -540,17 +517,17 @@ def critical(item, error, suggests=[]):
             for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout):
                 if k in memory_key_map:
                     key = memory_key_map[k]
-                    server_memory_stats[key] = parse_size(str(v))
+                    server_memory_stats[key] = Capacity(str(v)).btyes

             mem_suggests = [err.SUG_OCP_SERVER_REDUCE_MEM.format()]
             if memory_needed > server_memory_stats['available']:
                 for server in ip_servers[ip]:
-                    error('mem', err.EC_OCP_SERVER_NOT_ENOUGH_MEMORY_AVAILABLE.format(ip=ip, available=format_size(server_memory_stats['available']), need=format_size(memory_needed)), suggests=mem_suggests)
+                    error('mem', err.EC_OCP_SERVER_NOT_ENOUGH_MEMORY_AVAILABLE.format(ip=ip, available=str(Capacity(server_memory_stats['available'])), need=str(Capacity(memory_needed))), suggests=mem_suggests)
             elif memory_needed > server_memory_stats['free'] + server_memory_stats['buffers'] + server_memory_stats['cached']:
                 for server in ip_servers[ip]:
-                    error('mem', err.EC_OCP_SERVER_NOT_ENOUGH_MEMORY_CACHED.format(ip=ip, free=format_size(server_memory_stats['free']), cached=format_size(server_memory_stats['buffers'] + server_memory_stats['cached']), need=format_size(memory_needed)), suggests=mem_suggests)
+                    error('mem', err.EC_OCP_SERVER_NOT_ENOUGH_MEMORY_CACHED.format(ip=ip, free=str(Capacity(server_memory_stats['free'])), cached=str(Capacity(server_memory_stats['buffers'] + server_memory_stats['cached'])), need=str(Capacity(memory_needed))), suggests=mem_suggests)
             elif server_memory_stats['free'] < MIN_MEMORY_VALUE:
                 for server in ip_servers[ip]:
-                    alert('mem', err.EC_OCP_SERVER_NOT_ENOUGH_MEMORY.format(ip=ip, free=format_size(server_memory_stats['free']), need=format_size(memory_needed)), suggests=mem_suggests)
+                    alert('mem', err.EC_OCP_SERVER_NOT_ENOUGH_MEMORY.format(ip=ip, free=str(Capacity(server_memory_stats['free'])), need=str(Capacity(memory_needed))), suggests=mem_suggests)

         # disk check
         stdio.verbose('disk check ')
         for ip in servers_disk:
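The memory hunk above pulls figures out of /proc/meminfo-style output with re.findall and normalizes them through Capacity. The same parse on a fabricated sample (values are made up; 'kB' figures are multiples of 1024 bytes):

import re

sample = """MemTotal:       16384000 kB
MemFree:         8192000 kB
Buffers:          204800 kB
Cached:          1024000 kB"""

# Same pattern as the plugin: a key, then a number with a unit suffix.
stats = {k: v for k, v in re.findall(r'(\w+)\s*:\s*(\d+\s*\w+)', sample)}
free_bytes = int(stats['MemFree'].split()[0]) * 1024
print(free_bytes)  # 8388608000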
@@ -562,9 +539,18 @@ def critical(item, error, suggests=[]):
                 mount_path = get_mount_path(disk_info, path)
                 if disk_needed > disk_info[mount_path]['avail']:
                     for server in ip_servers[ip]:
-                        error('disk', err.EC_OCP_SERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=mount_path, need=format_size(disk_needed), avail=format_size(disk_info[mount_path]['avail'])), suggests=[err.SUG_OCP_SERVER_REDUCE_DISK.format()])
+                        error('disk', err.EC_OCP_SERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=mount_path, need=str(Capacity(disk_needed)), avail=str(Capacity(disk_info[mount_path]['avail']))), suggests=[err.SUG_OCP_SERVER_REDUCE_DISK.format()])
             else:
                 stdio.warn(err.WC_OCP_SERVER_FAILED_TO_GET_DISK_INFO.format(ip))

+    # admin_passwd check
+    bootstrap_flag = os.path.join(home_path, '.bootstrapped')
+    if deploy_status == DeployStatus.STATUS_DEPLOYED and not client.execute_command('ls %s' % bootstrap_flag) and not get_option('skip_password_check', False):
+        for server in cluster_config.servers:
+            server_config = env[server]
+            admin_passwd = server_config['admin_password']
+            if not admin_passwd or not password_check(admin_passwd):
+                error('admin_password', err.EC_COMPONENT_PASSWD_ERROR.format(ip=server.ip, component='ocp', key='admin_password', rule='Must be 8 to 32 characters in length, and must contain at least two digits, two uppercase letters, two lowercase letters, and two of the following special characters:~!@#%^&*_-+=|(){{}}[]:;,.?/)'), suggests=[err.SUG_OCP_EXPRESS_EDIT_ADMIN_PASSWD.format()])
+
+    plugin_context.set_variable('start_env', env)

     for server in cluster_config.servers:
diff --git a/plugins/ocp-server/4.2.1/stop.py b/plugins/ocp-server/4.2.1/stop.py
index 3356743..5561622 100644
--- a/plugins/ocp-server/4.2.1/stop.py
+++ b/plugins/ocp-server/4.2.1/stop.py
@@ -49,7 +49,7 @@ def stop(plugin_context, *args, **kwargs):
     clients = plugin_context.clients
     stdio = plugin_context.stdio
     servers = {}
-    stdio.start_loading('Stop ocp-server')
+    stdio.start_loading('Stop %s' % cluster_config.name)
     success = True
     for server in cluster_config.servers:
         server_config = cluster_config.get_server_conf(server)
@@ -64,14 +64,14 @@ def stop(plugin_context, *args, **kwargs):
         if pid and client.execute_command('sudo ' + cmd if launch_user else cmd):
             cmd = 'ls /proc/{}/fd'.format(pid)
             if client.execute_command('sudo ' + cmd if launch_user else cmd):
-                stdio.verbose('{} ocp-server[pid: {}] stopping...'.format(server, pid))
+                stdio.verbose('{} {}[pid: {}] stopping...'.format(server, cluster_config.name, pid))
                 cmd = 'kill -9 {}'.format(pid)
                 client.execute_command('sudo ' + cmd if launch_user else cmd)
             else:
-                stdio.verbose('failed to stop ocp-server[pid:{}] in {}, permission deny'.format(pid, server))
+                stdio.verbose('failed to stop {}[pid:{}] in {}, permission deny'.format(cluster_config.name, pid, server))
                 success = False
         else:
-            stdio.verbose('{} ocp-server is not running'.format(server))
+            stdio.verbose('{} {} is not running'.format(server, cluster_config.name))
     if not success:
         stdio.stop_loading('fail')
         return plugin_context.return_true()
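stop.py above (and stop_cluster in start.py) distinguish "stopping", "permission deny", and "not running" by probing /proc: a readable fd table means the process can be signalled. The decision tree as a sketch, where client.execute_command stands in for OBD's ssh client:

def classify_process(client, pid, sudo=False):
    prefix = 'sudo ' if sudo else ''
    if not pid or not client.execute_command(prefix + 'ls /proc/%s' % pid):
        return 'not running'
    # A readable fd table means we own (or may signal) the process.
    if client.execute_command(prefix + 'ls /proc/%s/fd' % pid):
        return 'stoppable'        # safe to kill -9
    return 'permission denied'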
diff --git a/plugins/ocp-server/4.2.1/takeover.py b/plugins/ocp-server/4.2.1/takeover.py
index 2cecd1a..1ffd7af 100644
--- a/plugins/ocp-server/4.2.1/takeover.py
+++ b/plugins/ocp-server/4.2.1/takeover.py
@@ -33,207 +33,81 @@ from tool import Cursor, FileUtil, YamlLoader
 from _errno import EC_OBSERVER_CAN_NOT_MIGRATE_IN


-class OcpCursor(object):
-    class Response(object):
-
-        def __init__(self, code, content):
-            self.code = code
-            self.content = content
-
-        def __bool__(self):
-            return self.code == 200
-
-    def __init__(self, base_url="http://localhost:8080", username=None, password=None):
-        self.base_url = base_url.strip("/")
-        self.auth = None
-        self.username=username
-        self.password=password
-        if self.username:
-            self.auth = HTTPBasicAuth(username=username, password=password)
-
-    def status(self, stdio=None):
-        resp = self._request('GET', '/api/v2/time', stdio=stdio)
-        ocp_status_ok = False
-        now = time.time()
-        check_wait_time = 180
-        while time.time() - now < check_wait_time:
-            stdio.verbose("query ocp to check...")
-            try:
-                if resp.code == 200:
-                    ocp_status_ok = True
-                    break
-            except Exception:
-                stdio.verbose("ocp still not active")
-                time.sleep(5)
-        if ocp_status_ok:
-            stdio.verbose("check ocp server status ok")
-            return True
-        else:
-            stdio.verbose("ocp still not ok, check failed")
-            raise Exception("ocp still not ok, check failed")
-
-    def info(self, stdio=None):
-        resp = self._request('GET', '/api/v2/info', stdio=stdio)
-        if resp.code == 200:
-            return resp.content
-        else:
-            raise Exception("failed to query ocp info")
-
-    def take_over_precheck(self, data, stdio=None):
-        resp = self._request('POST', '/api/v2/ob/clusters/takeOverPreCheck', data=data, stdio=stdio)
-        if resp.code == 200:
-            return resp.content
-        else:
-            raise Exception("takeover precheck failed")
-
-    def get_host_types(self, stdio=None):
-        resp = self._request('GET', '/api/v2/compute/hostTypes', stdio=stdio)
-        if resp.code == 200:
-            return resp.content
-        else:
-            msg = resp.content
-            if 'error' in resp.content and 'message' in resp.content['error']:
-                msg = resp.content['error']['message']
-            raise Exception("failed to query host types: %s" % msg)
-
-    def create_host_type(self, data, stdio=None):
-        resp = self._request('POST', '/api/v2/compute/hostTypes', data=data, stdio=stdio)
-        if resp.code == 200:
-            return resp.content
-        else:
-            msg = resp.content
-            if 'error' in resp.content and 'message' in resp.content['error']:
-                msg = resp.content['error']['message']
-            raise Exception("failed to create host type: %s" % msg)
-
-    def list_credentials(self, stdio=None):
-        resp = self._request('GET', '/api/v2/profiles/me/credentials', stdio=stdio)
-        if resp.code == 200:
-            return resp.content
-        else:
-            msg = resp.content
-            if 'error' in resp.content and 'message' in resp.content['error']:
-                msg = resp.content['error']['message']
-            raise Exception("failed to query credentials: %s" % msg)
-
-    def create_credential(self, data, stdio=None):
-        resp = self._request('POST', '/api/v2/profiles/me/credentials', data=data, stdio=stdio)
-        if resp.code == 200:
-            return resp.content
-        else:
-            msg = resp.content
-            if 'error' in resp.content and 'message' in resp.content['error']:
-                msg = resp.content['error']['message']
-            raise Exception("failed to create credential: %s" % msg)
-
-    def take_over(self, data, stdio=None):
-        resp = self._request('POST', '/api/v2/ob/clusters/takeOver', data=data, stdio=stdio)
-        if resp.code == 200:
-            return resp.content
-        else:
-            msg = resp.content
-            if 'error' in resp.content and 'message' in resp.content['error']:
-                msg = resp.content['error']['message']
-            raise Exception("failed to do take over: %s" % msg)
-
-    def _request(self, method, api, data=None, retry=5, stdio=None):
-        url = self.base_url + api
-        headers = {"Content-Type": "application/json"}
-        try:
-            if data is not None:
-                data = json.dumps(data)
-            stdio.verbose('send http request method: {}, url: {}, data: {}'.format(method, url, data))
-            resp = requests.request(method, url, data=data, verify=False, headers=headers, auth=self.auth)
-            return_code = resp.status_code
-            content = resp.content
-        except Exception as e:
-            if retry:
-                retry -= 1
-                return self._request(method=method, api=api, data=data, retry=retry, stdio=stdio)
-            stdio.exception("")
-            return_code = 500
-            content = str(e)
-        if return_code != 200:
-            stdio.verbose("request ocp-server failed: %s" % content)
-        try:
-            content = json.loads(content.decode())
-        except:
-            pass
-        return self.Response(code=return_code, content=content)
-
-def takeover(plugin_context, *args, **kwargs):
-    stdio = plugin_context.stdio
+def takeover(plugin_context, cursors=None, *args, **kwargs):
     try:
-        _do_takeover(plugin_context, *args, **kwargs)
-    except Exception as ex:
-        stdio.error("do takeover got exception:%s", ex)
-        return plugin_context.return_false()
-
-def _do_takeover(plugin_context, *args, **kwargs):
-    # init variables, include get obcluster info from deploy config
-    cluster_config = plugin_context.cluster_config
-    clients = plugin_context.clients
-    options = plugin_context.options
-    stdio = plugin_context.stdio
-    stdio.verbose(vars(cluster_config))
-    address = getattr(options, 'address', '')
-    user = getattr(options, 'user', '')
-    password = getattr(options, 'password', '')
-    host_type = getattr(options, 'host_type', '')
-    credential_name = getattr(options, 'credential_name', '')
-    ocp_cursor = OcpCursor(base_url=address, username=user, password=password)
-    if len(clients) == 0:
-        stdio.error("no available clients")
-        return plugin_context.return_false()
-    ssh_client = None
-    for ssh_client in clients.values():
-        if ssh_client != None:
-            break
-    ssh_config = ssh_client.config
-    # query host types, add host type if current host_type is not empty and no matched record in ocp, otherwise use the first one
-    host_types = ocp_cursor.get_host_types(stdio=stdio)['data']['contents']
-    host_type_id = None
-    if host_type == "":
-        if len(host_types) > 0:
-            host_type_id = host_types[0]['id']
-    else:
-        for t in host_types:
-            if host_type == t['name']:
-                host_type_id = t['id']
+        # init variables, include get obcluster info from deploy config
+        cluster_config = plugin_context.cluster_config
+        clients = plugin_context.clients
+        options = plugin_context.options
+        stdio = plugin_context.stdio
+        stdio.verbose(vars(cluster_config))
+        host_type = getattr(options, 'host_type', '')
+        credential_name = getattr(options, 'credential_name', '')
+        if len(clients) == 0:
+            stdio.error("no available clients")
+            return plugin_context.return_false()
+        ssh_client = None
+        for ssh_client in clients.values():
+            if ssh_client != None:
                 break
-    if host_type_id is None:
-        create_host_type_data = {'name': host_type if host_type is not None else str(uuid.uuid4()).split('-')[-1]}
-        host_type_id = ocp_cursor.create_host_type(create_host_type_data, stdio=stdio)['data']['id']
-    # query credentials
-    credential_id = None
-    if credential_name != "":
-        credentials = ocp_cursor.list_credentials(stdio=stdio)['data']['contents']
-        for credential in credentials:
-            if credential['targetType'] == "HOST" and credential['name'] == credential_name:
-                stdio.verbose("found credential with id %d", credential['id'])
-                credential_id = credential['id']
-                break
-    if credential_id is None:
-        name = credential_name if credential_name != "" else str(uuid.uuid4()).split('-')[-1]
-        credential_type = "PRIVATE_KEY"
-        if ssh_config.password is not None and ssh_config.password != "":
-            credential_type = "PASSWORD"
-            pass_phrase = ssh_config.password
+        ssh_config = ssh_client.config
+        cursors = plugin_context.get_return('connect').get_return('cursor') if not cursors else cursors
+        cursor = cursors[cluster_config.servers[0]]
+        # query host types, add host type if current host_type is not empty and no matched record in ocp, otherwise use the first one
+        host_types = cursor.get_host_types(stdio=stdio)['data']['contents']
+        host_type_id = None
+        if host_type == "":
+            if len(host_types) > 0:
+                host_type_id = host_types[0]['id']
         else:
-            key_file = ssh_config.key_filename if ssh_config.key_filename is not None else '{0}/.ssh/id_rsa'.format(os.path.expanduser("~"))
-            with open(key_file, 'r') as fd:
-                pass_phrase = fd.read()
-        create_credential_data = {"targetType":"HOST","name":name,"sshCredentialProperty":{"type":credential_type, "username":ssh_config.username,"passphrase":pass_phrase}}
-        credential_id = ocp_cursor.create_credential(create_credential_data, stdio=stdio)['data']['id']
-    server = cluster_config.servers[0]
-    mysql_port = cluster_config.get_global_conf().get("mysql_port")
-    root_password = cluster_config.get_global_conf().get("root_password")
-    takeover_data = {"switchConfigUrl":True,"connectionMode":"direct","rootSysPassword":root_password,"address":server.ip,"port":mysql_port,"hostInfo":{"kind":"DEDICATED_PHYSICAL_MACHINE","hostTypeId":host_type_id,"sshPort":22,"credentialId":credential_id}}
-    proxyro_password = cluster_config.get_global_conf().get("proxyro_password")
-    if proxyro_password is not None and proxyro_password != "":
-        takeover_data.update({"proxyroPassword": proxyro_password})
-    takeover_result = ocp_cursor.take_over(takeover_data, stdio=stdio)
-    stdio.verbose("takeover result %s" % takeover_result)
-    task_id = takeover_result['data']['id']
-    cluster_id = takeover_result['data']['clusterId']
-    return plugin_context.return_true(task_id=task_id, cluster_id = cluster_id)
+            for t in host_types:
+                if host_type == t['name']:
+                    host_type_id = t['id']
+                    break
+        if host_type_id is None:
+            create_host_type_data = {'name': host_type if host_type is not None else str(uuid.uuid4()).split('-')[-1]}
+            host_type_id = cursor.create_host_type(create_host_type_data, stdio=stdio)['data']['id']
+        # query credentials
+        credential_id = None
+        if credential_name != "":
+            credentials = cursor.list_credentials(stdio=stdio)['data']['contents']
+            for credential in credentials:
+                if credential['targetType'] == "HOST" and credential['name'] == credential_name:
+                    stdio.verbose("found credential with id %d", credential['id'])
+                    credential_id = credential['id']
+                    break
+        if credential_id is None:
+            name = credential_name if credential_name != "" else str(uuid.uuid4()).split('-')[-1]
+            credential_type = "PRIVATE_KEY"
+            if ssh_config.password is not None and ssh_config.password != "":
+                credential_type = "PASSWORD"
+                pass_phrase = ssh_config.password
+            else:
+                key_file = ssh_config.key_filename if ssh_config.key_filename is not None else '{0}/.ssh/id_rsa'.format(
+                    os.path.expanduser("~"))
+                with open(key_file, 'r') as fd:
+                    pass_phrase = fd.read()
+            create_credential_data = {"targetType": "HOST", "name": name,
+                                      "sshCredentialProperty": {"type": credential_type,
                                                                "username": ssh_config.username,
+                                                                "passphrase": pass_phrase}}
+            credential_id = cursor.create_credential(create_credential_data, stdio=stdio)['data']['id']
+        server = cluster_config.servers[0]
+        mysql_port = cluster_config.get_global_conf().get("mysql_port")
+        root_password = cluster_config.get_global_conf().get("root_password")
+        takeover_data = {"switchConfigUrl": True, "connectionMode": "direct", "rootSysPassword": root_password,
+                         "address": server.ip, "port": mysql_port,
+                         "hostInfo": {"kind": "DEDICATED_PHYSICAL_MACHINE", "hostTypeId": host_type_id, "sshPort": 22,
+                                      "credentialId": credential_id}}
+        proxyro_password = cluster_config.get_global_conf().get("proxyro_password")
+        if proxyro_password is not None and proxyro_password != "":
+            takeover_data.update({"proxyroPassword": proxyro_password})
+        takeover_result = cursor.take_over(takeover_data, stdio=stdio)
+        stdio.verbose("takeover result %s" % takeover_result)
+        task_id = takeover_result['data']['id']
+        cluster_id = takeover_result['data']['clusterId']
+        return plugin_context.return_true(task_id=task_id, cluster_id=cluster_id)
+    except Exception as ex:
+        stdio.error("do takeover got exception:%s", ex)
+        return plugin_context.return_false()
\ No newline at end of file
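Both takeover modules build the OCP host credential the same way: password-based SSH becomes a PASSWORD credential, anything else reads the private key (defaulting to ~/.ssh/id_rsa) and ships it as PRIVATE_KEY. Condensed into a sketch, with ssh_config mirroring the fields used above:

import os

def build_credential_property(ssh_config):
    if ssh_config.password:
        return {'type': 'PASSWORD', 'username': ssh_config.username,
                'passphrase': ssh_config.password}
    key_file = ssh_config.key_filename or os.path.expanduser('~/.ssh/id_rsa')
    with open(key_file) as fd:
        return {'type': 'PRIVATE_KEY', 'username': ssh_config.username,
                'passphrase': fd.read()}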
diff --git a/plugins/ocp-server/4.2.1/takeover_precheck.py b/plugins/ocp-server/4.2.1/takeover_precheck.py
index 62fd1dd..466f66b 100644
--- a/plugins/ocp-server/4.2.1/takeover_precheck.py
+++ b/plugins/ocp-server/4.2.1/takeover_precheck.py
@@ -32,141 +32,37 @@ from tool import Cursor, FileUtil, YamlLoader
 from _errno import EC_OBSERVER_CAN_NOT_MIGRATE_IN


-class OcpCursor(object):
-    class Response(object):
-
-        def __init__(self, code, content):
-            self.code = code
-            self.content = content
-
-        def __bool__(self):
-            return self.code == 200
-
-    def __init__(self, base_url="http://localhost:8080", username=None, password=None):
-        self.base_url = base_url.strip('/')
-        self.auth = None
-        self.username=username
-        self.password=password
-        if self.username:
-            self.auth = HTTPBasicAuth(username=username, password=password)
-
-    def status(self, stdio=None):
-        resp = self._request('GET', '/api/v2/time', stdio=stdio)
-        ocp_status_ok = False
-        now = time.time()
-        check_wait_time = 180
-        while time.time() - now < check_wait_time:
-            stdio.verbose("query ocp to check...")
-            try:
-                if resp.code == 200:
-                    ocp_status_ok = True
-                    break
-            except Exception:
-                stdio.verbose("ocp still not active")
-                time.sleep(5)
-        if ocp_status_ok:
-            stdio.verbose("check ocp server status ok")
-            return True
-        else:
-            stdio.verbose("ocp still not ok, check failed")
-            raise Exception("ocp still not ok, check failed")
-
-    def info(self, stdio=None):
-        resp = self._request('GET', '/api/v2/info', stdio=stdio)
-        if resp.code == 200:
-            return resp.content
-        else:
-            msg = resp.content
-            if 'error' in resp.content and 'message' in resp.content['error']:
-                msg = resp.content['error']['message']
-            raise Exception("failed to query ocp info: %s" % msg)
-
-    def take_over_precheck(self, data, stdio=None):
-        resp = self._request('POST', '/api/v2/ob/clusters/takeOverPreCheck', data=data, stdio=stdio)
-        if resp.code == 200:
-            return resp.content
-        else:
-            msg = resp.content
-            if 'error' in resp.content and 'message' in resp.content['error']:
-                msg = resp.content['error']['message']
-            raise Exception("takeover precheck failed: %s" % msg)
-
-    def compute_host_types(self, data, stdio=None):
-        resp = self._request('POST', '/api/v2/compute/hostTypes', data=data, stdio=stdio)
-        if resp.code == 200:
-            return resp.content
-
-    def profiles_credentials(self, data, stdio=None):
-        resp = self._request('POST', '/api/v2/profiles/me/credentials', data=data, stdio=stdio)
-        if resp.code == 200:
-            return resp.content
-
-    def take_over(self, data, stdio=None):
-        resp = self._request('POST', '/api/v2/ob/clusters/takeOver', data=data, stdio=stdio)
-        if resp.code == 200:
-            return resp.content
-
-    def _request(self, method, api, data=None, retry=5, stdio=None):
-        url = self.base_url + api
-        headers = {"Content-Type": "application/json"}
-        try:
-            if data is not None:
-                data = json.dumps(data)
-            stdio.verbose('send http request method: {}, url: {}, data: {}'.format(method, url, data))
-            resp = requests.request(method, url, data=data, verify=False, headers=headers, auth=self.auth)
-            return_code = resp.status_code
-            content = resp.content
-        except Exception as e:
-            if retry:
-                retry -= 1
-                return self._request(method=method, api=api, data=data, retry=retry, stdio=stdio)
-            stdio.exception("")
-            return_code = 500
-            content = str(e)
-        if return_code != 200:
-            stdio.verbose("request ocp-server failed: %s" % content)
-        try:
-            content = json.loads(content.decode())
-        except:
-            pass
-        return self.Response(code=return_code, content=content)
-
-def takeover_precheck(plugin_context, *args, **kwargs):
-    stdio = plugin_context.stdio
+def takeover_precheck(plugin_context, cursors=None, *args, **kwargs):
     try:
-        _do_takeover_precheck(plugin_context, *args, **kwargs)
+        # init variables, include get obcluster info from deploy config
+        cluster_config = plugin_context.cluster_config
+        clients = plugin_context.clients
+        options = plugin_context.options
+        stdio = plugin_context.stdio
+        stdio.verbose(vars(cluster_config))
+        cursors = plugin_context.get_return('connect').get_return('cursor') if not cursors else cursors
+        cursor = cursors[cluster_config.servers[0]]
+        ocp_info = cursor.info(stdio=stdio)
+        stdio.verbose("get ocp info %s", ocp_info)
+        ocp_version = Version(ocp_info['buildVersion'].split("_")[0])
+        if ocp_version < Version("4.2.0"):
+            stdio.error("unable to export obcluster to ocp, ocp version must be at least 4.2.0")
+            return plugin_context.return_false(ocp_version=ocp_version)
+        server = cluster_config.servers[0]
+        mysql_port = cluster_config.get_global_conf().get("mysql_port")
+        root_password = cluster_config.get_global_conf().get("root_password")
+        if root_password is None or root_password == "":
+            stdio.error("unable to export obcluster to ocp, root password is empty")
+            return plugin_context.return_false(ocp_version=ocp_version)
+        precheck_data = {"connectionMode": "direct", "address": server.ip, "port": mysql_port,
+                         "rootSysPassword": root_password}
+        proxyro_password = cluster_config.get_global_conf().get("proxyro_password")
+        if proxyro_password is not None and proxyro_password != "":
+            precheck_data.update({"proxyroPassword": proxyro_password})
+        precheck_result = cursor.take_over_precheck(precheck_data, stdio=stdio)
+        stdio.verbose("precheck result %s" % precheck_result)
+        return plugin_context.return_true(ocp_version=ocp_version)
     except Exception as ex:
         stdio.error("do takeover precheck got exception:%s", ex)
-        return plugin_context.return_false()
-
-def _do_takeover_precheck(plugin_context, *args, **kwargs):
-    # init variables, include get obcluster info from deploy config
-    cluster_config = plugin_context.cluster_config
-    clients = plugin_context.clients
-    options = plugin_context.options
-    stdio = plugin_context.stdio
-    stdio.verbose(vars(cluster_config))
-    address = getattr(options, 'address', '')
-    user = getattr(options, 'user', '')
-    password = getattr(options, 'password', '')
-    ocp_cursor = OcpCursor(base_url=address, username=user, password=password)
-    ocp_info = ocp_cursor.info(stdio=stdio)
-    stdio.verbose("get ocp info %s", ocp_info)
-    ocp_version = Version(ocp_info['buildVersion'].split("_")[0])
-    if ocp_version < Version("4.2.0"):
-        stdio.error("unable to export obcluster to ocp, ocp version must be at least 4.2.0")
-        return plugin_context.return_false(ocp_version=ocp_version)
-    server = cluster_config.servers[0]
-    mysql_port = cluster_config.get_global_conf().get("mysql_port")
-    root_password = cluster_config.get_global_conf().get("root_password")
-    if root_password is None or root_password == "":
-        stdio.error("unable to export obcluster to ocp, root password is empty")
-        return plugin_context.return_false(ocp_version=ocp_version)
-    precheck_data = {"connectionMode":"direct","address":server.ip,"port":mysql_port,"rootSysPassword":root_password}
-    proxyro_password = cluster_config.get_global_conf().get("proxyro_password")
-    if proxyro_password is not None and proxyro_password != "":
-        precheck_data.update({"proxyroPassword": proxyro_password})
-    precheck_result = ocp_cursor.take_over_precheck(precheck_data, stdio=stdio)
-    stdio.verbose("precheck result %s" % precheck_result)
-    return plugin_context.return_true(ocp_version=ocp_version)
+        return plugin_context.return_false()
\ No newline at end of file
diff --git a/plugins/ocp-server/4.2.1/upgrade.py b/plugins/ocp-server/4.2.1/upgrade.py
index 6a6f49a..b62fd4e 100644
--- a/plugins/ocp-server/4.2.1/upgrade.py
+++ b/plugins/ocp-server/4.2.1/upgrade.py
@@ -37,8 +37,6 @@ def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args,
     dev_mode = plugin_context.dev_mode
     stdio = plugin_context.stdio
     upgrade_repositories = kwargs.get('upgrade_repositories')
-    sys_cursor = kwargs.get('sys_cursor')
-    metadb_cursor = kwargs.get('metadb_cursor')

     cur_repository = upgrade_repositories[0]
     dest_repository = upgrade_repositories[-1]
@@ -69,7 +67,7 @@ def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args,
     if not init_plugin(namespace, namespaces, deploy_name, deploy_status, repositories, components, clients, cluster_config, cmds, options, stdio, upgrade=True, *args, **kwargs):
         return plugin_context.return_false()

-    if not start_plugin(namespace, namespaces, deploy_name, deploy_status, repositories, components, clients, cluster_config, cmds, options, stdio, sys_cursor1=sys_cursor, cursor=metadb_cursor, without_parameter=True, *args, **kwargs):
+    if not start_plugin(namespace, namespaces, deploy_name, deploy_status, repositories, components, clients, cluster_config, cmds, options, stdio, without_parameter=True, *args, **kwargs):
         return plugin_context.return_false()

     ret = connect_plugin(namespace, namespaces, deploy_name, deploy_status, repositories, components, clients, cluster_config, cmds, options, stdio, *args, **kwargs)
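upgrade_check.py (next hunk) and both start_check variants share one java gate: java -version output must parse as major version 1.8.0 with an update number of at least 161. The parse in isolation, on a hypothetical sample string:

import re

sample = 'java version "1.8.0_171"'

version_pattern = r'version\s+\"(\d+\.\d+\.\d+)(\_\d+)'
found = re.search(version_pattern, sample)
major, update = found.group(1), int(found.group(2)[1:])
print(major, update, major == '1.8.0' and update >= 161)  # 1.8.0 171 True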
diff --git a/plugins/ocp-server/4.2.1/upgrade_check.py b/plugins/ocp-server/4.2.1/upgrade_check.py
index 7b38e10..2251bff 100644
--- a/plugins/ocp-server/4.2.1/upgrade_check.py
+++ b/plugins/ocp-server/4.2.1/upgrade_check.py
@@ -20,6 +20,7 @@

 from __future__ import absolute_import, division, print_function

+import os
 import re
 import time

@@ -27,7 +28,7 @@ from _rpm import Version


-def upgrade_check(plugin_context, meta_cursor, database='meta_database', init_check_status=False, *args, **kwargs):
+def upgrade_check(plugin_context, meta_cursor, database='meta_database', init_check_status=False, java_check=True, *args, **kwargs):
     def check_pass(item):
         status = check_status[server]
         if status[item].status == err.CheckStatus.WAIT:
@@ -73,35 +74,37 @@ def critical(item, error, suggests=[]):
     if init_check_status:
         return plugin_context.return_true(start_check_status=check_status)

-    stdio.start_loading('Check before upgrade ocp-server')
+    stdio.start_loading('Check before upgrade %s' % cluster_config.name)
     success = True
-    check_pass('java')
     for server in cluster_config.servers:
+        check_pass('java')
         client = clients[server]
         server_config = cluster_config.get_server_conf_with_default(server)
         try:
             # java version check
-            stdio.verbose('java check ')
-            java_bin = server_config.get('java_bin', '/usr/bin/java')
-            ret = client.execute_command('%s -version' % java_bin if not server_config.get('launch_user', None) else 'sudo su - %s -c "%s -version"' % (server_config.get('launch_user', None), java_bin))
-            stdio.verbose('java version %s' % ret)
-            if not ret:
-                critical('java', err.EC_OCP_SERVER_JAVA_NOT_FOUND.format(server=server),
-                         [err.SUG_OCP_SERVER_INSTALL_JAVA_WITH_VERSION.format(version='1.8.0')])
-            version_pattern = r'version\s+\"(\d+\.\d+\.\d+)(\_\d+)'
-            found = re.search(version_pattern, ret.stdout) or re.search(version_pattern, ret.stderr)
-            if not found:
-                error('java', err.EC_OCP_SERVER_JAVA_VERSION_ERROR.format(server=server, version='1.8.0'),
-                      [err.SUG_OCP_SERVER_INSTALL_JAVA_WITH_VERSION.format(version='1.8.0'), ])
-            else:
-                java_major_version = found.group(1)
-                stdio.verbose('java_major_version %s' % java_major_version)
-                java_update_version = found.group(2)[1:]
-                stdio.verbose('java_update_version %s' % java_update_version)
-                if Version(java_major_version) != Version('1.8.0') or int(java_update_version) < 161:
-                    critical('java', err.EC_OCP_SERVER_JAVA_VERSION_ERROR.format(server=server, version='1.8.0'),
-                             [err.SUG_OCP_SERVER_INSTALL_JAVA_WITH_VERSION.format(version='1.8.0'), ])
+            if java_check:
+                stdio.verbose('java check ')
+                java_bin = server_config.get('java_bin', '/usr/bin/java')
+                client.add_env('PATH', '%s/jre/bin:' % server_config['home_path'])
+                ret = client.execute_command('%s -version' % java_bin if not server_config.get('launch_user', None) else 'sudo su - %s -c "%s -version"' % (server_config.get('launch_user', None), java_bin))
+                stdio.verbose('java version %s' % ret)
+                if not ret:
+                    critical('java', err.EC_OCP_SERVER_JAVA_NOT_FOUND.format(server=server),
+                             [err.SUG_OCP_SERVER_INSTALL_JAVA_WITH_VERSION.format(version='1.8.0')])
+                version_pattern = r'version\s+\"(\d+\.\d+\.\d+)(\_\d+)'
+                found = re.search(version_pattern, ret.stdout) or re.search(version_pattern, ret.stderr)
+                if not found:
+                    error('java', err.EC_OCP_SERVER_JAVA_VERSION_ERROR.format(server=server, version='1.8.0'),
+                          [err.SUG_OCP_SERVER_INSTALL_JAVA_WITH_VERSION.format(version='1.8.0'), ])
+                else:
+                    java_major_version = found.group(1)
+                    stdio.verbose('java_major_version %s' % java_major_version)
+                    java_update_version = found.group(2)[1:]
+                    stdio.verbose('java_update_version %s' % java_update_version)
+                    if Version(java_major_version) != Version('1.8.0') or int(java_update_version) < 161:
+                        critical('java', err.EC_OCP_SERVER_JAVA_VERSION_ERROR.format(server=server, version='1.8.0'),
+                                 [err.SUG_OCP_SERVER_INSTALL_JAVA_WITH_VERSION.format(version='1.8.0'), ])
         except Exception as e:
             stdio.error(e)
             error('java', err.EC_OCP_SERVER_JAVA_VERSION_ERROR.format(server=server, version='1.8.0'),
@@ -122,6 +125,10 @@ def critical(item, error, suggests=[]):
             success = False
             error('metadb_version', err.EC_OCP_SERVER_METADB_VERSION)

+    bootstrap_flag = os.path.join(server_config['home_path'], '.bootstrapped')
+    if not client.execute_command('ls %s' % bootstrap_flag):
+        client.execute_command('touch %s' % bootstrap_flag)
+
     if success:
         stdio.stop_loading('succeed')
         return plugin_context.return_true()
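The .bootstrapped marker touched above is the same flag the new 4.2.2 start_check (next file) consults before enforcing admin_password rules: checks that only make sense for a first bootstrap are skipped once the flag exists. The pattern in miniature, with illustrative paths:

import os

def needs_bootstrap(home_path):
    """True when the one-time bootstrap has not been recorded yet."""
    return not os.path.exists(os.path.join(home_path, '.bootstrapped'))

def mark_bootstrapped(home_path):
    # Touch the flag so later checks (and upgrades) treat the install as bootstrapped.
    open(os.path.join(home_path, '.bootstrapped'), 'a').close()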
disk_info: + disk_info['/'] = {'total': 0, 'avail': 0, 'need': 0} + all_path_success = {} + for path in all_paths: + all_path_success[path] = False + cur_path = path + while cur_path not in disk_info: + disk_info_for_current_path = get_disk_info_by_path(cur_path, client, stdio) + if disk_info_for_current_path: + disk_info.update(disk_info_for_current_path) + all_path_success[path] = True + break + else: + cur_path = os.path.dirname(cur_path) + if overview_ret or all(all_path_success.values()): + return disk_info + + +def get_ocp_depend_config(cluster_config, stdio): + # depends config + env = {} + depend_observer = False + depend_info = {} + ob_servers_conf = {} + for comp in ["oceanbase", "oceanbase-ce"]: + ob_zones = {} + if comp in cluster_config.depends: + depend_observer = True + ob_servers = cluster_config.get_depend_servers(comp) + for ob_server in ob_servers: + ob_servers_conf[ob_server] = ob_server_conf = cluster_config.get_depend_config(comp, ob_server) + if 'server_ip' not in depend_info: + depend_info['server_ip'] = ob_server.ip + depend_info['mysql_port'] = ob_server_conf['mysql_port'] + depend_info['meta_tenant'] = ob_server_conf['ocp_meta_tenant']['tenant_name'] + depend_info['meta_user'] = ob_server_conf['ocp_meta_username'] + depend_info['meta_password'] = ob_server_conf['ocp_meta_password'] + depend_info['meta_db'] = ob_server_conf['ocp_meta_db'] + depend_info['monitor_tenant'] = ob_server_conf['ocp_monitor_tenant']['tenant_name'] + depend_info['monitor_user'] = ob_server_conf['ocp_monitor_username'] + depend_info['monitor_password'] = ob_server_conf['ocp_monitor_password'] + depend_info['monitor_db'] = ob_server_conf['ocp_monitor_db'] + zone = ob_server_conf['zone'] + if zone not in ob_zones: + ob_zones[zone] = ob_server + break + for comp in ['obproxy', 'obproxy-ce']: + if comp in cluster_config.depends: + obproxy_servers = cluster_config.get_depend_servers(comp) + obproxy_server = obproxy_servers[0] + obproxy_server_config = cluster_config.get_depend_config(comp, obproxy_server) + depend_info['server_ip'] = obproxy_server.ip + depend_info['mysql_port'] = obproxy_server_config['listen_port'] + break + + for server in cluster_config.servers: + default_server_config = deepcopy(cluster_config.get_server_conf_with_default(server)) + server_config = deepcopy(cluster_config.get_server_conf(server)) + original_server_config = cluster_config.get_original_server_conf_with_global(server) + missed_keys = get_missing_required_parameters(original_server_config) + if missed_keys: + if 'jdbc_url' in missed_keys and depend_observer: + default_server_config['jdbc_url'] = 'jdbc:oceanbase://{}:{}/{}'.format(depend_info['server_ip'], depend_info['mysql_port'], depend_info['meta_db'] if not original_server_config.get('ocp_meta_db', None) else original_server_config['ocp_meta_db']) if not original_server_config.get('jdbc_url', None) else original_server_config['jdbc_url'] + default_server_config['ocp_meta_username'] = depend_info['meta_user'] if not original_server_config.get('ocp_meta_username', None) else original_server_config['ocp_meta_username'] + default_server_config['ocp_meta_tenant']['tenant_name'] = depend_info['meta_tenant'] if not original_server_config.get('ocp_meta_tenant', None) else original_server_config['ocp_meta_tenant']['tenant_name'] + default_server_config['ocp_meta_password'] = depend_info['meta_password'] if not original_server_config.get('ocp_meta_password', None) else original_server_config['ocp_meta_password'] + default_server_config['ocp_meta_db'] = 
depend_info['meta_db'] if not original_server_config.get('ocp_meta_db', None) else original_server_config['ocp_meta_db'] + default_server_config['ocp_monitor_username'] = depend_info['monitor_user'] if not original_server_config.get('ocp_monitor_username', None) else original_server_config['ocp_monitor_username'] + default_server_config['ocp_monitor_tenant']['tenant_name'] = depend_info['monitor_tenant'] if not original_server_config.get('ocp_monitor_tenant', None) else original_server_config['ocp_monitor_tenant']['tenant_name'] + default_server_config['ocp_monitor_password'] = depend_info['monitor_password'] if not original_server_config.get('ocp_monitor_password', None) else original_server_config['ocp_monitor_password'] + default_server_config['ocp_monitor_db'] = depend_info['monitor_db'] if not original_server_config.get('ocp_monitor_db', None) else original_server_config['ocp_monitor_db'] + env[server] = default_server_config + return env + +def execute_cmd(server_config, cmd): + return cmd if not server_config.get('launch_user', None) else 'sudo ' + cmd + + +def start_check(plugin_context, init_check_status=False, work_dir_check=False, work_dir_empty_check=True, strict_check=False, precheck=False, + source_option="start", java_check=True, *args, **kwargs): + + def check_pass(item): + status = check_status[server] + if status[item].status == err.CheckStatus.WAIT: + status[item].status = err.CheckStatus.PASS + def check_fail(item, error, suggests=[]): + status = check_status[server][item] + if status.status == err.CheckStatus.WAIT: + status.error = error + status.suggests = suggests + status.status = err.CheckStatus.FAIL + def wait_2_pass(): + status = check_status[server] + for item in status: + check_pass(item) + def alert(item, error, suggests=[]): + global success + if strict_check: + success = False + check_fail(item, error, suggests) + stdio.error(error) + else: + stdio.warn(error) + def error(item, _error, suggests=[]): + global success + if plugin_context.dev_mode: + stdio.warn(_error) + else: + success = False + check_fail(item, _error, suggests) + stdio.error(_error) + def critical(item, error, suggests=[]): + global success + success = False + check_fail(item, error, suggests) + stdio.error(error) + def get_option(key, default=''): + value = getattr(options, key, default) + if not value: + value = default + return value + + cluster_config = plugin_context.cluster_config + options = plugin_context.options + clients = plugin_context.clients + stdio = plugin_context.stdio + deploy_status = plugin_context.deploy_status + global success + success = True + + check_status = {} + plugin_context.set_variable('start_check_status', check_status) + for server in cluster_config.servers: + check_status[server] = { + 'metadb connect': err.CheckStatus(), + 'port': err.CheckStatus(), + 'java': err.CheckStatus(), + 'disk': err.CheckStatus(), + 'mem': err.CheckStatus(), + 'oceanbase version': err.CheckStatus(), + 'time check': err.CheckStatus(), + 'launch user': err.CheckStatus(), + 'sudo nopasswd': err.CheckStatus(), + 'tenant': err.CheckStatus(), + 'clockdiff': err.CheckStatus(), + 'admin_password': err.CheckStatus() + } + if work_dir_check: + check_status[server]['dir'] = err.CheckStatus() + if init_check_status: + return plugin_context.return_true(start_check_status=check_status) + + stdio.start_loading('Check before start ocp-server') + env = get_ocp_depend_config(cluster_config, stdio) + if not env: + return plugin_context.return_false() + + stdio.verbose('oceanbase version check') + 
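# Illustration only, not part of the patch: a minimal sketch of the jdbc_url
# fallback rule that get_ocp_depend_config() applies above. depend_info mirrors
# the dict built there from a depends oceanbase/obproxy component;
# derive_jdbc_url and user_conf are illustrative names, not OBD API.
def derive_jdbc_url(depend_info, user_conf):
    # an explicitly configured jdbc_url always wins
    if user_conf.get('jdbc_url'):
        return user_conf['jdbc_url']
    # otherwise derive it from the depends server address and the meta database
    meta_db = user_conf.get('ocp_meta_db') or depend_info['meta_db']
    return 'jdbc:oceanbase://{}:{}/{}'.format(depend_info['server_ip'], depend_info['mysql_port'], meta_db)
# e.g. derive_jdbc_url({'server_ip': '10.0.0.1', 'mysql_port': 2881, 'meta_db': 'ocp_meta'}, {})
# returns 'jdbc:oceanbase://10.0.0.1:2881/ocp_meta'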
versions_check = { + "oceanbase version": { + 'comps': ['oceanbase', 'oceanbase-ce'], + 'min_version': Version('4.0') + }, + } + repo_versions = {} + for repository in plugin_context.repositories: + repo_versions[repository.name] = repository.version + + for check_item in versions_check: + for comp in versions_check[check_item]['comps']: + if comp not in cluster_config.depends: + continue + depend_comp_version = repo_versions.get(comp) + if depend_comp_version is None: + stdio.verbose('failed to get {} version, skip version check'.format(comp)) + continue + min_version = versions_check[check_item]['min_version'] + if depend_comp_version < min_version: + critical(check_item, err.EC_OCP_SERVER_DEPENDS_COMP_VERSION.format(ocp_server_version=cluster_config.version, comp=comp, comp_version=min_version)) + + server_port = {} + servers_dirs = {} + servers_check_dirs = {} + for server in cluster_config.servers: + client = clients[server] + + if not (client.execute_command('sudo -n true') or client.execute_command('[ `id -u` == "0" ]')): + critical('sudo nopasswd', err.EC_OCP_SERVER_SUDO_NOPASSWD.format(ip=str(server), user=client.config.username), + [err.SUG_OCP_SERVER_SUDO_NOPASSWD.format(ip=str(server), user=client.config.username)]) + server_config = env[server] + missed_keys = get_missing_required_parameters(server_config) + if missed_keys: + stdio.error(err.EC_NEED_CONFIG.format(server=server, component=cluster_config.name, miss_keys=missed_keys)) + success = False + home_path = server_config['home_path'] + if not precheck: + remote_pid_path = '%s/run/ocp-server.pid' % home_path + remote_pid = client.execute_command(execute_cmd(server_config, 'cat %s' % remote_pid_path)).stdout.strip() + if remote_pid: + if client.execute_command(execute_cmd(server_config, 'ls /proc/%s' % remote_pid)): + stdio.verbose('%s is running, skip' % server) + wait_2_pass() + continue + + if not cluster_config.depends and not precheck: + # check meta db connect before start + jdbc_url = server_config['jdbc_url'] + matched = re.match(r"^jdbc:\S+://(\S+?)(|:\d+)/(\S+)", jdbc_url) + cursor = getattr(options, 'metadb_cursor', '') + cursor = kwargs.get('metadb_cursor', '') if cursor == '' else cursor + stdio.verbose('metadb connect check') + if matched: + jdbc_host = matched.group(1) + jdbc_port = matched.group(2)[1:] + jdbc_database = matched.group(3) + connected = False + retries = 10 + while not connected and retries: + retries -= 1 + try: + cursor = Cursor(ip=jdbc_host, port=jdbc_port, user="{0}@{1}".format(server_config['ocp_meta_username'], server_config['ocp_meta_tenant']['tenant_name']), password=server_config['ocp_meta_password'], + stdio=stdio) + connected = True + stdio.verbose('check cursor passed') + except: + stdio.verbose('check cursor failed') + time.sleep(1) + if not connected: + success = False + error('metadb connect', err.EC_OCP_SERVER_CONNECT_METADB, [err.SUG_OCP_SERVER_JDBC_URL_CONFIG_ERROR]) + else: + critical('metadb connect', err.EC_OCP_SERVER_ERROR_JDBC_URL, [err.SUG_OCP_SERVER_JDBC_URL_CONFIG_ERROR]) + client = clients[server] + # time check + stdio.verbose('time check ') + now = client.execute_command('date +"%Y-%m-%d %H:%M:%S"').stdout.strip() + now = datetime.datetime.strptime(now, '%Y-%m-%d %H:%M:%S') + stdio.verbose('now: %s' % now) + stdio.verbose('cursor: %s' % cursor) + if cursor: + ob_time = cursor.fetchone("SELECT NOW() now")['now'] + stdio.verbose('ob_time: %s' % ob_time) + if not abs((now - ob_time).total_seconds()) < 180: + critical('time check', 
err.EC_OCP_SERVER_TIME_SHIFT.format(server=server)) + + # user check + stdio.verbose('user check ') + ocp_user = server_config.get('launch_user', '') + if ocp_user: + client = clients[server] + if not client.execute_command(execute_cmd(server_config, "id -u %s" % ocp_user)): + critical('launch user', err.EC_OCP_SERVER_LAUNCH_USER_NOT_EXIST.format(server=server, user=ocp_user)) + + if work_dir_check: + ip = server.ip + stdio.verbose('%s dir check' % server) + if ip not in servers_dirs: + servers_dirs[ip] = {} + servers_check_dirs[ip] = {} + dirs = servers_dirs[ip] + check_dirs = servers_check_dirs[ip] + original_server_conf = cluster_config.get_server_conf(server) + + keys = ['home_path', 'log_dir', 'soft_dir'] + for key in keys: + path = server_config.get(key) + suggests = [err.SUG_CONFIG_CONFLICT_DIR.format(key=key, server=server)] + if path in dirs and dirs[path]: + critical('dir', err.EC_CONFIG_CONFLICT_DIR.format(server1=server, path=path, server2=dirs[path]['server'], key=dirs[path]['key']), suggests) + dirs[path] = { + 'server': server, + 'key': key, + } + if key not in original_server_conf: + continue + empty_check = work_dir_empty_check + while True: + if path in check_dirs: + if check_dirs[path] != True: + critical('dir', check_dirs[path], suggests) + break + + if client.execute_command(execute_cmd(server_config, 'bash -c "[ -a %s ]"' % path)): + is_dir = client.execute_command(execute_cmd(server_config, '[ -d {} ]'.format(path))) + has_write_permission = client.execute_command(execute_cmd(server_config, '[ -w {} ]'.format(path))) + if is_dir and has_write_permission: + if empty_check: + check_privilege_cmd = "ls %s" % path + if server_config.get('launch_user', ''): + check_privilege_cmd = "sudo su - %s -c 'ls %s'" % (server_config['launch_user'], path) + ret = client.execute_command(check_privilege_cmd) + if not ret: + check_dirs[path] = err.EC_OCP_SERVER_DIR_ACCESS_FORBIDE.format(server=server, path=path, cur_path=path) + elif ret.stdout.strip(): + check_dirs[path] = err.EC_FAIL_TO_INIT_PATH.format(server=server, key=key, msg=err.InitDirFailedErrorMessage.NOT_EMPTY.format(path=path)) + else: + check_dirs[path] = True + else: + check_dirs[path] = True + else: + if not is_dir: + check_dirs[path] = err.EC_FAIL_TO_INIT_PATH.format(server=server, key=key, msg=err.InitDirFailedErrorMessage.NOT_DIR.format(path=path)) + else: + check_dirs[path] = err.EC_FAIL_TO_INIT_PATH.format(server=server, key=key, msg=err.InitDirFailedErrorMessage.PERMISSION_DENIED.format(path=path)) + else: + path = os.path.dirname(path) + empty_check = False + + stdio.verbose('port check ') + port = server_config['port'] + ip = server.ip + if ip not in server_port: + server_port[ip] = {} + ports = server_port[ip] + if port in server_port[ip]: + critical( + 'port', + err.EC_CONFIG_CONFLICT_PORT.format(server1=server, port=port, server2=ports[port]['server'], + key=ports[port]['key']), + [err.SUG_PORT_CONFLICTS.format()] + ) + ports[port] = { + 'server': server, + 'key': 'port' + } + if source_option == 'start' and get_port_socket_inode(client, port): + critical( + 'port', + err.EC_CONFLICT_PORT.format(server=ip, port=port), + [err.SUG_USE_OTHER_PORT.format()] + ) + check_pass('port') + + try: + # java version check + if java_check: + stdio.verbose('java check ') + java_bin = server_config.get('java_bin', '/usr/bin/java') + client.add_env('PATH', '%s/jre/bin:' % server_config['home_path']) + ret = client.execute_command(execute_cmd(server_config, '{} -version'.format(java_bin))) + stdio.verbose('java version %s' % 
ret) + if not ret: + critical('java', err.EC_OCP_SERVER_JAVA_NOT_FOUND.format(server=server), [err.SUG_OCP_SERVER_INSTALL_JAVA_WITH_VERSION.format(version='1.8.0')]) + version_pattern = r'version\s+\"(\d+\.\d+\.\d+)(\_\d+)' + found = re.search(version_pattern, ret.stdout) or re.search(version_pattern, ret.stderr) + if not found: + error('java', err.EC_OCP_SERVER_JAVA_VERSION_ERROR.format(server=server, version='1.8.0'), [err.SUG_OCP_SERVER_INSTALL_JAVA_WITH_VERSION.format(version='1.8.0'),]) + else: + java_major_version = found.group(1) + stdio.verbose('java_major_version %s' % java_major_version) + java_update_version = found.group(2)[1:] + stdio.verbose('java_update_version %s' % java_update_version) + if Version(java_major_version) != Version('1.8.0') or int(java_update_version) < 161: + critical('java', err.EC_OCP_SERVER_JAVA_VERSION_ERROR.format(server=server, version='1.8.0'), [err.SUG_OCP_SERVER_INSTALL_JAVA_WITH_VERSION.format(version='1.8.0'),]) + except Exception as e: + stdio.error(e) + error('java', err.EC_OCP_SERVER_JAVA_VERSION_ERROR.format(server=server, version='1.8.0'), + [err.SUG_OCP_SERVER_INSTALL_JAVA_WITH_VERSION.format(version='1.8.0'), ]) + + try: + # clockdiff status check + stdio.verbose('clockdiff check ') + clockdiff_cmd = 'clockdiff -o 127.0.0.1' + if client.execute_command(clockdiff_cmd): + check_pass('clockdiff') + else: + if not client.execute_command('sudo -n true'): + critical('clockdiff', err.EC_OCP_SERVER_CLOCKDIFF_NOT_EXISTS.format(server=server)) + ret = client.execute_command('sudo ' + clockdiff_cmd) + if not ret: + critical('clockdiff', err.EC_OCP_SERVER_CLOCKDIFF_NOT_EXISTS.format(server=server)) + + clockdiff_bin = 'type -P clockdiff' + res = client.execute_command(clockdiff_bin).stdout + client.execute_command('sudo chmod u+s %s' % res) + client.execute_command("sudo setcap 'cap_net_raw+ep' %s" % res) + except Exception as e: + stdio.error(e) + critical('clockdiff', err.EC_OCP_SERVER_CLOCKDIFF_NOT_EXISTS.format(server=server)) + + servers_memory = {} + servers_disk = {} + servers_client = {} + ip_servers = {} + MIN_MEMORY_VALUE = 1073741824 + + memory_size = Capacity(server_config.get('memory_size', '1G')).btyes + if server_config.get('log_dir'): + log_dir = server_config['log_dir'] + else: + log_dir = os.path.join(server_config['home_path'], 'log') + need_size = Capacity(server_config.get('logging_file_total_size_cap', '1G')).btyes + ip = server.ip + if ip not in servers_client: + servers_client[ip] = client + if ip not in servers_memory: + servers_memory[ip] = { + 'need': memory_size, + 'server_num': 1 + } + else: + servers_memory[ip]['need'] += memory_size + servers_memory[ip]['server_num'] += 1 + if ip not in servers_disk: + servers_disk[ip] = {} + if log_dir not in servers_disk[ip]: + servers_disk[ip][log_dir] = need_size + else: + servers_disk[ip][log_dir] += need_size + if ip not in ip_servers: + ip_servers[ip] = [server] + else: + ip_servers[ip].append(server) + # memory check + stdio.verbose('memory check ') + for ip in servers_memory: + client = servers_client[ip] + memory_needed = servers_memory[ip]['need'] + ret = client.execute_command('cat /proc/meminfo') + if ret: + server_memory_stats = {} + memory_key_map = { + 'MemTotal': 'total', + 'MemFree': 'free', + 'MemAvailable': 'available', + 'Buffers': 'buffers', + 'Cached': 'cached' + } + for key in memory_key_map: + server_memory_stats[memory_key_map[key]] = 0 + + for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout): + if k in memory_key_map: + key = memory_key_map[k] + 
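# Illustration only, not part of the patch: each value matched from
# /proc/meminfo looks like '16384 kB'; Capacity(str(v)).btyes converts it to
# bytes. A standalone equivalent (parse_meminfo_field is an illustrative
# helper, not OBD API):
#
#     import re
#     def parse_meminfo_field(field):
#         m = re.match(r'(\d+)\s*(\w*)', field)
#         num, unit = int(m.group(1)), m.group(2).upper()
#         return num * {'': 1, 'B': 1, 'KB': 1 << 10, 'MB': 1 << 20, 'GB': 1 << 30}.get(unit, 1)
#
#     parse_meminfo_field('16384 kB')  # -> 16777216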
server_memory_stats[key] = Capacity(str(v)).btyes + mem_suggests = [err.SUG_OCP_SERVER_REDUCE_MEM.format()] + if memory_needed > server_memory_stats['available']: + for server in ip_servers[ip]: + error('mem', err.EC_OCP_SERVER_NOT_ENOUGH_MEMORY_AVAILABLE.format(ip=ip, available=Capacity(server_memory_stats['available']), need=Capacity(memory_needed)), suggests=mem_suggests) + elif memory_needed > server_memory_stats['free'] + server_memory_stats['buffers'] + server_memory_stats['cached']: + for server in ip_servers[ip]: + error('mem', err.EC_OCP_SERVER_NOT_ENOUGH_MEMORY_CACHED.format(ip=ip, free=Capacity(server_memory_stats['free']), cached=Capacity(server_memory_stats['buffers'] + server_memory_stats['cached']), need=Capacity(memory_needed)), suggests=mem_suggests) + elif server_memory_stats['free'] < MIN_MEMORY_VALUE: + for server in ip_servers[ip]: + alert('mem', err.EC_OCP_SERVER_NOT_ENOUGH_MEMORY.format(ip=ip, free=Capacity(server_memory_stats['free']), need=Capacity(memory_needed)), suggests=mem_suggests) + # disk check + stdio.verbose('disk check ') + for ip in servers_disk: + client = servers_client[ip] + disk_info = get_disk_info(all_paths=servers_disk[ip], client=client, stdio=stdio) + if disk_info: + for path in servers_disk[ip]: + disk_needed = servers_disk[ip][path] + mount_path = get_mount_path(disk_info, path) + if disk_needed > disk_info[mount_path]['avail']: + for server in ip_servers[ip]: + error('disk', err.EC_OCP_SERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=mount_path, need=Capacity(disk_needed), avail=Capacity(disk_info[mount_path]['avail'])), suggests=[err.SUG_OCP_SERVER_REDUCE_DISK.format()]) + else: + stdio.warn(err.WC_OCP_SERVER_FAILED_TO_GET_DISK_INFO.format(ip)) + + # admin_passwd check + bootstrap_flag = os.path.join(home_path, '.bootstrapped') + if deploy_status == DeployStatus.STATUS_DEPLOYED and not client.execute_command('ls %s' % bootstrap_flag) and not get_option('skip_password_check', False): + for server in cluster_config.servers: + server_config = env[server] + admin_passwd = server_config['admin_password'] + if not admin_passwd or not password_check(admin_passwd): + error('admin_password', err.EC_COMPONENT_PASSWD_ERROR.format(ip=server.ip, component='ocp', key='admin_password', rule='Must be 8 to 32 characters in length, containing at least 3 types from digits, lowercase letters, uppercase letters and the following special characters: ~!@#%^&*_-+=|(){}[]:;,.?/$`\'"\\<>'), suggests=[err.SUG_OCP_SERVER_EDIT_ADMIN_PASSWD_ERROR.format()]) + + plugin_context.set_variable('start_env', env) + + for server in cluster_config.servers: + wait_2_pass() + + if success: + stdio.stop_loading('succeed') + return plugin_context.return_true() + else: + stdio.stop_loading('fail') + return plugin_context.return_false() diff --git a/plugins/openjdk-jre/1.8.0_322/file_map.yaml b/plugins/openjdk-jre/1.8.0_322/file_map.yaml new file mode 100644 index 0000000..21b3268 --- /dev/null +++ b/plugins/openjdk-jre/1.8.0_322/file_map.yaml @@ -0,0 +1,3 @@ +- src_path: jre + target_path: '' + type: dir diff --git a/plugins/sysbench/3.1.0/pre_test.py b/plugins/sysbench/3.1.0/pre_test.py index 3098562..04886dc 100644 --- a/plugins/sysbench/3.1.0/pre_test.py +++ b/plugins/sysbench/3.1.0/pre_test.py @@ -21,43 +21,14 @@ from __future__ import absolute_import, division, print_function import os -import re import subprocess from ssh import LocalClient +from _types import Capacity stdio = None -def parse_size(size): - _bytes = 0 - if not isinstance(size, str) or size.isdigit(): - _bytes = 
int(size) - else: - units = {"B": 1, "K": 1 << 10, "M": 1 << 20, "G": 1 << 30, "T": 1 << 40} - match = re.match(r'(0|[1-9][0-9]*)\s*([B,K,M,G,T])', size.upper()) - _bytes = int(match.group(1)) * units[match.group(2)] - return _bytes - - -def format_size(size, precision=1): - units = ['B', 'K', 'M', 'G'] - units_num = len(units) - 1 - idx = 0 - if precision: - div = 1024.0 - format = '%.' + str(precision) + 'f%s' - limit = 1024 - else: - div = 1024 - limit = 1024 - format = '%d%s' - while idx < units_num and size >= limit: - size /= div - idx += 1 - return format % (size, units[idx]) - - def pre_test(plugin_context, cursor, *args, **kwargs): def get_option(key, default=''): value = getattr(options, key, default) @@ -137,7 +108,7 @@ def get_option(key, default=''): if ret: server_num = ret.get("server_num", server_num) return plugin_context.return_true( - max_cpu=max_cpu, threads=threads, parse_size=parse_size, tenant=tenant_name, tenant_id=tenant_meta['tenant_id'], - format_size=format_size, server_num=server_num, obclient_bin=obclient_bin, host=host, port=port, user=user, + max_cpu=max_cpu, threads=threads, Capacity=Capacity, tenant=tenant_name, tenant_id=tenant_meta['tenant_id'], + format_size=Capacity, server_num=server_num, obclient_bin=obclient_bin, host=host, port=port, user=user, password=password, database=mysql_db ) \ No newline at end of file diff --git a/plugins/sysbench/4.0.0.0/pre_test.py b/plugins/sysbench/4.0.0.0/pre_test.py index 6c1a52f..43a7245 100644 --- a/plugins/sysbench/4.0.0.0/pre_test.py +++ b/plugins/sysbench/4.0.0.0/pre_test.py @@ -21,43 +21,14 @@ from __future__ import absolute_import, division, print_function import os -import re import subprocess from ssh import LocalClient +from _types import Capacity stdio = None -def parse_size(size): - _bytes = 0 - if not isinstance(size, str) or size.isdigit(): - _bytes = int(size) - else: - units = {"B": 1, "K": 1 << 10, "M": 1 << 20, "G": 1 << 30, "T": 1 << 40} - match = re.match(r'(0|[1-9][0-9]*)\s*([B,K,M,G,T])', size.upper()) - _bytes = int(match.group(1)) * units[match.group(2)] - return _bytes - - -def format_size(size, precision=1): - units = ['B', 'K', 'M', 'G'] - units_num = len(units) - 1 - idx = 0 - if precision: - div = 1024.0 - format = '%.' 
+ str(precision) + 'f%s' - limit = 1024 - else: - div = 1024 - limit = 1024 - format = '%d%s' - while idx < units_num and size >= limit: - size /= div - idx += 1 - return format % (size, units[idx]) - - def pre_test(plugin_context, cursor, *args, **kwargs): def get_option(key, default=''): value = getattr(options, key, default) @@ -141,7 +112,7 @@ def get_option(key, default=''): if ret: server_num = ret.get("server_num", server_num) return plugin_context.return_true( - max_cpu=max_cpu, threads=threads, parse_size=parse_size, tenant=tenant_name, tenant_id=tenant_meta['TENANT_ID'], - format_size=format_size, server_num=server_num, obclient_bin=obclient_bin, host=host, port=port, user=user, + max_cpu=max_cpu, threads=threads, Capacity=Capacity, tenant=tenant_name, tenant_id=tenant_meta['TENANT_ID'], + format_size=Capacity, server_num=server_num, obclient_bin=obclient_bin, host=host, port=port, user=user, password=password, database=mysql_db ) \ No newline at end of file diff --git a/plugins/tpch/3.1.0/pre_test.py b/plugins/tpch/3.1.0/pre_test.py index def7161..c905130 100644 --- a/plugins/tpch/3.1.0/pre_test.py +++ b/plugins/tpch/3.1.0/pre_test.py @@ -30,24 +30,7 @@ import subprocess from ssh import LocalClient from tool import DirectoryUtil - - -def format_size(size, precision=1): - units = ['B', 'K', 'M', 'G'] - units_num = len(units) - 1 - idx = 0 - if precision: - div = 1024.0 - format = '%.' + str(precision) + 'f%s' - limit = 1024 - else: - div = 1024 - limit = 1024 - format = '%d%s' - while idx < units_num and size >= limit: - size /= div - idx += 1 - return format % (size, units[idx]) +from _types import Capacity def pre_test(plugin_context, cursor, *args, **kwargs): @@ -141,7 +124,7 @@ def local_execute_command(command, env=None, timeout=None): if get_option('test_only'): return plugin_context.return_true( max_cpu=max_cpu, min_memory=min_memory, unit_count=unit_count, server_num=server_num, tenant=tenant_name, - tenant_id=tenant_meta['tenant_id'], format_size=format_size + tenant_id=tenant_meta['tenant_id'], format_size=Capacity ) if not remote_tbl_dir: @@ -198,7 +181,7 @@ def local_execute_command(command, env=None, timeout=None): return plugin_context.return_true( obclient_bin=obclient_bin, host=host, port=port, user=user, password=password, database=mysql_db, max_cpu=max_cpu, min_memory=min_memory, unit_count=unit_count, server_num=server_num, tenant=tenant_name, - tenant_id=tenant_meta['tenant_id'], format_size=format_size + tenant_id=tenant_meta['tenant_id'], format_size=Capacity ) diff --git a/plugins/tpch/3.1.0/queries/db18.sql b/plugins/tpch/3.1.0/queries/db18.sql index 64be93b..9a40fcb 100644 --- a/plugins/tpch/3.1.0/queries/db18.sql +++ b/plugins/tpch/3.1.0/queries/db18.sql @@ -32,4 +32,5 @@ group by o_totalprice order by o_totalprice desc, - o_orderdate; + o_orderdate +limit 100; diff --git a/plugins/tpch/3.1.0/queries/db2.sql b/plugins/tpch/3.1.0/queries/db2.sql index 6a0b19d..578e57f 100644 --- a/plugins/tpch/3.1.0/queries/db2.sql +++ b/plugins/tpch/3.1.0/queries/db2.sql @@ -43,4 +43,5 @@ order by s_acctbal desc, n_name, s_name, - p_partkey; + p_partkey +limit 100; diff --git a/plugins/tpch/3.1.0/queries/db21.sql b/plugins/tpch/3.1.0/queries/db21.sql index da23dec..bf04b8a 100644 --- a/plugins/tpch/3.1.0/queries/db21.sql +++ b/plugins/tpch/3.1.0/queries/db21.sql @@ -39,4 +39,5 @@ group by s_name order by numwait desc, - s_name; + s_name +limit 100; diff --git a/plugins/tpch/3.1.0/queries/db3.sql b/plugins/tpch/3.1.0/queries/db3.sql index 941d623..3b5dcce 100644 --- 
a/plugins/tpch/3.1.0/queries/db3.sql +++ b/plugins/tpch/3.1.0/queries/db3.sql @@ -22,4 +22,5 @@ group by o_shippriority order by revenue desc, - o_orderdate; + o_orderdate +limit 10; diff --git a/plugins/tpch/3.1.0/run_test.py b/plugins/tpch/3.1.0/run_test.py index bc50c5b..9bd25a7 100644 --- a/plugins/tpch/3.1.0/run_test.py +++ b/plugins/tpch/3.1.0/run_test.py @@ -30,21 +30,9 @@ import subprocess from ssh import LocalClient - stdio = None -def parse_size(size): - _bytes = 0 - if not isinstance(size, str) or size.isdigit(): - _bytes = int(size) - else: - units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40} - match = re.match(r'(0|[1-9][0-9]*)\s*([B,K,M,G,T])', size.upper()) - _bytes = int(match.group(1)) * units[match.group(2)] - return _bytes - - def exec_cmd(cmd): stdio.verbose('execute: %s' % cmd) process = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) diff --git a/plugins/tpch/4.0.0.0/pre_test.py b/plugins/tpch/4.0.0.0/pre_test.py index f42457a..b547417 100644 --- a/plugins/tpch/4.0.0.0/pre_test.py +++ b/plugins/tpch/4.0.0.0/pre_test.py @@ -30,24 +30,7 @@ import subprocess from ssh import LocalClient from tool import DirectoryUtil - - -def format_size(size, precision=1): - units = ['B', 'K', 'M', 'G'] - units_num = len(units) - 1 - idx = 0 - if precision: - div = 1024.0 - format = '%.' + str(precision) + 'f%s' - limit = 1024 - else: - div = 1024 - limit = 1024 - format = '%d%s' - while idx < units_num and size >= limit: - size /= div - idx += 1 - return format % (size, units[idx]) +from _types import Capacity def pre_test(plugin_context, cursor, *args, **kwargs): @@ -145,7 +128,7 @@ def local_execute_command(command, env=None, timeout=None): if get_option('test_only'): return plugin_context.return_true( max_cpu=max_cpu, min_memory=min_memory, unit_count=unit_count, server_num=server_num, tenant=tenant_name, - tenant_id=tenant_meta['TENANT_ID'], format_size=format_size + tenant_id=tenant_meta['TENANT_ID'], format_size=Capacity ) if not remote_tbl_dir: @@ -202,7 +185,7 @@ def local_execute_command(command, env=None, timeout=None): return plugin_context.return_true( obclient_bin=obclient_bin, host=host, port=port, user=user, password=password, database=mysql_db, max_cpu=max_cpu, min_memory=min_memory, unit_count=unit_count, server_num=server_num, tenant=tenant_name, - tenant_id=tenant_meta['TENANT_ID'], format_size=format_size + tenant_id=tenant_meta['TENANT_ID'], format_size=Capacity ) diff --git a/plugins/tpch/4.0.0.0/queries/db18.sql b/plugins/tpch/4.0.0.0/queries/db18.sql index 64be93b..9a40fcb 100644 --- a/plugins/tpch/4.0.0.0/queries/db18.sql +++ b/plugins/tpch/4.0.0.0/queries/db18.sql @@ -32,4 +32,5 @@ group by o_totalprice order by o_totalprice desc, - o_orderdate; + o_orderdate +limit 100; diff --git a/plugins/tpch/4.0.0.0/queries/db2.sql b/plugins/tpch/4.0.0.0/queries/db2.sql index 6a0b19d..578e57f 100644 --- a/plugins/tpch/4.0.0.0/queries/db2.sql +++ b/plugins/tpch/4.0.0.0/queries/db2.sql @@ -43,4 +43,5 @@ order by s_acctbal desc, n_name, s_name, - p_partkey; + p_partkey +limit 100; diff --git a/plugins/tpch/4.0.0.0/queries/db21.sql b/plugins/tpch/4.0.0.0/queries/db21.sql index da23dec..bf04b8a 100644 --- a/plugins/tpch/4.0.0.0/queries/db21.sql +++ b/plugins/tpch/4.0.0.0/queries/db21.sql @@ -39,4 +39,5 @@ group by s_name order by numwait desc, - s_name; + s_name +limit 100; diff --git a/plugins/tpch/4.0.0.0/queries/db3.sql b/plugins/tpch/4.0.0.0/queries/db3.sql index 941d623..3b5dcce 100644 --- 
a/plugins/tpch/4.0.0.0/queries/db3.sql +++ b/plugins/tpch/4.0.0.0/queries/db3.sql @@ -22,4 +22,5 @@ group by o_shippriority order by revenue desc, - o_orderdate; + o_orderdate +limit 10; diff --git a/plugins/tpch/4.0.0.0/run_test.py b/plugins/tpch/4.0.0.0/run_test.py index 4484841..6eab67a 100644 --- a/plugins/tpch/4.0.0.0/run_test.py +++ b/plugins/tpch/4.0.0.0/run_test.py @@ -35,17 +35,6 @@ stdio = None -def parse_size(size): - _bytes = 0 - if not isinstance(size, str) or size.isdigit(): - _bytes = int(size) - else: - units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40} - match = re.match(r'(0|[1-9][0-9]*)\s*([B,K,M,G,T])', size.upper()) - _bytes = int(match.group(1)) * units[match.group(2)] - return _bytes - - def exec_cmd(cmd): stdio.verbose('execute: %s' % cmd) process = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) diff --git a/plugins/tpch/4.2.0.0/run_test.py b/plugins/tpch/4.2.0.0/run_test.py index 70c056d..439f76c 100644 --- a/plugins/tpch/4.2.0.0/run_test.py +++ b/plugins/tpch/4.2.0.0/run_test.py @@ -35,17 +35,6 @@ stdio = None -def parse_size(size): - _bytes = 0 - if not isinstance(size, str) or size.isdigit(): - _bytes = int(size) - else: - units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40} - match = re.match(r'(0|[1-9][0-9]*)\s*([B,K,M,G,T])', size.upper()) - _bytes = int(match.group(1)) * units[match.group(2)] - return _bytes - - def exec_cmd(cmd): stdio.verbose('execute: %s' % cmd) process = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) diff --git a/plugins/tpch/4.3.0.0/create_tpch_mysql_table_part.ddl b/plugins/tpch/4.3.0.0/create_tpch_mysql_table_part.ddl new file mode 100644 index 0000000..528b2fb --- /dev/null +++ b/plugins/tpch/4.3.0.0/create_tpch_mysql_table_part.ddl @@ -0,0 +1,119 @@ +drop table if exists lineitem; +drop table if exists orders; +drop table if exists partsupp; +drop table if exists part; +drop table if exists customer; +drop table if exists supplier; +drop table if exists nation; +drop table if exists region; +drop tablegroup if exists tpch_tg_lineitem_order_group; +drop tablegroup if exists tpch_tg_partsupp_part; + +create tablegroup if not exists tpch_tg_lineitem_order_group binding true partition by key 1 partitions cpu_num; +create tablegroup if not exists tpch_tg_partsupp_part binding true partition by key 1 partitions cpu_num; + +drop table if exists lineitem; + create table lineitem ( + l_orderkey BIGINT NOT NULL, + l_partkey BIGINT NOT NULL, + l_suppkey INTEGER NOT NULL, + l_linenumber INTEGER NOT NULL, + l_quantity DECIMAL(15,2) NOT NULL, + l_extendedprice DECIMAL(15,2) NOT NULL, + l_discount DECIMAL(15,2) NOT NULL, + l_tax DECIMAL(15,2) NOT NULL, + l_returnflag char(1) DEFAULT NULL, + l_linestatus char(1) DEFAULT NULL, + l_shipdate date NOT NULL, + l_commitdate date DEFAULT NULL, + l_receiptdate date DEFAULT NULL, + l_shipinstruct char(25) DEFAULT NULL, + l_shipmode char(10) DEFAULT NULL, + l_comment varchar(44) DEFAULT NULL, + primary key(l_orderkey, l_linenumber))row_format = condensed + tablegroup = tpch_tg_lineitem_order_group + partition by key (l_orderkey) partitions cpu_num with column group (each column); + +drop table if exists orders; + create table orders ( + o_orderkey bigint not null, + o_custkey bigint not null, + o_orderstatus char(1) default null, + o_totalprice bigint default null, + o_orderdate date not null, + o_orderpriority char(15) default null, + o_clerk char(15) default 
null, + o_shippriority bigint default null, + o_comment varchar(79) default null, + primary key (o_orderkey))row_format = condensed + tablegroup = tpch_tg_lineitem_order_group + partition by key(o_orderkey) partitions cpu_num with column group (each column); + +drop table if exists partsupp; + create table partsupp ( + ps_partkey bigint not null, + ps_suppkey bigint not null, + ps_availqty bigint default null, + ps_supplycost bigint default null, + ps_comment varchar(199) default null, + primary key (ps_partkey, ps_suppkey))row_format = condensed + tablegroup tpch_tg_partsupp_part + partition by key(ps_partkey) partitions cpu_num with column group (each column); + + +drop table if exists part; + create table part ( + p_partkey bigint not null, + p_name varchar(55) default null, + p_mfgr char(25) default null, + p_brand char(10) default null, + p_type varchar(25) default null, + p_size bigint default null, + p_container char(10) default null, + p_retailprice bigint default null, + p_comment varchar(23) default null, + primary key (p_partkey))row_format = condensed + tablegroup tpch_tg_partsupp_part + partition by key(p_partkey) partitions cpu_num with column group (each column); + + +drop table if exists customer; + create table customer ( + c_custkey bigint not null, + c_name varchar(25) default null, + c_address varchar(40) default null, + c_nationkey bigint default null, + c_phone char(15) default null, + c_acctbal bigint default null, + c_mktsegment char(10) default null, + c_comment varchar(117) default null, + primary key (c_custkey))row_format = condensed + partition by key(c_custkey) partitions cpu_num with column group (each column); + +drop table if exists supplier; + create table supplier ( + s_suppkey bigint not null, + s_name char(25) default null, + s_address varchar(40) default null, + s_nationkey bigint default null, + s_phone char(15) default null, + s_acctbal bigint default null, + s_comment varchar(101) default null, + primary key (s_suppkey))row_format = condensed + partition by key(s_suppkey) partitions cpu_num with column group (each column); + + +drop table if exists nation; + create table nation ( + n_nationkey bigint not null, + n_name char(25) default null, + n_regionkey bigint default null, + n_comment varchar(152) default null, + primary key (n_nationkey))row_format = condensed with column group (each column); + +drop table if exists region; + create table region ( + r_regionkey bigint not null, + r_name char(25) default null, + r_comment varchar(152) default null, + primary key (r_regionkey))row_format = condensed with column group (each column); \ No newline at end of file diff --git a/plugins/tpch/4.3.0.0/pre_test.py b/plugins/tpch/4.3.0.0/pre_test.py new file mode 100644 index 0000000..f42457a --- /dev/null +++ b/plugins/tpch/4.3.0.0/pre_test.py @@ -0,0 +1,208 @@ +# coding: utf-8 +# OceanBase Deploy. +# Copyright (C) 2021 OceanBase +# +# This file is part of OceanBase Deploy. +# +# OceanBase Deploy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# OceanBase Deploy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details.
+# +# You should have received a copy of the GNU General Public License +# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>. + + +from __future__ import absolute_import, division, print_function + + +import re +import os +from glob import glob +try: + import subprocess32 as subprocess +except: + import subprocess +from ssh import LocalClient +from tool import DirectoryUtil + + +def format_size(size, precision=1): + units = ['B', 'K', 'M', 'G'] + units_num = len(units) - 1 + idx = 0 + if precision: + div = 1024.0 + format = '%.' + str(precision) + 'f%s' + limit = 1024 + else: + div = 1024 + limit = 1024 + format = '%d%s' + while idx < units_num and size >= limit: + size /= div + idx += 1 + return format % (size, units[idx]) + + +def pre_test(plugin_context, cursor, *args, **kwargs): + def get_option(key, default=''): + value = getattr(options, key, default) + if not value: + value = default + stdio.verbose('get option: %s value %s' % (key, value)) + return value + + def get_path(key, default): + path = get_option('%s_path' % key) + if path and os.path.exists(path): + if os.path.isfile(path): + path = [path] + else: + path = glob(os.path.join(path, '*.%s' % key)) + stdio.verbose('get %s_path: %s' % (key, path)) + return path if path else default + + def local_execute_command(command, env=None, timeout=None): + return LocalClient.execute_command(command, env, timeout, stdio) + + cluster_config = plugin_context.cluster_config + stdio = plugin_context.stdio + options = plugin_context.options + clients = plugin_context.clients + + local_dir, _ = os.path.split(__file__) + dbgen_bin = get_option('dbgen_bin', 'dbgen') + dss_config = get_option('dss_config', '.') + scale_factor = get_option('scale_factor', 1) + disable_transfer = get_option('disable_transfer', False) + remote_tbl_dir = get_option('remote_tbl_dir') + tenant_name = get_option('tenant', 'test') + host = get_option('host', '127.0.0.1') + port = get_option('port', 2881) + mysql_db = get_option('database', 'test') + user = get_option('user', 'root') + password = get_option('password', '') + + MIN_MEMORY = 1073741824 + + if tenant_name == 'sys': + stdio.error('DO NOT use sys tenant for testing.') + return + + test_server = get_option('test_server') + tmp_dir = os.path.abspath(get_option('tmp_dir', './tmp')) + tbl_tmp_dir = os.path.join(tmp_dir, 's%s' % scale_factor) + ddl_path = get_path('ddl', [os.path.join(local_dir, 'create_tpch_mysql_table_part.ddl')]) + stdio.verbose('set ddl_path: %s' % ddl_path) + setattr(options, 'ddl_path', ddl_path) + tbl_path = get_path('tbl', glob(os.path.join(tbl_tmp_dir, '*.tbl'))) + sql_path = get_path('sql', glob(os.path.join(local_dir, 'queries/*.sql'))) + stdio.verbose('set sql_path: %s' % sql_path) + setattr(options, 'sql_path', sql_path) + obclient_bin = get_option('obclient_bin', 'obclient') + + ret = local_execute_command('%s --help' % obclient_bin) + if not ret: + stdio.error('%s\n%s is not an executable file. Please use `--obclient-bin` to set.\nYou may not have obclient installed' % (ret.stderr, obclient_bin)) + return + + if not DirectoryUtil.mkdir(tmp_dir, stdio=stdio): + return + stdio.verbose('set tmp_dir: %s' % tmp_dir) + setattr(options, 'tmp_dir', tmp_dir) + server_num = len(cluster_config.servers) + + sql = "select * from oceanbase.DBA_OB_TENANTS where TENANT_NAME = %s" + stdio.verbose('execute sql: %s' % (sql % tenant_name)) + tenant_meta = cursor.fetchone(sql, [tenant_name]) + if not tenant_meta: + stdio.error('Tenant %s does not exist. Use `obd cluster tenant create` to create the tenant.'
% tenant_name) + return + sql = "select * from oceanbase.__all_resource_pool where tenant_id = %d" % tenant_meta['TENANT_ID'] + pool = cursor.fetchone(sql) + if pool is False: + return + sql = "select * from oceanbase.__all_unit_config where unit_config_id = %d" % pool['unit_config_id'] + tenant_unit = cursor.fetchone(sql) + if tenant_unit is False: + return + max_cpu = tenant_unit['max_cpu'] + min_memory = MIN_MEMORY + unit_count = pool['unit_count'] + server_num = len(cluster_config.servers) + sql = "select count(1) server_num from oceanbase.__all_server where status = 'active'" + ret = cursor.fetchone(sql) + if ret is False: + return + server_num = ret.get("server_num", server_num) + + if get_option('test_only'): + return plugin_context.return_true( + max_cpu=max_cpu, min_memory=min_memory, unit_count=unit_count, server_num=server_num, tenant=tenant_name, + tenant_id=tenant_meta['TENANT_ID'], format_size=format_size + ) + + if not remote_tbl_dir: + stdio.error('Please use --remote-tbl-dir to set a dir for remote tbl files') + return + + if disable_transfer: + ret = clients[test_server].execute_command('ls %s' % (os.path.join(remote_tbl_dir, '*.tbl'))) + tbl_path = ret.stdout.strip().split('\n') if ret else [] + if not tbl_path: + stdio.error('No tbl file in %s:%s' % (test_server, remote_tbl_dir)) + return + else: + if not tbl_path: + ret = local_execute_command('%s -h' % dbgen_bin) + if ret.code > 1: + stdio.error('%s\n%s is not an executable file. Please use `--dbgen-bin` to set.\nYou may not have obtpch installed' % (ret.stderr, dbgen_bin)) + return + + dss_path = os.path.join(dss_config, 'dists.dss') + if not os.path.exists(dss_path): + stdio.error('No such file: %s' % dss_path) + return + + tbl_tmp_dir = os.path.join(tmp_dir, 's%s' % scale_factor) + if not DirectoryUtil.mkdir(tbl_tmp_dir, stdio=stdio): + return + + stdio.start_loading('Generate Data (Scale Factor: %s)' % scale_factor) + ret = local_execute_command('cd %s; %s -s %s -b %s' % (tbl_tmp_dir, dbgen_bin, scale_factor, dss_path)) + if ret: + stdio.stop_loading('succeed') + tbl_path = glob(os.path.join(tbl_tmp_dir, '*.tbl')) + else: + stdio.stop_loading('fail') + return + + stdio.start_loading('Send tbl to remote (%s)' % test_server) + new_tbl_path = [] + for path in tbl_path: + _, fn = os.path.split(path) + fp = os.path.join(remote_tbl_dir, fn) + if not clients[test_server].put_file(path, fp): + stdio.stop_loading('fail') + return + + new_tbl_path.append(fp) + tbl_path = new_tbl_path + + stdio.stop_loading('succeed') + stdio.verbose('set tbl_path: %s' % tbl_path) + setattr(options, 'tbl_path', tbl_path) + + return plugin_context.return_true( + obclient_bin=obclient_bin, host=host, port=port, user=user, password=password, database=mysql_db, + max_cpu=max_cpu, min_memory=min_memory, unit_count=unit_count, server_num=server_num, tenant=tenant_name, + tenant_id=tenant_meta['TENANT_ID'], format_size=format_size + ) + + diff --git a/plugins/tpch/4.3.0.0/queries/db1.sql b/plugins/tpch/4.3.0.0/queries/db1.sql new file mode 100644 index 0000000..b4ac3a4 --- /dev/null +++ b/plugins/tpch/4.3.0.0/queries/db1.sql @@ -0,0 +1,24 @@ +-- using default substitutions + + +select /*+ TPCH_Q1 parallel(cpu_num) */ + l_returnflag, + l_linestatus, + sum(l_quantity) as sum_qty, + sum(l_extendedprice) as sum_base_price, + sum(l_extendedprice * (1 - l_discount)) as sum_disc_price, + sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge, + avg(l_quantity) as avg_qty, + avg(l_extendedprice) as avg_price, + avg(l_discount) as avg_disc, + 
count(*) as count_order +from + lineitem +where + l_shipdate <= date '1998-12-01' - interval '90' day +group by + l_returnflag, + l_linestatus +order by + l_returnflag, + l_linestatus; diff --git a/plugins/tpch/4.3.0.0/queries/db10.sql b/plugins/tpch/4.3.0.0/queries/db10.sql new file mode 100644 index 0000000..2392062 --- /dev/null +++ b/plugins/tpch/4.3.0.0/queries/db10.sql @@ -0,0 +1,35 @@ +-- using default substitutions + + +select /*+ TPCH_Q10 parallel(cpu_num) */ + c_custkey, + c_name, + sum(l_extendedprice * (1 - l_discount)) as revenue, + c_acctbal, + n_name, + c_address, + c_phone, + c_comment +from + customer, + orders, + lineitem, + nation +where + c_custkey = o_custkey + and l_orderkey = o_orderkey + and o_orderdate >= date '1993-10-01' + and o_orderdate < date '1993-10-01' + interval '3' month + and l_returnflag = 'R' + and c_nationkey = n_nationkey +group by + c_custkey, + c_name, + c_acctbal, + c_phone, + n_name, + c_address, + c_comment +order by + revenue desc +limit 20; diff --git a/plugins/tpch/4.3.0.0/queries/db11.sql b/plugins/tpch/4.3.0.0/queries/db11.sql new file mode 100644 index 0000000..708ea4e --- /dev/null +++ b/plugins/tpch/4.3.0.0/queries/db11.sql @@ -0,0 +1,30 @@ +-- using default substitutions + + +select /*+ TPCH_Q11 parallel(cpu_num) */ + ps_partkey, + sum(ps_supplycost * ps_availqty) as value +from + partsupp, + supplier, + nation +where + ps_suppkey = s_suppkey + and s_nationkey = n_nationkey + and n_name = 'GERMANY' +group by + ps_partkey having + sum(ps_supplycost * ps_availqty) > ( + select + sum(ps_supplycost * ps_availqty) * 0.0000100000 + from + partsupp, + supplier, + nation + where + ps_suppkey = s_suppkey + and s_nationkey = n_nationkey + and n_name = 'GERMANY' + ) +order by + value desc; diff --git a/plugins/tpch/4.3.0.0/queries/db12.sql b/plugins/tpch/4.3.0.0/queries/db12.sql new file mode 100644 index 0000000..3b0a8d2 --- /dev/null +++ b/plugins/tpch/4.3.0.0/queries/db12.sql @@ -0,0 +1,31 @@ +-- using default substitutions + + +select /*+ TPCH_Q12 parallel(cpu_num) */ + l_shipmode, + sum(case + when o_orderpriority = '1-URGENT' + or o_orderpriority = '2-HIGH' + then 1 + else 0 + end) as high_line_count, + sum(case + when o_orderpriority <> '1-URGENT' + and o_orderpriority <> '2-HIGH' + then 1 + else 0 + end) as low_line_count +from + orders, + lineitem +where + o_orderkey = l_orderkey + and l_shipmode in ('MAIL', 'SHIP') + and l_commitdate < l_receiptdate + and l_shipdate < l_commitdate + and l_receiptdate >= date '1994-01-01' + and l_receiptdate < date '1994-01-01' + interval '1' year +group by + l_shipmode +order by + l_shipmode; diff --git a/plugins/tpch/4.3.0.0/queries/db13.sql b/plugins/tpch/4.3.0.0/queries/db13.sql new file mode 100644 index 0000000..9b0ae05 --- /dev/null +++ b/plugins/tpch/4.3.0.0/queries/db13.sql @@ -0,0 +1,7 @@ +-- using default substitutions +SELECT /*+ TPCH_Q13 parallel(cpu_num) */ c_count, count(*) as custdist +from ( SELECT c_custkey, count(o_orderkey) as c_count + from customer left join orders on c_custkey = o_custkey and o_comment not like '%special%requests%' + group by c_custkey ) c_orders +group by c_count +order by custdist desc, c_count desc; diff --git a/plugins/tpch/4.3.0.0/queries/db14.sql b/plugins/tpch/4.3.0.0/queries/db14.sql new file mode 100644 index 0000000..7eab8ac --- /dev/null +++ b/plugins/tpch/4.3.0.0/queries/db14.sql @@ -0,0 +1,16 @@ +-- using default substitutions + + +select /*+ TPCH_Q14 parallel(cpu_num) */ + 100.00 * sum(case + when p_type like 'PROMO%' + then l_extendedprice * (1 - 
l_discount) + else 0 + end) / sum(l_extendedprice * (1 - l_discount)) as promo_revenue +from + lineitem, + part +where + l_partkey = p_partkey + and l_shipdate >= date '1995-09-01' + and l_shipdate < date '1995-09-01' + interval '1' month; diff --git a/plugins/tpch/4.3.0.0/queries/db15.sql b/plugins/tpch/4.3.0.0/queries/db15.sql new file mode 100644 index 0000000..ba16e11 --- /dev/null +++ b/plugins/tpch/4.3.0.0/queries/db15.sql @@ -0,0 +1,36 @@ +-- using default substitutions + +create view revenue0 (supplier_no, total_revenue) as + select + l_suppkey, + sum(l_extendedprice * (1 - l_discount)) + from + lineitem + where + l_shipdate >= date '1996-01-01' + and l_shipdate < date '1996-01-01' + interval '3' month + group by + l_suppkey; + + +select /*+ TPCH_Q15 parallel(cpu_num) */ + s_suppkey, + s_name, + s_address, + s_phone, + total_revenue +from + supplier, + revenue0 +where + s_suppkey = supplier_no + and total_revenue = ( + select + max(total_revenue) + from + revenue0 + ) +order by + s_suppkey; + +drop view revenue0; diff --git a/plugins/tpch/4.3.0.0/queries/db16.sql b/plugins/tpch/4.3.0.0/queries/db16.sql new file mode 100644 index 0000000..61ab5c5 --- /dev/null +++ b/plugins/tpch/4.3.0.0/queries/db16.sql @@ -0,0 +1,33 @@ +-- using default substitutions + + +select /*+ TPCH_Q16 parallel(cpu_num) */ + p_brand, + p_type, + p_size, + count(distinct ps_suppkey) as supplier_cnt +from + partsupp, + part +where + p_partkey = ps_partkey + and p_brand <> 'Brand#45' + and p_type not like 'MEDIUM POLISHED%' + and p_size in (49, 14, 23, 45, 19, 3, 36, 9) + and ps_suppkey not in ( + select + s_suppkey + from + supplier + where + s_comment like '%Customer%Complaints%' + ) +group by + p_brand, + p_type, + p_size +order by + supplier_cnt desc, + p_brand, + p_type, + p_size; diff --git a/plugins/tpch/4.3.0.0/queries/db17.sql b/plugins/tpch/4.3.0.0/queries/db17.sql new file mode 100644 index 0000000..5748361 --- /dev/null +++ b/plugins/tpch/4.3.0.0/queries/db17.sql @@ -0,0 +1,20 @@ +-- using default substitutions + + +select /*+ TPCH_Q17 parallel(cpu_num) */ + sum(l_extendedprice) / 7.0 as avg_yearly +from + lineitem, + part +where + p_partkey = l_partkey + and p_brand = 'Brand#23' + and p_container = 'MED BOX' + and l_quantity < ( + select + 0.2 * avg(l_quantity) + from + lineitem + where + l_partkey = p_partkey + ); diff --git a/plugins/tpch/4.3.0.0/queries/db18.sql b/plugins/tpch/4.3.0.0/queries/db18.sql new file mode 100644 index 0000000..9a40fcb --- /dev/null +++ b/plugins/tpch/4.3.0.0/queries/db18.sql @@ -0,0 +1,36 @@ +-- using default substitutions + + +select /*+ TPCH_Q18 parallel(cpu_num) */ + c_name, + c_custkey, + o_orderkey, + o_orderdate, + o_totalprice, + sum(l_quantity) +from + customer, + orders, + lineitem +where + o_orderkey in ( + select + l_orderkey + from + lineitem + group by + l_orderkey having + sum(l_quantity) > 300 + ) + and c_custkey = o_custkey + and o_orderkey = l_orderkey +group by + c_name, + c_custkey, + o_orderkey, + o_orderdate, + o_totalprice +order by + o_totalprice desc, + o_orderdate +limit 100; diff --git a/plugins/tpch/4.3.0.0/queries/db19.sql b/plugins/tpch/4.3.0.0/queries/db19.sql new file mode 100644 index 0000000..851edbb --- /dev/null +++ b/plugins/tpch/4.3.0.0/queries/db19.sql @@ -0,0 +1,38 @@ +-- using default substitutions + + +select /*+ TPCH_Q19 parallel(cpu_num) */ + sum(l_extendedprice* (1 - l_discount)) as revenue +from + lineitem, + part +where + ( + p_partkey = l_partkey + and p_brand = 'Brand#12' + and p_container in ('SM CASE', 'SM BOX', 'SM 
PACK', 'SM PKG') + and l_quantity >= 1 and l_quantity <= 1 + 10 + and p_size between 1 and 5 + and l_shipmode in ('AIR', 'AIR REG') + and l_shipinstruct = 'DELIVER IN PERSON' + ) + or + ( + p_partkey = l_partkey + and p_brand = 'Brand#23' + and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK') + and l_quantity >= 10 and l_quantity <= 10 + 10 + and p_size between 1 and 10 + and l_shipmode in ('AIR', 'AIR REG') + and l_shipinstruct = 'DELIVER IN PERSON' + ) + or + ( + p_partkey = l_partkey + and p_brand = 'Brand#34' + and p_container in ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG') + and l_quantity >= 20 and l_quantity <= 20 + 10 + and p_size between 1 and 15 + and l_shipmode in ('AIR', 'AIR REG') + and l_shipinstruct = 'DELIVER IN PERSON' + ); diff --git a/plugins/tpch/4.3.0.0/queries/db2.sql b/plugins/tpch/4.3.0.0/queries/db2.sql new file mode 100644 index 0000000..578e57f --- /dev/null +++ b/plugins/tpch/4.3.0.0/queries/db2.sql @@ -0,0 +1,47 @@ +-- using default substitutions + + +select /*+ TPCH_Q2 parallel(cpu_num) */ + s_acctbal, + s_name, + n_name, + p_partkey, + p_mfgr, + s_address, + s_phone, + s_comment +from + part, + supplier, + partsupp, + nation, + region +where + p_partkey = ps_partkey + and s_suppkey = ps_suppkey + and p_size = 15 + and p_type like '%BRASS' + and s_nationkey = n_nationkey + and n_regionkey = r_regionkey + and r_name = 'EUROPE' + and ps_supplycost = ( + select + min(ps_supplycost) + from + partsupp, + supplier, + nation, + region + where + p_partkey = ps_partkey + and s_suppkey = ps_suppkey + and s_nationkey = n_nationkey + and n_regionkey = r_regionkey + and r_name = 'EUROPE' + ) +order by + s_acctbal desc, + n_name, + s_name, + p_partkey +limit 100; diff --git a/plugins/tpch/4.3.0.0/queries/db20.sql b/plugins/tpch/4.3.0.0/queries/db20.sql new file mode 100644 index 0000000..e7a3142 --- /dev/null +++ b/plugins/tpch/4.3.0.0/queries/db20.sql @@ -0,0 +1,40 @@ +-- using default substitutions + + +select /*+ TPCH_Q20 parallel(cpu_num) */ + s_name, + s_address +from + supplier, + nation +where + s_suppkey in ( + select + ps_suppkey + from + partsupp + where + ps_partkey in ( + select + p_partkey + from + part + where + p_name like 'forest%' + ) + and ps_availqty > ( + select + 0.5 * sum(l_quantity) + from + lineitem + where + l_partkey = ps_partkey + and l_suppkey = ps_suppkey + and l_shipdate >= date '1994-01-01' + and l_shipdate < date '1994-01-01' + interval '1' year + ) + ) + and s_nationkey = n_nationkey + and n_name = 'CANADA' +order by + s_name; diff --git a/plugins/tpch/4.3.0.0/queries/db21.sql b/plugins/tpch/4.3.0.0/queries/db21.sql new file mode 100644 index 0000000..bf04b8a --- /dev/null +++ b/plugins/tpch/4.3.0.0/queries/db21.sql @@ -0,0 +1,43 @@ +-- using default substitutions + + +select /*+ TPCH_Q21 parallel(cpu_num) */ + s_name, + count(*) as numwait +from + supplier, + lineitem l1, + orders, + nation +where + s_suppkey = l1.l_suppkey + and o_orderkey = l1.l_orderkey + and o_orderstatus = 'F' + and l1.l_receiptdate > l1.l_commitdate + and exists ( + select + * + from + lineitem l2 + where + l2.l_orderkey = l1.l_orderkey + and l2.l_suppkey <> l1.l_suppkey + ) + and not exists ( + select + * + from + lineitem l3 + where + l3.l_orderkey = l1.l_orderkey + and l3.l_suppkey <> l1.l_suppkey + and l3.l_receiptdate > l3.l_commitdate + ) + and s_nationkey = n_nationkey + and n_name = 'SAUDI ARABIA' +group by + s_name +order by + numwait desc, + s_name +limit 100; diff --git a/plugins/tpch/4.3.0.0/queries/db22.sql 
b/plugins/tpch/4.3.0.0/queries/db22.sql new file mode 100644 index 0000000..8f3440f --- /dev/null +++ b/plugins/tpch/4.3.0.0/queries/db22.sql @@ -0,0 +1,40 @@ +-- using default substitutions + + +select /*+ TPCH_Q22 parallel(cpu_num) */ + cntrycode, + count(*) as numcust, + sum(c_acctbal) as totacctbal +from + ( + select + substring(c_phone from 1 for 2) as cntrycode, + c_acctbal + from + customer + where + substring(c_phone from 1 for 2) in + ('13', '31', '23', '29', '30', '18', '17') + and c_acctbal > ( + select + avg(c_acctbal) + from + customer + where + c_acctbal > 0.00 + and substring(c_phone from 1 for 2) in + ('13', '31', '23', '29', '30', '18', '17') + ) + and not exists ( + select + * + from + orders + where + o_custkey = c_custkey + ) + ) as custsale +group by + cntrycode +order by + cntrycode; diff --git a/plugins/tpch/4.3.0.0/queries/db3.sql b/plugins/tpch/4.3.0.0/queries/db3.sql new file mode 100644 index 0000000..3b5dcce --- /dev/null +++ b/plugins/tpch/4.3.0.0/queries/db3.sql @@ -0,0 +1,26 @@ +-- using default substitutions + + +select /*+ TPCH_Q3 parallel(cpu_num) */ + l_orderkey, + sum(l_extendedprice * (1 - l_discount)) as revenue, + o_orderdate, + o_shippriority +from + customer, + orders, + lineitem +where + c_mktsegment = 'BUILDING' + and c_custkey = o_custkey + and l_orderkey = o_orderkey + and o_orderdate < date '1995-03-15' + and l_shipdate > date '1995-03-15' +group by + l_orderkey, + o_orderdate, + o_shippriority +order by + revenue desc, + o_orderdate +limit 10; diff --git a/plugins/tpch/4.3.0.0/queries/db4.sql b/plugins/tpch/4.3.0.0/queries/db4.sql new file mode 100644 index 0000000..6e7bcb9 --- /dev/null +++ b/plugins/tpch/4.3.0.0/queries/db4.sql @@ -0,0 +1,11 @@ +-- using default substitutions +SELECT /*+ TPCH_Q4 parallel(cpu_num) no_unnest */ o_orderpriority, count(*) as order_count +from orders +where o_orderdate >= DATE'1993-07-01' and + o_orderdate < DATE'1993-07-01' + interval '3' month and + exists ( SELECT * + from lineitem + where l_orderkey = o_orderkey and + l_commitdate < l_receiptdate ) + group by o_orderpriority + order by o_orderpriority; diff --git a/plugins/tpch/4.3.0.0/queries/db5.sql b/plugins/tpch/4.3.0.0/queries/db5.sql new file mode 100644 index 0000000..c74dd8c --- /dev/null +++ b/plugins/tpch/4.3.0.0/queries/db5.sql @@ -0,0 +1,27 @@ +-- using default substitutions + + +select /*+ TPCH_Q5 parallel(cpu_num) */ + n_name, + sum(l_extendedprice * (1 - l_discount)) as revenue +from + customer, + orders, + lineitem, + supplier, + nation, + region +where + c_custkey = o_custkey + and l_orderkey = o_orderkey + and l_suppkey = s_suppkey + and c_nationkey = s_nationkey + and s_nationkey = n_nationkey + and n_regionkey = r_regionkey + and r_name = 'ASIA' + and o_orderdate >= date '1994-01-01' + and o_orderdate < date '1994-01-01' + interval '1' year +group by + n_name +order by + revenue desc; diff --git a/plugins/tpch/4.3.0.0/queries/db6.sql b/plugins/tpch/4.3.0.0/queries/db6.sql new file mode 100644 index 0000000..bda99ba --- /dev/null +++ b/plugins/tpch/4.3.0.0/queries/db6.sql @@ -0,0 +1,12 @@ +-- using default substitutions + + +select /*+ TPCH_Q6 parallel(cpu_num) */ + sum(l_extendedprice * l_discount) as revenue +from + lineitem +where + l_shipdate >= date '1994-01-01' + and l_shipdate < date '1994-01-01' + interval '1' year + and l_discount between .06 - 0.01 and .06 + 0.01 + and l_quantity < 24; diff --git a/plugins/tpch/4.3.0.0/queries/db7.sql b/plugins/tpch/4.3.0.0/queries/db7.sql new file mode 100644 index 0000000..4f95411 --- /dev/null 
+++ b/plugins/tpch/4.3.0.0/queries/db7.sql @@ -0,0 +1,42 @@ +-- using default substitutions + + +select /*+ TPCH_Q7 parallel(cpu_num) */ + supp_nation, + cust_nation, + l_year, + sum(volume) as revenue +from + ( + select + n1.n_name as supp_nation, + n2.n_name as cust_nation, + extract(year from l_shipdate) as l_year, + l_extendedprice * (1 - l_discount) as volume + from + supplier, + lineitem, + orders, + customer, + nation n1, + nation n2 + where + s_suppkey = l_suppkey + and o_orderkey = l_orderkey + and c_custkey = o_custkey + and s_nationkey = n1.n_nationkey + and c_nationkey = n2.n_nationkey + and ( + (n1.n_name = 'FRANCE' and n2.n_name = 'GERMANY') + or (n1.n_name = 'GERMANY' and n2.n_name = 'FRANCE') + ) + and l_shipdate between date '1995-01-01' and date '1996-12-31' + ) as shipping +group by + supp_nation, + cust_nation, + l_year +order by + supp_nation, + cust_nation, + l_year; diff --git a/plugins/tpch/4.3.0.0/queries/db8.sql b/plugins/tpch/4.3.0.0/queries/db8.sql new file mode 100644 index 0000000..b8df462 --- /dev/null +++ b/plugins/tpch/4.3.0.0/queries/db8.sql @@ -0,0 +1,40 @@ +-- using default substitutions + + +select /*+ TPCH_Q8 parallel(cpu_num) */ + o_year, + sum(case + when nation = 'BRAZIL' then volume + else 0 + end) / sum(volume) as mkt_share +from + ( + select + extract(year from o_orderdate) as o_year, + l_extendedprice * (1 - l_discount) as volume, + n2.n_name as nation + from + part, + supplier, + lineitem, + orders, + customer, + nation n1, + nation n2, + region + where + p_partkey = l_partkey + and s_suppkey = l_suppkey + and l_orderkey = o_orderkey + and o_custkey = c_custkey + and c_nationkey = n1.n_nationkey + and n1.n_regionkey = r_regionkey + and r_name = 'AMERICA' + and s_nationkey = n2.n_nationkey + and o_orderdate between date '1995-01-01' and date '1996-12-31' + and p_type = 'ECONOMY ANODIZED STEEL' + ) as all_nations +group by + o_year +order by + o_year; diff --git a/plugins/tpch/4.3.0.0/queries/db9.sql b/plugins/tpch/4.3.0.0/queries/db9.sql new file mode 100644 index 0000000..3bbf5f0 --- /dev/null +++ b/plugins/tpch/4.3.0.0/queries/db9.sql @@ -0,0 +1,35 @@ +-- using default substitutions + + +select /*+ TPCH_Q9 parallel(cpu_num) */ + nation, + o_year, + sum(amount) as sum_profit +from + ( + select + n_name as nation, + extract(year from o_orderdate) as o_year, + l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity as amount + from + part, + supplier, + lineitem, + partsupp, + orders, + nation + where + s_suppkey = l_suppkey + and ps_suppkey = l_suppkey + and ps_partkey = l_partkey + and p_partkey = l_partkey + and o_orderkey = l_orderkey + and s_nationkey = n_nationkey + and p_name like '%green%' + ) as profit +group by + nation, + o_year +order by + nation, + o_year desc; diff --git a/profile/obd.sh b/profile/obd.sh index 8ac3796..9edb4fe 100644 --- a/profile/obd.sh +++ b/profile/obd.sh @@ -92,22 +92,27 @@ function _obd_complete_func all_cmds["obd web *"]="install upgrade" all_cmds["obd web install *"]="_obd_reply_deploy_names" all_cmds["obd web upgrade *"]="_obd_reply_deploy_names" + all_cmds["obd obdiag"]="check gather deploy analyze rca update" + all_cmds["obd obdiag gather"]="all log clog slog obproxy_log perf plan_monitor stack sysstat scene" + all_cmds["obd obdiag gather scene"]="run list" + all_cmds["obd obdiag gather scene run"]="_obd_reply_deploy_names" + all_cmds["obd obdiag gather *"]="_obd_reply_deploy_names" + all_cmds["obd obdiag analyze"]="log flt_trace" + all_cmds["obd obdiag analyze *"]="_obd_reply_deploy_names" + 
all_cmds["obd obdiag check"]="_obd_reply_deploy_names" + all_cmds["obd obdiag rca"]="list run" + all_cmds["obd obdiag rca run"]="_obd_reply_deploy_names" + all_cmds["obd tool"]="list install uninstall update" # if [ -f "$env_file" ] && [ "$(grep '"OBD_DEV_MODE": "1"' "$env_file")" != "" ]; then - all_cmds["obd"]="${all_cmds[obd]} devmode env tool" - all_cmds["obd devmode"]="enable disable" - all_cmds["obd tool"]="command db_connect dooba" - all_cmds["obd tool db_connect"]="_obd_reply_deploy_names" - all_cmds["obd tool dooba"]="_obd_reply_deploy_names" - all_cmds["obd tool command"]="_obd_reply_deploy_names" - all_cmds["obd tool command *"]="_obd_reply_tool_commands" - all_cmds["obd env"]="set unset show clear" - all_cmds["obd obdiag"]="check gather deploy analyze" - all_cmds["obd obdiag gather"]="all log clog slog obproxy_log perf plan_monitor stack sysstat" - all_cmds["obd obdiag gather *"]="_obd_reply_deploy_names" - all_cmds["obd obdiag analyze"]="log flt_trace" - all_cmds["obd obdiag analyze *"]="_obd_reply_deploy_names" - all_cmds["obd obdiag check"]="_obd_reply_deploy_names" + all_cmds["obd"]+=" devmode env tool" + all_cmds["obd devmode"]="enable disable" + all_cmds["obd tool"]+=" command db_connect dooba" + all_cmds["obd tool db_connect"]="_obd_reply_deploy_names" + all_cmds["obd tool dooba"]="_obd_reply_deploy_names" + all_cmds["obd tool command"]="_obd_reply_deploy_names" + all_cmds["obd tool command *"]="_obd_reply_tool_commands" + all_cmds["obd env"]="set unset show clear" # fi case $prev in list) diff --git a/rpm/build.sh b/rpm/build.sh index c76d8a7..50f12f0 100755 --- a/rpm/build.sh +++ b/rpm/build.sh @@ -2,7 +2,7 @@ python_bin='python' W_DIR=`pwd` -VERSION=${VERSION:-'1.3.3'} +VERSION=${VERSION:-'2.7.0'} function python_version() diff --git a/rpm/ob-deploy.spec b/rpm/ob-deploy.spec index 029d659..f6920c1 100644 --- a/rpm/ob-deploy.spec +++ b/rpm/ob-deploy.spec @@ -95,7 +95,7 @@ mkdir -p ${RPM_BUILD_ROOT}/usr/obd/lib/ mkdir -p ${RPM_BUILD_ROOT}/usr/obd/lib/executer \cp -rf ${RPM_DIR}/executer27 ${RPM_BUILD_ROOT}/usr/obd/lib/executer/ \cp -rf $BUILD_DIR/SOURCES/example ${RPM_BUILD_ROOT}/usr/obd/ -cd ${RPM_BUILD_ROOT}/usr/obd/plugins && ln -s oceanbase oceanbase-ce && \cp -rf obproxy/3.1.0 obproxy-ce/ && \cp -rf $SRC_DIR/plugins/obproxy-ce/* obproxy-ce/ +cd ${RPM_BUILD_ROOT}/usr/obd/plugins && ln -s oceanbase oceanbase-ce && \cp -rf obproxy/* obproxy-ce/ && \cp -rf $SRC_DIR/plugins/obproxy-ce/* obproxy-ce/ cd ${RPM_BUILD_ROOT}/usr/obd/plugins && ln -sf ocp-server ocp-server-ce mv obproxy/3.1.0 obproxy/3.2.1 cd ${RPM_BUILD_ROOT}/usr/obd/config_parser && ln -s oceanbase oceanbase-ce diff --git a/service/handler/component_handler.py b/service/handler/component_handler.py index ea8694e..7fc4110 100644 --- a/service/handler/component_handler.py +++ b/service/handler/component_handler.py @@ -68,26 +68,38 @@ def __get_all_components(self, component_filter=const.VERSION_FILTER): local_pkg = local_packages[local_pkg_idx] remote_pkg = remote_packages[remote_pkg_idx] if local_pkg >= remote_pkg: + size = getattr(local_pkg, 'size', const.PKG_ESTIMATED_SIZE[local_pkg.name]) + size = const.PKG_ESTIMATED_SIZE[local_pkg.name] if not size else size component_dict[local_pkg.name].append( ComponentInfo(version=local_pkg.version, md5=local_pkg.md5, release=local_pkg.release, - arch=local_pkg.arch, type=MirrorRepositoryType.LOCAL.value, estimated_size=const.PKG_ESTIMATED_SIZE[local_pkg.name])) + arch=local_pkg.arch, type=MirrorRepositoryType.LOCAL.value, + estimated_size=size)) local_pkg_idx -= 1 
else: if len(component_dict[remote_pkg.name]) > 0 and component_dict[remote_pkg.name][-1].md5 == remote_pkg.md5: log.get_logger().debug("already found local package %s", remote_pkg) else: + size = getattr(remote_pkg, 'size', const.PKG_ESTIMATED_SIZE[remote_pkg.name]) + size = const.PKG_ESTIMATED_SIZE[remote_pkg.name] if not size else size component_dict[remote_pkg.name].append( ComponentInfo(version=remote_pkg.version, md5=remote_pkg.md5, release=remote_pkg.release, - arch=remote_pkg.arch, type=MirrorRepositoryType.REMOTE.value, estimated_size=const.PKG_ESTIMATED_SIZE[remote_pkg.name])) + arch=remote_pkg.arch, type=MirrorRepositoryType.REMOTE.value, + estimated_size=size)) remote_pkg_idx -= 1 if local_pkg_idx >= 0: for pkg in local_packages[local_pkg_idx::-1]: + size = getattr(pkg, 'size', const.PKG_ESTIMATED_SIZE[pkg.name]) + size = const.PKG_ESTIMATED_SIZE[pkg.name] if not size else size component_dict[pkg.name].append( - ComponentInfo(version=pkg.version, md5=pkg.md5, release=pkg.release, arch=pkg.arch, type=MirrorRepositoryType.LOCAL.value, estimated_size=const.PKG_ESTIMATED_SIZE[pkg.name])) + ComponentInfo(version=pkg.version, md5=pkg.md5, release=pkg.release, arch=pkg.arch, type=MirrorRepositoryType.LOCAL.value, + estimated_size=size)) if remote_pkg_idx >= 0: for pkg in remote_packages[remote_pkg_idx::-1]: + size = getattr(pkg, 'size', const.PKG_ESTIMATED_SIZE[pkg.name]) + size = const.PKG_ESTIMATED_SIZE[pkg.name] if not size else size component_dict[pkg.name].append( - ComponentInfo(version=pkg.version, md5=pkg.md5, release=pkg.release, arch=pkg.arch, type=MirrorRepositoryType.REMOTE.value, estimated_size=const.PKG_ESTIMATED_SIZE[pkg.name])) + ComponentInfo(version=pkg.version, md5=pkg.md5, release=pkg.release, arch=pkg.arch, type=MirrorRepositoryType.REMOTE.value, + estimated_size=size)) for component, version in component_filter.items(): if component in component_dict.keys(): log.get_logger().debug("filter component: {0} above version: {1}".format(component, version)) @@ -173,10 +185,10 @@ def list_component_parameters(self, parameter_request, accept_language): del(self.obd.namespaces[spacename]) if not ret: self.obd.deploy_manager.remove_deploy_config(name) - raise Exception("genconfig failed for compoennt: {0}".format(parameter_filter.component)) + raise Exception("genconfig failed for component: {0}".format(parameter_filter.component)) else: auto_keys = ret.get_return("generate_keys") - log.get_logger().info("auto keys for comopnent %s are %s", parameter_filter.component, auto_keys) + log.get_logger().info("auto keys for component %s are %s", parameter_filter.component, auto_keys) parameter_plugin = self.obd.plugin_manager.get_best_plugin(PluginType.PARAM, parameter_filter.component, parameter_filter.version) ## use plugin.params to generate parameter meta diff --git a/service/handler/deployment_handler.py b/service/handler/deployment_handler.py index 34709e0..6128ca6 100644 --- a/service/handler/deployment_handler.py +++ b/service/handler/deployment_handler.py @@ -27,6 +27,8 @@ import yaml from _deploy import DeployStatus, DeployConfigStatus from _errno import CheckStatus, FixEval +from _plugin import PluginType +from const import COMP_JRE, COMP_OCP_EXPRESS from service.api.v1.deployments import DeploymentInfo from service.handler.base_handler import BaseHandler from service.model.deployments import DeploymentConfig, PreCheckResult, RecoverChangeParameter, TaskInfo, \ @@ -39,6 +41,9 @@ from service.common.task import Serial as serial from service.common.task import 
AutoRegister as auto_register from ssh import LocalClient +from tool import COMMAND_ENV +from const import TELEMETRY_COMPONENT_OB +from _environ import ENV_TELEMETRY_REPORTER @singleton @@ -342,6 +347,7 @@ def _do_install(self, name): data = {} for component, _ in self.obd.namespaces.items(): data[component] = _.get_variable('run_result') + COMMAND_ENV.set(ENV_TELEMETRY_REPORTER, TELEMETRY_COMPONENT_OB, save=True) LocalClient.execute_command_background("nohup obd telemetry post %s --data='%s' > /dev/null &" % (name, json.dumps(data))) self.obd.set_deploy(None) @@ -557,7 +563,17 @@ def _do_precheck(self, repositories, start_check_plugins): raise Exception('generate config dump error,place check disk space!') for repository in repositories: - res = self.obd.call_plugin(start_check_plugins[repository], repository, init_check_status=False, work_dir_check=True, precheck=True) + java_check = True + if repository.name == COMP_OCP_EXPRESS: + jre_name = COMP_JRE + install_plugin = self.obd.search_plugin(repository, PluginType.INSTALL) + if install_plugin and jre_name in install_plugin.requirement_map(repository): + version = install_plugin.requirement_map(repository)[jre_name].version + min_version = install_plugin.requirement_map(repository)[jre_name].min_version + max_version = install_plugin.requirement_map(repository)[jre_name].max_version + if len(self.obd.search_images(jre_name, version=version, min_version=min_version, max_version=max_version)) > 0: + java_check = False + res = self.obd.call_plugin(start_check_plugins[repository], repository, init_check_status=False, work_dir_check=True, precheck=True, java_check=java_check) if not res and res.get_return("exception"): raise res.get_return("exception") diff --git a/service/handler/ocp_handler.py b/service/handler/ocp_handler.py index 893879e..447360f 100644 --- a/service/handler/ocp_handler.py +++ b/service/handler/ocp_handler.py @@ -40,9 +40,14 @@ from _deploy import DeployStatus, DeployConfigStatus, UserConfig from _errno import CheckStatus, FixEval from _repository import Repository -from ssh import SshClient, SshConfig +from _plugin import PluginType +from ssh import SshClient, SshConfig, LocalClient from tool import Cursor -from ssh import LocalClient +from const import COMP_JRE, COMPS_OCP +from tool import COMMAND_ENV +from const import TELEMETRY_COMPONENT_OCP +from _environ import ENV_TELEMETRY_REPORTER + @singleton @@ -140,7 +145,7 @@ def generate_metadb_config(self, cluster_config, oceanbase, home_path): log.get_logger().error('oceanbase component : %s not exist' % oceanbase.component) raise Exception('oceanbase component : %s not exist' % oceanbase.component) - def generate_obproxy_config(self, cluster_config, obproxy_config, home_path, ob_componet): + def generate_obproxy_config(self, cluster_config, obproxy_config, home_path, ob_component): comp_config = dict() config_dict = obproxy_config.dict() for key in config_dict: @@ -162,7 +167,7 @@ def generate_obproxy_config(self, cluster_config, obproxy_config, home_path, ob_ comp_config['global'][parameter.key] = parameter.value if 'depends' not in comp_config.keys(): comp_config['depends'] = list() - comp_config['depends'].append(ob_componet) + comp_config['depends'].append(ob_component) if obproxy_config.component == const.OBPROXY_CE: cluster_config[const.OBPROXY_CE] = comp_config elif obproxy_config.component == const.OBPROXY: @@ -192,23 +197,25 @@ def generate_ocp_config(self, cluster_config, config, home_path, launch_user, ob ocp_config['global']['jdbc_password'] = 
config_dict['metadb']['password'] if config.meta_tenant: - ocp_config['global']['ocp_meta_tenant'] = {} - ocp_config['global']['ocp_meta_tenant']['tenant_name'] = config_dict['meta_tenant']['name']['tenant_name'] - ocp_config['global']['ocp_meta_tenant']['max_cpu'] = config_dict['meta_tenant']['resource']['cpu'] - ocp_config['global']['ocp_meta_tenant']['memory_size'] = str(config_dict['meta_tenant']['resource']['memory']) + 'G' - ocp_config['global']['ocp_meta_username'] = config_dict['meta_tenant']['name']['user_name'] - ocp_config['global']['ocp_meta_password'] = config_dict['meta_tenant']['password'] - ocp_config['global']['ocp_meta_db'] = config_dict['meta_tenant']['name']['user_database'] if config_dict['meta_tenant']['name']['user_database'] != '' else 'meta_database' + tenant_config = cluster_config[ob_component] if ob_component is not None else ocp_config + tenant_config['global']['ocp_meta_tenant'] = {} + tenant_config['global']['ocp_meta_tenant']['tenant_name'] = config_dict['meta_tenant']['name']['tenant_name'] + tenant_config['global']['ocp_meta_tenant']['max_cpu'] = config_dict['meta_tenant']['resource']['cpu'] + tenant_config['global']['ocp_meta_tenant']['memory_size'] = str(config_dict['meta_tenant']['resource']['memory']) + 'G' + tenant_config['global']['ocp_meta_username'] = config_dict['meta_tenant']['name']['user_name'] + tenant_config['global']['ocp_meta_password'] = config_dict['meta_tenant']['password'] + tenant_config['global']['ocp_meta_db'] = config_dict['meta_tenant']['name']['user_database'] if config_dict['meta_tenant']['name']['user_database'] != '' else 'meta_database' self.context['meta_tenant'] = config_dict['meta_tenant']['name']['tenant_name'] if config.monitor_tenant: - ocp_config['global']['ocp_monitor_tenant'] = {} - ocp_config['global']['ocp_monitor_tenant']['tenant_name'] = config_dict['monitor_tenant']['name']['tenant_name'] - ocp_config['global']['ocp_monitor_tenant']['max_cpu'] = config_dict['monitor_tenant']['resource']['cpu'] - ocp_config['global']['ocp_monitor_tenant']['memory_size'] = str(config_dict['monitor_tenant']['resource']['memory']) + 'G' - ocp_config['global']['ocp_monitor_username'] = config_dict['monitor_tenant']['name']['user_name'] - ocp_config['global']['ocp_monitor_password'] = config_dict['monitor_tenant']['password'] - ocp_config['global']['ocp_monitor_db'] = config_dict['monitor_tenant']['name']['user_database'] if config_dict['monitor_tenant']['name']['user_database'] != '' else 'monitor_database' + tenant_config = cluster_config[ob_component] if ob_component is not None else ocp_config + tenant_config['global']['ocp_monitor_tenant'] = {} + tenant_config['global']['ocp_monitor_tenant']['tenant_name'] = config_dict['monitor_tenant']['name']['tenant_name'] + tenant_config['global']['ocp_monitor_tenant']['max_cpu'] = config_dict['monitor_tenant']['resource']['cpu'] + tenant_config['global']['ocp_monitor_tenant']['memory_size'] = str(config_dict['monitor_tenant']['resource']['memory']) + 'G' + tenant_config['global']['ocp_monitor_username'] = config_dict['monitor_tenant']['name']['user_name'] + tenant_config['global']['ocp_monitor_password'] = config_dict['monitor_tenant']['password'] + tenant_config['global']['ocp_monitor_db'] = config_dict['monitor_tenant']['name']['user_database'] if config_dict['monitor_tenant']['name']['user_database'] != '' else 'monitor_database' self.context['monitor_tenant'] = config_dict['monitor_tenant']['name']['tenant_name'] if config.home_path == '': @@ -227,9 +234,7 @@ def 
generate_ocp_config(self, cluster_config, config, home_path, launch_user, ob if not ob_component: if config_dict['metadb']: ocp_config['global']['jdbc_url'] = 'jdbc:oceanbase://' + config_dict['metadb']['host'] + ':' + str( - config_dict['metadb']['port']) + '/' + config_dict['metadb']['database'] - ocp_config['global']['jdbc_username'] = config_dict['metadb']['user'] - ocp_config['global']['jdbc_password'] = config_dict['metadb']['password'] + config_dict['metadb']['port']) + '/' + (config_dict['meta_tenant']['name']['user_database'] if config_dict['meta_tenant']['name']['user_database'] != '' else 'meta_database') if 'depends' not in ocp_config.keys() and ob_component and obp_component: ocp_config['depends'] = list() ocp_config['depends'].append(ob_component) @@ -480,8 +485,19 @@ def _do_precheck(self, repositories, start_check_plugins): ssh_clients = self.obd.get_clients(self.obd.deploy.deploy_config, repositories) for repository in repositories: log.get_logger().info('begin start_check: %s' % repository.name) + java_check = True + if repository.name in COMPS_OCP: + jre_name = COMP_JRE + install_plugin = self.obd.search_plugin(repository, PluginType.INSTALL) + if install_plugin and jre_name in install_plugin.requirement_map(repository): + version = install_plugin.requirement_map(repository)[jre_name].version + min_version = install_plugin.requirement_map(repository)[jre_name].min_version + max_version = install_plugin.requirement_map(repository)[jre_name].max_version + if len(self.obd.search_images(jre_name, version=version, min_version=min_version, max_version=max_version)) > 0: + java_check = False res = self.obd.call_plugin(start_check_plugins[repository], repository, init_check_status=False, - work_dir_check=True, precheck=True, clients=ssh_clients, sys_cursor=self.context['metadb_cursor']) + work_dir_check=True, precheck=True, java_check=java_check, clients=ssh_clients, + sys_cursor=self.context['metadb_cursor']) if not res and res.get_return("exception"): raise res.get_return("exception") log.get_logger().info('end start_check: %s' % repository.name) @@ -557,6 +573,7 @@ def parse_precheck_result(self, component, check_result, task_info, server, resu check_info.result = PrecheckEventResult.FAILED check_info.code = v.error.code check_info.advisement = v.error.msg + log.get_logger().debug("precheck item %s: %s", k, vars(v)) check_info.recoverable = len(v.suggests) > 0 and v.suggests[0].auto_fix all_passed = False info.status = TaskStatus.FINISHED @@ -684,6 +701,46 @@ def install(self, id, background_tasks): self.context['task_info'][self.context['ocp_deployment'][ret.id]] = ret return ret + def _create_tenant(self): + metadb_version = self.context['metadb_cursor'].fetchone("select ob_version() as version")["version"] + mock_oceanbase_repository = Repository("oceanbase-ce", "/") + mock_oceanbase_repository.version = metadb_version + repositories = [mock_oceanbase_repository] + create_tenant_plugins = self.obd.search_py_script_plugin(repositories, "create_tenant") + ocp_config = self.obd.deploy.deploy_config.components["ocp-server-ce"] + global_conf_with_default = ocp_config.get_global_conf_with_default() + meta_tenant_config = global_conf_with_default['ocp_meta_tenant'] + meta_tenant_config["variables"] = "ob_tcp_invited_nodes='%'" + meta_tenant_config["create_if_not_exists"] = True + meta_tenant_config["database"] = global_conf_with_default["ocp_meta_db"] + meta_tenant_config["db_username"] = global_conf_with_default["ocp_meta_username"] + meta_tenant_config["db_password"] = global_conf_with_default.get("ocp_meta_password", "") + meta_tenant_config[meta_tenant_config['tenant_name'] + "_root_password"] = global_conf_with_default.get("ocp_meta_password", "") + monitor_tenant_config = global_conf_with_default['ocp_monitor_tenant'] + monitor_tenant_config["variables"] = "ob_tcp_invited_nodes='%'" + monitor_tenant_config["create_if_not_exists"] = True + monitor_tenant_config["database"] = global_conf_with_default["ocp_monitor_db"] + monitor_tenant_config["db_username"] = global_conf_with_default["ocp_monitor_username"] + monitor_tenant_config["db_password"] = global_conf_with_default.get("ocp_monitor_password", "") + monitor_tenant_config[monitor_tenant_config['tenant_name'] + "_root_password"] = global_conf_with_default.get("ocp_monitor_password", "") + + ssh_clients = self.obd.get_clients(self.obd.deploy.deploy_config, self.obd.load_local_repositories(self.obd.deploy.deploy_info, False)) + + deploy = self.obd.deploy + self.obd.set_deploy(None) + log.get_logger().info("start create meta tenant") + create_meta_ret = self.obd.call_plugin(create_tenant_plugins[mock_oceanbase_repository], mock_oceanbase_repository, cluster_config=ocp_config, cursor=self.context['metadb_cursor'], create_tenant_options=[Values(meta_tenant_config)], clients=ssh_clients) + if not create_meta_ret: + self.obd.set_deploy(deploy) + raise Exception("Create meta tenant failed") + log.get_logger().info("start create monitor tenant") + create_monitor_ret = self.obd.call_plugin(create_tenant_plugins[mock_oceanbase_repository], mock_oceanbase_repository, cluster_config=ocp_config, cursor=self.context['metadb_cursor'], create_tenant_options=[Values(monitor_tenant_config)], clients=ssh_clients) + if not create_monitor_ret: + self.obd.set_deploy(deploy) + raise Exception("Create monitor tenant failed") + self.obd.set_deploy(deploy) + + @auto_register("install") def _do_install(self, id, task_id): self.context['deploy_status'] = self.context['process_installed'] = '' @@ -707,7 +764,12 @@ def _do_install(self, id, task_id): setattr(opt, "clean", True) setattr(opt, "force", True) self.obd.set_options(opt) + try: + # add create tenant operations before deploying ocp if it uses an existing OceanBase as its metadb cluster + if 'oceanbase-ce' not in self.obd.deploy.deploy_config.components['ocp-server-ce'].depends: + log.get_logger().info("does not depend on oceanbase, create tenants first") + self._create_tenant() deploy_success = self.obd.deploy_cluster(name) if not deploy_success: log.get_logger().warn("deploy %s failed", name) @@ -771,6 +833,7 @@ def _do_install(self, id, task_id): data = {} for component, _ in self.obd.namespaces.items(): data[component] = _.get_variable('run_result') + COMMAND_ENV.set(ENV_TELEMETRY_REPORTER, TELEMETRY_COMPONENT_OCP, save=True) LocalClient.execute_command_background("nohup obd telemetry post %s --data='%s' > /dev/null &" % (name, json.dumps(data))) def get_install_task_info(self, id, task_id): @@ -1204,7 +1267,18 @@ def _do_upgrade_precheck(self, repositories, start_check_plugins): raise Exception('generate config dump error,place check disk space!') for repository in repositories: - res = self.obd.call_plugin(start_check_plugins[repository], repository, database=self.context['meta_database'] ,meta_cursor=self.context['metadb_cursor']) + java_check = True + if repository.name in COMPS_OCP: + jre_name = COMP_JRE + install_plugin = self.obd.search_plugin(repository, PluginType.INSTALL) + if install_plugin and jre_name in install_plugin.requirement_map(repository): + version =
install_plugin.requirement_map(repository)[jre_name].version + min_version = install_plugin.requirement_map(repository)[jre_name].min_version + max_version = install_plugin.requirement_map(repository)[jre_name].max_version + if len(self.obd.search_images(jre_name, version=version, min_version=min_version, max_version=max_version)) > 0: + java_check = False + res = self.obd.call_plugin(start_check_plugins[repository], repository, database=self.context['meta_database'], + meta_cursor=self.context['metadb_cursor'], java_check=java_check) if not res and res.get_return("exception"): raise res.get_return("exception") @@ -1557,6 +1631,7 @@ def _ocp_upgrade_from_new_deployment(self, repositories, deploy, pkgs, name, met raise Exception("failed to deploy component: %s", repository.name) opt = Values() setattr(opt, "without_parameter", True) + setattr(opt, "skip_password_check", True) self.obd.set_options(opt) log.get_logger().info('begin start ocp') ret = self.obd.start_cluster(name) diff --git a/service/handler/service_info_handler.py b/service/handler/service_info_handler.py index 958b34d..d8e4696 100644 --- a/service/handler/service_info_handler.py +++ b/service/handler/service_info_handler.py @@ -86,7 +86,14 @@ def get_ocp_depend_config(self, cluster_config, stdio): if 'server_ip' not in depend_info: depend_info['server_ip'] = ob_server.ip depend_info['mysql_port'] = ob_server_conf['mysql_port'] - depend_info['root_password'] = ob_server_conf['root_password'] + depend_info['meta_tenant'] = ob_server_conf['ocp_meta_tenant']['tenant_name'] + depend_info['meta_user'] = ob_server_conf['ocp_meta_username'] + depend_info['meta_password'] = ob_server_conf['ocp_meta_password'] + depend_info['meta_db'] = ob_server_conf['ocp_meta_db'] + depend_info['monitor_tenant'] = ob_server_conf['ocp_monitor_tenant']['tenant_name'] + depend_info['monitor_user'] = ob_server_conf['ocp_monitor_username'] + depend_info['monitor_password'] = ob_server_conf['ocp_monitor_password'] + depend_info['monitor_db'] = ob_server_conf['ocp_monitor_db'] zone = ob_server_conf['zone'] if zone not in ob_zones: ob_zones[zone] = ob_server @@ -102,13 +109,23 @@ def get_ocp_depend_config(self, cluster_config, stdio): for server in cluster_config.servers: server_config = copy.deepcopy(cluster_config.get_server_conf_with_default(server)) - original_server_config = cluster_config.get_original_server_conf(server) + original_server_config = cluster_config.get_original_server_conf_with_global(server) missed_keys = self.get_missing_required_parameters(original_server_config) if missed_keys: if 'jdbc_url' in missed_keys and depend_observer: - server_config['jdbc_url'] = 'jdbc:oceanbase://{}:{}/{}'.format(depend_info['server_ip'], - depend_info['mysql_port'], - server_config['ocp_meta_db']) + if not server_config.get('ocp_meta_tenant', None): + server_config['ocp_meta_tenant'] = {} + if not server_config.get('ocp_monitor_tenant', None): + server_config['ocp_monitor_tenant'] = {} + server_config['jdbc_url'] = 'jdbc:oceanbase://{}:{}/{}'.format(depend_info['server_ip'], depend_info['mysql_port'], depend_info['meta_db'] if not original_server_config.get('ocp_meta_db', None) else original_server_config['ocp_meta_db']) if not original_server_config.get('jdbc_url', None) else original_server_config['jdbc_url'] + server_config['ocp_meta_username'] = depend_info['meta_user'] if not original_server_config.get('ocp_meta_username', None) else original_server_config['ocp_meta_username'] + server_config['ocp_meta_tenant']['tenant_name'] = 
depend_info['meta_tenant'] if not original_server_config.get('ocp_meta_tenant', None) else original_server_config['ocp_meta_tenant']['tenant_name'] + server_config['ocp_meta_password'] = depend_info['meta_password'] if not original_server_config.get('ocp_meta_password', None) else original_server_config['ocp_meta_password'] + server_config['ocp_meta_db'] = depend_info['meta_db'] if not original_server_config.get('ocp_meta_db', None) else original_server_config['ocp_meta_db'] + server_config['ocp_monitor_username'] = depend_info['monitor_user'] if not original_server_config.get('ocp_monitor_username', None) else original_server_config['ocp_monitor_username'] + server_config['ocp_monitor_tenant']['tenant_name'] = depend_info['monitor_tenant'] if not original_server_config.get('ocp_monitor_tenant', None) else original_server_config['ocp_monitor_tenant']['tenant_name'] + server_config['ocp_monitor_password'] = depend_info['monitor_password'] if not original_server_config.get('ocp_monitor_password', None) else original_server_config['ocp_monitor_password'] + server_config['ocp_monitor_db'] = depend_info['monitor_db'] if not original_server_config.get('ocp_monitor_db', None) else original_server_config['ocp_monitor_db'] server_config['jdbc_username'] = '%s@%s' % ( server_config['ocp_meta_username'], server_config['ocp_meta_tenant']['tenant_name']) server_config['jdbc_password'] = server_config['ocp_meta_password'] @@ -234,7 +251,11 @@ def get_metadb_connection(self, name): metadb_copy.password = '' return metadb_copy return metadb + if deploy.deploy_info.status != DeployStatus.STATUS_RUNNING: + raise Exception ("previous deploy is not running") deploy_config = deploy.deploy_config + repositories = self.obd.load_local_repositories(deploy.deploy_info, False) + self.obd.search_param_plugin_and_apply(repositories, deploy_config) if const.OCP_SERVER in deploy_config.components: cluster_config = deploy_config.components[const.OCP_SERVER] elif const.OCP_SERVER_CE in deploy_config.components: diff --git a/ssh.py b/ssh.py index 25352b1..9564d16 100644 --- a/ssh.py +++ b/ssh.py @@ -362,7 +362,7 @@ def add_env(self, key, value, rewrite=False, stdio=None): if self._is_local: self._add_env_for_local(key, value, rewrite) else: - self.env[key] += value + self.env[key] = value + self.env[key] self._update_env() def _add_env_for_local(self, key, value, rewrite=False): @@ -371,7 +371,7 @@ def _add_env_for_local(self, key, value, rewrite=False): else: if key not in self.env: self.env[key] = COMMAND_ENV.get(key, '') - self.env[key] += value + self.env[key] = value + self.env[key] def get_env(self, key, stdio=None): return self.env[key] if key in self.env else None @@ -394,6 +394,7 @@ def _login(self, stdio=None, exit=True): err = None try: self.ssh_client.set_missing_host_key_policy(AutoAddPolicy()) + self.ssh_client.set_log_channel(None) stdio.verbose('host: %s, port: %s, user: %s, password: %s' % (self.config.host, self.config.port, self.config.username, self.config.password)) self.ssh_client.connect( self.config.host, diff --git a/tool.py b/tool.py index f37f689..a632a74 100644 --- a/tool.py +++ b/tool.py @@ -71,7 +71,7 @@ class OrderedDict(dict): pass -__all__ = ("timeout", "DynamicLoading", "ConfigUtil", "DirectoryUtil", "FileUtil", "YamlLoader", "OrderedDict", "COMMAND_ENV", "TimeUtils") +__all__ = ("timeout", "DynamicLoading", "ConfigUtil", "DirectoryUtil", "FileUtil", "YamlLoader", "OrderedDict", "COMMAND_ENV", "TimeUtils", "Cursor") _WINDOWS = os.name == 'nt' @@ -231,7 +231,7 @@ def 
get_random_pwd_by_total_length(pwd_length=10): return pwd @staticmethod - def get_random_pwd_by_rule(lowercase_length=2, uppercase_length=2, digits_length=2, punctuation_length=2): + def get_random_pwd_by_rule(lowercase_length=2, uppercase_length=2, digits_length=2, punctuation_length=2, punctuation_chars='(._+@#%)'): pwd = "" for i in range(lowercase_length): pwd += random.choice(string.ascii_lowercase) @@ -240,7 +240,7 @@ def get_random_pwd_by_rule(lowercase_length=2, uppercase_length=2, digits_length for i in range(digits_length): pwd += random.choice(string.digits) for i in range(punctuation_length): - pwd += random.choice('(._+@#%)') + pwd += random.choice(punctuation_chars) pwd_list = list(pwd) random.shuffle(pwd_list) return ''.join(pwd_list) @@ -322,11 +322,12 @@ def mkdir(path, mode=0o755, stdio=None): def rm(path, stdio=None): stdio and getattr(stdio, 'verbose', print)('rm %s' % path) try: - if os.path.exists(path): - if os.path.islink(path): - os.remove(path) - else: - shutil.rmtree(path) + if os.path.islink(path): + os.remove(path) + elif os.path.exists(path): + shutil.rmtree(path) + else: + pass return True except Exception as e: stdio and getattr(stdio, 'exception', print)('') @@ -584,6 +585,7 @@ def var_replace(string, var, pattern=_KEYCRE): return ''.join(done) + class CommandEnv(SafeStdio): def __init__(self): @@ -677,6 +679,7 @@ def get_host_ip(): ip = socket.gethostbyname(hostname) return ip + COMMAND_ENV=CommandEnv() @@ -748,6 +751,7 @@ def parse_time_since(since=None, stdio=None): format_from_time = TimeUtils.sub_minutes(format_to_time, 30) return format_from_time, format_to_time + class Cursor(SafeStdio): def __init__(self, ip, port, user='root', tenant='sys', password='', stdio=None): @@ -832,3 +836,4 @@ def close(self): if self.db: self.db.close() self.db = None + diff --git a/web/package.json b/web/package.json index 940ff17..41ec745 100644 --- a/web/package.json +++ b/web/package.json @@ -34,7 +34,7 @@ "classnames": "^2.3.2", "copy-to-clipboard": "3.3.3", "cross-env": "7.0.3", - "i18next": "^23.7.16", + "i18next": "^23.8.1", "lodash": "^4.17.21", "lottie-web": "5.10.2", "moment": "^2.29.4", diff --git a/web/src/component/CustomPasswordInput/index.tsx b/web/src/component/CustomPasswordInput/index.tsx index aa2881a..7afe95d 100644 --- a/web/src/component/CustomPasswordInput/index.tsx +++ b/web/src/component/CustomPasswordInput/index.tsx @@ -1,10 +1,17 @@ +import { + copyText, + generateRandomPassword, + OB_PASSWORD_ERROR_REASON, + OCP_PASSWORD_ERROR_REASON, + OCP_PASSWORD_ERROR_REASON_OLD, + passwordCheck, + passwordCheckLowVersion +} from '@/utils/helper'; import { intl } from '@/utils/intl'; import { ProForm } from '@ant-design/pro-components'; -import { Input, Button, message } from 'antd'; +import { Button, Input, message } from 'antd'; import { FormInstance } from 'antd/lib/form'; import { NamePath } from 'rc-field-form/lib/interface'; -import { generateRandomPassword } from '@/utils'; -import { copyText } from '@/utils/helper'; interface CustomPasswordInputProps { onChange: (value: string) => void; @@ -14,6 +21,8 @@ interface CustomPasswordInputProps { showCopyBtn?: boolean; form: FormInstance; msgInfo: MsgInfoType; + useOldRuler?: boolean; + useFor: 'ob' | 'ocp'; setMsgInfo: React.Dispatch>; } @@ -21,8 +30,12 @@ type MsgInfoType = { validateStatus: 'success' | 'error'; errorMsg: string | null; }; -const passwordReg = - 
/^(?=.*[A-Z].*[A-Z])(?=.*[a-z].*[a-z])(?=.*\d.*\d)(?=.*[~!@#%^&*_\-+=`|(){}[\]:;',.?/].*[~!@#%^&*_\-+=`|(){}[\]:;',.?/])[A-Za-z\d~!@#%^&*_\-+=`|(){}[\]:;',.?/]{8,32}$/; + +/** + * + * @param useOldRuler use the previous password validation rules for OCP versions below 4.2.2 + * @returns + */ export default function CustomPasswordInput({ onChange, value, @@ -31,35 +44,30 @@ export default function CustomPasswordInput({ name, msgInfo, setMsgInfo, + useOldRuler = false, + useFor, ...props }: CustomPasswordInputProps) { const textStyle = { marginTop: '8px' }; - const validateInput = (value: string): MsgInfoType => { - const regex = /^[A-Za-z\d~!@#%^&*_\-+=`|(){}[\]:;',.?/]*$/; - if (value.length < 8 || value.length > 32) { + const oldValidateInput = (value: string): MsgInfoType => { + if (!passwordCheckLowVersion(value)) { return { validateStatus: 'error', - errorMsg: intl.formatMessage({ - id: 'OBD.component.CustomPasswordInput.TheLengthShouldBeTo', - defaultMessage: '长度应为 8~32 个字符', - }), + errorMsg: OCP_PASSWORD_ERROR_REASON_OLD, }; - } else if (!regex.test(value)) { - return { - validateStatus: 'error', - errorMsg: intl.formatMessage({ - id: 'OBD.component.CustomPasswordInput.CanOnlyContainLettersNumbers', - defaultMessage: - "只能包含字母、数字和特殊字符~!@#%^&*_-+=`|(){}[]:;',.?/", - }), - }; - } else if (!passwordReg.test(value)) { + } + return { + validateStatus: 'success', + errorMsg: null, + }; + }; + const newValidateInput = (value: string): MsgInfoType => { + if (!passwordCheck(value, useFor)) { + const REASON = + useFor === 'ob' ? OB_PASSWORD_ERROR_REASON : OCP_PASSWORD_ERROR_REASON; return { validateStatus: 'error', - errorMsg: intl.formatMessage({ - id: 'OBD.component.CustomPasswordInput.AtLeastUppercaseAndLowercase', - defaultMessage: '大小写字母、数字和特殊字符都至少包含 2 个', - }), + errorMsg: REASON, }; } return { @@ -67,13 +75,14 @@ errorMsg: null, }; }; + const validateInput = useOldRuler ? oldValidateInput : newValidateInput; const handleChange = (value: string) => { setMsgInfo(validateInput(value)); onChange(value); }; const handleRandomGenerate = () => { - const password = generateRandomPassword(); + const password = generateRandomPassword(useFor, useOldRuler); setMsgInfo(validateInput(password)); onChange(password); }; diff --git a/web/src/component/DeployConfig/index.tsx b/web/src/component/DeployConfig/index.tsx index aba8662..4c611d4 100644 --- a/web/src/component/DeployConfig/index.tsx +++ b/web/src/component/DeployConfig/index.tsx @@ -14,7 +14,11 @@ import { import { getTailPath } from '@/utils/helper'; import { intl } from '@/utils/intl'; import customRequest from '@/utils/useRequest'; -import { CopyOutlined, InfoCircleOutlined } from '@ant-design/icons'; +import { + CopyOutlined, + InfoCircleOutlined, + SafetyCertificateFilled, +} from '@ant-design/icons'; import { ProCard, ProForm, ProFormText } from '@ant-design/pro-components'; import { useRequest } from 'ahooks'; import { @@ -272,15 +276,16 @@ export default function DeployConfig({ labelInValue onChange={(value) => onVersionChange(value, record)} style={{ width: 207 }} + popupClassName={styles?.popupClassName} > - {_.map((item: any) => ( - -
-
{ + return ( + + V {item.version} {item?.release ? `-${item?.release}` : ''} -
+ {item.versionType === 'ce' ? ( {intl.formatMessage({ @@ -306,9 +311,20 @@ export default function DeployConfig({ })} )} -
-
- ))} + {item?.type === 'local' ? ( + + + {intl.formatMessage({ + id: 'OBD.pages.components.InstallConfig.LocalImage', + defaultMessage: '本地镜像', + })} + + ) : ( + '' + )} + + ); + })} )} @@ -402,6 +418,7 @@ export default function DeployConfig({ version: item.version, release: item.release, md5: item.md5, + type: item.type, })); }; @@ -609,7 +626,7 @@ export default function DeployConfig({ key: component?.key!, }; setInitVersion(item); - memory += caculateSize(getRecommendInfo(item).estimated_size); + memory += getRecommendInfo(item).estimated_size; dataSource.push(temp); return memory; }; @@ -721,7 +738,6 @@ export default function DeployConfig({ } else { setComponentLoading(false); } - completionComponent(dataSource); isNewDB && sortComponent(dataSource); setDeployMemory(memory); @@ -858,7 +874,7 @@ export default function DeployConfig({ id: 'OBD.component.DeployConfig.EstimatedInstallationRequirements', defaultMessage: '预计安装需要', })} - {deployMemory} + {caculateSize(deployMemory)} {intl.formatMessage({ id: 'OBD.component.DeployConfig.MbSpace', defaultMessage: 'MB空间', diff --git a/web/src/component/MetaDBConfig/ClusterConfig.tsx b/web/src/component/MetaDBConfig/ClusterConfig.tsx index dbdb089..33eea87 100644 --- a/web/src/component/MetaDBConfig/ClusterConfig.tsx +++ b/web/src/component/MetaDBConfig/ClusterConfig.tsx @@ -1,23 +1,24 @@ import { oceanbaseAddonAfter, PARAMETER_TYPE } from '@/constant/configuration'; import ConfigTable from '@/pages/Obdeploy/ClusterConfig/ConfigTable'; import { queryComponentParameters } from '@/services/ob-deploy-web/Components'; +import { getErrorInfo } from '@/utils'; import { + formatMoreConfig, generateRandomPassword as generatePassword, - getErrorInfo, - passwordRules, -} from '@/utils'; -import { formatMoreConfig } from '@/utils/helper'; + getPasswordRules, +} from '@/utils/helper'; import { intl } from '@/utils/intl'; import useRequest from '@/utils/useRequest'; import { DownOutlined, RightOutlined } from '@ant-design/icons'; import { ProFormText } from '@ant-design/pro-components'; -import { useUpdateEffect } from 'ahooks'; import { Button, Row, Space } from 'antd'; +import { useUpdateEffect } from 'ahooks'; import { FormInstance } from 'antd/lib/form'; import { useState } from 'react'; import { useModel } from 'umi'; import InputPort from '../InputPort'; -import styles from './indexZh.less'; +import styles from './index.less'; + export default function ClusterConfig({ form }: { form: FormInstance }) { const [clusterMoreLoading, setClusterMoreLoading] = useState(false); @@ -141,7 +142,7 @@ export default function ClusterConfig({ form }: { form: FormInstance }) { }; const generateRandomPassword = () => { - const password = generatePassword(); + const password = generatePassword('ob'); setPassword(password); }; @@ -168,7 +169,7 @@ export default function ClusterConfig({ form }: { form: FormInstance }) { id: 'OBD.component.MetaDBConfig.ClusterConfig.RootSysPassword', defaultMessage: 'root@sys 密码', })} - rules={passwordRules} + rules={getPasswordRules('ob')} fieldProps={{ style: { width: 328 }, autoComplete: 'new-password', @@ -186,7 +187,7 @@ export default function ClusterConfig({ form }: { form: FormInstance }) { - } - headStyle={{ paddingLeft: '16px', paddingRight: '16px' }} - > - - - {loading ? 
null : ( - <> - - - - {statusData?.precheck_result?.map( - (item: API.PreCheckInfo, index: number) => { - //根据索引找到对应项 - const task_info_item = - statusData?.task_info?.info[index - 1]; - return ( - - ) : ( - - ) - ) : null - } - > - {item?.name} {item?.server} - - ); - }, - )} - - - )} - - - {hasManual ? ( - setOnlyManual(e.target.checked)} - disabled={!checkFinished || statusData?.all_passed} - > - {intl.formatMessage({ - id: 'OBD.pages.components.PreCheckStatus.OnlyManualFixes', - defaultMessage: '只看手动修复项', - })} - - ) : null} - - - } - > - {showFailedList?.length ? ( -
- {showFailedList?.map((item, index) => { - let reason = ''; - if (item?.advisement) { - const index = item?.advisement.indexOf(':'); - reason = item?.advisement.substring( - index, - item?.advisement.length, - ); - } - return ( - - - - - - {item.name} - - - - - {intl.formatMessage({ - id: 'OBD.pages.components.PreCheckStatus.Reason', - defaultMessage: '原因:', - })} - - ERR-{item.code} - {' '} - {reason} - - - - - - {intl.formatMessage({ - id: 'OBD.pages.components.PreCheckStatus.Suggestions', - defaultMessage: '建议:', - })} - {item.recoverable ? ( - - {intl.formatMessage({ - id: 'OBD.pages.components.PreCheckStatus.AutomaticRepair', - defaultMessage: '自动修复', - })} - - ) : ( - - {intl.formatMessage({ - id: 'OBD.pages.components.PreCheckStatus.ManualRepair', - defaultMessage: '手动修复', - })} - - )}{' '} - {item.advisement} - -
- - {intl.formatMessage({ - id: 'OBD.pages.components.PreCheckStatus.LearnMore', - defaultMessage: '了解更多方案', - })} - -
-
- ); - })} - {!checkFinished ? ( -
{shape}
- ) : null} -
- ) : checkFinished ? ( - - {intl.formatMessage({ - id: 'OBD.pages.components.PreCheckStatus.GreatNoFailedItems', - defaultMessage: '太棒了!无失败项', - })} - - } - /> - ) : ( -
- {shape} -
- {intl.formatMessage({ - id: 'OBD.pages.components.PreCheckStatus.NoFailedItemsFoundYet', - defaultMessage: '暂未发现失败项', - })} -
-
- )} -
- - - {' '} - - - {!statusData?.all_passed ? ( - - - - ) : ( - - )} - - + ); } diff --git a/web/src/component/OCPPreCheck/helper.ts b/web/src/component/OCPPreCheck/helper.ts index 2ddc9ab..91d8878 100644 --- a/web/src/component/OCPPreCheck/helper.ts +++ b/web/src/component/OCPPreCheck/helper.ts @@ -76,3 +76,22 @@ export const formatPreCheckData = (configData: any) => { } return _configData; }; + +export const formatOcpPreCheckStatusData = ( + dataSource: API.PrecheckTaskInfo, +) => { + dataSource.finished = + dataSource?.task_info?.info.filter( + (item) => item.result === 'SUCCESSFUL' || item.result === 'FAILED', + ).length || 0; + dataSource.total = dataSource?.precheck_result?.length || 0; + dataSource.all_passed = dataSource.task_info.result === 'SUCCESSFUL'; + + dataSource.timelineData = dataSource.task_info.info.map((item) => { + let result = {}; + result.isRunning = item?.result === 'RUNNING'; + result.result = item?.result; + return result; + }); + return { ...dataSource }; +}; diff --git a/web/src/component/PreCheck/preCheck.tsx b/web/src/component/PreCheck/preCheck.tsx new file mode 100644 index 0000000..bee73d0 --- /dev/null +++ b/web/src/component/PreCheck/preCheck.tsx @@ -0,0 +1,513 @@ +import { intl } from '@/utils/intl'; +import { useEffect } from 'react'; +import { + Space, + Button, + Progress, + Timeline, + Checkbox, + Typography, + Tooltip, + Tag, + Spin, + Empty, +} from 'antd'; +import { ProCard } from '@ant-design/pro-components'; +import { + CloseOutlined, + QuestionCircleFilled, + ReadFilled, + CheckCircleFilled, + CloseCircleFilled, +} from '@ant-design/icons'; +import CustomFooter from '../CustomFooter'; +import ExitBtn from '@/component/ExitBtn'; +import NP from 'number-precision'; +import { getLocale } from 'umi'; +import ZhStyles from '@/pages/Obdeploy/indexZh.less'; +import EnStyles from '@/pages/Obdeploy/indexEn.less'; + +interface PreCehckComponentProps { + checkFinished: boolean; + checkStatus: boolean; + createLoading?: boolean; + preCheckLoading: boolean; + loading: boolean; + hasManual: boolean; + hasAuto: boolean; + recoverLoading: boolean; + precheckLoading?: boolean; + errCodeLink:string; + failedList: API.PreCheckInfo[]; + statusData: API.PreCheckResult; + showFailedList: API.PreCheckInfo[]; + handleAutoRepair: () => void; + handleRetryCheck: (parm?: any) => void; + prevStep: () => void; + handleInstall: () => void; + handelCheck: () => void; + setOnlyManual: React.Dispatch>; + setIsScroll: React.Dispatch>; + setIsScrollFailed: React.Dispatch>; +} + +const locale = getLocale(); +const styles = locale === 'zh-CN' ? 
ZhStyles : EnStyles; + +const { Text } = Typography; + +const statusColorConfig = { + PASSED: 'green', + PENDING: 'gray', + FAILED: 'red', + SUCCESSFUL: 'green' +}; + +let timerScroll: NodeJS.Timer; +let timerFailed: NodeJS.Timer; +const initDuration = 3; +let durationScroll = initDuration; +let durationFailed = initDuration; +export default function PreCehckComponent({ + checkFinished, + failedList, + statusData, + checkStatus, + createLoading, + preCheckLoading, + errCodeLink, + handleRetryCheck, + loading, + hasManual, + setOnlyManual, + handleAutoRepair, + hasAuto, + recoverLoading, + showFailedList, + prevStep, + precheckLoading, + handleInstall, + setIsScroll, + setIsScrollFailed, +}: PreCehckComponentProps) { + const handleScrollTimeline = () => { + if (!checkFinished) { + setIsScroll(true); + clearInterval(timerScroll); + durationScroll = initDuration; + timerScroll = setInterval(() => { + if (durationScroll === 0) { + clearInterval(timerScroll); + setIsScroll(false); + durationScroll = initDuration; + } else { + durationScroll -= 1; + } + }, 1000); + } + }; + + const handleScrollFailed = () => { + if (!checkFinished) { + setIsScrollFailed(true); + clearInterval(timerFailed); + durationFailed = initDuration; + timerFailed = setInterval(() => { + if (durationFailed === 0) { + clearInterval(timerFailed); + setIsScrollFailed(false); + durationFailed = initDuration; + } else { + durationFailed -= 1; + } + }, 1000); + } + }; + + useEffect(() => { + const timelineContainer = document.getElementById('timeline-container'); + timelineContainer.onmousewheel = handleScrollTimeline; // ie , chrome + timelineContainer?.addEventListener('DOMMouseScroll', handleScrollTimeline); // firefox + return () => { + timelineContainer.onmousewheel = () => {}; + timelineContainer?.removeEventListener( + 'DOMMouseScroll', + handleScrollTimeline, + ); + }; + }, []); + + useEffect(() => { + const addEventFailedContainer = () => { + const failedContainer = document.getElementById('failed-container'); + if (failedList?.length && failedContainer) { + if (!failedContainer.onmousewheel) { + failedContainer.onmousewheel = handleScrollFailed; // ie , chrome + failedContainer?.addEventListener( + 'DOMMouseScroll', + handleScrollFailed, + ); + // firefox + } + } else { + setTimeout(() => { + addEventFailedContainer(); + }, 3000); + } + }; + + addEventFailedContainer(); + return () => { + const failedContainer = document.getElementById('failed-container'); + if (failedContainer) { + failedContainer.onmousewheel = () => {}; + failedContainer?.removeEventListener( + 'DOMMouseScroll', + handleScrollFailed, + ); + } + }; + }, [failedList]); + + let progressStatus = 'active'; + if (statusData?.task_info?.status === 'FAILED') { + progressStatus = 'exception'; + } else if (checkFinished) { + if (statusData.all_passed) { + progressStatus = 'success'; + } else { + progressStatus = 'exception'; + } + } + + const shape = ( +
+
+
+
+
+
+ ); + + const checkItemLength = `${statusData?.finished || 0}/${ + statusData?.total || 0 + }`; + const failedItemLength = failedList?.length; + return ( + + + handleRetryCheck()} + data-aspm-click="c307513.d317293" + data-aspm-desc={intl.formatMessage({ + id: 'OBD.component.PreCheck.preCheck.PreCheckResultReCheck', + defaultMessage: '预检查结果-重新检查', + })} + data-aspm-param={``} + data-aspm-expo + > + {intl.formatMessage({ + id: 'OBD.component.PreCheck.preCheck.ReCheck', + defaultMessage: '重新检查', + })} + + } + headStyle={{ paddingLeft: '16px', paddingRight: '16px' }} + > + + + {loading ? null : ( + <> + + + + {(statusData?.info || statusData?.task_info?.info)?.map( + (item: API.PreCheckInfo, index: number) => { + //根据索引找到对应项 + const timelineData = statusData?.timelineData[index]; + return ( + + ) : ( + + ) + ) : null + } + > + {item?.name} {item?.server} + + ); + }, + )} + + + )} + + + {hasManual ? ( + setOnlyManual(e.target.checked)} + disabled={!checkFinished || statusData?.all_passed} + > + {intl.formatMessage({ + id: 'OBD.component.PreCheck.preCheck.OnlyManualFixes', + defaultMessage: '只看手动修复项', + })} + + ) : null} + + + } + > + {showFailedList?.length ? ( +
+ {showFailedList?.map((item, index) => { + let reason = '',responseReason = item?.description || item?.advisement as string + if (responseReason) { + const index = responseReason.indexOf(':'); + reason = responseReason.substring( + index, + responseReason.length, + ); + } + return ( + + + + + + {item.name} + + + + + {intl.formatMessage({ + id: 'OBD.component.PreCheck.preCheck.Reason', + defaultMessage: '原因:', + })} + + OBD-{item.code} + {' '} + {reason} + + + + + + {intl.formatMessage({ + id: 'OBD.component.PreCheck.preCheck.Suggestions', + defaultMessage: '建议:', + })} + {item.recoverable ? ( + + {intl.formatMessage({ + id: 'OBD.component.PreCheck.preCheck.AutomaticRepair', + defaultMessage: '自动修复', + })} + + ) : ( + + {intl.formatMessage({ + id: 'OBD.component.PreCheck.preCheck.ManualRepair', + defaultMessage: '手动修复', + })} + + )}{' '} + {item.advisement?.description || item?.advisement} + +
+ + {intl.formatMessage({ + id: 'OBD.component.PreCheck.preCheck.LearnMore', + defaultMessage: '了解更多方案', + })} + +
+
+ ); + })} + {!checkFinished ? ( +
{shape}
+ ) : null} +
+ ) : checkFinished ? ( + + {intl.formatMessage({ + id: 'OBD.component.PreCheck.preCheck.GreatNoFailedItems', + defaultMessage: '太棒了!无失败项', + })} + + } + /> + ) : ( +
+ {shape} +
+ {intl.formatMessage({ + id: 'OBD.component.PreCheck.preCheck.NoFailedItemsFoundYet', + defaultMessage: '暂未发现失败项', + })} +
+
+ )} + + + + {' '} + + + {!statusData?.all_passed ? ( + + + + ) : ( + + )} + + + ); +} diff --git a/web/src/constant/docs.ts b/web/src/constant/docs.ts index a27f823..8c89e98 100644 --- a/web/src/constant/docs.ts +++ b/web/src/constant/docs.ts @@ -40,9 +40,9 @@ export function getDocs(getLocale) { //OCP Express const OCP_EXPRESS_CN = - 'https://www.oceanbase.com/docs/common-oceanbase-database-cn-1000000000218662'; + 'https://www.oceanbase.com/docs/common-oceanbase-database-cn-1000000000508228'; const OCP_EXPRESS_EN = - 'https://en.oceanbase.com/docs/common-oceanbase-database-10000000000870497'; + 'https://en.oceanbase.com/docs/common-oceanbase-database-10000000001166743'; const OCP_EXPRESS = getLocale() === 'zh-CN' ? OCP_EXPRESS_CN : OCP_EXPRESS_EN; //OCP @@ -52,9 +52,9 @@ export function getDocs(getLocale) { //OBAgent const OBAGENT_DOCS_CN = - 'https://www.oceanbase.com/docs/common-oceanbase-database-cn-1000000000220263'; + 'https://www.oceanbase.com/docs/common-oceanbase-database-cn-1000000000509852'; const OBAGENT_DOCS_EN = - 'https://en.oceanbase.com/docs/common-oceanbase-database-10000000001029751'; + 'https://en.oceanbase.com/docs/common-oceanbase-database-10000000001168964'; const OBAGENT_DOCS = getLocale() === 'zh-CN' ? OBAGENT_DOCS_CN : OBAGENT_DOCS_EN; @@ -66,9 +66,9 @@ export function getDocs(getLocale) { //模式配置规则 const MODE_CONFIG_RULE_CN = - 'https://www.oceanbase.com/docs/community-obd-cn-1000000000487050'; + 'https://www.oceanbase.com/docs/community-obd-cn-1000000000634883'; const MODE_CONFIG_RULE_EN = - 'https://en.oceanbase.com/docs/community-obd-en-10000000001082203'; + 'https://en.oceanbase.com/docs/community-obd-en-10000000001181555'; const MODE_CONFIG_RULE = getLocale() === 'zh-CN' ? MODE_CONFIG_RULE_CN : MODE_CONFIG_RULE_EN; @@ -81,8 +81,12 @@ export function getDocs(getLocale) { const ERR_CODE = getLocale() === 'zh-CN' ? ERR_CODE_CN : ERR_CODE_EN; // 部署向导帮助文档 + const DOCS_PRODUCTION_CN = + 'https://www.oceanbase.com/docs/community-obd-cn-1000000000634915'; + const DOCS_PRODUCTION_EN = + 'https://en.oceanbase.com/docs/community-obd-en-10000000001181618'; const DOCS_PRODUCTION = - 'https://www.oceanbase.com/docs/community-obd-cn-1000000000314362'; + getLocale() === 'zh-CN' ? 
DOCS_PRODUCTION_CN : DOCS_PRODUCTION_EN; // SOP文档 const DOCS_SOP = 'https://ask.oceanbase.com/t/topic/35605473'; diff --git a/web/src/constant/index.ts b/web/src/constant/index.ts index 2bed93e..faa0e8f 100644 --- a/web/src/constant/index.ts +++ b/web/src/constant/index.ts @@ -1,16 +1,6 @@ import moment from 'moment'; -// import { -// getNameValidateMessage, -// getChineseNameValidateMessage, -// getUsernameValidateMessage, -// getDatabaseNameValidateMessage, -// getMySQLDbUserNameValidateMessage, -// getOracleDbUserNameValidateMessage, -// getSpaceValidateMessage, -// } from '@/constant/component'; import { MICROSECOND } from '@/constant/must-ignore'; import { intl } from '@/utils/intl'; -// import { showTotal } from '@/utils'; export const ALL = '__OCP_ALL_CONSTANT_VALUE__'; @@ -20,85 +10,6 @@ export const WILDCARD = '*'; // OB 官网链接 export const OB_SITE_LINK = 'https://www.oceanbase.com'; -/* 正则表达式 */ - -// // 通用名称正则校验: 以英文字母开头、英文或数字结尾,可包含英文、数字和下划线,且长度为 2 ~ 32 -// export const NAME_REGEXP = /^[a-zA-Z]{1,1}[a-zA-Z0-9_]{0,30}[a-zA-Z0-9]{1,1}$/; -// export const NAME_RULE = { -// pattern: NAME_REGEXP, -// message: getNameValidateMessage(), -// }; - -// // 支持中文名称正则校验: 可包含中文、英文、数字、下划线、中横线,且长度为 2 ~ 32 -// export const CHINESE_NAME_REGEXP = /^[a-zA-Z0-9\-_\u4e00-\u9fa5]{2,32}$/; -// export const CHINESE_NAME_RULE = { -// pattern: CHINESE_NAME_REGEXP, -// message: getChineseNameValidateMessage(), -// }; - -// // 合法名称正则校验,不限制长度,可结合 getTextLength() 搭配使用 -// export const VALID_NAME_REGEXP = /^[a-zA-Z][a-zA-Z0-9_]*$/; -// export const VALID_NAME_RULE = { -// pattern: VALID_NAME_REGEXP, -// message: intl.formatMessage({ -// id: 'ocp-express.src.constant.ItMustStartWithA', -// defaultMessage: '以英文字母开头,可包含英文、数字和下划线', -// }), -// }; - -// // database 命名规则 -// export const DATABASE_NAME_RULE = { -// pattern: /^[a-z]{1,1}[a-z0-9_]{1,127}$/, -// message: getDatabaseNameValidateMessage(), -// }; - -// // MySQLdbuser 命名规则 -// export const DATABASE_USER_NAME_RULE = { -// pattern: /^[a-z]{1,1}[a-z0-9_]{1,63}$/, -// message: getMySQLDbUserNameValidateMessage(), -// }; - -// // Oracle dbuUser 命名规则 -// export const ORACLE_DATABASE_USER_NAME_RULE = { -// pattern: /^[a-zA-Z]{1,1}[a-zA-Z0-9_]{1,29}$/, -// message: getOracleDbUserNameValidateMessage(30), -// }; - -// // Oracle dbRole 命名规则 -// export const ORACLE_DATABASE_ROLE_NAME_RULE = { -// pattern: /^[a-zA-Z]{1,1}[a-zA-Z0-9_]{1,29}$/, -// message: getOracleDbUserNameValidateMessage(30), -// }; - -// // 用户名称正则校验: 以英文字母开头、英文或数字结尾,可包含英文、数字、点号、中划线和下划线,且长度为 4 ~ 48 -// export const USERNAME_REGEXP = /^[a-zA-Z]{1,1}[a-zA-Z0-9.\-_]{2,46}[a-zA-Z0-9]{1,1}$/; -// export const USERNAME_RULE = { -// pattern: USERNAME_REGEXP, -// message: getUsernameValidateMessage(), -// }; - -// // 特殊字符支持 ~!@#%^&*_\-+=|(){}[]:;,.?/ -// export const PASSWORD_REGEX = -// /^(?=(.*[a-z]){2,})(?=(.*[A-Z]){2,})(?=(.*\d){2,})(?=(.*[~!@#%^&*_\-+=|(){}\[\]:;,.?/]){2,})[A-Za-z\d~!@#%^&*_\-+=|(){}\[\]:;,.?/]{8,32}$/; - -// // 校验空格 -// export const SPACE_REGEX = /^[^\s]*$/; -// // export const SPACE_RULE = { -// // pattern: SPACE_REGEX, -// // message: getSpaceValidateMessage(), -// // }; - -// export const BOOLEAN_LIST = [ -// { -// label: intl.formatMessage({ id: 'ocp-express.src.constant.Is', defaultMessage: '是' }), -// value: true, -// }, - -// { -// label: intl.formatMessage({ id: 'ocp-express.src.constant.No', defaultMessage: '否' }), -// value: false, -// }, -// ]; export const SMLL_FORM_ITEM_LAYOUT = { labelCol: { @@ -395,4 +306,4 @@ export const OCP_UPGRADE_STATUS_LIST = [ value: 'IGNORED', 
badgeStatus: 'ignored', }, -]; +]; \ No newline at end of file diff --git a/web/src/i18n/strings/en-US.json b/web/src/i18n/strings/en-US.json index d1a0952..4622d89 100644 --- a/web/src/i18n/strings/en-US.json +++ b/web/src/i18n/strings/en-US.json @@ -448,7 +448,7 @@ "OBD.src.utils.SelectTheCorrectOcpNode": "Select the correct OCP node", "OBD.src.utils.EnterAPassword": "Enter a password", "OBD.src.utils.ToCharactersInLength": "8 to 32 characters in length", - "OBD.src.utils.CanOnlyContainLettersNumbers": "Can only contain letters, numbers, and special characters (~! @ **** & *_-+ = |(){}[]:;,.? /'$\"<>)", + "OBD.src.utils.CanOnlyContainLettersNumbers": "Can only contain letters, numbers, and special characters (~!@#%^&*_-+=|(){}[]:;,.?/)", "OBD.src.utils.AtLeastUppercaseAndLowercase": "At least 2 uppercase and lowercase letters, numbers, and special characters", "OBD.src.component.Access.NoOperationPermissionIsAvailable": "No operation permission is available. Please contact the administrator to activate the permission.", "OBD.component.AobException.ReturnToHomePage": "Return to Home Page", @@ -899,5 +899,35 @@ "OBD.component.ConnectConfig.EnterAMetadbAccessAccount": "Enter a MetaDB access account", "OBD.component.ConnectConfig.MetadbAccessPassword": "MetaDB access password", "OBD.component.ConnectConfig.EnterMetadbAccessPassword": "Enter MetaDB access password", - "OBD.src.utils.helper.CopiedSuccessfully": "Copied successfully" + "OBD.src.utils.helper.CopiedSuccessfully": "Copied successfully", + "OBD.component.CustomPasswordInput.CanOnlyContainLettersNumbers.1": "Can only contain letters, numbers, and special characters {specialSymbols}", + "OBD.src.utils.helper.ContainsAtLeastThreeCharacters": "Contains at least three of the following four character types: digits (0~9), uppercase letters (A~Z), lowercase letters (a~z), and special symbols ~!@#%^&*_-+=|(){}[]:;,.?/; 8 to 32 characters in length", + "OBD.src.utils.helper.CanOnlyContainLettersNumbers": "Can only contain letters, numbers, and special characters {symbols}", + "OBD.src.utils.helper.ThereAreAtLeastThree": "Contains at least three of the four character types: uppercase letters, lowercase letters, digits, and special characters.", + "OBD.component.CustomPasswordInput.ToCharactersInLengthAnd": "8 to 32 characters in length, containing at least 2 each of uppercase letters, lowercase letters, digits, and special characters ~!@#%^&*_-+=`|(){}[]:;',.?/", + "OBD.src.utils.helper.TheLengthIsToAnd": "The length is 8 to 32 and it contains at least three of: uppercase letters, lowercase letters, digits, and ~!@#%^&*_-+=`|(){}[]:;',.?/", + "OBD.src.utils.helper.TheLengthIsToAnd.1": "The length is 8 to 32 and it contains at least three of: uppercase letters, lowercase letters, digits, and ~^*{}[]_-+", + "OBD.src.utils.helper.TheLengthIsToAnd.2": "The length is 8 to 32 and it contains at least three of: uppercase letters, lowercase letters, digits, and special characters ~!@#%^&*_-+=|(){}[]:;,.?/", + "OBD.component.PreCheck.preCheck.CheckCompleted": "Check completed", + "OBD.component.PreCheck.preCheck.Checking": "Checking", + "OBD.component.PreCheck.preCheck.CheckFailed": "Check failed", + "OBD.component.PreCheck.preCheck.CheckItemCheckitemlength": "Check item {checkItemLength}", + "OBD.component.PreCheck.preCheck.PreCheckResultReCheck": "Pre-check result - re-check", + "OBD.component.PreCheck.preCheck.ReCheck": "Re-check", + "OBD.component.PreCheck.preCheck.FailedItemFaileditemlength": "Failed item {failedItemLength}", + "OBD.component.PreCheck.preCheck.OnlyManualFixes": "Only manual fixes", + "OBD.component.PreCheck.preCheck.PreCheckResultAutomaticRepair": "Pre-check result - automatic repair", + "OBD.component.PreCheck.preCheck.AutomaticRepair": "Automatic repair", + "OBD.component.PreCheck.preCheck.Reason": "Reason:", + "OBD.component.PreCheck.preCheck.Suggestions": "Suggestions:", + "OBD.component.PreCheck.preCheck.ManualRepair": "Manual repair", + "OBD.component.PreCheck.preCheck.LearnMore": "Learn More", + "OBD.component.PreCheck.preCheck.GreatNoFailedItems": "Great! No failed items", + "OBD.component.PreCheck.preCheck.NoFailedItemsFoundYet": "No failed items found yet", + "OBD.component.PreCheck.preCheck.PreviousStep": "Previous step", + "OBD.component.PreCheck.preCheck.FixAllFailedItems": "Please fix all failed items", + "OBD.component.PreCheck.preCheck.NextStep": "Next step", + "OBD.component.OCPConfigNew.ServiceConfig.ThePasswordMustBeMet": "The password must meet: {OCPPASSWORDERROR}", + "OBD.src.utils.helper.ItIsToCharactersIn": "It is 8 to 32 characters in length and supports letters, digits, and special characters; it must contain at least 2 each of uppercase letters, lowercase letters, digits, and special characters. 
The supported special characters are ~^*{}[]_-+", + "OBD.src.utils.helper.TheLengthIsToAnd.3": "The length is 8 to 32 and contains at least three of uppercase letters, lowercase letters, numbers, and special characters ~^*{}[]_-+" } diff --git a/web/src/i18n/strings/zh-CN.json b/web/src/i18n/strings/zh-CN.json index 4516513..e0445ae 100644 --- a/web/src/i18n/strings/zh-CN.json +++ b/web/src/i18n/strings/zh-CN.json @@ -899,5 +899,35 @@ "OBD.component.ConnectConfig.EnterAMetadbAccessAccount": "请输入 MetaDB 访问账号", "OBD.component.ConnectConfig.MetadbAccessPassword": "MetaDB 访问密码", "OBD.component.ConnectConfig.EnterMetadbAccessPassword": "请输入 MetaDB 访问密码", - "OBD.src.utils.helper.CopiedSuccessfully": "复制成功" + "OBD.src.utils.helper.CopiedSuccessfully": "复制成功", + "OBD.component.CustomPasswordInput.CanOnlyContainLettersNumbers.1": "只能包含字母、数字和特殊字符{{specialSymbols}}", + "OBD.src.utils.helper.ContainsAtLeastThreeCharacters": "包含以下四种类型字符至少三种及以上:数字(0~9)、大写字母(A~Z)、小写字母(a~z)、特殊符号~!@#%^&*_-+=|(){}[]:;,.?/,长度在 8-32 个字符之内", + "OBD.src.utils.helper.CanOnlyContainLettersNumbers": "只能包含字母、数字和特殊字符{symbols}", + "OBD.src.utils.helper.ThereAreAtLeastThree": "大写字母、小写字母、数字和特殊字符4种类型中至少包含 3 种", + "OBD.component.CustomPasswordInput.ToCharactersInLengthAnd": "长度8~32 且大小写字母、数字和特殊字符~!@#%^&*_-+=`|(){}[]:;',.?/都至少包含 2 个", + "OBD.src.utils.helper.TheLengthIsToAnd": "长度8~32 且至少包含 大写字母、小写字母、数字、~!@#%^&*_-+=`|(){}[]:;',.?/ 中的三种", + "OBD.src.utils.helper.TheLengthIsToAnd.1": "长度8~32 且至少包含 大写字母、小写字母、数字、~^*{}[]_-+ 中的三种", + "OBD.src.utils.helper.TheLengthIsToAnd.2": "长度8~32 且至少包含 大写字母、小写字母、数字和特殊字符 ~!@#%^&*_-+=|(){}[]:;,.?/ 中的三种", + "OBD.component.PreCheck.preCheck.CheckCompleted": "检查完成", + "OBD.component.PreCheck.preCheck.Checking": "检查中", + "OBD.component.PreCheck.preCheck.CheckFailed": "检查失败", + "OBD.component.PreCheck.preCheck.CheckItemCheckitemlength": "检查项 {checkItemLength}", + "OBD.component.PreCheck.preCheck.PreCheckResultReCheck": "预检查结果-重新检查", + "OBD.component.PreCheck.preCheck.ReCheck": "重新检查", + "OBD.component.PreCheck.preCheck.FailedItemFaileditemlength": "失败项 {failedItemLength}", + "OBD.component.PreCheck.preCheck.OnlyManualFixes": "只看手动修复项", + "OBD.component.PreCheck.preCheck.PreCheckResultAutomaticRepair": "预检查结果-自动修复", + "OBD.component.PreCheck.preCheck.AutomaticRepair": "自动修复", + "OBD.component.PreCheck.preCheck.Reason": "原因:", + "OBD.component.PreCheck.preCheck.Suggestions": "建议:", + "OBD.component.PreCheck.preCheck.ManualRepair": "手动修复", + "OBD.component.PreCheck.preCheck.LearnMore": "了解更多方案", + "OBD.component.PreCheck.preCheck.GreatNoFailedItems": "太棒了!无失败项", + "OBD.component.PreCheck.preCheck.NoFailedItemsFoundYet": "暂未发现失败项", + "OBD.component.PreCheck.preCheck.PreviousStep": "上一步", + "OBD.component.PreCheck.preCheck.FixAllFailedItems": "请修复全部失败项", + "OBD.component.PreCheck.preCheck.NextStep": "下一步", + "OBD.component.OCPConfigNew.ServiceConfig.ThePasswordMustBeMet": "密码需满足:{OCPPASSWORDERROR}", + "OBD.src.utils.helper.ItIsToCharactersIn": "长度为 8~32 个字符,支持字母、数字和特殊字符,且至少包含大、小写字母、数字和特殊字符各 2 个,支持的特殊字符为~^*{}[]_-+", + "OBD.src.utils.helper.TheLengthIsToAnd.3": "长度8~32 且至少包含 大写字母、小写字母、数字和特殊字符 ~^*{}[]_-+ 中的三种" } diff --git a/web/src/pages/Guide/index.less b/web/src/pages/Guide/index.less index b955e31..5d7c601 100644 --- a/web/src/pages/Guide/index.less +++ b/web/src/pages/Guide/index.less @@ -1,89 +1,92 @@ -.content { - position: relative; - z-index: 10; - width: 100vw; - height: 79vh; - min-height: 628px; - padding: 24px calc(120 / 1280 * 100%); - padding-bottom: 92px; - background-color: rgba(245, 248, 254, 1); - 
box-sizing: border-box; -} -.disableCustomCardContainer { - background-color: #f5f8fe; - // pointer-events: none; -} -.disableCustomCardContainer, -.customCardContainer { - box-sizing: border-box; - min-width: 484px; +.guideContainer { height: 100%; - min-height: 200px; - display: flex; - align-items: center; - // width: 40%; - // height: 40%; - padding: 35px 18px; - .cardHeader { + .content { + position: relative; + z-index: 10; + width: 100vw; + height: calc(100% - 171px); + min-height: 628px; + padding: 24px calc(120 / 1280 * 100%); + padding-bottom: 92px; + background-color: rgba(245, 248, 254, 1); + box-sizing: border-box; + } + .disableCustomCardContainer { + background-color: #f5f8fe; + // pointer-events: none; + } + .disableCustomCardContainer, + .customCardContainer { + box-sizing: border-box; + min-width: 484px; + height: 100%; + min-height: 200px; display: flex; - justify-content: center; - height: 60px; - margin-bottom: 16px; - .cardImg { - margin-right: 8px; + align-items: center; + // width: 40%; + // height: 40%; + padding: 35px 18px; + .cardHeader { + display: flex; + justify-content: center; + height: 60px; + margin-bottom: 16px; + .cardImg { + margin-right: 8px; + } + .cardTitle, + .disableCardTitle { + margin-left: 8px; + font-weight: 500; + font-size: 18px; + line-height: 60px; + } + .disableCardTitle { + color: #cdd5e4; + } } - .cardTitle, - .disableCardTitle { - margin-left: 8px; - font-weight: 500; - font-size: 18px; - line-height: 60px; + .cardDetail, + .disableCardDetail { + font-weight: 400; + font-size: 16px; + line-height: 22px; + text-align: center; } - .disableCardTitle { + .disableCardDetail { color: #cdd5e4; } - } - .cardDetail, - .disableCardDetail { - font-weight: 400; - font-size: 16px; - line-height: 22px; - text-align: center; - } - .disableCardDetail { - color: #cdd5e4; - } - &.customCardSelect { - color: #006aff !important; - border: 2px solid #006aff; - :global { - .ant-result-title, - .ant-result-subtitle { - color: #006aff !important; + &.customCardSelect { + color: #006aff !important; + border: 2px solid #006aff; + :global { + .ant-result-title, + .ant-result-subtitle { + color: #006aff !important; + } } } - } - &.customCardContainer:hover { - cursor: pointer; + &.customCardContainer:hover { + cursor: pointer; + } } -} -.pageFooterContainer { - position: fixed; - right: 0; - bottom: 0; - left: 0; - z-index: 99; - padding: 16px; - background-color: #f5f8ff; - border-top: 1px solid #dde4ed; - .pageFooter { - width: 1040px; - margin: 0 auto; - overflow: hidden; - .foolterAction { - float: right; + .pageFooterContainer { + position: fixed; + right: 0; + bottom: 0; + left: 0; + z-index: 99; + padding: 16px; + background-color: #f5f8ff; + border-top: 1px solid #dde4ed; + .pageFooter { + width: 1040px; + margin: 0 auto; + overflow: hidden; + .foolterAction { + float: right; + } } } } diff --git a/web/src/pages/Guide/index.tsx b/web/src/pages/Guide/index.tsx index 7f113d7..4a04ce6 100644 --- a/web/src/pages/Guide/index.tsx +++ b/web/src/pages/Guide/index.tsx @@ -196,7 +196,7 @@ export default function Guide() { }; return ( -
+
{ +export const parameterValidator = (_: any, value?: API.ParameterValue) => { if (value?.adaptive) { return Promise.resolve(); } else if (value?.require && !value?.value) { @@ -36,6 +42,7 @@ const parameterValidator = (_: any, value?: API.ParameterValue) => { const getMoreColumns = ( label: string, componentKey: string, + rulesDetail?: RulesDetail, ) => { const columns: ColumnsType = [ { @@ -52,20 +59,25 @@ const getMoreColumns = ( width: locale === 'zh-CN' ? 280 : 360, dataIndex: 'parameterValue', render: (parameterValue, record) => { - const {defaultValue,defaultUnit} = record.parameterValue + const { defaultValue, defaultUnit } = record.parameterValue; const param = { - defaultValue - } - if(defaultUnit)param.defaultUnit = defaultUnit + defaultValue, + }; + if (defaultUnit) param.defaultUnit = defaultUnit; + return ( - ({ validator: parameterValidator })] + } > - - + + ); }, }, @@ -98,11 +110,16 @@ const getMoreColumns = ( return columns; }; - +/** + * + * @param parameterRules 用于动态自定义某些字段的校验规则 RulesDetail | RulesDetail[] 涉及到多个table需要传数组,rule需要通过targetTable字段映射到对应的table + * @returns + */ export default function ConfigTable({ showVisible, dataSource, loading, + parameterRules, }: ConfigTableProps) { return ( <> @@ -115,6 +132,17 @@ export default function ConfigTable({ style={{ minHeight: 50, marginTop: 16 }} > {dataSource.map((moreItem) => { + let rulesDetail: RulesDetail = { rules: [] }; + if (parameterRules) { + if (Array.isArray(parameterRules)) { + rulesDetail = parameterRules.find( + (item) => item.targetTable === moreItem.component, + )!; + } else { + rulesDetail = parameterRules; + } + } + return ( ({ + rules: [() => ({ validator: parameterValidator })], + targetTable: 'obproxy-ce', + targetColumn: 'obproxy_sys_password', + }); + const [authParameterRules, setAuthParameterRules] = useState({ + rules: [() => ({ validator: parameterValidator })], + targetTable: 'obagent', + targetColumn: 'http_basic_auth_password', + }); const [currentMode, setCurrentMode] = useState( oceanbase?.mode || 'PRODUCTION', ); @@ -158,18 +182,6 @@ export default function ClusterConfig() { } }; - const portValidator = (_: any, value: number) => { - if (value < 1024 || value > 65535) { - return Promise.reject( - intl.formatMessage({ - id: 'OBD.component.InputPort.ThePortNumberCanOnly', - defaultMessage: '端口号只支持 1024~65535 范围', - }), - ); - } - return Promise.resolve(); - }; - const getInitialParameters = ( currentComponent: string, dataSource: API.MoreParameter[], @@ -342,7 +354,38 @@ export default function ClusterConfig() { } }, []); - const initPassword = getRandomPassword(); + useUpdateEffect(() => { + if (!proxyPasswordFormValue?.adaptive) { + setProxyParameterRules({ + rules: getPasswordRules('ob'), + targetTable: 'obproxy-ce', + targetColumn: 'obproxy_sys_password', + }); + } else { + setProxyParameterRules({ + rules: [() => ({ validator: parameterValidator })], + targetTable: 'obproxy-ce', + targetColumn: 'obproxy_sys_password', + }); + } + }, [proxyPasswordFormValue]); + useUpdateEffect(() => { + if (!authPasswordFormValue?.adaptive) { + setAuthParameterRules({ + rules: getPasswordRules('ocp'), + targetTable: 'obagent', + targetColumn: 'http_basic_auth_password', + }); + } else { + setAuthParameterRules({ + rules: [() => ({ validator: parameterValidator })], + targetTable: 'obagent', + targetColumn: 'http_basic_auth_password', + }); + } + }, [authPasswordFormValue]); + + const initPassword = generateRandomPassword('ob'); const initialValues = { oceanbase: { @@ -476,27 +519,12 @@ export default 
function ClusterConfig() { onVisibleChange: setPasswordVisible, }, }} + validateFirst placeholder={intl.formatMessage({ id: 'OBD.pages.components.ClusterConfig.PleaseEnter', defaultMessage: '请输入', })} - rules={[ - { - required: true, - message: intl.formatMessage({ - id: 'OBD.pages.components.ClusterConfig.PleaseEnter', - defaultMessage: '请输入', - }), - }, - { - pattern: /^[0-9a-zA-Z~!@#%^&*_\-+=|(){}\[\]:;,.?/]{8,32}$/, - message: intl.formatMessage({ - id: 'OBD.pages.components.ClusterConfig.OnlyEnglishNumbersAndSpecial', - defaultMessage: - '仅支持英文、数字和特殊字符(~!@#%^&*_-+=|(){}[]:;,.?/),长度在 8-32 个字符之内', - }), - }, - ]} + rules={getPasswordRules('ob')} /> @@ -659,6 +687,7 @@ export default function ClusterConfig() { dataSource={componentsMoreConfig} loading={componentsMoreLoading} customParameter={} + parameterRules={[proxyParameterRules, authParameterRules]} /> diff --git a/web/src/pages/Obdeploy/NodeConfig.tsx b/web/src/pages/Obdeploy/NodeConfig.tsx index 2de091c..70cbf01 100644 --- a/web/src/pages/Obdeploy/NodeConfig.tsx +++ b/web/src/pages/Obdeploy/NodeConfig.tsx @@ -822,6 +822,7 @@ export default function NodeConfig() { id: 'OBD.pages.components.NodeConfig.PleaseSelect', defaultMessage: '请选择', })} + validateFirst rules={[ { required: true, diff --git a/web/src/pages/Obdeploy/PreCheckStatus.tsx b/web/src/pages/Obdeploy/PreCheckStatus.tsx index 54da3cb..61ff995 100644 --- a/web/src/pages/Obdeploy/PreCheckStatus.tsx +++ b/web/src/pages/Obdeploy/PreCheckStatus.tsx @@ -7,57 +7,19 @@ import { } from '@/services/ob-deploy-web/Deployments'; import { getErrorInfo, handleQuit } from '@/utils'; import { intl } from '@/utils/intl'; -import useRequest from '@/utils/useRequest'; -import { - CheckCircleFilled, - CloseCircleFilled, - CloseOutlined, - QuestionCircleFilled, - ReadFilled, -} from '@ant-design/icons'; -import { ProCard } from '@ant-design/pro-components'; -import { - Button, - Checkbox, - Empty, - message, - Progress, - Space, - Spin, - Tag, - Timeline, - Tooltip, - Typography, -} from 'antd'; -import NP from 'number-precision'; import { useEffect, useState } from 'react'; -import { getLocale, useModel } from 'umi'; -import EnStyles from './indexEn.less'; -import ZhStyles from './indexZh.less'; - -const locale = getLocale(); -const styles = locale === 'zh-CN' ? ZhStyles : EnStyles; - -const { Text } = Typography; +import { useModel } from 'umi'; +import { message } from 'antd'; -const statusColorConfig = { - PASSED: 'green', - PENDING: 'gray', - FAILED: 'red', -}; - -let timerScroll: NodeJS.Timer; -let timerFailed: NodeJS.Timer; -const initDuration = 3; -let durationScroll = initDuration; -let durationFailed = initDuration; +import useRequest from '@/utils/useRequest'; +import PreCehckComponent from '@/component/PreCheck/preCheck'; +import NP from 'number-precision'; export default function PreCheckStatus() { const { setCurrentStep, configData, setCheckOK, - handleQuitProgress, getInfoByName, setConfigData, setErrorVisible, @@ -81,11 +43,26 @@ export default function PreCheckStatus() { const [currentPage, setCurrentPage] = useState(true); const [firstErrorTimestamp, setFirstErrorTimestamp] = useState(); + const formatDataSource = (dataSource: API.PreCheckResult) => { + dataSource.timelineData = dataSource.info?.map( + (item: API.PreCheckInfo, index: number) => { + return { + isRunning: + (dataSource?.info[index - 1]?.status === 'FINISHED' && + item.status === 'PENDING') || + (dataSource?.all_passed && index === dataSource?.info.length - 1), + result: item.status === 'FINISHED' ? 
item.result : item.status, + }; + }, + ); + return { ...dataSource }; + }; + const { run: fetchPreCheckStatus } = useRequest(preCheckStatus, { onSuccess: ({ success, data }: API.OBResponsePreCheckResult_) => { if (success) { let timer: NodeJS.Timer; - setStatusData(data || {}); + setStatusData(formatDataSource(data) || {}); if (data?.status === 'RUNNING') { timer = setTimeout(() => { fetchPreCheckStatus({ name }); @@ -332,40 +309,6 @@ export default function PreCheckStatus() { } }; - const handleScrollTimeline = () => { - if (!checkFinished) { - setIsScroll(true); - clearInterval(timerScroll); - durationScroll = initDuration; - timerScroll = setInterval(() => { - if (durationScroll === 0) { - clearInterval(timerScroll); - setIsScroll(false); - durationScroll = initDuration; - } else { - durationScroll -= 1; - } - }, 1000); - } - }; - - const handleScrollFailed = () => { - if (!checkFinished) { - setIsScrollFailed(true); - clearInterval(timerFailed); - durationFailed = initDuration; - timerFailed = setInterval(() => { - if (durationFailed === 0) { - clearInterval(timerFailed); - setIsScrollFailed(false); - durationFailed = initDuration; - } else { - durationFailed -= 1; - } - }, 1000); - } - }; - const handleAutoRepair = () => { setHasAuto(false); handleRecover({ name }); @@ -382,440 +325,29 @@ export default function PreCheckStatus() { useEffect(() => { handelCheck(); - const timelineContainer = document.getElementById('timeline-container'); - timelineContainer.onmousewheel = handleScrollTimeline; // ie , chrome - timelineContainer?.addEventListener('DOMMouseScroll', handleScrollTimeline); // firefox - return () => { - timelineContainer.onmousewheel = () => {}; - timelineContainer?.removeEventListener( - 'DOMMouseScroll', - handleScrollTimeline, - ); - }; }, []); - - useEffect(() => { - const addEventFailedContainer = () => { - const failedContainer = document.getElementById('failed-container'); - if (failedList?.length && failedContainer) { - if (!failedContainer.onmousewheel) { - failedContainer.onmousewheel = handleScrollFailed; // ie , chrome - failedContainer?.addEventListener( - 'DOMMouseScroll', - handleScrollFailed, - ); - // firefox - } - } else { - setTimeout(() => { - addEventFailedContainer(); - }, 3000); - } - }; - - addEventFailedContainer(); - return () => { - const failedContainer = document.getElementById('failed-container'); - if (failedContainer) { - failedContainer.onmousewheel = () => {}; - failedContainer?.removeEventListener( - 'DOMMouseScroll', - handleScrollFailed, - ); - } - }; - }, [failedList]); - - let progressStatus = 'active'; - if (statusData?.status === 'FAILED') { - progressStatus = 'exception'; - } else if (checkFinished) { - if (statusData?.all_passed) { - progressStatus = 'success'; - } else { - progressStatus = 'exception'; - } - } - - const shape = ( -
-
-
-
-
-
- ); - - const checkItemLength = `${statusData?.finished || 0}/${ - statusData?.total || 0 - }`; - const failedItemLength = failedList?.length; - return ( - - - handleRetryCheck()} - data-aspm-click="c307513.d317293" - data-aspm-desc={intl.formatMessage({ - id: 'OBD.pages.components.PreCheckStatus.PreCheckResultReCheck', - defaultMessage: '预检查结果-重新检查', - })} - data-aspm-param={``} - data-aspm-expo - > - {intl.formatMessage({ - id: 'OBD.pages.components.PreCheckStatus.ReCheck', - defaultMessage: '重新检查', - })} - - } - headStyle={{ paddingLeft: '16px', paddingRight: '16px' }} - > - - - {loading ? null : ( - <> - - - {statusData?.info?.map( - (item: API.PreCheckInfo, index: number) => ( - - ) : ( - - ) - ) : null - } - > - {item?.name} {item?.server} - - ), - )} - - - )} - - - {hasManual ? ( - setOnlyManual(e.target.checked)} - disabled={!checkFinished || statusData?.all_passed} - > - {intl.formatMessage({ - id: 'OBD.pages.components.PreCheckStatus.OnlyManualFixes', - defaultMessage: '只看手动修复项', - })} - - ) : null} - - - } - > - {showFailedList?.length ? ( -
- {showFailedList?.map((item, index) => { - let reason = ''; - if (item?.description) { - const index = item?.description.indexOf(':'); - reason = item?.description.substring( - index, - item?.description.length, - ); - } - return ( - - - - - - {item.name} - - - - - {intl.formatMessage({ - id: 'OBD.pages.components.PreCheckStatus.Reason', - defaultMessage: '原因:', - })} - - OBD-{item.code} - {' '} - {reason} - - - - - - {intl.formatMessage({ - id: 'OBD.pages.components.PreCheckStatus.Suggestions', - defaultMessage: '建议:', - })} - {item.recoverable ? ( - - {intl.formatMessage({ - id: 'OBD.pages.components.PreCheckStatus.AutomaticRepair', - defaultMessage: '自动修复', - })} - - ) : ( - - {intl.formatMessage({ - id: 'OBD.pages.components.PreCheckStatus.ManualRepair', - defaultMessage: '手动修复', - })} - - )}{' '} - {item.advisement?.description} - -
- - {intl.formatMessage({ - id: 'OBD.pages.components.PreCheckStatus.LearnMore', - defaultMessage: '了解更多方案', - })} - -
-
- ); - })} - {!checkFinished ? ( -
{shape}
- ) : null} -
- ) : checkFinished ? ( - - {intl.formatMessage({ - id: 'OBD.pages.components.PreCheckStatus.GreatNoFailedItems', - defaultMessage: '太棒了!无失败项', - })} - - } - /> - ) : ( -
- {shape} -
- {intl.formatMessage({ - id: 'OBD.pages.components.PreCheckStatus.NoFailedItemsFoundYet', - defaultMessage: '暂未发现失败项', - })} -
-
- )} - - -
-
- - - - {!statusData?.all_passed ? ( - - - - ) : ( - - )} - -
-
- + ); } diff --git a/web/src/utils/helper.ts b/web/src/utils/helper.ts index 6f5b983..178edfd 100644 --- a/web/src/utils/helper.ts +++ b/web/src/utils/helper.ts @@ -11,6 +11,7 @@ import { intl } from '@/utils/intl'; //与UI无关的函数 import { message } from 'antd'; import { clone } from 'lodash'; import copy from 'copy-to-clipboard'; +import { generateRandomPassword as oldGenerateRandomPassword } from '.'; // 不用navigator.clipboard.writeText的原因:该接口需要在HTTPS环境下才能使用 export function copyText(text: string) { let inputDom = document.createElement('input'); @@ -121,6 +122,268 @@ export const handleCopy = (content: string) => { ); }; +export const SPECIAL_SYMBOLS_OB = '~!@#%^&*_-+=|(){}[]:;,.?/'; +// export const SPECIAL_SYMBOLS_OCP = '~!@#%^&*_-+=|(){}[]:;,.?/$`\'"<>'; +export const SPECIAL_SYMBOLS_OCP = '~^*{}[]_-+'; +const SPECIAL_SYMBOLS_REG_OB = /^[~!@#%^&*()_+\-=|{}\:[\];,.?\/]+$/; +export const SPECIAL_SYMBOLS_REG_OCP = /^[~^*{}[\]_\-+]+$/; +// const SPECIAL_SYMBOLS_REG_OCP = /^[~!@#%^&*()_\-+=|:{}[\];,.<>?\/$`'"\\]+$/; +const REG_NUMBER = /^[0-9]$/; +const REG_LOWER_CASE = /^[a-z]$/; +const REG_UPPER_CASE = /^[A-Z]$/; + +export const passwordRangeCheck = (password: string, useFor: 'ob' | 'ocp') => { + if (!password) return false; + const passwordChar = password.split(''); + const SPECIAL_SYMBOLS_REG = + useFor === 'ob' ? SPECIAL_SYMBOLS_REG_OB : SPECIAL_SYMBOLS_REG_OCP; + // Check whether any character falls outside the allowed set + for (let char of passwordChar) { + if ( + !SPECIAL_SYMBOLS_REG.test(char) && + !REG_NUMBER.test(char) && + !REG_LOWER_CASE.test(char) && + !REG_UPPER_CASE.test(char) + ) { + return false; + } + } + return true; +}; + +export const passwordSymbolsCheck = ( + password: string, + useFor: 'ob' | 'ocp', +) => { + let passwordChar = password.split(''), + haveSymbols = false, + haveNumber = false, + haveLowerCase = false, + haveUpperCaseReg = false; + const SPECIAL_SYMBOLS_REG = + useFor === 'ob' ? SPECIAL_SYMBOLS_REG_OB : SPECIAL_SYMBOLS_REG_OCP; + + for (let char of passwordChar) { + if (SPECIAL_SYMBOLS_REG.test(char)) { + haveSymbols = true; + } + if (REG_NUMBER.test(char)) { + haveNumber = true; + } + if (REG_LOWER_CASE.test(char)) { + haveLowerCase = true; + } + if (REG_UPPER_CASE.test(char)) { + haveUpperCaseReg = true; + } + } + if ( + [haveSymbols, haveNumber, haveLowerCase, haveUpperCaseReg].filter( + (val) => val === true, + ).length < 3 + ) { + return false; + } + return true; +}; + +/** + * @param str the password to validate + * @param type validation type: ob | ocp + * @returns Boolean whether the password passes the check + */ +export const passwordCheck = (str: string, type: 'ob' | 'ocp') => { + const SPECIAL_SYMBOLS_REG = + type === 'ob' ? 
SPECIAL_SYMBOLS_REG_OB : SPECIAL_SYMBOLS_REG_OCP; + if (typeof str !== 'string') { + throw new Error('type error'); + } + let strArr = str.split(''), + haveSymbols = false, + haveNumber = false, + haveLowerCase = false, + haveUpperCaseReg = false; + + // Check the length + if (str.length < 8 || str.length > 32) { + return false; + } + // Check whether any character falls outside the allowed set + for (let char of strArr) { + if ( + !SPECIAL_SYMBOLS_REG.test(char) && + !REG_NUMBER.test(char) && + !REG_LOWER_CASE.test(char) && + !REG_UPPER_CASE.test(char) + ) { + return false; + } + } + for (let char of strArr) { + if (SPECIAL_SYMBOLS_REG.test(char)) { + haveSymbols = true; + } + if (REG_NUMBER.test(char)) { + haveNumber = true; + } + if (REG_LOWER_CASE.test(char)) { + haveLowerCase = true; + } + if (REG_UPPER_CASE.test(char)) { + haveUpperCaseReg = true; + } + } + if ( + [haveSymbols, haveNumber, haveLowerCase, haveUpperCaseReg].filter( + (val) => val === true, + ).length < 3 + ) { + return false; + } + return true; +}; + +export const passwordCheckLowVersion = (pwd: string) => { + if (typeof pwd !== 'string') { + throw new Error('type error'); + } + let strArr = pwd.split(''), + symbolsCount = 0, + numberCount = 0, + lowerCaseCount = 0, + upperCaseCount = 0; + + // Check the length + if (pwd.length < 8 || pwd.length > 32) { + return false; + } + + // Check whether any character falls outside the allowed set + for (let char of strArr) { + if ( + !SPECIAL_SYMBOLS_REG_OCP.test(char) && + !REG_NUMBER.test(char) && + !REG_LOWER_CASE.test(char) && + !REG_UPPER_CASE.test(char) + ) { + return false; + } + } + for (let char of strArr) { + if (SPECIAL_SYMBOLS_REG_OCP.test(char)) { + symbolsCount += 1; + } + if (REG_NUMBER.test(char)) { + numberCount += 1; + } + if (REG_LOWER_CASE.test(char)) { + lowerCaseCount += 1; + } + if (REG_UPPER_CASE.test(char)) { + upperCaseCount += 1; + } + } + // Require each of the four types to appear at least twice + if ( + [symbolsCount, numberCount, lowerCaseCount, upperCaseCount].filter( + (count) => count >= 2, + ).length !== 4 + ) { + return false; + } + return true; +}; + +export function generateRandomPassword( + type: 'ob' | 'ocp', + useOldRuler?: boolean, +) { + if (useOldRuler) { + return oldGenerateRandomPassword(); + } + + const length = Math.floor(Math.random() * 25) + 8; // random length between 8 and 32 + const characters = + type === 'ob' + ? 
`ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789${SPECIAL_SYMBOLS_OB}` + : `ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789${SPECIAL_SYMBOLS_OCP}`; + + let password = ''; + let haveUppercase = false; + let haveLowercase = false; + let haveNumber = false; + let haveSpecialChar = false; + + // Generate the random password + for (let i = 0; i < length; i++) { + const randomIndex = Math.floor(Math.random() * characters.length); + const randomChar = characters[randomIndex]; + password += randomChar; + + // Classify the character and set the matching flag + if (/[A-Z]/.test(randomChar)) { + haveUppercase = true; + } else if (/[a-z]/.test(randomChar)) { + haveLowercase = true; + } else if (/[0-9]/.test(randomChar)) { + haveNumber = true; + } else { + haveSpecialChar = true; + } + } + + // Check that at least three character classes are present + if ( + [haveSpecialChar, haveNumber, haveLowercase, haveUppercase].filter( + (val) => val === true, + ).length < 3 + ) { + return generateRandomPassword(type); // regenerate the password + } + + return password; +} +export const OB_PASSWORD_ERROR_REASON = intl.formatMessage({ + id: 'OBD.src.utils.helper.TheLengthIsToAnd.2', + defaultMessage: + '长度8~32 且至少包含 大写字母、小写字母、数字和特殊字符 ~!@#%^&*_-+=|(){}[]:;,.?/ 中的三种', +}); +export const OCP_PASSWORD_ERROR_REASON = intl.formatMessage({ + id: 'OBD.src.utils.helper.TheLengthIsToAnd.3', + defaultMessage: + '长度8~32 且至少包含 大写字母、小写字母、数字和特殊字符 ~^*{}[]_-+ 中的三种', +}); + +// OCP versions below 4.2.2 validate some passwords with the old rules +export const OCP_PASSWORD_ERROR_REASON_OLD = intl.formatMessage({ + id: 'OBD.src.utils.helper.ItIsToCharactersIn', + defaultMessage: + '长度为 8~32 个字符,支持字母、数字和特殊字符,且至少包含大、小写字母、数字和特殊字符各 2 个,支持的特殊字符为~^*{}[]_-+', +}); + +export const getPasswordRules = (useFor: 'ob' | 'ocp') => [ + { + required: true, + message: intl.formatMessage({ + id: 'OBD.src.utils.EnterAPassword', + defaultMessage: '请输入密码', + }), + }, + () => ({ + validator(_: any, originValue: string | API.ParameterValue) { + let value = + typeof originValue === 'object' ? originValue.value! : originValue; + const REASON = + useFor === 'ob' ? 
OB_PASSWORD_ERROR_REASON : OCP_PASSWORD_ERROR_REASON; + if (!passwordCheck(value, useFor)) { + return Promise.reject(new Error(REASON)); + } + return Promise.resolve(); + }, + }), +]; + /** * 判断一个字符串或者数字是否有值,避免判断 0 为 false */ @@ -136,7 +399,7 @@ export const isExist = (val: string | number | undefined): boolean => { export const formatConfigData = (configData: any) => { let _config = clone(configData); Object.keys(_config?.components).forEach((compKey) => { - _config?.components[compKey]?.parameters?.forEach((parameter:any) => { + _config?.components[compKey]?.parameters?.forEach((parameter: any) => { parameter.isChanged = true; }); }); diff --git a/web/src/utils/index.tsx b/web/src/utils/index.tsx index 37565d4..a2d5840 100644 --- a/web/src/utils/index.tsx +++ b/web/src/utils/index.tsx @@ -4,6 +4,7 @@ import { message, Modal, notification } from 'antd'; import type { FormInstance } from 'antd/lib/form'; import RandExp from 'randexp'; import { getLocale, history } from 'umi'; +import { SPECIAL_SYMBOLS_OCP } from './helper'; export const handleResponseError = (desc: any, msg?: string | undefined) => { notification.error({ @@ -156,67 +157,6 @@ export const getRandomPassword = (isToken?: boolean) => { } return getRandomPassword(isToken); }; - -// const statusCodeMessage = { -// 400: intl.formatMessage({ -// id: 'ocp-express.src.util.request.TheErrorOccurredInThe', -// defaultMessage: '发出的请求有错误,服务器没有进行新建或修改数据的操作。', -// }), - -// 401: intl.formatMessage({ -// id: 'ocp-express.src.util.request.TheUserIsNotLogged', -// defaultMessage: '用户未登录,或者登录使用的用户名和密码错误。', -// }), - -// 403: intl.formatMessage({ -// id: 'ocp-express.src.util.request.YouDoNotHaveThe', -// defaultMessage: '没有权限进行对应操作,请联系管理员。', -// }), - -// 404: intl.formatMessage({ -// id: 'ocp-express.src.util.request.TheRequestIsForA', -// defaultMessage: '发出的请求针对的是不存在的记录,服务器没有进行操作。', -// }), - -// 405: intl.formatMessage({ -// id: 'ocp-express.src.util.request.TheRequestMethodCannotBe', -// defaultMessage: '请求方法不能被用于请求相应的资源,或者请求路径不正确。', -// }), -// 406: intl.formatMessage({ -// id: 'ocp-express.src.util.request.TheRequestFormatIsNot', -// defaultMessage: '请求的格式不可得。', -// }), - -// 410: intl.formatMessage({ -// id: 'ocp-express.src.util.request.TheRequestedResourceIsPermanently', -// defaultMessage: '请求的资源被永久删除,且不会再得到的。', -// }), - -// 422: intl.formatMessage({ -// id: 'ocp-express.src.util.request.AValidationErrorOccursWhen', -// defaultMessage: '当创建一个对象时,发生一个验证错误。', -// }), - -// 500: intl.formatMessage({ -// id: 'ocp-express.src.util.request.AnErrorOccurredOnThe', -// defaultMessage: '服务器发生错误,请检查服务器。', -// }), - -// 502: intl.formatMessage({ -// id: 'ocp-express.src.util.request.GatewayError', -// defaultMessage: '网关错误。', -// }), -// 503: intl.formatMessage({ -// id: 'ocp-express.src.util.request.TheServiceIsUnavailableAnd', -// defaultMessage: '服务不可用,服务器暂时过载或维护。', -// }), - -// 504: intl.formatMessage({ -// id: 'ocp-express.src.util.request.TheGatewayTimedOut', -// defaultMessage: '网关超时。', -// }), -// }; - /** * 异常处理程序 * response 为浏览器的 Response 对象,而 data 才是后端实际返回的响应数据 @@ -437,7 +377,7 @@ export const serversValidator = ( export function generateRandomPassword() { const length = Math.floor(Math.random() * 25) + 8; // 生成8到32之间的随机长度 const characters = - "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789~!@#%^&*_-+=`|(){}[]:;',.?/"; // 可用字符集合 + `ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789${SPECIAL_SYMBOLS_OCP}`; // 可用字符集合 let password = ''; let countUppercase = 0; // 大写字母计数器 @@ -475,63 +415,3 @@ export 
function generateRandomPassword() { return password; } -export const passwordRules = [ - { - required: true, - message: intl.formatMessage({ - id: 'OBD.src.utils.EnterAPassword', - defaultMessage: '请输入密码', - }), - }, - () => ({ - validator(_: any, value: string) { - if (value.length >= 8 && value.length <= 32) { - return Promise.resolve(); - } - return Promise.reject( - new Error( - intl.formatMessage({ - id: 'OBD.src.utils.TheLengthShouldBeTo', - defaultMessage: '长度应为 8~32 个字符', - }), - ), - ); - }, - }), - () => ({ - validator(_: any, value: string) { - const regex = /^[A-Za-z\d~!@#%^&*_\-+=`|(){}[\]:;',.?/]*$/; - if (regex.test(value)) { - return Promise.resolve(); - } - return Promise.reject( - new Error( - intl.formatMessage({ - id: 'OBD.src.utils.CanOnlyContainLettersNumbers.2', - defaultMessage: - "只能包含字母、数字和特殊字符~!@#%^&*_-+=`|(){}[]:;',.?/", - }), - ), - ); - }, - }), - () => ({ - validator(_: any, value: string) { - if ( - /^(?=.*[A-Z].*[A-Z])(?=.*[a-z].*[a-z])(?=.*\d.*\d)(?=.*[~!@#%^&*_\-+=`|(){}[\]:;',.?/].*[~!@#%^&*_\-+=`|(){}[\]:;',.?/])[A-Za-z\d~!@#%^&*_\-+=`|(){}[\]:;',.?/]{8,32}$/.test( - value, - ) - ) { - return Promise.resolve(); - } - return Promise.reject( - new Error( - intl.formatMessage({ - id: 'OBD.src.utils.AtLeastUppercaseAndLowercase', - defaultMessage: '大小写字母、数字和特殊字符都至少包含 2 个', - }), - ), - ); - }, - }), -];
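The getPasswordRules/passwordCheck pair added in web/src/utils/helper.ts is what the form changes above hang off. A minimal usage sketch, assuming only the exports shown in this diff plus the antd Form API; the field name root_password and the wrapper component are illustrative, not part of the change:

// Hedged sketch: wiring getPasswordRules into an antd password field.
// validateFirst stops at the first failing rule, so the required rule
// reports before the character-class validator does.
import { Form, Input } from 'antd';
import {
  getPasswordRules,
  passwordCheck,
  generateRandomPassword,
} from '@/utils/helper';

export const RootPasswordItem = () => (
  <Form.Item name="root_password" validateFirst rules={getPasswordRules('ob')}>
    <Input.Password />
  </Form.Item>
);

// The validator accepts a plain string or an API.ParameterValue object, so
// the same rules serve the parameter tables. Behavior on plain strings:
passwordCheck('Ab1?efgh', 'ob'); // true: 8 chars, all four classes present
passwordCheck('abcdefgh', 'ob'); // false: lowercase only, needs three classes
passwordCheck(generateRandomPassword('ocp'), 'ocp'); // true by construction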
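ConfigTable's new parameterRules prop routes per-component rules to the right table through targetTable. A sketch of that contract, with the RulesDetail shape inferred from the objects ClusterConfig.tsx builds; the pickRules helper and its fallback are assumptions added here (the diff itself uses a non-null assertion on find):

import type { FormItemProps } from 'antd';

// Shape inferred from ClusterConfig.tsx; not an exported type in the diff.
interface RulesDetail {
  rules: FormItemProps['rules']; // antd Form.Item rules to apply
  targetTable: string; // component whose parameter table receives the rules
  targetColumn: string; // parameter key the rules are intended for
}

// Hypothetical helper mirroring the branch inside ConfigTable: given an
// array, pick the entry whose targetTable matches the rendered component.
const pickRules = (
  parameterRules: RulesDetail | RulesDetail[],
  component: string,
): RulesDetail => {
  if (!Array.isArray(parameterRules)) return parameterRules;
  return (
    parameterRules.find((item) => item.targetTable === component) ?? {
      rules: [],
      targetTable: component,
      targetColumn: '',
    }
  );
};

With the two entries ClusterConfig registers ('obproxy-ce' and 'obagent'), an unmatched component degrades to "no extra rules" in this sketch, whereas the non-null assertion in the diff assumes every rendered component has a matching entry.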
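The split between passwordCheckLowVersion (OCP earlier than 4.2.2) and the current rule is easiest to see on concrete inputs; these sample passwords are illustrative only:

// Old rule: 8-32 chars drawn from letters, digits, and ~^*{}[]_-+, with at
// least 2 of EVERY class (uppercase, lowercase, digit, special symbol).
passwordCheckLowVersion('Aa1~Aa1~'); // true: two of each class
passwordCheckLowVersion('Aa1~bcde'); // false: only one uppercase/digit/symbol

// Current rule: any three of the four classes are enough.
passwordCheck('Aa1~bcde', 'ocp'); // true: all four classes appear at least once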