diff --git a/CITATION.bib b/CITATION.bib new file mode 100644 index 000000000..ec3093aae --- /dev/null +++ b/CITATION.bib @@ -0,0 +1,12 @@ +@inproceedings {superbench, + author = {Yifan Xiong and Yuting Jiang and Ziyue Yang and Lei Qu and Guoshuai Zhao and Shuguang Liu and Dong Zhong and Boris Pinzur and Jie Zhang and Yang Wang and Jithin Jose and Hossein Pourreza and Jeff Baxter and Kushal Datta and Prabhat Ram and Luke Melton and Joe Chau and Peng Cheng and Yongqiang Xiong and Lidong Zhou}, + title = {{SuperBench}: Improving Cloud {AI} Infrastructure Reliability with Proactive Validation}, + booktitle = {2024 USENIX Annual Technical Conference (USENIX ATC 24)}, + year = {2024}, + isbn = {978-1-939133-41-0}, + address = {Santa Clara, CA}, + pages = {835--850}, + url = {https://www.usenix.org/conference/atc24/presentation/xiong}, + publisher = {USENIX Association}, + month = jul +} diff --git a/README.md b/README.md index 225bd7d71..7706fc472 100644 --- a/README.md +++ b/README.md @@ -19,6 +19,25 @@ __SuperBench__ is a validation and profiling tool for AI infrastructure. ## _Check [aka.ms/superbench](https://aka.ms/superbench) for more details._ +## Citations + +To cite SuperBench in your publications: + +```bib +@inproceedings {superbench, + author = {Yifan Xiong and Yuting Jiang and Ziyue Yang and Lei Qu and Guoshuai Zhao and Shuguang Liu and Dong Zhong and Boris Pinzur and Jie Zhang and Yang Wang and Jithin Jose and Hossein Pourreza and Jeff Baxter and Kushal Datta and Prabhat Ram and Luke Melton and Joe Chau and Peng Cheng and Yongqiang Xiong and Lidong Zhou}, + title = {{SuperBench}: Improving Cloud {AI} Infrastructure Reliability with Proactive Validation}, + booktitle = {2024 USENIX Annual Technical Conference (USENIX ATC 24)}, + year = {2024}, + isbn = {978-1-939133-41-0}, + address = {Santa Clara, CA}, + pages = {835--850}, + url = {https://www.usenix.org/conference/atc24/presentation/xiong}, + publisher = {USENIX Association}, + month = jul +} +``` + ## Trademarks This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft diff --git a/docs/cli.md b/docs/cli.md index b35595ccb..0a73e1f33 100644 --- a/docs/cli.md +++ b/docs/cli.md @@ -111,7 +111,7 @@ sb deploy [--docker-image] | `--docker-username` | `None` | Docker registry username if authentication is needed. | | `--host-file` `-f` | `None` | Path to Ansible inventory host file. | | `--host-list` `-l` | `None` | Comma separated host list. | -| `--host-password` | `None` | Host password or key passphase if needed. | +| `--host-password` | `None` | Host password or key passphrase if needed. | | `--host-username` | `None` | Host username if needed. | | `--no-image-pull` | `False` | Skip pull and use local Docker image. | | `--output-dir` | `None` | Path to output directory, outputs/{datetime} will be used if not specified. | @@ -373,7 +373,7 @@ sb run [--config-file] | `--get-info` | `False` | Collect system info. | | `--host-file` `-f` | `None` | Path to Ansible inventory host file. | | `--host-list` `-l` | `None` | Comma separated host list. | -| `--host-password` | `None` | Host password or key passphase if needed. | +| `--host-password` | `None` | Host password or key passphrase if needed. | | `--host-username` | `None` | Host username if needed. | | `--no-docker` | `False` | Run on host directly without Docker. | | `--output-dir` | `None` | Path to output directory, outputs/{datetime} will be used if not specified. 
| diff --git a/docs/superbench-config.mdx b/docs/superbench-config.mdx index 340c7d616..b8ad058fa 100644 --- a/docs/superbench-config.mdx +++ b/docs/superbench-config.mdx @@ -295,7 +295,7 @@ Enable current benchmark or not, can be overwritten by [`superbench.enable`](#su ### `timeout` -Set the timeout value in seconds, the benchmarking will stop early if timeout is triggerred. +Set the timeout value in seconds; the benchmark will stop early if the timeout is triggered. * default value: none @@ -336,16 +336,16 @@ A list of models to run, only supported in model-benchmark. Parameters for benchmark to use, varying for different benchmarks. -There have four common parameters for all benchmarks: -* run_count: how many times do user want to run this benchmark, default value is 1. +There are four common parameters for all benchmarks: +* run_count: how many times the user wants to run this benchmark, default value is 1. * duration: the elapsed time of benchmark in seconds. It can work for all model-benchmark. But for micro-benchmark, benchmark authors should consume it by themselves. * log_raw_data: log raw data into file instead of saving it into result object, default value is `False`. Benchmarks who have large raw output may want to set it as `True`, such as `nccl-bw`/`rccl-bw`. * log_flushing: real-time log flushing, default value is `False`. -For Model-Benchmark, there have some parameters that can control the elapsed time. +For Model-Benchmark, there are some parameters that can control the elapsed time. * duration: the elapsed time of benchmark in seconds. -* num_warmup: the number of warmup step, should be positive integer. -* num_steps: the number of test step. +* num_warmup: the number of warmup steps, should be a positive integer. +* num_steps: the number of test steps. If `duration > 0` and `num_steps > 0`, then benchmark will take the least as the elapsed time. Otherwise only one of them will take effect. @@ -429,7 +429,7 @@ while `proc_num: 8, node_num: null` will run 32-GPU distributed training on all Command prefix to use in the mode, in Python formatted string. -Available variables in formatted string includes: +Available variables in the formatted string include: + `proc_rank` + `proc_num` diff --git a/setup.py b/setup.py index a0b859c14..4cd448939 100644 --- a/setup.py +++ b/setup.py @@ -164,7 +164,7 @@ def run(self): 'natsort>=7.1.1', 'networkx>=2.5', 'numpy>=1.19.2', - 'omegaconf==2.0.6', + 'omegaconf==2.3.0', 'openpyxl>=3.0.7', 'packaging>=21.0', 'pandas>=1.1.5', @@ -198,7 +198,7 @@ def run(self): 'pydocstyle>=5.1.1', 'pytest-cov>=2.11.1', 'pytest-subtests>=0.4.0', - 'pytest>=6.2.2', + 'pytest>=6.2.2, <=7.4.4', 'types-markdown', 'types-pkg_resources', 'types-pyyaml', diff --git a/superbench/executor/executor.py b/superbench/executor/executor.py index bfff5cb7c..c4a812a9c 100644 --- a/superbench/executor/executor.py +++ b/superbench/executor/executor.py @@ -71,13 +71,13 @@ def __get_enabled_benchmarks(self): Return: list: List of benchmarks which will be executed. 
""" - if self._sb_config.superbench.enable: + if 'enable' in self._sb_config.superbench and self._sb_config.superbench.enable: if isinstance(self._sb_config.superbench.enable, str): return [self._sb_config.superbench.enable] elif isinstance(self._sb_config.superbench.enable, (list, ListConfig)): return list(self._sb_config.superbench.enable) # TODO: may exist order issue - return [k for k, v in self._sb_benchmarks.items() if v.enable] + return [k for k, v in self._sb_benchmarks.items() if 'enable' in v and v.enable] def __get_platform(self): """Detect runninng platform by environment.""" @@ -228,32 +228,37 @@ def exec(self): logger.warning('Monitor can not support CPU platform.') benchmark_real_name = benchmark_name.split(':')[0] - for framework in benchmark_config.frameworks or [Framework.NONE.value]: - if benchmark_real_name == 'model-benchmarks' or ( - ':' not in benchmark_name and benchmark_name.endswith('_models') - ): - for model in benchmark_config.models: - full_name = f'{benchmark_name}/{framework}-{model}' + if 'frameworks' in benchmark_config: + for framework in benchmark_config.frameworks or [Framework.NONE.value]: + if benchmark_real_name == 'model-benchmarks' or ( + ':' not in benchmark_name and benchmark_name.endswith('_models') + ): + for model in benchmark_config.models: + full_name = f'{benchmark_name}/{framework}-{model}' + logger.info('Executor is going to execute %s.', full_name) + context = BenchmarkRegistry.create_benchmark_context( + model, + platform=self.__get_platform(), + framework=Framework(framework.lower()), + parameters=self.__get_arguments( + {} if 'parameters' not in benchmark_config else benchmark_config.parameters + ) + ) + result = self.__exec_benchmark(full_name, context) + benchmark_results.append(result) + else: + full_name = benchmark_name logger.info('Executor is going to execute %s.', full_name) context = BenchmarkRegistry.create_benchmark_context( - model, + benchmark_real_name, platform=self.__get_platform(), framework=Framework(framework.lower()), - parameters=self.__get_arguments(benchmark_config.parameters) + parameters=self.__get_arguments( + {} if 'parameters' not in benchmark_config else benchmark_config.parameters + ) ) result = self.__exec_benchmark(full_name, context) benchmark_results.append(result) - else: - full_name = benchmark_name - logger.info('Executor is going to execute %s.', full_name) - context = BenchmarkRegistry.create_benchmark_context( - benchmark_real_name, - platform=self.__get_platform(), - framework=Framework(framework.lower()), - parameters=self.__get_arguments(benchmark_config.parameters) - ) - result = self.__exec_benchmark(full_name, context) - benchmark_results.append(result) if monitor: monitor.stop() diff --git a/superbench/runner/runner.py b/superbench/runner/runner.py index 7e29f4dfe..cd0c8c4dc 100644 --- a/superbench/runner/runner.py +++ b/superbench/runner/runner.py @@ -67,24 +67,24 @@ def __validate_sb_config(self): # noqa: C901 InvalidConfigError: If input config is invalid. 
""" # TODO: add validation and defaulting - if not self._sb_config.superbench.env: + if 'env' not in self._sb_config.superbench: self._sb_config.superbench.env = {} for name in self._sb_benchmarks: - if not self._sb_benchmarks[name].modes: + if 'modes' not in self._sb_benchmarks[name]: self._sb_benchmarks[name].modes = [] for idx, mode in enumerate(self._sb_benchmarks[name].modes): - if not mode.env: + if 'env' not in mode: self._sb_benchmarks[name].modes[idx].env = {} if mode.name == 'local': - if not mode.proc_num: + if 'proc_num' not in mode: self._sb_benchmarks[name].modes[idx].proc_num = 1 - if not mode.prefix: + if 'prefix' not in mode: self._sb_benchmarks[name].modes[idx].prefix = '' elif mode.name == 'torch.distributed': - if not mode.proc_num: + if 'proc_num' not in mode: self._sb_benchmarks[name].modes[idx].proc_num = 8 elif mode.name == 'mpi': - if not mode.mca: + if 'machinefile' not in mode: self._sb_benchmarks[name].modes[idx].mca = { 'pml': 'ob1', 'btl': '^openib', @@ -93,8 +93,8 @@ def __validate_sb_config(self): # noqa: C901 } for key in ['PATH', 'LD_LIBRARY_PATH', 'SB_MICRO_PATH', 'SB_WORKSPACE']: self._sb_benchmarks[name].modes[idx].env.setdefault(key, None) - if mode.pattern: - if mode.pattern.type == 'topo-aware' and not mode.pattern.ibstat: + if 'pattern' in mode: + if mode.pattern.type == 'topo-aware' and 'ibstat' not in mode.pattern: self._sb_benchmarks[name].modes[idx].pattern.ibstat = gen_ibstat( self._ansible_config, str(self._output_path / 'ibstate_file.txt') ) @@ -105,12 +105,12 @@ def __get_enabled_benchmarks(self): Return: list: List of benchmarks which will be executed. """ - if self._sb_config.superbench.enable: + if 'enable' in self._sb_config.superbench and self._sb_config.superbench.enable: if isinstance(self._sb_config.superbench.enable, str): return [self._sb_config.superbench.enable] elif isinstance(self._sb_config.superbench.enable, (list, ListConfig)): return list(self._sb_config.superbench.enable) - return [k for k, v in self._sb_benchmarks.items() if v.enable] + return [k for k, v in self._sb_benchmarks.items() if 'enable' in v and v.enable] def __get_mode_command(self, benchmark_name, mode, timeout=None): """Get runner command for given mode. 
@@ -141,7 +141,7 @@ def __get_mode_command(self, benchmark_name, mode, timeout=None): elif mode.name == 'torch.distributed': # TODO: replace with torch.distributed.run in v1.9 # TODO: only supports node_num=1 and node_num=all currently - torch_dist_params = '' if mode.node_num == 1 else \ + torch_dist_params = '' if 'node_num' in mode and mode.node_num == 1 else \ '--nnodes=$NNODES --node_rank=$NODE_RANK --master_addr=$MASTER_ADDR --master_port=$MASTER_PORT ' mode_command = ( f'torchrun' @@ -158,8 +158,8 @@ def __get_mode_command(self, benchmark_name, mode, timeout=None): '-bind-to numa ' # bind processes to numa '{mca_list} {env_list} {command}' ).format( - host_list=f'-host localhost:{mode.proc_num}' if mode.node_num == 1 else - f'-hostfile hostfile -map-by ppr:{mode.proc_num}:node' if mode.host_list is None else '-host ' + + host_list=f'-host localhost:{mode.proc_num}' if 'node_num' in mode and mode.node_num == 1 else + f'-hostfile hostfile -map-by ppr:{mode.proc_num}:node' if 'host_list' not in mode else '-host ' + ','.join(f'{host}:{mode.proc_num}' for host in mode.host_list), mca_list=' '.join(f'-mca {k} {v}' for k, v in mode.mca.items()), env_list=' '.join( @@ -206,6 +206,9 @@ def run_sys_info(self): logger.info('Runner is going to get node system info.') fcmd = "docker exec sb-workspace bash -c '{command}'" + + if 'skip' not in self._docker_config: + self._docker_config.skip = False if self._docker_config.skip: fcmd = "bash -c 'cd $SB_WORKSPACE && {command}'" ansible_runner_config = self._ansible_client.get_shell_config( @@ -225,7 +228,7 @@ def check_env(self): # pragma: no cover self._ansible_client.get_playbook_config( 'check_env.yaml', extravars={ - 'no_docker': bool(self._docker_config.skip), + 'no_docker': False if 'skip' not in self._docker_config else self._docker_config.skip, 'output_dir': str(self._output_path), 'env': '\n'.join(f'{k}={v}' for k, v in self._sb_config.superbench.env.items()), } @@ -441,15 +444,17 @@ def _run_proc(self, benchmark_name, mode, vars): int: Process return code. 
""" mode.update(vars) - if mode.name == 'mpi' and mode.pattern: + if mode.name == 'mpi' and 'pattern' in mode: mode.env.update({'SB_MODE_SERIAL_INDEX': mode.serial_index, 'SB_MODE_PARALLEL_INDEX': mode.parallel_index}) logger.info('Runner is going to run %s in %s mode, proc rank %d.', benchmark_name, mode.name, mode.proc_rank) - timeout = self._sb_benchmarks[benchmark_name].timeout + timeout = self._sb_benchmarks[benchmark_name].get('timeout', 60) if isinstance(timeout, int): timeout = max(timeout, 60) env_list = '--env-file /tmp/sb.env' + if 'skip' not in self._docker_config: + self._docker_config.skip = False if self._docker_config.skip: env_list = 'set -o allexport && source /tmp/sb.env && set +o allexport' for k, v in mode.env.items(): @@ -463,7 +468,7 @@ def _run_proc(self, benchmark_name, mode, vars): ansible_runner_config = self._ansible_client.get_shell_config( fcmd.format(env_list=env_list, command=self.__get_mode_command(benchmark_name, mode, timeout)) ) - if mode.name == 'mpi' and mode.node_num != 1: + if mode.name == 'mpi' and 'node_num' in mode and mode.node_num != 1: ansible_runner_config = self._ansible_client.update_mpi_config(ansible_runner_config) if isinstance(timeout, int): @@ -495,7 +500,7 @@ def run(self): ) ansible_rc = sum(rc_list) elif mode.name == 'torch.distributed' or mode.name == 'mpi': - if not mode.pattern: + if 'pattern' not in mode: ansible_rc = self._run_proc(benchmark_name, mode, {'proc_rank': 0}) else: if not os.path.exists(self._output_path / 'hostfile'): diff --git a/tests/executor/test_executor.py b/tests/executor/test_executor.py index a9365e6c1..984f0437e 100644 --- a/tests/executor/test_executor.py +++ b/tests/executor/test_executor.py @@ -44,7 +44,7 @@ def test_set_logger(self): def test_get_enabled_benchmarks_enable_none(self): """Test enabled benchmarks when superbench.enable is none.""" benchmarks = self.default_config.superbench.benchmarks - expected_enabled_benchmarks = [x for x in benchmarks if benchmarks[x]['enable']] + expected_enabled_benchmarks = [x for x in benchmarks if 'enable' in benchmarks[x] and benchmarks[x]['enable']] self.assertListEqual(self.executor._sb_enabled, expected_enabled_benchmarks) def test_get_enabled_benchmarks_enable_str(self): diff --git a/website/package-lock.json b/website/package-lock.json index 4ec6973f3..2fbb1dfdc 100644 --- a/website/package-lock.json +++ b/website/package-lock.json @@ -5384,9 +5384,9 @@ } }, "follow-redirects": { - "version": "1.14.8", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.14.8.tgz", - "integrity": "sha512-1x0S9UVJHsQprFcEC/qnNzBLcIxsjAV905f/UkQxbclCsoTWlacCNOpQa/anodLl2uaEKFhfWOvM2Qg77+15zA==" + "version": "1.15.6", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.6.tgz", + "integrity": "sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA==" }, "for-in": { "version": "1.0.2", @@ -6450,9 +6450,9 @@ "integrity": "sha512-agE4QfB2Lkp9uICn7BAqoscw4SZP9kTE2hxiFI3jBPmXJfdqiahTbUuKGsMoN2GtqL9AxhYioAcVvgsb1HvRbA==" }, "ip": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/ip/-/ip-1.1.5.tgz", - "integrity": "sha1-vd7XARQpCCjAoDnnLvJfWq7ENUo=" + "version": "1.1.9", + "resolved": "https://registry.npmjs.org/ip/-/ip-1.1.9.tgz", + "integrity": "sha512-cyRxvOEpNHNtchU3Ln9KC/auJgup87llfQpQ+t5ghoC/UhL16SWzbueiCsdTnWmqAWl7LadfuwhlqmtOaqMHdQ==" }, "ip-regex": { "version": "2.1.0", @@ -11678,4 +11678,4 @@ "integrity": 
"sha512-V50KMwwzqJV0NpZIZFwfOD5/lyny3WlSzRiXgA0G7VUnRlqttta1L6UQIHzd6EuBY/cHGfwTIck7w1yH6Q5zUw==" } } -} \ No newline at end of file +}