From 0a198a3abfd9e8780563d073ac618bc3e6dfc643 Mon Sep 17 00:00:00 2001
From: Yadu Nand Babuji
Date: Tue, 27 Aug 2024 13:08:02 -0500
Subject: [PATCH 1/2] Add breadcrumbs for users trying to match configs to their machines (#3603)

Both Polaris and Perlmutter have configuration documentation hosted by
their respective facilities. This is great, but it makes it harder for
users trying to match an undocumented machine to existing configs. This
PR adds hints to both the Polaris and Perlmutter sections listing the
provider and launcher combination used, in the hope that this might
lead users to configs from those machines.
---
 docs/userguide/configuring.rst | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/docs/userguide/configuring.rst b/docs/userguide/configuring.rst
index f3fe5cc407..3933695eb3 100644
--- a/docs/userguide/configuring.rst
+++ b/docs/userguide/configuring.rst
@@ -542,6 +542,9 @@ Perlmutter (NERSC)
 ------------------
 
 NERSC provides documentation on `how to use Parsl on Perlmutter `_.
+Perlmutter is a Slurm-based HPC system, and Parsl uses `parsl.providers.SlurmProvider` with `parsl.launchers.SrunLauncher`
+to launch tasks onto this machine.
+
 
 Frontera (TACC)
 ---------------
@@ -599,6 +602,8 @@ Polaris (ALCF)
    :width: 75%
 
 ALCF provides documentation on `how to use Parsl on Polaris `_.
+Polaris uses `parsl.providers.PBSProProvider` and `parsl.launchers.MpiExecLauncher` to launch tasks onto the HPC system.
+
 
 
 Stampede2 (TACC)

From 3f2bf1865eea16cc44d6b7f8938a1ae1781c61fd Mon Sep 17 00:00:00 2001
From: Yadu Nand Babuji
Date: Wed, 28 Aug 2024 11:28:54 -0500
Subject: [PATCH 2/2] Adding configuration example for Improv@LCRC (#3602)

---
 docs/reference.rst             |  1 +
 docs/userguide/configuring.rst | 12 ++++++++++++
 parsl/configs/improv.py        | 34 ++++++++++++++++++++++++++++++++++
 3 files changed, 47 insertions(+)
 create mode 100644 parsl/configs/improv.py

diff --git a/docs/reference.rst b/docs/reference.rst
index d8e18bd244..f2d89afaf8 100644
--- a/docs/reference.rst
+++ b/docs/reference.rst
@@ -93,6 +93,7 @@ Launchers
     parsl.launchers.SrunMPILauncher
     parsl.launchers.GnuParallelLauncher
     parsl.launchers.MpiExecLauncher
+    parsl.launchers.MpiRunLauncher
     parsl.launchers.JsrunLauncher
     parsl.launchers.WrappedLauncher
 
diff --git a/docs/userguide/configuring.rst b/docs/userguide/configuring.rst
index 3933695eb3..a57e815fe7 100644
--- a/docs/userguide/configuring.rst
+++ b/docs/userguide/configuring.rst
@@ -536,6 +536,18 @@ Center's **Expanse** supercomputer. The example is designed to be executed on th
 
 .. literalinclude:: ../../parsl/configs/expanse.py
 
+Improv (Argonne LCRC)
+---------------------
+
+.. image:: https://www.lcrc.anl.gov/sites/default/files/styles/965_wide/public/2023-12/20231214_114057.jpg?itok=A-Rz5pP9
+
+**Improv** is a PBS Pro based supercomputer at Argonne's Laboratory Computing Resource
+Center (LCRC). The following snippet is an example configuration that uses `parsl.providers.PBSProProvider`
+and `parsl.launchers.MpiRunLauncher` to run multi-node jobs.
+
+.. literalinclude:: ../../parsl/configs/improv.py
+
+
 .. _configuring_nersc_cori:
 
 Perlmutter (NERSC)
diff --git a/parsl/configs/improv.py b/parsl/configs/improv.py
new file mode 100644
index 0000000000..8a40282829
--- /dev/null
+++ b/parsl/configs/improv.py
@@ -0,0 +1,34 @@
+from parsl.config import Config
+from parsl.executors import HighThroughputExecutor
+from parsl.launchers import MpiRunLauncher
+from parsl.providers import PBSProProvider
+
+config = Config(
+    executors=[
+        HighThroughputExecutor(
+            label="Improv_multinode",
+            max_workers_per_node=32,
+            provider=PBSProProvider(
+                account="YOUR_ALLOCATION_ON_IMPROV",
+                # PBS directives (header lines), for example:
+                # scheduler_options='#PBS -l mem=4gb',
+                scheduler_options='',
+
+                queue="compute",
+
+                # Command to be run before starting a worker:
+                # **WARNING** Improv requires an openmpi module to be
+                # loaded for the MpiRunLauncher. Add additional env
+                # load commands to this multiline string.
+                worker_init='''
+module load gcc/13.2.0;
+module load openmpi/5.0.3-gcc-13.2.0; ''',
+                launcher=MpiRunLauncher(),
+
+                # number of compute nodes allocated for each block
+                nodes_per_block=2,
+                walltime='00:10:00'
+            ),
+        ),
+    ],
+)
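
A note for readers following these breadcrumbs: the first patch names
`parsl.providers.SlurmProvider` plus `parsl.launchers.SrunLauncher` for
Perlmutter but does not show the pairing in context. Below is a minimal
sketch of that combination; the ``YOUR_ALLOCATION`` and ``YOUR_PARTITION``
values and the ``slurm_multinode`` label are placeholders, not values
taken from this PR::

    from parsl.config import Config
    from parsl.executors import HighThroughputExecutor
    from parsl.launchers import SrunLauncher
    from parsl.providers import SlurmProvider

    # A sketch of the SlurmProvider + SrunLauncher pairing named above.
    # Replace the placeholders with site-specific values.
    config = Config(
        executors=[
            HighThroughputExecutor(
                label="slurm_multinode",
                provider=SlurmProvider(
                    account="YOUR_ALLOCATION",   # placeholder allocation name
                    partition="YOUR_PARTITION",  # placeholder Slurm partition
                    nodes_per_block=2,
                    walltime="00:10:00",
                    # SrunLauncher wraps the worker command in srun so that
                    # workers are started on every node in the allocation.
                    launcher=SrunLauncher(),
                ),
            ),
        ],
    )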
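
Similarly, a hedged usage sketch for the new Improv config: assuming the
file lands at ``parsl/configs/improv.py`` as in the second patch (and is
therefore importable as ``parsl.configs.improv``), a workflow script run
from an Improv login node might look like the following; the ``hostname``
app is illustrative only::

    import parsl
    from parsl import python_app
    from parsl.configs.improv import config

    @python_app
    def hostname():
        # Runs on an Improv compute node provisioned by the PBSProProvider.
        import platform
        return platform.node()

    parsl.load(config)
    try:
        # Each call returns an AppFuture; result() blocks until the task
        # has executed on the allocated nodes.
        futures = [hostname() for _ in range(4)]
        print([f.result() for f in futures])
    finally:
        parsl.dfk().cleanup()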