Merge branch 'master' into stable

colinthomas-z80 authored Sep 4, 2024
2 parents b9358fb + 3a256de commit e6bd3e3
Showing 9 changed files with 71 additions and 46 deletions.
1 change: 1 addition & 0 deletions docs/reference.rst
@@ -93,6 +93,7 @@ Launchers
parsl.launchers.SrunMPILauncher
parsl.launchers.GnuParallelLauncher
parsl.launchers.MpiExecLauncher
parsl.launchers.MpiRunLauncher
parsl.launchers.JsrunLauncher
parsl.launchers.WrappedLauncher

17 changes: 17 additions & 0 deletions docs/userguide/configuring.rst
@@ -536,12 +536,27 @@ Center's **Expanse** supercomputer. The example is designed to be executed on th
.. literalinclude:: ../../parsl/configs/expanse.py


Improv (Argonne LCRC)
---------------------

.. image:: https://www.lcrc.anl.gov/sites/default/files/styles/965_wide/public/2023-12/20231214_114057.jpg?itok=A-Rz5pP9

**Improv** is a PBS Pro-based supercomputer at Argonne's Laboratory Computing Resource
Center (LCRC). The following snippet is an example configuration that uses `parsl.providers.PBSProProvider`
and `parsl.launchers.MpiRunLauncher` to run multi-node jobs.

.. literalinclude:: ../../parsl/configs/improv.py


.. _configuring_nersc_cori:

Perlmutter (NERSC)
------------------

NERSC provides documentation on `how to use Parsl on Perlmutter <https://docs.nersc.gov/jobs/workflow/parsl/>`_.
Perlmutter is a Slurm-based HPC system, and Parsl uses `parsl.providers.SlurmProvider` with `parsl.launchers.SrunLauncher`
to launch tasks onto this machine.
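
For reference, a minimal Slurm-style configuration of this shape might look like the following sketch; the allocation name, scheduler options, environment setup, and walltime are placeholders rather than values from the NERSC documentation.

from parsl.config import Config
from parsl.executors import HighThroughputExecutor
from parsl.launchers import SrunLauncher
from parsl.providers import SlurmProvider

config = Config(
    executors=[
        HighThroughputExecutor(
            label="Perlmutter_HTEX",
            provider=SlurmProvider(
                account="YOUR_ALLOCATION",           # placeholder allocation name
                scheduler_options="#SBATCH -C cpu",  # illustrative: request CPU nodes
                worker_init="module load python",    # placeholder environment setup
                launcher=SrunLauncher(),
                nodes_per_block=1,
                walltime="00:10:00",
            ),
        ),
    ],
)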


Frontera (TACC)
---------------
@@ -599,6 +614,8 @@ Polaris (ALCF)
:width: 75%

ALCF provides documentation on `how to use Parsl on Polaris <https://docs.alcf.anl.gov/polaris/workflows/parsl/>`_.
Polaris uses `parsl.providers.PBSProProvider` and `parsl.launchers.MpiExecLauncher` to launch tasks onto the HPC system.
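
As a sketch only (the account, queue, and mpiexec binding flags are illustrative, not taken from the ALCF documentation), the provider/launcher pairing might be set up like this:

from parsl.launchers import MpiExecLauncher
from parsl.providers import PBSProProvider

provider = PBSProProvider(
    account="YOUR_ALLOCATION",   # placeholder
    queue="debug",               # placeholder queue name
    # One worker-pool manager per node; the mpiexec arguments are illustrative.
    launcher=MpiExecLauncher(bind_cmd="--cpu-bind", overrides="--ppn 1"),
    nodes_per_block=2,
    walltime="00:30:00",
)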



Stampede2 (TACC)
34 changes: 34 additions & 0 deletions parsl/configs/improv.py
@@ -0,0 +1,34 @@
from parsl.config import Config
from parsl.executors import HighThroughputExecutor
from parsl.launchers import MpiRunLauncher
from parsl.providers import PBSProProvider

config = Config(
executors=[
HighThroughputExecutor(
label="Improv_multinode",
max_workers_per_node=32,
provider=PBSProProvider(
account="YOUR_ALLOCATION_ON_IMPROV",
# PBS directives (header lines), for example:
# scheduler_options='#PBS -l mem=4gb',
scheduler_options='',

queue="compute",

# Command to be run before starting a worker:
# **WARNING** Improv requires an openmpi module to be
# loaded for the MpiRunLauncher. Add additional env
# load commands to this multiline string.
worker_init='''
module load gcc/13.2.0;
module load openmpi/5.0.3-gcc-13.2.0; ''',
launcher=MpiRunLauncher(),

# number of compute nodes allocated for each block
nodes_per_block=2,
walltime='00:10:00'
),
),
],
)
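
A config module like this is typically imported and loaded at the top of a workflow script. A minimal usage sketch:

import parsl
from parsl import python_app
from parsl.configs.improv import config

# Load the configuration before defining or invoking apps.
parsl.load(config)

@python_app
def hello():
    return "Hello from Improv"

print(hello().result())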
18 changes: 1 addition & 17 deletions parsl/executors/high_throughput/executor.py
@@ -199,9 +199,6 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin, UsageIn
will check the available memory at startup and limit the number of workers such that
there's sufficient memory for each worker. Default: None
max_workers : int
Deprecated. Please use max_workers_per_node instead.
max_workers_per_node : int
Caps the number of workers launched per node. Default: None
@@ -239,7 +236,6 @@ def __init__(self,
worker_debug: bool = False,
cores_per_worker: float = 1.0,
mem_per_worker: Optional[float] = None,
max_workers: Optional[Union[int, float]] = None,
max_workers_per_node: Optional[Union[int, float]] = None,
cpu_affinity: str = 'none',
available_accelerators: Union[int, Sequence[str]] = (),
@@ -272,9 +268,7 @@ def __init__(self,
else:
self.all_addresses = ','.join(get_all_addresses())

if max_workers:
self._warn_deprecated("max_workers", "max_workers_per_node")
self.max_workers_per_node = max_workers_per_node or max_workers or float("inf")
self.max_workers_per_node = max_workers_per_node or float("inf")

mem_slots = self.max_workers_per_node
cpu_slots = self.max_workers_per_node
@@ -335,16 +329,6 @@ def _warn_deprecated(self, old: str, new: str):
stacklevel=2
)

@property
def max_workers(self):
self._warn_deprecated("max_workers", "max_workers_per_node")
return self.max_workers_per_node

@max_workers.setter
def max_workers(self, val: Union[int, float]):
self._warn_deprecated("max_workers", "max_workers_per_node")
self.max_workers_per_node = val

@property
def logdir(self):
return "{}/{}".format(self.run_dir, self.label)
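
The user-facing effect of this change is that the deprecated ``max_workers`` alias is removed entirely, so any remaining configurations must switch to ``max_workers_per_node``. A minimal before/after sketch:

from parsl.executors import HighThroughputExecutor

# Before this commit (accepted, but emitted a DeprecationWarning):
# htex = HighThroughputExecutor(label="htex", max_workers=4)

# After this commit (the only supported spelling):
htex = HighThroughputExecutor(label="htex", max_workers_per_node=4)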
29 changes: 15 additions & 14 deletions parsl/tests/site_tests/README.rst
@@ -46,27 +46,28 @@ Adding a new site
1. Specialized Python builds for the system (e.g., Summit)
2. Anaconda available via modules
3. User's conda installation
* Add a new block to `conda_setup.sh` that installs a fresh environment and writes out
the activation commands to `~/setup_parsl_test_env.sh`
* Add a site config to `parsl/tests/configs/<SITE.py>` and add your local user options
to `parsl/tests/configs/local_user_opts.py`. For eg, `here's mine<https://gist.github.com/yadudoc/b71765284d2db0706c4f43605dd8b8d6>`_
Make sure that the site config uses the `fresh_config` pattern.
* Add a new block to ``conda_setup.sh`` that installs a fresh environment and writes out
the activation commands to ``~/setup_parsl_test_env.sh``
* Add a site config to ``parsl/tests/configs/<SITE.py>`` and add your local user options
to ``parsl/tests/configs/local_user_opts.py``. For example,
`here's mine <https://gist.github.com/yadudoc/b71765284d2db0706c4f43605dd8b8d6>`_
Make sure that the site config uses the ``fresh_config`` pattern.
Please ensure that the site config uses:
* max_workers = 1
* init_blocks = 1
* min_blocks = 0
* ``max_workers_per_node = 1``
* ``init_blocks = 1``
* ``min_blocks = 0``

* Add this site config to `parsl/tests/site_tests/site_config_selector.py`
* Reinstall parsl, using `pip install .`
* Test a single test: `python3 test_site.py -d` to confirm that the site works correctly.
* Once tests are passing run the whole site_test with `make site_test`
* Add this site config to ``parsl/tests/site_tests/site_config_selector.py``
* Reinstall parsl, using ``pip install .``
* Test a single test: ``python3 test_site.py -d`` to confirm that the site works correctly.
* Once tests are passing run the whole site_test with ``make site_test``
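
For illustration, a site config following the ``fresh_config`` pattern with the settings above might look like the sketch below; the site name, provider, and scheduler details are hypothetical.

# parsl/tests/configs/<SITE>.py -- hypothetical example
from parsl.config import Config
from parsl.executors import HighThroughputExecutor
from parsl.providers import SlurmProvider


def fresh_config():
    # Return a new Config on every call, as the fresh_config pattern requires.
    return Config(
        executors=[
            HighThroughputExecutor(
                label="my_site_htex",
                max_workers_per_node=1,
                provider=SlurmProvider(
                    init_blocks=1,
                    min_blocks=0,
                    nodes_per_block=1,
                    walltime="00:30:00",
                ),
            ),
        ],
    )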


Shared filesystem option
------------------------

There is a new env variable "SHARED_FS_OPTIONS" to pass markers to pytest to skip certain tests.
There is a new env variable ``SHARED_FS_OPTIONS`` to pass markers to pytest to skip certain tests.

When there's a shared-FS, the default NoOpStaging works. However, when there's no shared-FS, some tests
that use File objects require a staging provider (e.g., rsync). These tests can be turned off with
`-k "not staging_required"`
``-k "not staging_required"``
12 changes: 0 additions & 12 deletions parsl/tests/test_htex/test_htex.py
@@ -126,18 +126,6 @@ def kill_interchange(*args, **kwargs):
assert "HighThroughputExecutor has not started" in caplog.text


@pytest.mark.local
def test_max_workers_per_node():
with pytest.warns(DeprecationWarning) as record:
htex = HighThroughputExecutor(max_workers_per_node=1, max_workers=2)

warning_msg = "max_workers is deprecated"
assert any(warning_msg in str(warning.message) for warning in record)

# Ensure max_workers_per_node takes precedence
assert htex.max_workers_per_node == htex.max_workers == 1


@pytest.mark.local
@pytest.mark.parametrize("cmd", (None, "custom-launch-cmd"))
def test_htex_worker_pool_launch_cmd(cmd: Optional[str]):
2 changes: 1 addition & 1 deletion parsl/tests/test_mpi_apps/test_mpiex.py
@@ -43,7 +43,7 @@ def test_init():

new_kwargs = {'max_workers_per_block', 'mpi_launcher'}
excluded_kwargs = {'available_accelerators', 'cores_per_worker', 'max_workers_per_node',
'mem_per_worker', 'cpu_affinity', 'max_workers', 'manager_selector'}
'mem_per_worker', 'cpu_affinity', 'manager_selector'}

# Get the kwargs from both HTEx and MPIEx
htex_kwargs = set(signature(HighThroughputExecutor.__init__).parameters)
@@ -23,7 +23,7 @@ def local_config():
poll_period=100,
label="htex_local",
address="127.0.0.1",
max_workers=1,
max_workers_per_node=1,
encrypted=True,
provider=LocalProvider(
channel=LocalChannel(),
@@ -27,7 +27,7 @@ def local_config():
poll_period=100,
label="htex_local",
address="127.0.0.1",
max_workers=1,
max_workers_per_node=1,
encrypted=True,
launch_cmd="sleep inf",
provider=LocalProvider(
