Skip to content

Commit cc89bca

Browse files
pre-commit-ci[bot] and jan-janssen
authored and committed
Refactor Interface
1 parent d22399d commit cc89bca

15 files changed

+428
-412
lines changed

executorlib/__init__.py

Lines changed: 37 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -40,31 +40,31 @@ class Executor:
4040
max_workers (int): for backwards compatibility with the standard library, max_workers also defines the number of
4141
cores which can be used in parallel - just like the max_cores parameter. Using max_cores is
4242
recommended, as computers have a limited number of compute cores.
43+
backend (str): Switch between the different backends "flux", "local" or "slurm". The default is "local".
4344
max_cores (int): defines the number cores which can be used in parallel
4445
cores_per_worker (int): number of MPI cores to be used for each function call
4546
threads_per_core (int): number of OpenMP threads to be used for each function call
4647
gpus_per_worker (int): number of GPUs per worker - defaults to 0
47-
oversubscribe (bool): adds the `--oversubscribe` command line flag (OpenMPI and SLURM only) - default False
4848
cwd (str/None): current working directory where the parallel python task is executed
49+
openmpi_oversubscribe (bool): adds the `--oversubscribe` command line flag (OpenMPI and SLURM only) - default False
50+
slurm_cmd_args (list): Additional command line arguments for the srun call (SLURM only)
51+
flux_executor (flux.job.FluxExecutor): Flux Python interface to submit the workers to flux
52+
flux_executor_pmi_mode (str): PMI interface to use (OpenMPI v5 requires pmix) default is None (Flux only)
53+
flux_executor_nesting (bool): Provide hierarchically nested Flux job scheduler inside the submitted function.
4954
conda_environment_name (str): name of the conda environment to initialize
5055
conda_environment_path (str): path of the conda environment to initialize
51-
executor (flux.job.FluxExecutor): Flux Python interface to submit the workers to flux
5256
hostname_localhost (boolean): use localhost instead of the hostname to establish the zmq connection. In the
5357
context of an HPC cluster this essential to be able to communicate to an
5458
Executor running on a different compute node within the same allocation. And
5559
in principle any computer should be able to resolve that their own hostname
5660
points to the same address as localhost. Still MacOS >= 12 seems to disable
5761
this look up for security reasons. So on MacOS it is required to set this
5862
option to true
59-
backend (str): Switch between the different backends "flux", "local" or "slurm". Alternatively, when "auto"
60-
is selected (the default) the available backend is determined automatically.
6163
block_allocation (boolean): To accelerate the submission of a series of python functions with the same resource
6264
requirements, executorlib supports block allocation. In this case all resources have
6365
to be defined on the executor, rather than during the submission of the individual
6466
function.
6567
init_function (None): optional function to preset arguments for functions which are submitted later
66-
command_line_argument_lst (list): Additional command line arguments for the srun call (SLURM only)
67-
pmi (str): PMI interface to use (OpenMPI v5 requires pmix) default is None (Flux only)
6868
disable_dependencies (boolean): Disable resolving future objects during the submission.
6969
refresh_rate (float): Set the refresh rate in seconds, how frequently the input queue is checked.
7070
plot_dependency_graph (bool): Plot the dependencies of multiple future objects without executing them. For
@@ -94,21 +94,22 @@ class Executor:
9494
def __init__(
9595
self,
9696
max_workers: int = 1,
97+
backend: str = "auto",
9798
max_cores: int = 1,
9899
cores_per_worker: int = 1,
99100
threads_per_core: int = 1,
100101
gpus_per_worker: int = 0,
101-
oversubscribe: bool = False,
102102
cwd: Optional[str] = None,
103+
openmpi_oversubscribe: bool = False,
104+
slurm_cmd_args: list[str] = [],
105+
flux_executor=None,
106+
flux_executor_pmi_mode: Optional[str] = None,
107+
flux_executor_nesting: bool = False,
103108
conda_environment_name: Optional[str] = None,
104109
conda_environment_path: Optional[str] = None,
105-
executor=None,
106110
hostname_localhost: bool = False,
107-
backend: str = "auto",
108111
block_allocation: bool = True,
109112
init_function: Optional[callable] = None,
110-
command_line_argument_lst: list[str] = [],
111-
pmi: Optional[str] = None,
112113
disable_dependencies: bool = False,
113114
refresh_rate: float = 0.01,
114115
plot_dependency_graph: bool = False,
@@ -119,22 +120,22 @@ def __init__(
119120
def __new__(
120121
cls,
121122
max_workers: int = 1,
123+
backend: str = "auto",
122124
max_cores: int = 1,
123125
cores_per_worker: int = 1,
124126
threads_per_core: int = 1,
125127
gpus_per_worker: int = 0,
126-
oversubscribe: bool = False,
127128
cwd: Optional[str] = None,
129+
openmpi_oversubscribe: bool = False,
130+
slurm_cmd_args: list[str] = [],
131+
flux_executor=None,
132+
flux_executor_pmi_mode: Optional[str] = None,
133+
flux_executor_nesting: bool = False,
128134
conda_environment_name: Optional[str] = None,
129135
conda_environment_path: Optional[str] = None,
130-
executor=None,
131136
hostname_localhost: bool = False,
132-
backend: str = "auto",
133-
block_allocation: bool = False,
137+
block_allocation: bool = True,
134138
init_function: Optional[callable] = None,
135-
command_line_argument_lst: list[str] = [],
136-
pmi: Optional[str] = None,
137-
nested_flux_executor: bool = False,
138139
disable_dependencies: bool = False,
139140
refresh_rate: float = 0.01,
140141
plot_dependency_graph: bool = False,
@@ -151,32 +152,31 @@ def __new__(
151152
max_workers (int): for backwards compatibility with the standard library, max_workers also defines the
152153
number of cores which can be used in parallel - just like the max_cores parameter. Using
153154
max_cores is recommended, as computers have a limited number of compute cores.
155+
backend (str): Switch between the different backends "flux", "local" or "slurm". The default is "local".
154156
max_cores (int): defines the number cores which can be used in parallel
155157
cores_per_worker (int): number of MPI cores to be used for each function call
156158
threads_per_core (int): number of OpenMP threads to be used for each function call
157159
gpus_per_worker (int): number of GPUs per worker - defaults to 0
158-
oversubscribe (bool): adds the `--oversubscribe` command line flag (OpenMPI and SLURM only) - default False
160+
openmpi_oversubscribe (bool): adds the `--oversubscribe` command line flag (OpenMPI and SLURM only) - default False
161+
slurm_cmd_args (list): Additional command line arguments for the srun call (SLURM only)
159162
cwd (str/None): current working directory where the parallel python task is executed
163+
flux_executor (flux.job.FluxExecutor): Flux Python interface to submit the workers to flux
164+
flux_executor_pmi_mode (str): PMI interface to use (OpenMPI v5 requires pmix) default is None (Flux only)
165+
flux_executor_nesting (bool): Provide hierarchically nested Flux job scheduler inside the submitted function.
160166
conda_environment_name (str): name of the conda environment to initialize
161167
conda_environment_path (str): path of the conda environment to initialize
162-
executor (flux.job.FluxExecutor): Flux Python interface to submit the workers to flux
163168
hostname_localhost (boolean): use localhost instead of the hostname to establish the zmq connection. In the
164169
context of an HPC cluster this essential to be able to communicate to an
165170
Executor running on a different compute node within the same allocation. And
166171
in principle any computer should be able to resolve that their own hostname
167172
points to the same address as localhost. Still MacOS >= 12 seems to disable
168173
this look up for security reasons. So on MacOS it is required to set this
169174
option to true
170-
backend (str): Switch between the different backends "flux", "local" or "slurm". Alternatively, when "auto"
171-
is selected (the default) the available backend is determined automatically.
172175
block_allocation (boolean): To accelerate the submission of a series of python functions with the same
173176
resource requirements, executorlib supports block allocation. In this case all
174177
resources have to be defined on the executor, rather than during the submission
175178
of the individual function.
176179
init_function (None): optional function to preset arguments for functions which are submitted later
177-
command_line_argument_lst (list): Additional command line arguments for the srun call (SLURM only)
178-
pmi (str): PMI interface to use (OpenMPI v5 requires pmix) default is None (Flux only)
179-
nested_flux_executor (bool): Provide hierarchically nested Flux job scheduler inside the submitted function.
180180
disable_dependencies (boolean): Disable resolving future objects during the submission.
181181
refresh_rate (float): Set the refresh rate in seconds, how frequently the input queue is checked.
182182
plot_dependency_graph (bool): Plot the dependencies of multiple future objects without executing them. For
@@ -186,22 +186,22 @@ def __new__(
186186
if not disable_dependencies:
187187
return ExecutorWithDependencies(
188188
max_workers=max_workers,
189+
backend=backend,
189190
max_cores=max_cores,
190191
cores_per_worker=cores_per_worker,
191192
threads_per_core=threads_per_core,
192193
gpus_per_worker=gpus_per_worker,
193-
oversubscribe=oversubscribe,
194194
cwd=cwd,
195+
openmpi_oversubscribe=openmpi_oversubscribe,
196+
slurm_cmd_args=slurm_cmd_args,
197+
flux_executor=flux_executor,
198+
flux_executor_pmi_mode=flux_executor_pmi_mode,
199+
flux_executor_nesting=flux_executor_nesting,
195200
conda_environment_name=conda_environment_name,
196201
conda_environment_path=conda_environment_path,
197-
executor=executor,
198202
hostname_localhost=hostname_localhost,
199-
backend=backend,
200203
block_allocation=block_allocation,
201204
init_function=init_function,
202-
command_line_argument_lst=command_line_argument_lst,
203-
pmi=pmi,
204-
nested_flux_executor=nested_flux_executor,
205205
refresh_rate=refresh_rate,
206206
plot_dependency_graph=plot_dependency_graph,
207207
)
@@ -210,20 +210,20 @@ def __new__(
210210
_check_refresh_rate(refresh_rate=refresh_rate)
211211
return create_executor(
212212
max_workers=max_workers,
213+
backend=backend,
213214
max_cores=max_cores,
214215
cores_per_worker=cores_per_worker,
215216
threads_per_core=threads_per_core,
216217
gpus_per_worker=gpus_per_worker,
217-
oversubscribe=oversubscribe,
218218
cwd=cwd,
219+
openmpi_oversubscribe=openmpi_oversubscribe,
220+
slurm_cmd_args=slurm_cmd_args,
221+
flux_executor=flux_executor,
222+
flux_executor_pmi_mode=flux_executor_pmi_mode,
223+
flux_executor_nesting=flux_executor_nesting,
219224
conda_environment_name=conda_environment_name,
220225
conda_environment_path=conda_environment_path,
221-
executor=executor,
222226
hostname_localhost=hostname_localhost,
223-
backend=backend,
224227
block_allocation=block_allocation,
225228
init_function=init_function,
226-
command_line_argument_lst=command_line_argument_lst,
227-
pmi=pmi,
228-
nested_flux_executor=nested_flux_executor,
229229
)

0 commit comments

Comments
 (0)