diff --git a/executorlib/executor/flux.py b/executorlib/executor/flux.py
index 7afde9d9..b3aecf78 100644
--- a/executorlib/executor/flux.py
+++ b/executorlib/executor/flux.py
@@ -29,7 +29,7 @@ class FluxJobExecutor(BaseExecutor):
         max_workers (int): for backwards compatibility with the standard library, max_workers also defines the
                            number of cores which can be used in parallel - just like the max_cores parameter. Using
                            max_cores is recommended, as computers have a limited number of compute cores.
-        cache_directory (str, optional): The directory to store cache files. Defaults to "cache".
+        cache_directory (str, optional): The directory to store cache files. Defaults to "executorlib_cache".
         max_cores (int): defines the number cores which can be used in parallel
         resource_dict (dict): A dictionary of resources required by the task. With the following keys:
                               - cores (int): number of MPI cores to be used for each function call
@@ -114,7 +114,7 @@ def __init__(
             max_workers (int): for backwards compatibility with the standard library, max_workers also defines the
                                number of cores which can be used in parallel - just like the max_cores parameter. Using
                                max_cores is recommended, as computers have a limited number of compute cores.
-            cache_directory (str, optional): The directory to store cache files. Defaults to "cache".
+            cache_directory (str, optional): The directory to store cache files. Defaults to "executorlib_cache".
             max_cores (int): defines the number cores which can be used in parallel
             resource_dict (dict): A dictionary of resources required by the task. With the following keys:
                                   - cores (int): number of MPI cores to be used for each function call
@@ -218,7 +218,7 @@ class FluxClusterExecutor(BaseExecutor):
         max_workers (int): for backwards compatibility with the standard library, max_workers also defines the
                            number of cores which can be used in parallel - just like the max_cores parameter. Using
                            max_cores is recommended, as computers have a limited number of compute cores.
-        cache_directory (str, optional): The directory to store cache files. Defaults to "cache".
+        cache_directory (str, optional): The directory to store cache files. Defaults to "executorlib_cache".
         max_cores (int): defines the number cores which can be used in parallel
         resource_dict (dict): A dictionary of resources required by the task. With the following keys:
                               - cores (int): number of MPI cores to be used for each function call
@@ -294,7 +294,7 @@ def __init__(
             max_workers (int): for backwards compatibility with the standard library, max_workers also defines the
                                number of cores which can be used in parallel - just like the max_cores parameter. Using
                                max_cores is recommended, as computers have a limited number of compute cores.
-            cache_directory (str, optional): The directory to store cache files. Defaults to "cache".
+            cache_directory (str, optional): The directory to store cache files. Defaults to "executorlib_cache".
             max_cores (int): defines the number cores which can be used in parallel
             resource_dict (dict): A dictionary of resources required by the task. With the following keys:
                                   - cores (int): number of MPI cores to be used for each function call
@@ -409,7 +409,7 @@ def create_flux_executor(
                            number of cores which can be used in parallel - just like the max_cores parameter. Using
                            max_cores is recommended, as computers have a limited number of compute cores.
         max_cores (int): defines the number cores which can be used in parallel
-        cache_directory (str, optional): The directory to store cache files. Defaults to "cache".
+        cache_directory (str, optional): The directory to store cache files. Defaults to "executorlib_cache".
         resource_dict (dict): A dictionary of resources required by the task. With the following keys:
                               - cores (int): number of MPI cores to be used for each function call
                               - threads_per_core (int): number of OpenMP threads to be used for each function call
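
The cache directory is a plain constructor argument on both Flux executors, so the rename is only visible to callers that rely on the documented default. A minimal usage sketch, assuming a running Flux broker and that FluxJobExecutor is exported at the package root (as in current executorlib releases):

    from executorlib import FluxJobExecutor

    # Passing cache_directory explicitly pins the location regardless of the
    # default; omitting it falls back to the documented "executorlib_cache".
    with FluxJobExecutor(max_cores=2, cache_directory="executorlib_cache") as exe:
        future = exe.submit(sum, [1, 1])
        print(future.result())  # -> 2
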
diff --git a/executorlib/executor/single.py b/executorlib/executor/single.py
index 3ddb03ec..5293cad2 100644
--- a/executorlib/executor/single.py
+++ b/executorlib/executor/single.py
@@ -29,7 +29,7 @@ class SingleNodeExecutor(BaseExecutor):
         max_workers (int): for backwards compatibility with the standard library, max_workers also defines the
                            number of cores which can be used in parallel - just like the max_cores parameter. Using
                            max_cores is recommended, as computers have a limited number of compute cores.
-        cache_directory (str, optional): The directory to store cache files. Defaults to "cache".
+        cache_directory (str, optional): The directory to store cache files. Defaults to "executorlib_cache".
         max_cores (int): defines the number cores which can be used in parallel
         resource_dict (dict): A dictionary of resources required by the task. With the following keys:
                               - cores (int): number of MPI cores to be used for each function call
@@ -104,7 +104,7 @@ def __init__(
             max_workers (int): for backwards compatibility with the standard library, max_workers also defines the
                                number of cores which can be used in parallel - just like the max_cores parameter. Using
                                max_cores is recommended, as computers have a limited number of compute cores.
-            cache_directory (str, optional): The directory to store cache files. Defaults to "cache".
+            cache_directory (str, optional): The directory to store cache files. Defaults to "executorlib_cache".
             max_cores (int): defines the number cores which can be used in parallel
             resource_dict (dict): A dictionary of resources required by the task. With the following keys:
                                   - cores (int): number of MPI cores to be used for each function call
@@ -202,7 +202,7 @@ def create_single_node_executor(
                            number of cores which can be used in parallel - just like the max_cores parameter. Using
                            max_cores is recommended, as computers have a limited number of compute cores.
         max_cores (int): defines the number cores which can be used in parallel
-        cache_directory (str, optional): The directory to store cache files. Defaults to "cache".
+        cache_directory (str, optional): The directory to store cache files. Defaults to "executorlib_cache".
         resource_dict (dict): A dictionary of resources required by the task. With the following keys:
                               - cores (int): number of MPI cores to be used for each function call
                               - threads_per_core (int): number of OpenMP threads to be used for each function call
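
The same parameter enables on-disk result caching in the single-node case: resubmitting an identical task against the same cache_directory can be served from the stored HDF5 entry instead of being recomputed. A sketch mirroring test_singlenodeexecutor_cache.py, assuming h5py is installed (the cache tests are skipped without it):

    import os
    from executorlib import SingleNodeExecutor

    cache_directory = os.path.abspath("executorlib_cache")
    with SingleNodeExecutor(cache_directory=cache_directory) as exe:
        future_lst = [exe.submit(sum, [i, i]) for i in range(1, 4)]
        print([f.result() for f in future_lst])  # -> [2, 4, 6]

    # The renamed directory now holds the cache entries written by the run.
    print(sorted(os.listdir(cache_directory)))
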
diff --git a/executorlib/executor/slurm.py b/executorlib/executor/slurm.py
index 48450014..505fe915 100644
--- a/executorlib/executor/slurm.py
+++ b/executorlib/executor/slurm.py
@@ -30,7 +30,7 @@ class SlurmClusterExecutor(BaseExecutor):
         max_workers (int): for backwards compatibility with the standard library, max_workers also defines the
                            number of cores which can be used in parallel - just like the max_cores parameter. Using
                            max_cores is recommended, as computers have a limited number of compute cores.
-        cache_directory (str, optional): The directory to store cache files. Defaults to "cache".
+        cache_directory (str, optional): The directory to store cache files. Defaults to "executorlib_cache".
         max_cores (int): defines the number cores which can be used in parallel
         resource_dict (dict): A dictionary of resources required by the task. With the following keys:
                               - cores (int): number of MPI cores to be used for each function call
@@ -106,7 +106,7 @@ def __init__(
             max_workers (int): for backwards compatibility with the standard library, max_workers also defines the
                                number of cores which can be used in parallel - just like the max_cores parameter. Using
                                max_cores is recommended, as computers have a limited number of compute cores.
-            cache_directory (str, optional): The directory to store cache files. Defaults to "cache".
+            cache_directory (str, optional): The directory to store cache files. Defaults to "executorlib_cache".
             max_cores (int): defines the number cores which can be used in parallel
             resource_dict (dict): A dictionary of resources required by the task. With the following keys:
                                   - cores (int): number of MPI cores to be used for each function call
@@ -207,7 +207,7 @@ class SlurmJobExecutor(BaseExecutor):
         max_workers (int): for backwards compatibility with the standard library, max_workers also defines the
                            number of cores which can be used in parallel - just like the max_cores parameter. Using
                            max_cores is recommended, as computers have a limited number of compute cores.
-        cache_directory (str, optional): The directory to store cache files. Defaults to "cache".
+        cache_directory (str, optional): The directory to store cache files. Defaults to "executorlib_cache".
         max_cores (int): defines the number cores which can be used in parallel
         resource_dict (dict): A dictionary of resources required by the task. With the following keys:
                               - cores (int): number of MPI cores to be used for each function call
@@ -287,7 +287,7 @@ def __init__(
             max_workers (int): for backwards compatibility with the standard library, max_workers also defines the
                                number of cores which can be used in parallel - just like the max_cores parameter. Using
                                max_cores is recommended, as computers have a limited number of compute cores.
-            cache_directory (str, optional): The directory to store cache files. Defaults to "cache".
+            cache_directory (str, optional): The directory to store cache files. Defaults to "executorlib_cache".
             max_cores (int): defines the number cores which can be used in parallel
             resource_dict (dict): A dictionary of resources required by the task. With the following keys:
                                   - cores (int): number of MPI cores to be used for each function call
@@ -389,7 +389,7 @@ def create_slurm_executor(
                            number of cores which can be used in parallel - just like the max_cores parameter. Using
                            max_cores is recommended, as computers have a limited number of compute cores.
         max_cores (int): defines the number cores which can be used in parallel
-        cache_directory (str, optional): The directory to store cache files. Defaults to "cache".
+        cache_directory (str, optional): The directory to store cache files. Defaults to "executorlib_cache".
         resource_dict (dict): A dictionary of resources required by the task. With the following keys:
                               - cores (int): number of MPI cores to be used for each function call
                               - threads_per_core (int): number of OpenMP threads to be used for each function call
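
For the SLURM cluster variant the cache directory is also where submitted tasks are serialized before the queuing system picks them up, so it should sit on a file system the compute nodes can reach. A hedged sketch, assuming an sbatch-capable environment and the package-root import:

    from executorlib import SlurmClusterExecutor

    with SlurmClusterExecutor(
        resource_dict={"cores": 2},
        cache_directory="executorlib_cache",  # path visible to compute nodes
    ) as exe:
        future = exe.submit(sum, [2, 2])
        print(future.result())  # -> 4, once the queued job has run
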
diff --git a/executorlib/task_scheduler/file/task_scheduler.py b/executorlib/task_scheduler/file/task_scheduler.py
index 2a2ca099..74eaccdd 100644
--- a/executorlib/task_scheduler/file/task_scheduler.py
+++ b/executorlib/task_scheduler/file/task_scheduler.py
@@ -27,7 +27,7 @@ class FileTaskScheduler(TaskSchedulerBase):

     def __init__(
         self,
-        cache_directory: str = "cache",
+        cache_directory: str = "executorlib_cache",
         resource_dict: Optional[dict] = None,
         execute_function: Callable = execute_with_pysqa,
         terminate_function: Optional[Callable] = None,
@@ -39,7 +39,7 @@ def __init__(
         Initialize the FileExecutor.

         Args:
-            cache_directory (str, optional): The directory to store cache files. Defaults to "cache".
+            cache_directory (str, optional): The directory to store cache files. Defaults to "executorlib_cache".
             resource_dict (dict): A dictionary of resources required by the task. With the following keys:
                                   - cores (int): number of MPI cores to be used for each function call
                                   - cwd (str/None): current working directory where the parallel python task is executed
diff --git a/executorlib/task_scheduler/interactive/shared.py b/executorlib/task_scheduler/interactive/shared.py
index 3ed3bc28..03d0e0d4 100644
--- a/executorlib/task_scheduler/interactive/shared.py
+++ b/executorlib/task_scheduler/interactive/shared.py
@@ -41,7 +41,7 @@ def execute_tasks(
                                    this look up for security reasons. So on MacOS it is required to set this
                                    option to true
         init_function (Callable): optional function to preset arguments for functions which are submitted later
-        cache_directory (str, optional): The directory to store cache files. Defaults to "cache".
+        cache_directory (str, optional): The directory to store cache files. Defaults to "executorlib_cache".
         queue_join_on_shutdown (bool): Join communication queue when thread is closed. Defaults to True.
         log_obj_size (bool): Enable debug mode which reports the size of the communicated objects.
     """
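
FileTaskScheduler is the one spot where the default is an actual parameter value rather than docstring text, so callers that never pass cache_directory now write to ./executorlib_cache at runtime, and cleanup code keyed to the old name silently stops matching. A tolerant cleanup sketch covering both names (the directory names are the only contract here); the test tearDown methods below rely on the same ignore_errors idiom:

    import shutil

    # shutil.rmtree(..., ignore_errors=True) is a no-op for a missing path,
    # which removes the need for an os.path.exists() guard before deleting.
    for cache_dir in ("executorlib_cache", "cache"):
        shutil.rmtree(cache_dir, ignore_errors=True)
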
""" diff --git a/tests/test_cache_backend_execute.py b/tests/test_cache_backend_execute.py index f780070f..c1512068 100644 --- a/tests/test_cache_backend_execute.py +++ b/tests/test_cache_backend_execute.py @@ -28,7 +28,7 @@ def get_error(a): ) class TestSharedFunctions(unittest.TestCase): def test_execute_function_mixed(self): - cache_directory = os.path.abspath("cache") + cache_directory = os.path.abspath("executorlib_cache") os.makedirs(cache_directory, exist_ok=True) task_key, data_dict = serialize_funct_h5( fn=my_funct, @@ -56,7 +56,7 @@ def test_execute_function_mixed(self): self.assertEqual(future_file_obj.result(), 3) def test_execute_function_args(self): - cache_directory = os.path.abspath("cache") + cache_directory = os.path.abspath("executorlib_cache") os.makedirs(cache_directory, exist_ok=True) task_key, data_dict = serialize_funct_h5( fn=my_funct, @@ -84,7 +84,7 @@ def test_execute_function_args(self): self.assertEqual(future_file_obj.result(), 3) def test_execute_function_kwargs(self): - cache_directory = os.path.abspath("cache") + cache_directory = os.path.abspath("executorlib_cache") os.makedirs(cache_directory, exist_ok=True) task_key, data_dict = serialize_funct_h5( fn=my_funct, @@ -112,7 +112,7 @@ def test_execute_function_kwargs(self): self.assertEqual(future_file_obj.result(), 3) def test_execute_function_error(self): - cache_directory = os.path.abspath("cache") + cache_directory = os.path.abspath("executorlib_cache") os.makedirs(cache_directory, exist_ok=True) task_key, data_dict = serialize_funct_h5( fn=get_error, @@ -142,5 +142,4 @@ def test_execute_function_error(self): future_file_obj.result() def tearDown(self): - if os.path.exists("cache"): - shutil.rmtree("cache") + shutil.rmtree("executorlib_cache", ignore_errors=True) diff --git a/tests/test_cache_fileexecutor_mpi.py b/tests/test_cache_fileexecutor_mpi.py index d4a4f3a7..38f93a9c 100644 --- a/tests/test_cache_fileexecutor_mpi.py +++ b/tests/test_cache_fileexecutor_mpi.py @@ -40,5 +40,4 @@ def test_executor(self): self.assertTrue(fs1.done()) def tearDown(self): - if os.path.exists("cache"): - shutil.rmtree("cache") + shutil.rmtree("executorlib_cache", ignore_errors=True) diff --git a/tests/test_cache_fileexecutor_serial.py b/tests/test_cache_fileexecutor_serial.py index c28c858b..7978f04d 100644 --- a/tests/test_cache_fileexecutor_serial.py +++ b/tests/test_cache_fileexecutor_serial.py @@ -93,7 +93,7 @@ def test_executor_function(self): "resource_dict": {}, } ) - cache_dir = os.path.abspath("cache") + cache_dir = os.path.abspath("executorlib_cache") os.makedirs(cache_dir, exist_ok=True) process = Thread( target=execute_tasks_h5, @@ -134,7 +134,7 @@ def test_executor_function_dependence_kwargs(self): "resource_dict": {}, } ) - cache_dir = os.path.abspath("cache") + cache_dir = os.path.abspath("executorlib_cache") os.makedirs(cache_dir, exist_ok=True) process = Thread( target=execute_tasks_h5, @@ -175,7 +175,7 @@ def test_executor_function_dependence_args(self): "resource_dict": {}, } ) - cache_dir = os.path.abspath("cache") + cache_dir = os.path.abspath("executorlib_cache") os.makedirs(cache_dir, exist_ok=True) process = Thread( target=execute_tasks_h5, @@ -203,5 +203,4 @@ def test_execute_in_subprocess_errors(self): execute_in_subprocess(file_name=__file__, command=[], backend="flux") def tearDown(self): - if os.path.exists("cache"): - shutil.rmtree("cache") + shutil.rmtree("executorlib_cache", ignore_errors=True) diff --git a/tests/test_fluxclusterexecutor.py b/tests/test_fluxclusterexecutor.py index 
index 73deb661..dab9985e 100644
--- a/tests/test_fluxclusterexecutor.py
+++ b/tests/test_fluxclusterexecutor.py
@@ -33,9 +33,9 @@ def mpi_funct(i):
 class TestCacheExecutorPysqa(unittest.TestCase):
     def test_executor(self):
         with FluxClusterExecutor(
-            resource_dict={"cores": 2, "cwd": "cache"},
+            resource_dict={"cores": 2, "cwd": "executorlib_cache"},
             block_allocation=False,
-            cache_directory="cache",
+            cache_directory="executorlib_cache",
         ) as exe:
             cloudpickle_register(ind=1)
             fs1 = exe.submit(mpi_funct, 1)
@@ -44,5 +44,4 @@ def test_executor(self):
         self.assertTrue(fs1.done())

     def tearDown(self):
-        if os.path.exists("cache"):
-            shutil.rmtree("cache")
+        shutil.rmtree("executorlib_cache", ignore_errors=True)
diff --git a/tests/test_mpiexecspawner.py b/tests/test_mpiexecspawner.py
index a1d08cdc..47733916 100644
--- a/tests/test_mpiexecspawner.py
+++ b/tests/test_mpiexecspawner.py
@@ -503,7 +503,7 @@ def test_execute_task_parallel(self):

 class TestFuturePoolCache(unittest.TestCase):
     def tearDown(self):
-        shutil.rmtree("./cache")
+        shutil.rmtree("executorlib_cache", ignore_errors=True)

     @unittest.skipIf(
         skip_h5py_test, "h5py is not installed, so the h5py tests are skipped."
@@ -519,7 +519,7 @@ def test_execute_task_cache(self):
             cores=1,
             openmpi_oversubscribe=False,
             spawner=MpiExecSpawner,
-            cache_directory="./cache",
+            cache_directory="executorlib_cache",
         )
         self.assertEqual(f.result(), 1)
         q.join()
@@ -538,6 +538,6 @@ def test_execute_task_cache_failed_no_argument(self):
             cores=1,
             openmpi_oversubscribe=False,
             spawner=MpiExecSpawner,
-            cache_directory="./cache",
+            cache_directory="executorlib_cache",
         )
         q.join()
diff --git a/tests/test_singlenodeexecutor_cache.py b/tests/test_singlenodeexecutor_cache.py
index a88866e8..609d5a1f 100644
--- a/tests/test_singlenodeexecutor_cache.py
+++ b/tests/test_singlenodeexecutor_cache.py
@@ -22,7 +22,7 @@ def get_error(a):
 )
 class TestCacheFunctions(unittest.TestCase):
     def test_cache_data(self):
-        cache_directory = "./cache"
+        cache_directory = os.path.abspath("executorlib_cache")
         with SingleNodeExecutor(cache_directory=cache_directory) as exe:
             self.assertTrue(exe)
             future_lst = [exe.submit(sum, [i, i]) for i in range(1, 4)]
@@ -35,7 +35,7 @@ def test_cache_data(self):
         )

     def test_cache_error(self):
-        cache_directory = "./cache_error"
+        cache_directory = os.path.abspath("cache_error")
         with SingleNodeExecutor(cache_directory=cache_directory) as exe:
             self.assertTrue(exe)
             cloudpickle_register(ind=1)
@@ -44,7 +44,5 @@ def test_cache_error(self):
             print(f.result())

     def tearDown(self):
-        if os.path.exists("cache"):
-            shutil.rmtree("cache")
-        if os.path.exists("cache_error"):
-            shutil.rmtree("cache_error")
+        shutil.rmtree("executorlib_cache", ignore_errors=True)
+        shutil.rmtree("cache_error", ignore_errors=True)
diff --git a/tests/test_singlenodeexecutor_mpi.py b/tests/test_singlenodeexecutor_mpi.py
index ca7abe39..1c139af3 100644
--- a/tests/test_singlenodeexecutor_mpi.py
+++ b/tests/test_singlenodeexecutor_mpi.py
@@ -83,7 +83,7 @@ def test_errors(self):

 class TestExecutorBackendCache(unittest.TestCase):
     def tearDown(self):
-        shutil.rmtree("./cache")
+        shutil.rmtree("executorlib_cache", ignore_errors=True)

     @unittest.skipIf(
         skip_mpi4py_test, "mpi4py is not installed, so the mpi4py tests are skipped."
@@ -93,7 +93,7 @@ def test_meta_executor_parallel_cache(self):
             max_workers=2,
             resource_dict={"cores": 2},
             block_allocation=True,
-            cache_directory="./cache",
+            cache_directory="executorlib_cache",
         ) as exe:
             cloudpickle_register(ind=1)
             time_1 = time.time()
diff --git a/tests/test_standalone_hdf.py b/tests/test_standalone_hdf.py
index 4dc56168..addcce55 100644
--- a/tests/test_standalone_hdf.py
+++ b/tests/test_standalone_hdf.py
@@ -26,7 +26,7 @@ def my_funct(a, b):
 )
 class TestSharedFunctions(unittest.TestCase):
     def test_hdf_mixed(self):
-        cache_directory = os.path.abspath("cache")
+        cache_directory = os.path.abspath("executorlib_cache")
         os.makedirs(cache_directory, exist_ok=True)
         file_name = os.path.join(cache_directory, "test_mixed.h5")
         a = 1
@@ -46,7 +46,7 @@ def test_hdf_mixed(self):
         self.assertIsNone(output)

     def test_hdf_args(self):
-        cache_directory = os.path.abspath("cache")
+        cache_directory = os.path.abspath("executorlib_cache")
         os.makedirs(cache_directory, exist_ok=True)
         file_name = os.path.join(cache_directory, "test_args.h5")
         a = 1
@@ -63,7 +63,7 @@ def test_hdf_args(self):
         self.assertIsNone(output)

     def test_hdf_kwargs(self):
-        cache_directory = os.path.abspath("cache")
+        cache_directory = os.path.abspath("executorlib_cache")
         os.makedirs(cache_directory, exist_ok=True)
         file_name = os.path.join(cache_directory, "test_kwargs.h5")
         a = 1
@@ -89,7 +89,7 @@ def test_hdf_kwargs(self):
         self.assertIsNone(output)

     def test_hdf_queue_id(self):
-        cache_directory = os.path.abspath("cache")
+        cache_directory = os.path.abspath("executorlib_cache")
         os.makedirs(cache_directory, exist_ok=True)
         file_name = os.path.join(cache_directory, "test_queue.h5")
         queue_id = 123
@@ -105,7 +105,7 @@ def test_hdf_queue_id(self):
         self.assertIsNone(output)

     def test_hdf_error(self):
-        cache_directory = os.path.abspath("cache")
+        cache_directory = os.path.abspath("executorlib_cache")
         os.makedirs(cache_directory, exist_ok=True)
         file_name = os.path.join(cache_directory, "test_error.h5")
         error = ValueError()
@@ -120,5 +120,4 @@ def test_hdf_error(self):
         self.assertTrue(isinstance(output, error.__class__))

     def tearDown(self):
-        if os.path.exists("cache"):
-            shutil.rmtree("cache")
+        shutil.rmtree("executorlib_cache", ignore_errors=True)
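
The MPI-parallel cache test above translates into the following standalone sketch, assuming mpi4py plus a working MPI runtime: with resource_dict={"cores": 2} each submission runs on two ranks and the gathered per-rank results come back as a list.

    import shutil
    from executorlib import SingleNodeExecutor

    def mpi_funct(i):
        from mpi4py import MPI

        return i, MPI.COMM_WORLD.Get_size(), MPI.COMM_WORLD.Get_rank()

    with SingleNodeExecutor(
        max_workers=2,
        resource_dict={"cores": 2},
        block_allocation=True,
        cache_directory="executorlib_cache",
    ) as exe:
        # Expected shape: one (i, size, rank) tuple per rank,
        # e.g. [(1, 2, 0), (1, 2, 1)]
        print(exe.submit(mpi_funct, 1).result())

    # Same tolerant cleanup the test tearDown methods use.
    shutil.rmtree("executorlib_cache", ignore_errors=True)
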