     PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED, Future,
     BrokenExecutor)
 from concurrent.futures.process import BrokenProcessPool
-from multiprocessing import get_context
 
 import multiprocessing.process
 import multiprocessing.util
+import multiprocessing as mp
 
 
 if support.check_sanitizer(address=True, memory=True):
@@ -131,7 +131,6 @@ def setUp(self):
         self.executor = self.executor_type(
             max_workers=self.worker_count,
             **self.executor_kwargs)
-        self._prime_executor()
 
     def tearDown(self):
         self.executor.shutdown(wait=True)
@@ -145,15 +144,7 @@ def tearDown(self):
         super().tearDown()
 
     def get_context(self):
-        return get_context(self.ctx)
-
-    def _prime_executor(self):
-        # Make sure that the executor is ready to do work before running the
-        # tests. This should reduce the probability of timeouts in the tests.
-        futures = [self.executor.submit(time.sleep, 0.1)
-                   for _ in range(self.worker_count)]
-        for f in futures:
-            f.result()
+        return mp.get_context(self.ctx)
 
 
 class ThreadPoolMixin(ExecutorMixin):
@@ -261,9 +252,6 @@ def test_initializer(self):
         with self.assertRaises(BrokenExecutor):
             self.executor.submit(get_init_status)
 
-    def _prime_executor(self):
-        pass
-
     @contextlib.contextmanager
     def _assert_logged(self, msg):
         if self.log_queue is not None:
@@ -350,14 +338,14 @@ def test_hang_issue12364(self):
             f.result()
 
     def test_cancel_futures(self):
-        executor = self.executor_type(max_workers=3)
-        fs = [executor.submit(time.sleep, .1) for _ in range(50)]
-        executor.shutdown(cancel_futures=True)
+        assert self.worker_count <= 5, "test needs few workers"
+        fs = [self.executor.submit(time.sleep, .1) for _ in range(50)]
+        self.executor.shutdown(cancel_futures=True)
 
         # We can't guarantee the exact number of cancellations, but we can
-        # guarantee that *some* were cancelled. With setting max_workers to 3,
-        # most of the submitted futures should have been cancelled.
+        # guarantee that *some* were cancelled. With few workers, many of
+        # the submitted futures should have been cancelled.
         cancelled = [fut for fut in fs if fut.cancelled()]
-        self.assertTrue(len(cancelled) >= 35, msg=f"{len(cancelled)=}")
+        self.assertGreater(len(cancelled), 20)
 
         # Ensure the other futures were able to finish.
         # Use "not fut.cancelled()" instead of "fut.done()" to include futures
@@ -370,33 +358,32 @@ def test_cancel_futures(self):
         # Similar to the number of cancelled futures, we can't guarantee the
         # exact number that completed. But, we can guarantee that at least
         # one finished.
-        self.assertTrue(len(others) > 0, msg=f"{len(others)=}")
+        self.assertGreater(len(others), 0)
 
-    def test_hang_issue39205(self):
+    def test_hang_gh83386(self):
         """shutdown(wait=False) doesn't hang at exit with running futures.
 
-        See https://bugs.python.org/issue39205.
+        See https://github.com/python/cpython/issues/83386.
         """
         if self.executor_type == futures.ProcessPoolExecutor:
             raise unittest.SkipTest(
-                "Hangs due to https://bugs.python.org/issue39205")
+                "Hangs, see https://github.com/python/cpython/issues/83386")
 
         rc, out, err = assert_python_ok('-c', """if True:
             from concurrent.futures import {executor_type}
             from test.test_concurrent_futures import sleep_and_print
             if __name__ == "__main__":
+                if {context!r}: multiprocessing.set_start_method({context!r})
                 t = {executor_type}(max_workers=3)
                 t.submit(sleep_and_print, 1.0, "apple")
                 t.shutdown(wait=False)
-            """.format(executor_type=self.executor_type.__name__))
+            """.format(executor_type=self.executor_type.__name__,
+                       context=getattr(self, 'ctx', None)))
         self.assertFalse(err)
         self.assertEqual(out.strip(), b"apple")
 
 
 class ThreadPoolShutdownTest(ThreadPoolMixin, ExecutorShutdownTest, BaseTestCase):
-    def _prime_executor(self):
-        pass
-
     def test_threads_terminate(self):
         def acquire_lock(lock):
             lock.acquire()
@@ -491,14 +478,11 @@ def test_cancel_futures_wait_false(self):
 
 
 class ProcessPoolShutdownTest(ExecutorShutdownTest):
-    def _prime_executor(self):
-        pass
-
     def test_processes_terminate(self):
         def acquire_lock(lock):
             lock.acquire()
 
-        mp_context = get_context()
+        mp_context = self.get_context()
         sem = mp_context.Semaphore(0)
         for _ in range(3):
             self.executor.submit(acquire_lock, sem)
@@ -512,7 +496,8 @@ def acquire_lock(lock):
             p.join()
 
     def test_context_manager_shutdown(self):
-        with futures.ProcessPoolExecutor(max_workers=5) as e:
+        with futures.ProcessPoolExecutor(
+                max_workers=5, mp_context=self.get_context()) as e:
             processes = e._processes
             self.assertEqual(list(e.map(abs, range(-5, 5))),
                              [5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
@@ -521,7 +506,8 @@ def test_context_manager_shutdown(self):
             p.join()
 
     def test_del_shutdown(self):
-        executor = futures.ProcessPoolExecutor(max_workers=5)
+        executor = futures.ProcessPoolExecutor(
+            max_workers=5, mp_context=self.get_context())
         res = executor.map(abs, range(-5, 5))
         executor_manager_thread = executor._executor_manager_thread
         processes = executor._processes
@@ -544,7 +530,8 @@ def test_del_shutdown(self):
     def test_shutdown_no_wait(self):
         # Ensure that the executor cleans up the processes when calling
        # shutdown with wait=False
-        executor = futures.ProcessPoolExecutor(max_workers=5)
+        executor = futures.ProcessPoolExecutor(
+            max_workers=5, mp_context=self.get_context())
         res = executor.map(abs, range(-5, 5))
         processes = executor._processes
         call_queue = executor._call_queue
@@ -921,7 +908,7 @@ def submit(pool):
             pool.submit(submit, pool)
 
         for _ in range(50):
-            with futures.ProcessPoolExecutor(1, mp_context=get_context('fork')) as workers:
+            with futures.ProcessPoolExecutor(1, mp_context=mp.get_context('fork')) as workers:
                 workers.submit(tuple)
 
 
@@ -991,7 +978,7 @@ def test_traceback(self):
     def test_ressources_gced_in_workers(self):
         # Ensure that argument for a job are correctly gc-ed after the job
         # is finished
-        mgr = get_context(self.ctx).Manager()
+        mgr = self.get_context().Manager()
         obj = EventfulGCObj(mgr)
         future = self.executor.submit(id, obj)
         future.result()
@@ -1007,36 +994,37 @@ def test_ressources_gced_in_workers(self):
         mgr.join()
 
     def test_saturation(self):
-        executor = self.executor_type(4)
-        mp_context = get_context()
+        executor = self.executor
+        mp_context = self.get_context()
         sem = mp_context.Semaphore(0)
         job_count = 15 * executor._max_workers
-        try:
-            for _ in range(job_count):
-                executor.submit(sem.acquire)
-            self.assertEqual(len(executor._processes), executor._max_workers)
-            for _ in range(job_count):
-                sem.release()
-        finally:
-            executor.shutdown()
+        for _ in range(job_count):
+            executor.submit(sem.acquire)
+        self.assertEqual(len(executor._processes), executor._max_workers)
+        for _ in range(job_count):
+            sem.release()
 
     def test_idle_process_reuse_one(self):
-        executor = self.executor_type(4)
+        executor = self.executor
+        assert executor._max_workers >= 4
         executor.submit(mul, 21, 2).result()
         executor.submit(mul, 6, 7).result()
         executor.submit(mul, 3, 14).result()
         self.assertEqual(len(executor._processes), 1)
-        executor.shutdown()
 
     def test_idle_process_reuse_multiple(self):
-        executor = self.executor_type(4)
+        executor = self.executor
+        assert executor._max_workers <= 5
         executor.submit(mul, 12, 7).result()
         executor.submit(mul, 33, 25)
         executor.submit(mul, 25, 26).result()
         executor.submit(mul, 18, 29)
-        self.assertLessEqual(len(executor._processes), 2)
+        executor.submit(mul, 1, 2).result()
+        executor.submit(mul, 0, 9)
+        self.assertLessEqual(len(executor._processes), 3)
         executor.shutdown()
 
+
 create_executor_tests(ProcessPoolExecutorTest,
                       executor_mixins=(ProcessPoolForkMixin,
                                        ProcessPoolForkserverMixin,
@@ -1138,7 +1126,7 @@ def _check_crash(self, error, func, *args, ignore_stderr=False):
         self.executor.shutdown(wait=True)
 
         executor = self.executor_type(
-            max_workers=2, mp_context=get_context(self.ctx))
+            max_workers=2, mp_context=self.get_context())
         res = executor.submit(func, *args)
 
         if ignore_stderr:
@@ -1217,7 +1205,7 @@ def test_shutdown_deadlock(self):
         # if a worker fails after the shutdown call.
         self.executor.shutdown(wait=True)
         with self.executor_type(max_workers=2,
-                                mp_context=get_context(self.ctx)) as executor:
+                                mp_context=self.get_context()) as executor:
             self.executor = executor  # Allow clean up in fail_on_deadlock
             f = executor.submit(_crash, delay=.1)
             executor.shutdown(wait=True)
@@ -1230,7 +1218,7 @@ def test_shutdown_deadlock_pickle(self):
         # Reported in bpo-39104.
         self.executor.shutdown(wait=True)
         with self.executor_type(max_workers=2,
-                                mp_context=get_context(self.ctx)) as executor:
+                                mp_context=self.get_context()) as executor:
             self.executor = executor  # Allow clean up in fail_on_deadlock
 
             # Start the executor and get the executor_manager_thread to collect
0 commit comments