5050from concurrent .futures import _base
5151import queue
5252from queue import Full
53- import multiprocessing
54- from multiprocessing import SimpleQueue
53+ import multiprocessing as mp
5554from multiprocessing .connection import wait
5655import threading
5756import weakref
7473# threads/processes finish.
7574
# Maps each queue-management thread to its result queue.  A
# WeakKeyDictionary so a dead thread does not keep its queue alive;
# _python_exit (below) iterates this to wake every thread at
# interpreter shutdown.
_threads_queues = weakref.WeakKeyDictionary()
# Flipped to True by _python_exit; checked by the management threads
# (e.g. shutting_down() in _queue_management_worker) to stop work.
_global_shutdown = False
7877
7978def _python_exit ():
80- global _shutdown
81- _shutdown = True
79+ global _global_shutdown
80+ _global_shutdown = True
8281 items = list (_threads_queues .items ())
8382 for t , q in items :
8483 q .put (None )
@@ -158,12 +157,10 @@ def _process_worker(call_queue, result_queue):
158157 This worker is run in a separate process.
159158
160159 Args:
161- call_queue: A multiprocessing .Queue of _CallItems that will be read and
160+ call_queue: A ctx .Queue of _CallItems that will be read and
162161 evaluated by the worker.
163- result_queue: A multiprocessing .Queue of _ResultItems that will written
162+ result_queue: A ctx .Queue of _ResultItems that will written
164163 to by the worker.
165- shutdown: A multiprocessing.Event that will be set as a signal to the
166- worker that it should exit when call_queue is empty.
167164 """
168165 while True :
169166 call_item = call_queue .get (block = True )
@@ -180,6 +177,11 @@ def _process_worker(call_queue, result_queue):
180177 result_queue .put (_ResultItem (call_item .work_id ,
181178 result = r ))
182179
180+ # Liberate the resource as soon as possible, to avoid holding onto
181+ # open files or shared memory that is not needed anymore
182+ del call_item
183+
184+
183185def _add_call_item_to_queue (pending_work_items ,
184186 work_ids ,
185187 call_queue ):
@@ -231,20 +233,21 @@ def _queue_management_worker(executor_reference,
231233 executor_reference: A weakref.ref to the ProcessPoolExecutor that owns
232234 this thread. Used to determine if the ProcessPoolExecutor has been
233235 garbage collected and that this function can exit.
234- process: A list of the multiprocessing .Process instances used as
236+ process: A list of the ctx .Process instances used as
235237 workers.
236238 pending_work_items: A dict mapping work ids to _WorkItems e.g.
237239 {5: <_WorkItem...>, 6: <_WorkItem...>, ...}
238240 work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]).
239- call_queue: A multiprocessing .Queue that will be filled with _CallItems
241+ call_queue: A ctx .Queue that will be filled with _CallItems
240242 derived from _WorkItems for processing by the process workers.
241- result_queue: A multiprocessing.Queue of _ResultItems generated by the
243+ result_queue: A ctx.SimpleQueue of _ResultItems generated by the
242244 process workers.
243245 """
244246 executor = None
245247
246248 def shutting_down ():
247- return _shutdown or executor is None or executor ._shutdown_thread
249+ return (_global_shutdown or executor is None
250+ or executor ._shutdown_thread )
248251
249252 def shutdown_worker ():
250253 # This is an upper bound
@@ -254,7 +257,7 @@ def shutdown_worker():
254257 # Release the queue's resources as soon as possible.
255258 call_queue .close ()
256259 # If .join() is not called on the created processes then
257- # some multiprocessing .Queue methods may deadlock on Mac OS X.
260+ # some ctx .Queue methods may deadlock on Mac OS X.
258261 for p in processes .values ():
259262 p .join ()
260263
@@ -377,13 +380,15 @@ class BrokenProcessPool(RuntimeError):
377380
378381
379382class ProcessPoolExecutor (_base .Executor ):
380- def __init__ (self , max_workers = None ):
383+ def __init__ (self , max_workers = None , mp_context = None ):
381384 """Initializes a new ProcessPoolExecutor instance.
382385
383386 Args:
384387 max_workers: The maximum number of processes that can be used to
385388 execute the given calls. If None or not given then as many
386389 worker processes will be created as the machine has processors.
390+ mp_context: A multiprocessing context to launch the workers. This
391+ object should provide SimpleQueue, Queue and Process.
387392 """
388393 _check_system_limits ()
389394
@@ -394,17 +399,20 @@ def __init__(self, max_workers=None):
394399 raise ValueError ("max_workers must be greater than 0" )
395400
396401 self ._max_workers = max_workers
402+ if mp_context is None :
403+ mp_context = mp .get_context ()
404+ self ._mp_context = mp_context
397405
398406 # Make the call queue slightly larger than the number of processes to
399407 # prevent the worker processes from idling. But don't make it too big
400408 # because futures in the call queue cannot be cancelled.
401- self . _call_queue = multiprocessing . Queue ( self ._max_workers +
402- EXTRA_QUEUED_CALLS )
409+ queue_size = self ._max_workers + EXTRA_QUEUED_CALLS
410+ self . _call_queue = mp_context . Queue ( queue_size )
403411 # Killed worker processes can produce spurious "broken pipe"
404412 # tracebacks in the queue's own worker thread. But we detect killed
405413 # processes anyway, so silence the tracebacks.
406414 self ._call_queue ._ignore_epipe = True
407- self ._result_queue = SimpleQueue ()
415+ self ._result_queue = mp_context . SimpleQueue ()
408416 self ._work_ids = queue .Queue ()
409417 self ._queue_management_thread = None
410418 # Map of pids to processes
@@ -426,23 +434,23 @@ def weakref_cb(_, q=self._result_queue):
426434 # Start the processes so that their sentinels are known.
427435 self ._adjust_process_count ()
428436 self ._queue_management_thread = threading .Thread (
429- target = _queue_management_worker ,
430- args = (weakref .ref (self , weakref_cb ),
431- self ._processes ,
432- self ._pending_work_items ,
433- self ._work_ids ,
434- self ._call_queue ,
435- self ._result_queue ))
437+ target = _queue_management_worker ,
438+ args = (weakref .ref (self , weakref_cb ),
439+ self ._processes ,
440+ self ._pending_work_items ,
441+ self ._work_ids ,
442+ self ._call_queue ,
443+ self ._result_queue ))
436444 self ._queue_management_thread .daemon = True
437445 self ._queue_management_thread .start ()
438446 _threads_queues [self ._queue_management_thread ] = self ._result_queue
439447
440448 def _adjust_process_count (self ):
441449 for _ in range (len (self ._processes ), self ._max_workers ):
442- p = multiprocessing .Process (
443- target = _process_worker ,
444- args = (self ._call_queue ,
445- self ._result_queue ))
450+ p = self . _mp_context .Process (
451+ target = _process_worker ,
452+ args = (self ._call_queue ,
453+ self ._result_queue ))
446454 p .start ()
447455 self ._processes [p .pid ] = p
448456
0 commit comments