mirror of
https://github.com/pytorch/pytorch.git
synced 2025-10-20 21:14:14 +08:00
Add torch.multiprocessing.spawn helper (#13518)
Summary: This helper addresses a common pattern where one spawns N processes to work on some common task (e.g. parallel preprocessing or multiple training loops). A straightforward approach is to use the multiprocessing API directly and then consecutively call join on the resulting processes. This pattern breaks down in the face of errors. If one of the processes terminates with an exception or via some signal, and it is not the first process that was launched, the join call on the first process won't be affected. This helper seeks to solve this by waiting on termination from any of the spawned processes. When any process terminates with a non-zero exit status, it terminates the remaining processes, and raises an exception in the parent process. If the process terminated with an exception, it is propagated to the parent. If the process terminated via a signal (e.g. SIGINT, SIGSEGV), this is mentioned in the exception as well. Requires Python >= 3.4. Pull Request resolved: https://github.com/pytorch/pytorch/pull/13518 Reviewed By: orionr Differential Revision: D12929045 Pulled By: pietern fbshipit-source-id: 00df19fa16a568d1e22f37a2ba65677ab0cce3fd
This commit is contained in:
committed by
Facebook Github Bot
parent
056f2cd238
commit
be424de869
@ -30,6 +30,7 @@ TESTS = [
|
||||
'indexing',
|
||||
'jit',
|
||||
'multiprocessing',
|
||||
'multiprocessing_spawn',
|
||||
'nccl',
|
||||
'nn',
|
||||
'numba_integration',
|
||||
|
123
test/test_multiprocessing_spawn.py
Normal file
123
test/test_multiprocessing_spawn.py
Normal file
@ -0,0 +1,123 @@
|
||||
from __future__ import absolute_import, division, print_function, unicode_literals
|
||||
|
||||
import os
|
||||
import random
|
||||
import signal
|
||||
import sys
|
||||
import time
|
||||
import unittest
|
||||
|
||||
from common_utils import (TestCase, run_tests, IS_WINDOWS, NO_MULTIPROCESSING_SPAWN)
|
||||
import torch.multiprocessing as mp
|
||||
|
||||
|
||||
def test_success_func(i):
|
||||
pass
|
||||
|
||||
|
||||
def test_success_single_arg_func(i, arg):
|
||||
if arg:
|
||||
arg.put(i)
|
||||
|
||||
|
||||
def test_exception_single_func(i, arg):
|
||||
if i == arg:
|
||||
raise ValueError("legitimate exception from process %d" % i)
|
||||
time.sleep(1.0)
|
||||
|
||||
|
||||
def test_exception_all_func(i):
|
||||
time.sleep(random.random() / 10)
|
||||
raise ValueError("legitimate exception from process %d" % i)
|
||||
|
||||
|
||||
def test_terminate_signal_func(i):
    """Worker 0 sends itself SIGABRT; remaining workers linger.

    The sleep keeps the non-aborting workers alive long enough for the
    parent to observe the signal-caused death of worker 0 first.
    """
    if i == 0:
        os.kill(os.getpid(), signal.SIGABRT)
    time.sleep(1.0)
|
||||
|
||||
|
||||
def test_terminate_exit_func(i, arg):
|
||||
if i == 0:
|
||||
sys.exit(arg)
|
||||
time.sleep(1.0)
|
||||
|
||||
|
||||
def test_success_first_then_exception_func(i, arg):
|
||||
if i == 0:
|
||||
return
|
||||
time.sleep(0.1)
|
||||
raise ValueError("legitimate exception")
|
||||
|
||||
|
||||
@unittest.skipIf(
    NO_MULTIPROCESSING_SPAWN,
    "Disabled for environments that don't support the spawn start method")
class SpawnTest(TestCase):
    """End-to-end tests for ``torch.multiprocessing.spawn``."""

    def test_success(self):
        # Two no-op workers must spawn and join without raising.
        mp.spawn(test_success_func, nprocs=2)

    def test_success_non_blocking(self):
        context = mp.spawn(test_success_func, nprocs=2, join=False)

        # join() may reap only one process per call; once all (nprocs=2)
        # workers have been joined it must return True.
        context.join(timeout=None)
        context.join(timeout=None)
        self.assertTrue(context.join(timeout=None))

    def test_first_argument_index(self):
        ctx = mp.get_context("spawn")
        result_queue = ctx.SimpleQueue()
        mp.spawn(test_success_single_arg_func, args=(result_queue,), nprocs=2)
        self.assertEqual([0, 1], sorted([result_queue.get(), result_queue.get()]))

    def test_exception_single(self):
        nprocs = 2
        for rank in range(nprocs):
            expected = "\nValueError: legitimate exception from process %d$" % rank
            with self.assertRaisesRegex(Exception, expected):
                mp.spawn(test_exception_single_func, args=(rank,), nprocs=nprocs)

    def test_exception_all(self):
        expected = "\nValueError: legitimate exception from process (0|1)$"
        with self.assertRaisesRegex(Exception, expected):
            mp.spawn(test_exception_all_func, nprocs=2)

    def test_terminate_signal(self):
        # SIGABRT is aliased with SIGIOT
        message = "process 0 terminated with signal (SIGABRT|SIGIOT)"

        # Termination via a signal is expressed as a negative exit code in
        # multiprocessing, which is how we know a signal caused the exit.
        # Windows has no negative exit codes, so the resulting exception
        # message differs there; exit code 22 means "ERROR_BAD_COMMAND".
        if IS_WINDOWS:
            message = "process 0 terminated with exit code 22"

        with self.assertRaisesRegex(Exception, message):
            mp.spawn(test_terminate_signal_func, nprocs=2)

    def test_terminate_exit(self):
        exitcode = 123
        expected = "process 0 terminated with exit code %d" % exitcode
        with self.assertRaisesRegex(Exception, expected):
            mp.spawn(test_terminate_exit_func, args=(exitcode,), nprocs=2)

    def test_success_first_then_exception(self):
        exitcode = 123
        with self.assertRaisesRegex(Exception, "ValueError: legitimate exception"):
            mp.spawn(test_success_first_then_exception_func, args=(exitcode,), nprocs=2)
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Entry point when executed as a script: delegate to common_utils.run_tests.
    run_tests()
|
@ -34,6 +34,12 @@ if sys.version_info < (3, 3):
|
||||
from .pool import Pool


# The `spawn` helper launches N processes and waits for completion of any of
# them. It depends on `multiprocessing.get_context`, which was added in
# Python 3.4, so only expose it on supporting interpreters.
#
# NOTE: the original code placed this text in a bare triple-quoted string
# inside the `if` block; that is a no-op expression statement, not a
# docstring, so it is now a real comment.
if sys.version_info >= (3, 4):
    from .spawn import spawn


if sys.platform == 'darwin' or sys.platform == 'win32':
    _sharing_strategy = 'file_system'
    _all_sharing_strategies = {'file_system'}
||||
|
138
torch/multiprocessing/spawn.py
Normal file
138
torch/multiprocessing/spawn.py
Normal file
@ -0,0 +1,138 @@
|
||||
from __future__ import absolute_import, division, print_function, unicode_literals
|
||||
|
||||
import multiprocessing
|
||||
import multiprocessing.connection
|
||||
import signal
|
||||
import sys
|
||||
|
||||
|
||||
def _wrap(fn, i, args, error_queue):
|
||||
try:
|
||||
fn(i, *args)
|
||||
except KeyboardInterrupt:
|
||||
pass # SIGINT; Killed by parent, do nothing
|
||||
except Exception:
|
||||
# Propagate exception to parent process, keeping original traceback
|
||||
import traceback
|
||||
error_queue.put(traceback.format_exc())
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
class SpawnContext:
    """Tracks a set of processes started by :func:`spawn`, along with one
    error queue per process, and joins them while propagating failures.
    """

    def __init__(self, processes, error_queues):
        # One SimpleQueue per process; a child pushes its formatted traceback
        # here before exiting non-zero (see ``_wrap``).
        self.error_queues = error_queues
        self.processes = processes
        # Map each process' sentinel (a handle that becomes ready when the
        # process terminates) back to that process' index, so we can tell
        # which process finished when waiting on the sentinels.
        self.sentinels = {
            process.sentinel: index
            for index, process in enumerate(processes)
        }

    def join(self, timeout=None):
        r"""
        Tries to join one or more processes in this spawn context.
        If one of them exited with a non-zero exit status, this function
        kills the remaining processes and raises an exception with the cause
        of the first process exiting.

        Returns ``True`` if all processes have been joined successfully,
        ``False`` if there are more processes that need to be joined.

        Arguments:
            timeout (float): Wait this long before giving up on waiting.

        Raises:
            Exception: if any process exited non-zero, either carrying the
                child's traceback (when the child raised) or describing the
                signal / exit code (when the child crashed or exited).
        """
        # Ensure this function can be called even when we're done.
        if len(self.sentinels) == 0:
            return True

        # Wait for any process to fail or all of them to succeed. With
        # timeout=None this blocks until at least one sentinel is ready.
        ready = multiprocessing.connection.wait(
            self.sentinels.keys(),
            timeout=timeout,
        )

        # Join every process whose sentinel fired, stopping at the first
        # one that exited non-zero. Popping from self.sentinels marks the
        # process as joined for subsequent calls.
        error_index = None
        for sentinel in ready:
            index = self.sentinels.pop(sentinel)
            process = self.processes[index]
            process.join()
            if process.exitcode != 0:
                error_index = index
                break

        # Return if there was no error.
        if error_index is None:
            # Return whether or not all processes have been joined.
            return len(self.sentinels) == 0

        # Assume failure. Terminate processes that are still alive.
        for process in self.processes:
            if process.is_alive():
                process.terminate()
            process.join()

        # There won't be an error on the queue if the process crashed
        # (e.g. was killed by a signal) rather than raising an exception.
        if self.error_queues[error_index].empty():
            exitcode = self.processes[error_index].exitcode
            if exitcode < 0:
                # A negative exit code from multiprocessing means the
                # process was terminated by that signal number.
                name = signal.Signals(-exitcode).name
                raise Exception(
                    "process %d terminated with signal %s" %
                    (error_index, name)
                )
            else:
                raise Exception(
                    "process %d terminated with exit code %d" %
                    (error_index, exitcode)
                )

        # The child raised: re-raise in the parent with the original
        # traceback text that ``_wrap`` placed on the error queue.
        original_trace = self.error_queues[error_index].get()
        msg = "\n\n-- Process %d terminated with the following error:\n" % error_index
        msg += original_trace
        raise Exception(msg)
|
||||
|
||||
|
||||
def spawn(fn, args=(), nprocs=1, join=True):
    r"""Spawns ``nprocs`` processes that run ``fn`` with ``args``.

    If one of the processes exits with a non-zero exit status, the
    remaining processes are killed and an exception is raised with the
    cause of termination. In the case an exception was caught in the
    child process, it is forwarded and its traceback is included in
    the exception raised in the parent process.

    Arguments:
        fn (function): Function is called as the entrypoint of the
            spawned process. This function must be defined at the top
            level of a module so it can be pickled and spawned. This
            is a requirement imposed by multiprocessing.

            The function is called as ``fn(i, *args)``, where ``i`` is
            the process index and ``args`` is the passed through tuple
            of arguments.

        args (tuple): Arguments passed to ``fn``.
        nprocs (int): Number of processes to spawn.
        join (bool): Perform a blocking join on all processes.

    Returns:
        A :class:`SpawnContext` when ``join`` is ``False``; otherwise
        ``None`` once all processes have exited successfully.
    """
    # Children are always started with the 'spawn' method so fn/args are
    # pickled rather than inherited via fork.
    ctx = multiprocessing.get_context('spawn')

    error_queues = []
    processes = []
    for rank in range(nprocs):
        # Each worker gets a private queue through which _wrap forwards
        # the traceback of any exception raised in the child.
        worker_error_queue = ctx.SimpleQueue()
        worker = ctx.Process(
            target=_wrap,
            args=(fn, rank, args, worker_error_queue),
            daemon=True,
        )
        worker.start()
        error_queues.append(worker_error_queue)
        processes.append(worker)

    context = SpawnContext(processes, error_queues)
    if not join:
        return context

    # Block here until join() reports all processes reaped, or it raises
    # an exception describing the first failure.
    while True:
        if context.join():
            break
|
Reference in New Issue
Block a user