Remove redundant code for unsupported Python versions (#49486)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/49486

Remove code for Python 3.5 and lower. There's more that can be removed/modernised, but sticking mainly to redundant version checks here, to keep the diff/PR smaller.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/46579
Reviewed By: zou3519
Differential Revision: D24453571
Pulled By: ezyang
fbshipit-source-id: c2cfcf05d6c5f65df64d89c331692c9aec09248e
Committed by: Facebook GitHub Bot
Parent: 09eb468398
Commit: 473e78c0fa
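The recurring pattern deleted throughout this diff is the runtime version guard. As a minimal illustrative sketch (not a line from this diff), the cleanup turns code of this shape:

    import sys

    # Before: branch on the interpreter version to paper over a stdlib gap.
    if sys.version_info >= (3, 3):
        import faulthandler  # in the standard library since Python 3.3
    else:
        faulthandler = None  # would have needed the PyPI backport

into an unconditional import, since Python >= 3.6 is now assumed everywhere:

    import faulthandler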
@@ -9,11 +9,6 @@ pip install -q hypothesis "librosa>=0.6.2" "numba<=0.49.1" psutil
 # TODO move this to docker
 pip install unittest-xml-reporting pytest
 
-# faulthandler become built-in since 3.3
-if [[ ! $(python -c "import sys; print(int(sys.version_info >= (3, 3)))") == "1" ]]; then
-  pip install -q faulthandler
-fi
-
 if [ -z "${IN_CI}" ]; then
   rm -rf ${WORKSPACE_DIR}/miniconda3/lib/python3.6/site-packages/torch*
 fi
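faulthandler has shipped with CPython since 3.3, so the branch removed above could never install anything on a supported interpreter. A short sketch of the standard-library module these test scripts rely on (illustrative, not part of this diff):

    import faulthandler
    import sys

    # Dump Python tracebacks of every thread on a fatal signal
    # (SIGSEGV, SIGFPE, SIGABRT, SIGBUS, SIGILL).
    faulthandler.enable(file=sys.stderr, all_threads=True)

    # Tracebacks can also be dumped on demand:
    faulthandler.dump_traceback(file=sys.stderr, all_threads=True)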
@@ -41,8 +41,6 @@ popd
 :: The version is fixed to avoid flakiness: https://github.com/pytorch/pytorch/issues/31136
 pip install "ninja==1.10.0.post1" future "hypothesis==4.53.2" "librosa>=0.6.2" psutil pillow unittest-xml-reporting pytest coverage
 if %errorlevel% neq 0 ( exit /b %errorlevel% )
-:: No need to install faulthandler since we only test Python >= 3.6 on Windows
-:: faulthandler is builtin since Python 3.3
 
 set DISTUTILS_USE_SDK=1
 
@@ -1,8 +0,0 @@
-from six import PY2, PY3
-
-if PY2:
-    import collections
-    container_abcs = collections
-elif PY3:
-    import collections.abc
-    container_abcs = collections.abc
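With this compatibility shim deleted, callers import collections.abc directly, which is exactly what the remaining hunks in this commit do. A minimal sketch of the replacement pattern, assuming the "wrap a single item" idiom used below:

    import collections.abc

    def ensure_iterable(ops):
        # A lone op becomes a one-element list; any iterable passes through.
        if not isinstance(ops, collections.abc.Iterable):
            ops = [ops]
        return list(ops)

    assert ensure_iterable(1) == [1]
    assert ensure_iterable((1, 2)) == [1, 2]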
@@ -17,7 +17,6 @@ import itertools
 # system protobuf.
 import onnx.backend
 from caffe2.python import core, workspace, rnn_cell, gru_cell
-from caffe2.python.compatibility import container_abcs
 from caffe2.python.model_helper import ModelHelper
 from caffe2.proto import caffe2_pb2
 import caffe2.python.utils
@@ -771,7 +770,7 @@ class Caffe2Backend(Backend):
         ops = translator(init_model, pred_model, OnnxNode(node_def), opset_version)
         if isinstance(ops, Caffe2Ops):
             return ops
-        if not isinstance(ops, container_abcs.Iterable):
+        if not isinstance(ops, collections.abc.Iterable):
             ops = [ops]
         return Caffe2Ops(ops, [], [])
 
@@ -10,13 +10,12 @@ To run this, you will need to have Caffe2 installed as well.
 
 
 
 import collections
 import itertools
 import logging
 import re
 
 from caffe2.python import core as caffe2_core
-from caffe2.python.compatibility import container_abcs
 from onnx import (checker, helper, numpy_helper, mapping,
                   GraphProto, NodeProto, TensorProto, OperatorSetIdProto)
 from onnx.helper import make_tensor_value_info, make_model
@@ -153,7 +152,7 @@ class Caffe2Frontend(object):
         const_tensors = []
         if isinstance(nodes, tuple):
             nodes, const_tensors = nodes
-        if not isinstance(nodes, container_abcs.Iterable):
+        if not isinstance(nodes, collections.abc.Iterable):
             nodes = [nodes]
         return nodes, const_tensors
 
@@ -232,7 +232,6 @@ class TensorFeeder : public BlobFeederBase {
       for (int i = 0; i < tensor.numel(); ++i) {
         char* str;
         Py_ssize_t strSize;
-#if PY_MAJOR_VERSION > 2
         if (PyBytes_Check(input[i])) {
           CAFFE_ENFORCE(
               PyBytes_AsStringAndSize(input[i], &str, &strSize) != -1,
@@ -246,11 +245,6 @@ class TensorFeeder : public BlobFeederBase {
         } else {
           CAFFE_THROW("Unsupported python object type passed into ndarray.");
         }
-#else
-        CAFFE_ENFORCE(
-            PyBytes_AsStringAndSize(input[i], &str, &strSize) != -1,
-            "Unsupported python object type passed into ndarray.");
-#endif // PY_MAJOR_VERSION > 2
         outPtr[i] = std::string(str, strSize);
       }
       break;
@@ -342,18 +336,12 @@ class PythonOpBase : public Operator<Context> {
     try {
       builder_call = loads(py::bytes(pickled)).cast<py::tuple>();
     } catch (const py::error_already_set& e) {
-#if PY_MAJOR_VERSION >= 3
       LOG(INFO) << "Cannot unpickle python operator: " << e.what();
       LOG(INFO) << "Try latin1 encoding for python3 run";
       // to use the `_a` literal for arguments
       using namespace pybind11::literals;
       builder_call = loads(py::bytes(pickled), "encoding"_a = "latin1")
                          .template cast<py::tuple>();
-#else
-      // for py2, simply re-throw the exception, as there is no encoding
-      // argument for pickle.loads
-      throw;
-#endif
     }
     CAFFE_ENFORCE(builder_call);
     CAFFE_ENFORCE_EQ(py::len(builder_call), 3);
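The surviving latin1 retry exists because pickles written by Python 2 can contain 8-bit str payloads that Python 3 refuses to decode by default. A hedged Python-level sketch of the same fallback (the C++ above drives pickle through pybind11; this is only an analogy):

    import pickle

    def loads_with_py2_fallback(payload: bytes):
        try:
            return pickle.loads(payload)
        except UnicodeDecodeError:
            # latin1 maps all 256 byte values, so Python 2 byte strings
            # round-trip without raising.
            return pickle.loads(payload, encoding="latin1")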
@@ -6,12 +6,12 @@
 
 
 from caffe2.proto import caffe2_pb2
-from caffe2.python.compatibility import container_abcs
 from future.utils import viewitems
 from google.protobuf.message import DecodeError, Message
 from google.protobuf import text_format
 
 import sys
+import collections
 import copy
 import functools
 import numpy as np
@@ -126,7 +126,7 @@ def MakeArgument(key, value):
     """Makes an argument based on the value type."""
     argument = caffe2_pb2.Argument()
     argument.name = key
-    iterable = isinstance(value, container_abcs.Iterable)
+    iterable = isinstance(value, collections.abc.Iterable)
 
     # Fast tracking common use case where a float32 array of tensor parameters
     # needs to be serialized. The entire array is guaranteed to have the same
@@ -135,11 +135,6 @@ if [ -z "${INSTALL_SETUPTOOLS}" ]; then
   pip install -U pip setuptools!=38.5.2
 fi
 
-# tornado 5.0 requires Python 2.7.9+ or 3.4+
-if [[ $($PYTHON -c 'import sys; print(int(sys.version_info <= (2, 7, 9) or sys.version_info <= (3, 4)))' == 1) ]]; then
-  pip install 'tornado<5'
-fi
-
 # Need networkx 2.0 because bellmand_ford was moved in 2.1 . Scikit-image by
 # defaults installs the most recent networkx version, so we install this lower
 # version explicitly before scikit-image pulls it in as a dependency
@@ -3,6 +3,7 @@ import sys
 import errno
 import os
 import ctypes
+import faulthandler
 import torch
 import gc
 import time
@@ -34,18 +35,6 @@ except ImportError:
     else:
         warnings.warn(err_msg)
 
-try:
-    import faulthandler
-    HAS_FAULTHANDLER = True
-except ImportError:
-    HAS_FAULTHANDLER = False
-    err_msg = ("faulthandler not found. Some data loader tests use it for error "
-               "reporting (e.g., TestDataLoader.test_proper_exit).")
-    if IS_PYTORCH_CI:
-        raise ImportError(err_msg) from None
-    else:
-        warnings.warn(err_msg)
-
 
 # load_tests from torch.testing._internal.common_utils is used to automatically filter tests for
 # sharding on sandcastle. This line silences flake warnings
@@ -86,9 +75,7 @@ if not NO_MULTIPROCESSING_SPAWN:
 JOIN_TIMEOUT = 60.0  # seconds
 
 
-supported_multiprocessing_contexts = [None]
-if torch.multiprocessing._supports_context:
-    supported_multiprocessing_contexts += list(torch.multiprocessing.get_all_start_methods())
+supported_multiprocessing_contexts = [None] + list(torch.multiprocessing.get_all_start_methods())
 
 
 @unittest.skipIf(
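get_all_start_methods reports what the host platform actually supports, so the list no longer needs a version gate. Illustrative values (they vary by OS):

    import torch.multiprocessing as mp

    # Typically ['fork', 'spawn', 'forkserver'] on Linux, ['spawn'] on Windows.
    print(mp.get_all_start_methods())

    supported_multiprocessing_contexts = [None] + list(mp.get_all_start_methods())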
@@ -312,29 +299,25 @@ class TestConcatDataset(TestCase):
 
 # takes in dummy var so this can also be used as a `worker_init_fn`
 def set_faulthander_if_available(_=None):
-    if HAS_FAULTHANDLER:
-        faulthandler.enable(sys.__stderr__)
-        if not IS_WINDOWS:
-            # windows does not have faulthandler.register
-            # chain=False prevents the default behavior of killing the process
-            faulthandler.register(signal.SIGUSR1, file=sys.__stderr__, chain=False)
+    faulthandler.enable(sys.__stderr__)
+    if not IS_WINDOWS:
+        # windows does not have faulthandler.register
+        # chain=False prevents the default behavior of killing the process
+        faulthandler.register(signal.SIGUSR1, file=sys.__stderr__, chain=False)
 
 set_faulthander_if_available()
 
 # Process `pid` must have called `set_faulthander_if_available`
 def print_traces_of_all_threads(pid):
-    if HAS_FAULTHANDLER:
-        if not IS_WINDOWS:
-            # use the custom signal if available
-            os.kill(pid, signal.SIGUSR1)
-        else:
-            # otherwise we can still use the handler given by faulthandler.enable()
-            # at the cost of killing the process.
-            os.kill(pid, signal.SIGSEGV)
+    if not IS_WINDOWS:
+        # use the custom signal if available
+        os.kill(pid, signal.SIGUSR1)
     else:
-        # if there is no faulthandler, use SIGINT otherwise and hope for the best
-        os.kill(pid, signal.SIGINT)
+        # otherwise we can still use the handler given by faulthandler.enable()
+        # at the cost of killing the process.
+        os.kill(pid, signal.SIGSEGV)
 
     # wait in parent process to give subprocess some time to print
     time.sleep(5)
@@ -1037,17 +1020,13 @@ except RuntimeError as e:
                                     "batch_size=None option disables auto-batching and is mutually exclusive"):
             self._get_data_loader(self.dataset, batch_size=None, drop_last=True)
 
-        if torch.multiprocessing._supports_context:
-            valid_ctx = list(torch.multiprocessing.get_all_start_methods())[-1]
-            with self.assertRaisesRegex(ValueError, r"multi-process loading \(num_workers > 0\), but got"):
-                self._get_data_loader(self.dataset, num_workers=0, multiprocessing_context=valid_ctx)
-            with self.assertRaisesRegex(ValueError, "should specify a valid start method in"):
-                self._get_data_loader(self.dataset, num_workers=1, multiprocessing_context='bad')
-            with self.assertRaisesRegex(TypeError, "multiprocessing_context option should be a valid context "):
-                self._get_data_loader(self.dataset, num_workers=1, multiprocessing_context=object())
-        else:
-            with self.assertRaisesRegex(ValueError, "multiprocessing_context relies on Python >= 3.4"):
-                self._get_data_loader(self.dataset, num_workers=1, multiprocessing_context='fork')
+        valid_ctx = list(torch.multiprocessing.get_all_start_methods())[-1]
+        with self.assertRaisesRegex(ValueError, r"multi-process loading \(num_workers > 0\), but got"):
+            self._get_data_loader(self.dataset, num_workers=0, multiprocessing_context=valid_ctx)
+        with self.assertRaisesRegex(ValueError, "should specify a valid start method in"):
+            self._get_data_loader(self.dataset, num_workers=1, multiprocessing_context='bad')
+        with self.assertRaisesRegex(TypeError, "multiprocessing_context option should be a valid context "):
+            self._get_data_loader(self.dataset, num_workers=1, multiprocessing_context=object())
 
         # map-style
         sampler = torch.utils.data.SequentialSampler(self.dataset)
@@ -1504,7 +1483,7 @@ except RuntimeError as e:
     def test_sampler(self):
         self._test_sampler()
         self._test_sampler(num_workers=4)
-        if not NO_MULTIPROCESSING_SPAWN and torch.multiprocessing._supports_context:
+        if not NO_MULTIPROCESSING_SPAWN:
            self._test_sampler(num_workers=4, multiprocessing_context='spawn')
 
     def _test_batch_sampler(self, **kwargs):
@@ -1529,7 +1508,7 @@ except RuntimeError as e:
     def test_batch_sampler(self):
         self._test_batch_sampler()
         self._test_batch_sampler(num_workers=4)
-        if not NO_MULTIPROCESSING_SPAWN and torch.multiprocessing._supports_context:
+        if not NO_MULTIPROCESSING_SPAWN:
            self._test_batch_sampler(num_workers=4, multiprocessing_context='spawn')
 
     @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@@ -4,7 +4,6 @@ from test_jit import *
 
 if __name__ == '__main__':
     run_tests()
-    if not PY2:
-        import test_jit_py3
-        suite = unittest.findTestCases(test_jit_py3)
-        unittest.TextTestRunner().run(suite)
+    import test_jit_py3
+    suite = unittest.findTestCases(test_jit_py3)
+    unittest.TextTestRunner().run(suite)
@@ -4,7 +4,6 @@ from test_jit import *
 
 if __name__ == '__main__':
     run_tests()
-    if not PY2:
-        import test_jit_py3
-        suite = unittest.findTestCases(test_jit_py3)
-        unittest.TextTestRunner().run(suite)
+    import test_jit_py3
+    suite = unittest.findTestCases(test_jit_py3)
+    unittest.TextTestRunner().run(suite)
@@ -1,5 +1,3 @@
-
-
 def import_module(name, path):
     import importlib.util
     spec = importlib.util.spec_from_file_location(name, path)
@@ -33,7 +33,6 @@ int_classes = int
 FileNotFoundError = builtins.FileNotFoundError
 StringIO = io.StringIO
 container_abcs = collections.abc
-PY3 = sys.version_info[0] == 3
 PY37 = sys.version_info[0] == 3 and sys.version_info[1] >= 7
 
 def with_metaclass(meta: type, *bases) -> type:
@@ -63,20 +63,5 @@ __PySlice_Unpack(PyObject *_r,
   (PySlice_Unpack(SLICE, START, STOP, STEP) == 0)
 #endif
 
-// https://bugsfiles.kde.org/attachment.cgi?id=61186
-#if PY_VERSION_HEX >= 0x03020000
 #define THPUtils_parseSlice(SLICE, LEN, START, STOP, LENGTH, STEP) \
   (PySlice_GetIndicesEx(SLICE, LEN, START, STOP, LENGTH, STEP) == 0)
-#else
-#define THPUtils_parseSlice(SLICE, LEN, START, STOP, LENGTH, STEP) \
-  (PySlice_GetIndicesEx((PySliceObject*)SLICE, LEN, START, STOP, LENGTH, STEP) == 0)
-#endif
-
-// This function was introduced in Python 3.4
-#if PY_VERSION_HEX < 0x03040000
-inline int
-PyGILState_Check() {
-  PyThreadState * tstate = _PyThreadState_Current;
-  return tstate && (tstate == PyGILState_GetThisThreadState());
-}
-#endif
@@ -23,11 +23,7 @@ inline bool isTuple(pybind11::handle input) {
   if (PyTuple_Check(input.ptr())) {
     return true;
   }
-#if PY_MAJOR_VERSION == 2
-  return isStructSeq(input);
-#else
   return false;
-#endif
 }
 
 inline bool isTuple(PyObject* obj) {
@@ -40,12 +36,8 @@ inline bool isTuple(PyObject* obj) {
 // But on Python 2, structseq is not a subtype of tuple, so we need to manually create a
 // new tuple object from structseq.
 inline THPObjectPtr maybeAsTuple(PyStructSequence *obj) {
-#if PY_MAJOR_VERSION == 2
-  return THPObjectPtr(torch::utils::structseq_slice(obj, 0, Py_SIZE(obj)));
-#else
   Py_INCREF(obj);
   return THPObjectPtr((PyObject *)obj);
-#endif
 }
 
 inline THPObjectPtr maybeAsTuple(PyObject *obj) {
@@ -153,15 +153,9 @@ def _lazy_init():
     # immediately, while we are still guaranteed to have the GIL, because some
     # of the C calls we make below will release the GIL
     if _is_in_bad_fork():
-        from sys import version_info
-        if version_info < (3, 4):
-            msg = ("To use CUDA with multiprocessing, you must use Python "
-                   "3.4+ and the 'spawn' start method")
-        else:
-            msg = ("To use CUDA with multiprocessing, you must use the "
-                   "'spawn' start method")
         raise RuntimeError(
-            "Cannot re-initialize CUDA in forked subprocess. " + msg)
+            "Cannot re-initialize CUDA in forked subprocess. To use CUDA with "
+            "multiprocessing, you must use the 'spawn' start method")
     if not hasattr(torch._C, '_cuda_getDeviceCount'):
         raise AssertionError("Torch not compiled with CUDA enabled")
     if _cudart is None:
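The shortened message states the one requirement that remains: a fork()ed child cannot re-initialize CUDA, so CUDA work in subprocesses must use the 'spawn' start method. A minimal sketch, assuming a CUDA-capable machine:

    import torch
    import torch.multiprocessing as mp

    def worker(rank):
        # Each spawned child initializes CUDA from scratch, so this is safe.
        print(rank, torch.zeros(1, device="cuda"))

    if __name__ == "__main__":
        # Forking after the parent has touched CUDA would raise the
        # RuntimeError shown in the hunk above; spawn avoids it.
        mp.spawn(worker, nprocs=2)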
@@ -35,7 +35,7 @@ torch._C._multiprocessing_init()
 
 """Add helper function to spawn N processes and wait for completion of any of
 them. This depends `mp.get_context` which was added in Python 3.4."""
-from .spawn import spawn, SpawnContext, _supports_context, start_processes, ProcessContext, \
+from .spawn import spawn, SpawnContext, start_processes, ProcessContext, \
     ProcessRaisedException, ProcessExitedException
 
@@ -66,24 +66,8 @@ def _wrap(fn, i, args, error_queue):
         sys.exit(1)
 
 
-# Multiprocessing contexts are introduced at Python 3.4
-_supports_context = sys.version_info >= (3, 4)
-
-
-def _python_version_check():
-    if not _supports_context:
-        raise RuntimeError("Requires python 3.4 or higher to use "
-                           "torch.multiprocessing.spawn and "
-                           "torch.multiprocessing.ProcessContext helper "
-                           "to launch multiple processes. If you are using "
-                           "this for distributed training and have a lower "
-                           "version of python, please use "
-                           "torch.distributed.launch instead.")
-
-
 class ProcessContext:
     def __init__(self, processes, error_queues):
-        _python_version_check()
         self.error_queues = error_queues
         self.processes = processes
         self.sentinels = {
@@ -182,7 +166,6 @@ class SpawnContext(ProcessContext):
 # Currently we only add this API first, we can consider adding it to documentation as
 # needed in the future.
 def start_processes(fn, args=(), nprocs=1, join=True, daemon=False, start_method='spawn'):
-    _python_version_check()
     mp = multiprocessing.get_context(start_method)
     error_queues = []
     processes = []
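_python_version_check could be deleted outright because multiprocessing.get_context, the call start_processes builds on, is always present on Python >= 3.6. A stdlib-only sketch of what that call provides:

    import multiprocessing

    def _echo(q):
        q.put("hello from child")

    if __name__ == "__main__":
        # get_context returns a context bound to one start method without
        # changing the global default.
        ctx = multiprocessing.get_context("spawn")
        q = ctx.SimpleQueue()
        p = ctx.Process(target=_echo, args=(q,))
        p.start()
        print(q.get())
        p.join()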
@@ -192,7 +192,7 @@ def storage_to_tensor_type(storage):
 
 def _is_path(name_or_buffer):
     return isinstance(name_or_buffer, str) or \
-        (sys.version_info[0] == 3 and isinstance(name_or_buffer, pathlib.Path))
+        isinstance(name_or_buffer, pathlib.Path)
 
 
 class _opener(object):
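_is_path now treats pathlib.Path as a path unconditionally; on Python 3 the version test was always true. Illustrative usage with a hypothetical file name:

    import pathlib
    import torch

    path = pathlib.Path("checkpoint.pt")  # hypothetical file name
    torch.save({"step": 1}, path)         # Path objects count as paths
    state = torch.load(path)              # for both saving and loading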
@@ -4866,7 +4866,7 @@ class ModuleTest(TestBase):
 
         if self.should_test_pickle:
             # TODO: do this with in-memory files as soon as torch.save will support it
-            with TemporaryFile() as f:
+            with tempfile.TemporaryFile() as f:
                 test_case._forward(module, input)
                 torch.save(module, f)
                 f.seek(0)
@@ -308,10 +308,6 @@ class DataLoader(Generic[T_co]):
     def multiprocessing_context(self, multiprocessing_context):
         if multiprocessing_context is not None:
             if self.num_workers > 0:
-                if not multiprocessing._supports_context:
-                    raise ValueError('multiprocessing_context relies on Python >= 3.4, with '
-                                     'support for different start methods')
-
                 if isinstance(multiprocessing_context, string_classes):
                     valid_start_methods = multiprocessing.get_all_start_methods()
                     if multiprocessing_context not in valid_start_methods:
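The remaining validation only checks that the requested start method exists on the platform; the Python-version error is gone. A usage sketch with a toy dataset (illustrative):

    import torch
    from torch.utils.data import DataLoader, TensorDataset

    if __name__ == "__main__":
        dataset = TensorDataset(torch.arange(8, dtype=torch.float32))
        # Accepts a start-method name ('fork', 'spawn', 'forkserver') or a
        # multiprocessing context object; meaningful only with num_workers > 0.
        loader = DataLoader(dataset, batch_size=2, num_workers=2,
                            multiprocessing_context="spawn")
        for (batch,) in loader:
            print(batch)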