Mirror of https://github.com/pytorch/pytorch.git
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/18598
ghimport-source-id: c74597e5e7437e94a43c163cee0639b20d0d0c6a

Stack from [ghstack](https://github.com/ezyang/ghstack):
* **#18598 Turn on F401: Unused import warning.**

This was requested by someone at Facebook; this lint is turned on for Facebook by default. "Sure, why not."

I had to noqa a number of imports in __init__. Hypothetically we're supposed to use __all__ in this case, but I was too lazy to fix it. Left for future work.

Be careful! flake8-2 and flake8-3 behave differently with respect to import resolution for `# type:` comments: flake8-3 will report the import as unused; flake8-2 will not. For now, I just noqa'd all these sites.

All the changes were done by hand.

Signed-off-by: Edward Z. Yang <ezyang@fb.com>
Differential Revision: D14687478
fbshipit-source-id: 30d532381e914091aadfa0d2a5a89404819663e3
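As a minimal illustration of the `# type:` comment pitfall described above (the module and names here are hypothetical, not taken from the patch): flake8-3 does not resolve imports that are referenced only inside Python 2-style type comments, so without an explicit noqa it flags them as F401.

# Hypothetical illustration, not from the patch: this import is used only in
# the "# type:" comment below. flake8-2 resolves that use; flake8-3 does not,
# and reports "F401 'typing.List' imported but unused" unless noqa'd.
from typing import List  # noqa: F401


def head(xs):
    # type: (List[int]) -> int
    return xs[0]

For genuine re-exports in an `__init__.py`, listing the names in `__all__` is the lint-clean alternative the message alludes to.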
41 lines · 1.0 KiB · Python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import functools
import os
import unittest
import sys

import torch
import torch.autograd.function as function

# Make the top-level test directory (two levels up) importable so that
# common_utils can be found.
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(-1, pytorch_test_dir)

from common_utils import *  # noqa: F401

torch.set_default_tensor_type('torch.FloatTensor')


def _skipper(condition, reason):
    """Build a decorator that skips a test when ``condition()`` is truthy."""
    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            if condition():
                raise unittest.SkipTest(reason)
            return f(*args, **kwargs)
        return wrapper
    return decorator


skipIfNoCuda = _skipper(lambda: not torch.cuda.is_available(),
                        'CUDA is not available')

skipIfTravis = _skipper(lambda: os.getenv('TRAVIS'),
                        'Skip In Travis')


def flatten(x):
    # Recursively collect every torch.Tensor leaf of a (possibly nested)
    # list/tuple structure into a flat tuple.
    return tuple(function._iter_filter(lambda o: isinstance(o, torch.Tensor))(x))
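A short usage sketch of the helpers above (the test class is hypothetical, not part of this file; it assumes the module's names are in scope):

class TestHelpersExample(unittest.TestCase):
    @skipIfNoCuda
    def test_cuda_tensor(self):
        # Skipped with "CUDA is not available" on CPU-only machines.
        x = torch.ones(2, 2).cuda()
        self.assertTrue(x.is_cuda)

    def test_flatten(self):
        # flatten() pulls out only the tensor leaves from nested lists/tuples.
        nested = (torch.zeros(1), [torch.ones(2), torch.ones(3)])
        self.assertEqual(len(flatten(nested)), 3)

Note that flatten() relies on the private torch.autograd.function._iter_filter helper, which raises on leaf objects it does not recognize, so inputs should contain only tensors, None, and nested lists/tuples of those.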