WIP / TST: allow testing torch._numpy under Dynamo (#110401)

Use conditional imports: when running under Dynamo, import the original NumPy instead of torch._numpy, since the original is what we want to trace, not our implementation.

With this, the test suite passes both with and without `PYTORCH_TEST_WITH_DYNAMO=1` (modulo a couple of test modules which are not meant to be compiled, e.g. `test_nep50_examples`). There are two new decorators, `x{fail,pass}IfTorchDynamo`; in most cases an `xpass` indicates a graph break and a fallback to eager for things we do not implement.
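The conditional import pattern, as it appears in the updated test modules (representative excerpt; `TEST_WITH_TORCHDYNAMO` and the decorators live in torch.testing._internal.common_utils):

    from torch.testing._internal.common_utils import TEST_WITH_TORCHDYNAMO

    if TEST_WITH_TORCHDYNAMO:
        # under Dynamo, trace the real NumPy
        import numpy as np
        from numpy.testing import assert_array_equal
    else:
        # in eager mode, exercise our reimplementation
        import torch._numpy as np
        from torch._numpy.testing import assert_array_equal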

Pull Request resolved: https://github.com/pytorch/pytorch/pull/110401
Approved by: https://github.com/lezcano
Author: Evgeni Burovski
Date: 2023-10-25 16:02:16 +00:00
Committed by: PyTorch MergeBot
Parent: 6fd3659391
Commit: 5ed4a423de
36 changed files with 1200 additions and 648 deletions

View File

@@ -171,13 +171,6 @@ function install_torchrec_and_fbgemm() {
   pip_install --no-use-pep517 --user "git+https://github.com/pytorch/torchrec.git@${torchrec_commit}"
 }
 
-function install_numpy_pytorch_interop() {
-  local commit
-  commit=$(get_pinned_commit numpy_pytorch_interop)
-  # TODO: --no-use-pep517 will result in failure.
-  pip_install --user "git+https://github.com/Quansight-Labs/numpy_pytorch_interop.git@${commit}"
-}
-
 function clone_pytorch_xla() {
   if [[ ! -d ./xla ]]; then
     git clone --recursive --quiet https://github.com/pytorch/xla.git

View File

@@ -1061,12 +1061,10 @@ elif [[ "${TEST_CONFIG}" == *inductor* && "${SHARD_NUMBER}" == 1 ]]; then
 elif [[ "${TEST_CONFIG}" == *dynamo* && "${SHARD_NUMBER}" == 1 && $NUM_TEST_SHARDS -gt 1 ]]; then
   test_without_numpy
   install_torchvision
-  install_numpy_pytorch_interop
   test_dynamo_shard 1
   test_aten
 elif [[ "${TEST_CONFIG}" == *dynamo* && "${SHARD_NUMBER}" == 2 && $NUM_TEST_SHARDS -gt 1 ]]; then
   install_torchvision
-  install_numpy_pytorch_interop
   test_dynamo_shard 2
 elif [[ "${SHARD_NUMBER}" == 1 && $NUM_TEST_SHARDS -gt 1 ]]; then
   test_without_numpy

View File

@@ -1 +0,0 @@
-0c4e82511d349358d2c8c492dd833334e742f27f

View File

@@ -13,3 +13,5 @@ testpaths =
 junit_logging_reruns = all
 filterwarnings =
     ignore:Module already imported so cannot be rewritten.*hypothesis:pytest.PytestAssertRewriteWarning
+
+strict_xfail = True

View File

@@ -3,29 +3,41 @@
 import functools
 import sys
 
-from unittest import expectedFailure as xfail, skipIf as skipif
+from unittest import skipIf as skipif
 
+import numpy
 import pytest
 import torch
 
-import torch._numpy as np
-from torch._numpy.testing import assert_array_equal
 from torch.testing._internal.common_utils import (
     instantiate_parametrized_tests,
     parametrize,
     run_tests,
+    TEST_WITH_TORCHDYNAMO,
     TestCase,
+    xpassIfTorchDynamo,
 )
 
+if TEST_WITH_TORCHDYNAMO:
+    import numpy as np
+    from numpy.testing import assert_array_equal
+else:
+    import torch._numpy as np
+    from torch._numpy.testing import assert_array_equal
+
 skip = functools.partial(skipif, True)
 
 IS_PYPY = False
 
 
+@skipif(numpy.__version__ < "1.24", reason="numpy.dlpack is new in numpy 1.23")
 @instantiate_parametrized_tests
 class TestDLPack(TestCase):
-    @xfail  # (reason="pytorch seems to handle refcounts differently")
+    @xpassIfTorchDynamo  # (reason="pytorch seems to handle refcounts differently")
     @skipif(IS_PYPY, reason="PyPy can't get refcounts.")
     def test_dunder_dlpack_refcount(self):
         x = np.arange(5)
@@ -34,7 +46,7 @@ class TestDLPack(TestCase):
         del y
         assert sys.getrefcount(x) == 2
 
-    @xfail  # (reason="pytorch does not raise")
+    @xpassIfTorchDynamo  # (reason="pytorch does not raise")
     def test_dunder_dlpack_stream(self):
         x = np.arange(5)
         x.__dlpack__(stream=None)
@@ -42,7 +54,7 @@ class TestDLPack(TestCase):
         with pytest.raises(RuntimeError):
             x.__dlpack__(stream=1)
 
-    @xfail  # (reason="pytorch seems to handle refcounts differently")
+    @xpassIfTorchDynamo  # (reason="pytorch seems to handle refcounts differently")
     @skipif(IS_PYPY, reason="PyPy can't get refcounts.")
     def test_from_dlpack_refcount(self):
         x = np.arange(5)

View File

@@ -9,23 +9,31 @@ import types
 from itertools import permutations
 from typing import Any
 
-from unittest import expectedFailure as xfail, skipIf as skipif
+from unittest import skipIf as skipif
 
 import pytest
 
-import torch._numpy as np
 from pytest import raises as assert_raises
-from torch._numpy.testing import assert_, assert_equal
 from torch.testing._internal.common_utils import (
     instantiate_parametrized_tests,
     parametrize,
     run_tests,
     subtest,
+    TEST_WITH_TORCHDYNAMO,
     TestCase,
+    xpassIfTorchDynamo,
 )
 
 skip = functools.partial(skipif, True)
 
+if TEST_WITH_TORCHDYNAMO:
+    import numpy as np
+    from numpy.testing import assert_, assert_equal
+else:
+    import torch._numpy as np
+    from torch._numpy.testing import assert_, assert_equal
+
+import numpy
 
 
 def assert_dtype_equal(a, b):
     assert_equal(a, b)
@@ -102,6 +110,10 @@ class TestBuiltin(TestCase):
         with pytest.raises(TypeError):
             operation(np.dtype(np.int32), 7)
 
+    @skipif(
+        numpy.__version__ < "1.24",
+        reason="older numpies emit DeprecatioWarnings instead",
+    )
     @parametrize(
         "dtype",
         [
@@ -195,8 +207,8 @@ class TestPickling(TestCase):
     @parametrize(
         "DType",
         [
-            subtest(type(np.dtype(t)), name=f"{np.dtype(t).name}")
-            for t in np.typecodes["All"]
+            subtest(type(np.dtype(t)), name=f"{np.dtype(t).name}_{i}")
+            for i, t in enumerate(np.typecodes["All"])
         ]
         + [np.dtype],
     )
@@ -208,6 +220,7 @@ class TestPickling(TestCase):
 
 
 @skip(reason="XXX: value-based promotions, we don't have.")
+@instantiate_parametrized_tests
 class TestPromotion(TestCase):
     """Test cases related to more complex DType promotions. Further promotion
     tests are defined in `test_numeric.py`
@@ -218,10 +231,12 @@ class TestPromotion(TestCase):
         [
             (2**16 - 1, np.complex64, None),
             (2**32 - 1, np.complex128, np.complex64),
-            (np.float16(2), np.complex64, None),
-            (np.float32(2), np.complex64, None),
+            subtest((np.float16(2), np.complex64, None), name="float16_complex64_None"),
+            subtest((np.float32(2), np.complex64, None), name="float32_complex64_None"),
             # repeat for complex scalars:
-            (np.complex64(2), np.complex64, None),
+            subtest(
+                (np.complex64(2), np.complex64, None), name="complex64_complex64_None"
+            ),
         ],
     )
     def test_complex_other_value_based(
@@ -303,7 +318,7 @@ class TestMisc(TestCase):
         assert bool(np.dtype("f8"))
         assert bool(np.dtype("i8"))
 
-    @xfail  # (reason="No keyword arg for dtype ctor.")
+    @xpassIfTorchDynamo  # (reason="No keyword arg for dtype ctor.")
     def test_keyword_argument(self):
         # test for https://github.com/numpy/numpy/pull/16574#issuecomment-642660971
         assert np.dtype(dtype=np.float64) == np.dtype(np.float64)
@@ -343,6 +358,7 @@ class TestFromDTypeAttribute(TestCase):
 
 @skip(reason="Parameteric dtypes, our stuff is simpler.")
 @skipif(sys.version_info < (3, 9), reason="Requires python 3.9")
+@instantiate_parametrized_tests
 class TestClassGetItem(TestCase):
     def test_dtype(self) -> None:
         alias = np.dtype[Any]

View File

@@ -5,23 +5,41 @@ import itertools
 from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest
 
-import torch._numpy as np
+import numpy
 from pytest import raises as assert_raises
-from torch._numpy.testing import (
-    assert_,
-    assert_allclose,
-    assert_almost_equal,
-    assert_array_equal,
-    assert_equal,
-    suppress_warnings,
-)
 from torch.testing._internal.common_utils import (
     instantiate_parametrized_tests,
     parametrize,
     run_tests,
+    TEST_WITH_TORCHDYNAMO,
     TestCase,
+    xpassIfTorchDynamo,
 )
 
+if TEST_WITH_TORCHDYNAMO:
+    import numpy as np
+    from numpy.testing import (
+        assert_,
+        assert_allclose,
+        assert_almost_equal,
+        assert_array_equal,
+        assert_equal,
+        suppress_warnings,
+    )
+else:
+    import torch._numpy as np
+    from torch._numpy.testing import (
+        assert_,
+        assert_allclose,
+        assert_almost_equal,
+        assert_array_equal,
+        assert_equal,
+        suppress_warnings,
+    )
+
 skip = functools.partial(skipif, True)
@@ -745,15 +763,15 @@ class TestEinsum(TestCase):
             np.einsum("ij,i->", x, y, optimize=optimize), [2.0]
         )  # contig_stride0_outstride0_two
 
-    @xfail  # (reason="int overflow differs in numpy and pytorch")
+    @xpassIfTorchDynamo  # (reason="int overflow differs in numpy and pytorch")
     def test_einsum_sums_int8(self):
         self.check_einsum_sums("i1")
 
-    @xfail  # (reason="int overflow differs in numpy and pytorch")
+    @xpassIfTorchDynamo  # (reason="int overflow differs in numpy and pytorch")
     def test_einsum_sums_uint8(self):
         self.check_einsum_sums("u1")
 
-    @xfail  # (reason="int overflow differs in numpy and pytorch")
+    @xpassIfTorchDynamo  # (reason="int overflow differs in numpy and pytorch")
     def test_einsum_sums_int16(self):
         self.check_einsum_sums("i2")
@@ -764,7 +782,7 @@ class TestEinsum(TestCase):
     def test_einsum_sums_int64(self):
         self.check_einsum_sums("i8")
 
-    @xfail  # (reason="np.float16(4641) == 4640.0")
+    @xpassIfTorchDynamo  # (reason="np.float16(4641) == 4640.0")
     def test_einsum_sums_float16(self):
         self.check_einsum_sums("f2")
@@ -948,7 +966,7 @@ class TestEinsum(TestCase):
         y = tensor.trace(axis1=0, axis2=2).trace()
         assert_allclose(x, y)
 
-    @xfail  # (reason="no base")
+    @xpassIfTorchDynamo  # (reason="no base")
     def test_einsum_all_contig_non_contig_output(self):
         # Issue gh-5907, tests that the all contiguous special case
         # actually checks the contiguity of the output
@@ -972,7 +990,12 @@ class TestEinsum(TestCase):
         np.einsum("ij,jk->ik", x, x, out=out)
         assert_array_equal(out.base, correct_base)
 
-    @parametrize("dtype", np.typecodes["AllFloat"] + np.typecodes["AllInteger"])
+    @skipif(
+        numpy.__version__ < "1.23",
+        reason="https://github.com/numpy/numpy/issues/20305 is in NumPy 1.22",
+    )
+    # @parametrize("dtype", np.typecodes["AllFloat"] + np.typecodes["AllInteger"])
+    @parametrize("dtype", "efdFD" + "Bbhil")
     def test_different_paths(self, dtype):
         # Test originally added to cover broken float16 path: gh-20305
         # Likely most are covered elsewhere, at least partially.
@@ -1158,7 +1181,7 @@ class TestEinsum(TestCase):
         g = np.arange(64).reshape(2, 4, 8)
         self.optimize_compare("obk,ijk->ioj", operands=[g, g])
 
-    @xfail  # (reason="order='F' not supported")
+    @xpassIfTorchDynamo  # (reason="order='F' not supported")
     def test_output_order(self):
         # Ensure output order is respected for optimize cases, the below
         # conraction should yield a reshaped tensor view

View File

@@ -8,13 +8,27 @@ import warnings
 # from numpy.core.getlimits import _discovered_machar, _float_ma
-from unittest import expectedFailure as xfail, skipIf
-
-import torch._numpy as np
+from unittest import skipIf
+
+import numpy
 from pytest import raises as assert_raises
-from torch._numpy import double, finfo, half, iinfo, single
-from torch._numpy.testing import assert_, assert_equal
-from torch.testing._internal.common_utils import run_tests, TestCase
+from torch.testing._internal.common_utils import (
+    run_tests,
+    TEST_WITH_TORCHDYNAMO,
+    TestCase,
+    xpassIfTorchDynamo,
+)
+
+if TEST_WITH_TORCHDYNAMO:
+    import numpy as np
+    from numpy import double, finfo, half, iinfo, single
+    from numpy.testing import assert_, assert_equal
+else:
+    import torch._numpy as np
+    from torch._numpy import double, finfo, half, iinfo, single
+    from torch._numpy.testing import assert_, assert_equal
+
 skip = functools.partial(skipIf, True)
@@ -54,6 +68,7 @@ class TestDouble(TestCase):
 
 class TestFinfo(TestCase):
+    @skipIf(numpy.__version__ < "1.23", reason=".smallest_normal is new")
     def test_basic(self):
         dts = list(
             zip(
@@ -75,7 +90,7 @@ class TestFinfo(TestCase):
         with assert_raises((TypeError, ValueError)):
             finfo("i4")
 
-    @xfail  # (reason="These attributes are not implemented yet.")
+    @skip  # (reason="Some of these attributes are not implemented vs NP versions")
     def test_basic_missing(self):
         dt = np.float32
         for attr in [
@@ -125,6 +140,7 @@ class TestRepr(TestCase):
         expected = "iinfo(min=-32768, max=32767, dtype=int16)"
         assert_equal(repr(np.iinfo(np.int16)), expected)
 
+    @skipIf(TEST_WITH_TORCHDYNAMO, reason="repr differs")
     def test_finfo_repr(self):
         repr_f32 = repr(np.finfo(np.float32))
         assert "finfo(resolution=1e-06, min=-3.40282e+38," in repr_f32
@@ -186,7 +202,7 @@ class TestMisc(TestCase):
             # This test may fail on some platforms
             assert len(w) == 0
 
-    @xfail  # (reason="None of nmant, minexp, maxexp is implemented.")
+    @xpassIfTorchDynamo  # (reason="None of nmant, minexp, maxexp is implemented.")
    def test_plausible_finfo(self):
         # Assert that finfo returns reasonable results for all types
         for ftype in np.sctypes["float"] + np.sctypes["complex"]:

View File

@@ -12,22 +12,37 @@ from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest
 import pytest
 
-import torch._numpy as np
 from pytest import raises as assert_raises
-from torch._numpy.testing import (
-    assert_,
-    assert_array_equal,
-    assert_equal,
-    assert_warns,
-    HAS_REFCOUNT,
-)
 from torch.testing._internal.common_utils import (
     instantiate_parametrized_tests,
     parametrize,
     run_tests,
+    TEST_WITH_TORCHDYNAMO,
     TestCase,
+    xfailIfTorchDynamo,
+    xpassIfTorchDynamo,
 )
 
+if TEST_WITH_TORCHDYNAMO:
+    import numpy as np
+    from numpy.testing import (
+        assert_,
+        assert_array_equal,
+        assert_equal,
+        assert_warns,
+        HAS_REFCOUNT,
+    )
+else:
+    import torch._numpy as np
+    from torch._numpy.testing import (
+        assert_,
+        assert_array_equal,
+        assert_equal,
+        assert_warns,
+        HAS_REFCOUNT,
+    )
+
 skip = functools.partial(skipif, True)
@@ -122,15 +137,13 @@ class TestIndexing(TestCase):
         assert_equal(a[None], a[np.newaxis])
         assert_equal(a[None].ndim, a.ndim + 1)
 
+    @skip
     def test_empty_tuple_index(self):
         # Empty tuple index creates a view
         a = np.array([1, 2, 3])
         assert_equal(a[()], a)
         assert_(a[()].tensor._base is a.tensor)
 
         a = np.array(0)
-        raise SkipTest(
-            "torch doesn't have scalar types with distinct instancing behaviours"
-        )
         assert_(isinstance(a[()], np.int_))
 
     def test_same_kind_index_casting(self):
@@ -172,7 +185,6 @@ class TestIndexing(TestCase):
         assert_(a[...] is not a)
         assert_equal(a[...], a)
         # `a[...]` was `a` in numpy <1.9.
-        assert_(a[...].tensor._base is a.tensor)
 
         # Slicing with ellipsis can skip an
         # arbitrary number of dimensions
@@ -189,6 +201,14 @@ class TestIndexing(TestCase):
         b[(Ellipsis,)] = 2
         assert_equal(b, 2)
 
+    @xfailIfTorchDynamo  # numpy ndarrays do not have `.tensor` attribute
+    def test_ellipsis_index_2(self):
+        a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+        assert_(a[...] is not a)
+        assert_equal(a[...], a)
+        # `a[...]` was `a` in numpy <1.9.
+        assert_(a[...].tensor._base is a.tensor)
+
     def test_single_int_index(self):
         # Single integer index selects one row
         a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
@@ -233,6 +253,7 @@ class TestIndexing(TestCase):
         a[b] = 1.0
         assert_equal(a, [[1.0, 1.0, 1.0]])
 
+    @skip(reason="NP_VER: fails on CI")
     def test_boolean_assignment_value_mismatch(self):
         # A boolean assignment should fail when the shape of the values
         # cannot be broadcast to the subscription. (see also gh-3458)
@@ -400,7 +421,7 @@ class TestIndexing(TestCase):
         # Unlike the non nd-index:
         assert_(arr[index,].shape != (1,))
 
-    @xfail  # (reason="XXX: low-prio behaviour to support")
+    @xpassIfTorchDynamo  # (reason="XXX: low-prio behaviour to support")
     def test_broken_sequence_not_nd_index(self):
         # See https://github.com/numpy/numpy/issues/5063
         # If we have an object which claims to be a sequence, but fails
@@ -558,7 +579,7 @@ class TestBroadcastedAssignments(TestCase):
 
 class TestFancyIndexingCast(TestCase):
-    @xfail  # (
+    @xpassIfTorchDynamo  # (
     # reason="XXX: low-prio to support assigning complex values on floating arrays"
     # )
     def test_boolean_index_cast_assign(self):

View File

@@ -20,33 +20,57 @@ from decimal import Decimal
 from unittest import expectedFailure as xfail, skipIf as skipif
 
+import numpy
 import pytest
 
-import torch._numpy as np
 from pytest import raises as assert_raises
-from torch._numpy.testing import (
-    assert_,
-    assert_allclose,  # IS_PYPY, IS_PYSTON, HAS_REFCOUNT,
-    assert_almost_equal,
-    assert_array_almost_equal,
-    assert_array_equal,
-    assert_array_less,
-    assert_equal,
-    assert_raises_regex,
-    assert_warns,
-    # runstring, temppath,
-    suppress_warnings,  # break_cycles,
-)
 from torch.testing._internal.common_utils import (
     instantiate_parametrized_tests,
     parametrize,
     run_tests,
     slowTest as slow,
     subtest,
+    TEST_WITH_TORCHDYNAMO,
     TestCase,
+    xfailIfTorchDynamo,
+    xpassIfTorchDynamo,
 )
 
+# If we are going to trace through these, we should use NumPy
+# If testing on eager mode, we use torch._numpy
+if TEST_WITH_TORCHDYNAMO:
+    import numpy as np
+    from numpy.testing import (
+        assert_,
+        assert_allclose,  # IS_PYPY, IS_PYSTON, HAS_REFCOUNT,
+        assert_almost_equal,
+        assert_array_almost_equal,
+        assert_array_equal,
+        assert_array_less,
+        assert_equal,
+        assert_raises_regex,
+        assert_warns,
+        # runstring, temppath,
+        suppress_warnings,  # break_cycles,
+    )
+else:
+    import torch._numpy as np
+    from torch._numpy.testing import (
+        assert_,
+        assert_allclose,  # IS_PYPY, IS_PYSTON, HAS_REFCOUNT,
+        assert_almost_equal,
+        assert_array_almost_equal,
+        assert_array_equal,
+        assert_array_less,
+        assert_equal,
+        assert_raises_regex,
+        assert_warns,
+        # runstring, temppath,
+        suppress_warnings,  # break_cycles,
+    )
+
 skip = functools.partial(skipif, True)
 
 IS_PYPY = False
@@ -130,7 +154,7 @@ def _aligned_zeros(shape, dtype=float, order="C", align=None):
     return data
 
 
-@xfail  # (reason="TODO: flags")
+@xpassIfTorchDynamo  # (reason="TODO: flags")
 @instantiate_parametrized_tests
 class TestFlag(TestCase):
     def setUp(self):
@@ -250,6 +274,7 @@ class TestFlag(TestCase):
         assert a.__array_interface__["data"][1] is not writeable
         assert np.asarray(MyArr()).flags.writeable is writeable
 
+    @xpassIfTorchDynamo
     def test_otherflags(self):
         assert_equal(self.a.flags.carray, True)
         assert_equal(self.a.flags["C"], True)
@@ -276,7 +301,7 @@ class TestFlag(TestCase):
         assert_(a.flags.aligned)
 
 
-@xfail  # (reason="TODO: hash")
+@xpassIfTorchDynamo  # (reason="TODO: hash")
 class TestHash(TestCase):
     # see #3793
     def test_int(self):
@@ -314,7 +339,7 @@ class TestHash(TestCase):
         )
 
 
-@xfail  # (reason="TODO: hash")
+@xpassIfTorchDynamo  # (reason="TODO: hash")
 class TestAttributes(TestCase):
     def setUp(self):
         self.one = np.arange(10)
@@ -502,7 +527,7 @@ class TestArrayConstruction(TestCase):
         d[1] = 3
         assert_array_equal(e, [1, 3, 3])
 
-    @xfail  # (reason="order='F'")
+    @xpassIfTorchDynamo  # (reason="order='F'")
     def test_array_copy_false_2(self):
         d = np.array([1, 2, 3])
         e = np.array(d, copy=False, order="F")
@@ -528,6 +553,7 @@ class TestArrayConstruction(TestCase):
         assert_array_equal(e, [[1, 3, 7], [1, 2, 3]])
         assert_array_equal(d, [[1, 5, 3], [1, 2, 3]])
 
+    @xfailIfTorchDynamo
     def test_array_cont(self):
         d = np.ones(10)[::2]
         assert_(np.ascontiguousarray(d).flags.c_contiguous)
@@ -865,7 +891,7 @@ class TestScalarIndexing(TestCase):
         # this assersion fails because 50 > NPY_MAXDIMS = 32
         # assert_raises(IndexError, subscript, a, (np.newaxis,)*50)
 
-    @xfail  # (reason="pytorch disallows overlapping assignments")
+    @xpassIfTorchDynamo  # (reason="pytorch disallows overlapping assignments")
     def test_overlapping_assignment(self):
         # With positive strides
         a = np.arange(4)
@@ -1280,7 +1306,7 @@ class TestCreation(TestCase):
 
 class TestBool(TestCase):
-    @xfail  # (reason="bools not interned")
+    @xpassIfTorchDynamo  # (reason="bools not interned")
     def test_test_interning(self):
         a0 = np.bool_(0)
         b0 = np.bool_(False)
@@ -1297,7 +1323,7 @@ class TestBool(TestCase):
         assert_equal(d[::2].sum(), d[::2].size)
         # assert_equal(d[::-2].sum(), d[::-2].size)
 
-    @xfail  # (reason="frombuffer")
+    @xpassIfTorchDynamo  # (reason="frombuffer")
     def test_sum_2(self):
         d = np.frombuffer(b"\xff\xff" * 100, dtype=bool)
         assert_equal(d.sum(), d.size)
@@ -1377,7 +1403,7 @@ class TestBool(TestCase):
 
 class TestMethods(TestCase):
     sort_kinds = ["quicksort", "heapsort", "stable"]
 
-    @xfail  # (reason="all(..., where=...)")
+    @xpassIfTorchDynamo  # (reason="all(..., where=...)")
     def test_all_where(self):
         a = np.array([[True, False, True], [False, False, False], [True, True, True]])
         wh_full = np.array(
@@ -1397,7 +1423,7 @@ class TestMethods(TestCase):
         assert_equal(a.all(where=False), True)
         assert_equal(np.all(a, where=False), True)
 
-    @xfail  # (reason="any(..., where=...)")
+    @xpassIfTorchDynamo  # (reason="any(..., where=...)")
     def test_any_where(self):
         a = np.array([[True, False, True], [False, False, False], [True, True, True]])
         wh_full = np.array(
@@ -1418,7 +1444,7 @@ class TestMethods(TestCase):
         assert_equal(a.any(where=False), False)
         assert_equal(np.any(a, where=False), False)
 
-    @xfail  # (reason="TODO: compress")
+    @xpassIfTorchDynamo  # (reason="TODO: compress")
     def test_compress(self):
         tgt = [[5, 6, 7, 8, 9]]
         arr = np.arange(10).reshape(2, 5)
@@ -1459,7 +1485,7 @@ class TestMethods(TestCase):
         assert out is ret
         assert_equal(out[()], 20)
 
-    @xfail  # (reason="choose(..., mode=...) not implemented")
+    @xpassIfTorchDynamo  # (reason="choose(..., mode=...) not implemented")
     def test_choose_2(self):
         # gh-6272 check overlap on out
         x = np.arange(5)
@@ -1510,7 +1536,7 @@ class TestMethods(TestCase):
         A = m_rect.repeat(2, axis=1)
         assert_equal(A, [[1, 1, 2, 2, 3, 3], [4, 4, 5, 5, 6, 6]])
 
-    @xfail  # (reason="reshape(..., order='F')")
+    @xpassIfTorchDynamo  # (reason="reshape(..., order='F')")
     def test_reshape(self):
         arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
@@ -1569,7 +1595,7 @@ class TestMethods(TestCase):
             b = np.sort(a)
             assert_equal(b, np.flip(a), msg)
 
-    @xfail  # (reason="sort complex")
+    @xpassIfTorchDynamo  # (reason="sort complex")
     def test_sort_complex_nans(self):
         # check complex
         msg = "Test complex sort order with nans"
@@ -1614,7 +1640,7 @@ class TestMethods(TestCase):
             c.sort(kind=kind)
             assert_equal(c, a, msg)
 
-    @xfail  # (reason="sort complex")
+    @xpassIfTorchDynamo  # (reason="sort complex")
     @parametrize("dtype", [np.float32, np.float64])
     @parametrize("part", ["real", "imag"])
     def test_sort_complex(self, part, dtype):
@@ -1681,7 +1707,7 @@ class TestMethods(TestCase):
             assert_equal(np.sort(d), do)
             assert_equal(d[np.argsort(d)], do)
 
-    @xfail  # (reason="order='F'")
+    @xpassIfTorchDynamo  # (reason="order='F'")
     def test_copy(self):
         def assert_fortran(arr):
             assert_(arr.flags.fortran)
@@ -1763,7 +1789,7 @@ class TestMethods(TestCase):
             msg = f"byte-swapped complex argsort, dtype={dt}"
             assert_equal(arr.argsort(), np.arange(len(arr), dtype=np.intp), msg)
 
-    @xfail  # (reason="argsort axis TODO")
+    @xpassIfTorchDynamo  # (reason="argsort axis TODO")
     def test_argsort_axis(self):
         # check axis handling. This should be the same for all type
         # specific argsorts, so we only check it for one type and one kind
@@ -1800,13 +1826,13 @@ class TestMethods(TestCase):
         a = np.array(["aaaaaaaaa" for i in range(100)], dtype=np.unicode_)
         assert_equal(a.argsort(kind="m"), r)
 
-    @xfail  # (reason="TODO: searchsorted with nans differs in pytorch")
+    @xpassIfTorchDynamo  # (reason="TODO: searchsorted with nans differs in pytorch")
     @parametrize(
         "a",
         [
-            np.array([0, 1, np.nan], dtype=np.float16),
-            np.array([0, 1, np.nan], dtype=np.float32),
-            np.array([0, 1, np.nan]),
+            subtest(np.array([0, 1, np.nan], dtype=np.float16), name="f16"),
+            subtest(np.array([0, 1, np.nan], dtype=np.float32), name="f32"),
+            subtest(np.array([0, 1, np.nan]), name="default_dtype"),
         ],
     )
     def test_searchsorted_floats(self, a):
@@ -1825,7 +1851,7 @@ class TestMethods(TestCase):
         y = np.searchsorted(x, x[-1])
         assert_equal(y, 2)
 
-    @xfail  # (
+    @xpassIfTorchDynamo  # (
     # reason="'searchsorted_out_cpu' not implemented for 'ComplexDouble'"
     # )
     def test_searchsorted_complex(self):
@@ -1872,7 +1898,7 @@ class TestMethods(TestCase):
         b = a.searchsorted([0, 1, 2], "right")
         assert_equal(b, [0, 2, 2])
 
-    @xfail  # (
+    @xpassIfTorchDynamo  # (
    # reason="RuntimeError: self.storage_offset() must be divisible by 8"
    # )
     def test_searchsorted_unaligned_array(self):
@@ -1915,7 +1941,7 @@ class TestMethods(TestCase):
         b = a.searchsorted(a, "right")
         assert_equal(b, out + 1)
 
-    @xfail  # (reason="ndarray ctor")
+    @xpassIfTorchDynamo  # (reason="ndarray ctor")
     def test_searchsorted_type_specific_2(self):
         # Test all type specific binary search functions
         types = "".join((np.typecodes["AllInteger"], np.typecodes["AllFloat"], "?"))
@@ -1951,7 +1977,7 @@ class TestMethods(TestCase):
         # assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3])
         # assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3])
 
-    @xfail  # (reason="self.storage_offset() must be divisible by 8")
+    @xpassIfTorchDynamo  # (reason="self.storage_offset() must be divisible by 8")
     def test_searchsorted_with_sorter(self):
         a = np.random.rand(300)
         s = a.argsort()
@@ -2026,23 +2052,23 @@ class TestMethods(TestCase):
             b = a.searchsorted(a, "right", s)
             assert_equal(b, out + 1)
 
-    @xfail  # (reason="TODO argpartition")
-    @parametrize("dtype", np.typecodes["All"])
+    @xpassIfTorchDynamo  # (reason="TODO argpartition")
+    @parametrize("dtype", "efdFDBbhil?")
     def test_argpartition_out_of_range(self, dtype):
         # Test out of range values in kth raise an error, gh-5469
         d = np.arange(10).astype(dtype=dtype)
         assert_raises(ValueError, d.argpartition, 10)
         assert_raises(ValueError, d.argpartition, -11)
 
-    @xfail  # (reason="TODO partition")
-    @parametrize("dtype", np.typecodes["All"])
+    @xpassIfTorchDynamo  # (reason="TODO partition")
+    @parametrize("dtype", "efdFDBbhil?")
     def test_partition_out_of_range(self, dtype):
         # Test out of range values in kth raise an error, gh-5469
         d = np.arange(10).astype(dtype=dtype)
         assert_raises(ValueError, d.partition, 10)
         assert_raises(ValueError, d.partition, -11)
 
-    @xfail  # (reason="TODO argpartition")
+    @xpassIfTorchDynamo  # (reason="TODO argpartition")
     def test_argpartition_integer(self):
         # Test non-integer values in kth raise an error/
         d = np.arange(10)
@@ -2052,7 +2078,7 @@ class TestMethods(TestCase):
         d_obj = np.arange(10, dtype=object)
         assert_raises(TypeError, d_obj.argpartition, 9.0)
 
-    @xfail  # (reason="TODO partition")
+    @xpassIfTorchDynamo  # (reason="TODO partition")
     def test_partition_integer(self):
         # Test out of range values in kth raise an error, gh-5469
         d = np.arange(10)
@@ -2062,8 +2088,8 @@ class TestMethods(TestCase):
         d_obj = np.arange(10, dtype=object)
         assert_raises(TypeError, d_obj.partition, 9.0)
 
-    @xfail  # (reason="TODO partition")
-    @parametrize("kth_dtype", np.typecodes["AllInteger"])
+    @xpassIfTorchDynamo  # (reason="TODO partition")
+    @parametrize("kth_dtype", "Bbhil")
     def test_partition_empty_array(self, kth_dtype):
         # check axis handling for multidimensional empty arrays
         kth = np.array(0, dtype=kth_dtype)[()]
@@ -2075,8 +2101,8 @@ class TestMethods(TestCase):
         msg = "test empty array partition with axis=None"
         assert_equal(np.partition(a, kth, axis=None), a.ravel(), msg)
 
-    @xfail  # (reason="TODO argpartition")
-    @parametrize("kth_dtype", np.typecodes["AllInteger"])
+    @xpassIfTorchDynamo  # (reason="TODO argpartition")
+    @parametrize("kth_dtype", "Bbhil")
     def test_argpartition_empty_array(self, kth_dtype):
         # check axis handling for multidimensional empty arrays
         kth = np.array(0, dtype=kth_dtype)[()]
@@ -2094,7 +2120,7 @@ class TestMethods(TestCase):
             msg,
         )
 
-    @xfail  # (reason="TODO partition")
+    @xpassIfTorchDynamo  # (reason="TODO partition")
     def test_partition(self):
         d = np.arange(10)
         assert_raises(TypeError, np.partition, d, 2, kind=1)
@@ -2355,7 +2381,7 @@ class TestMethods(TestCase):
                 )
                 prev = k + 1
 
-    @xfail  # (reason="TODO partition")
+    @xpassIfTorchDynamo  # (reason="TODO partition")
     def test_partition_iterative(self):
         d = np.arange(17)
         kth = (0, 1, 2, 429, 231)
@@ -2422,7 +2448,7 @@ class TestMethods(TestCase):
         for i in range(d0.shape[1]):
             self.assert_partitioned(p[:, i], kth)
 
-    @xfail  # (reason="TODO partition")
+    @xpassIfTorchDynamo  # (reason="TODO partition")
     def test_partition_fuzz(self):
         # a few rounds of random data testing
         for j in range(10, 30):
@@ -2439,8 +2465,8 @@ class TestMethods(TestCase):
                 err_msg=f"data: {d!r}\n kth: {kth!r}",
             )
 
-    @xfail  # (reason="TODO partition")
-    @parametrize("kth_dtype", np.typecodes["AllInteger"])
+    @xpassIfTorchDynamo  # (reason="TODO partition")
+    @parametrize("kth_dtype", "Bbhil")
     def test_argpartition_gh5524(self, kth_dtype):
         # A test for functionality of argpartition on lists.
         kth = np.array(1, dtype=kth_dtype)[()]
@@ -2448,7 +2474,7 @@ class TestMethods(TestCase):
         p = np.argpartition(d, kth)
         self.assert_partitioned(np.array(d)[p], [1])
 
-    @xfail  # (reason="TODO order='F'")
+    @xpassIfTorchDynamo  # (reason="TODO order='F'")
     def test_flatten(self):
         x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
         x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32)
@@ -2591,7 +2617,7 @@ class TestMethods(TestCase):
         a.dot(b=b, out=c)
         assert_equal(c, np.dot(a, b))
 
-    @xfail  # (reason="_aligned_zeros")
+    @xpassIfTorchDynamo  # (reason="_aligned_zeros")
     def test_dot_out_mem_overlap(self):
         np.random.seed(1)
@@ -2613,7 +2639,7 @@ class TestMethods(TestCase):
         assert_raises(ValueError, np.dot, a, b, out=b[::2])
         assert_raises(ValueError, np.dot, a, b, out=b.T)
 
-    @xfail  # (reason="TODO: overlapping memor in matmul")
+    @xpassIfTorchDynamo  # (reason="TODO: overlapping memor in matmul")
     def test_matmul_out(self):
         # overlapping memory
         a = np.arange(18).reshape(2, 3, 3)
@@ -2649,7 +2675,7 @@ class TestMethods(TestCase):
         # Order of axis argument doesn't matter:
         assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
 
-    @xfail  # (reason="no readonly views")
+    @xpassIfTorchDynamo  # (reason="no readonly views")
     def test_diagonal_view_notwriteable(self):
         a = np.eye(3).diagonal()
         assert_(not a.flags.writeable)
@@ -2740,7 +2766,7 @@ class TestMethods(TestCase):
         bad_array = [1, 2, 3]
         assert_raises(TypeError, np.put, bad_array, [0, 2], 5)
 
-    @xfail  # (reason="TODO: implement order='F'")
+    @xpassIfTorchDynamo  # (reason="TODO: implement order='F'")
     def test_ravel(self):
         a = np.array([[0, 1], [2, 3]])
         assert_equal(a.ravel(), [0, 1, 2, 3])
@@ -2943,10 +2969,10 @@ class TestMethods(TestCase):
         ]
         for dt in dtypes:
             a = np.array([1, 2, 3], dtype=dt)
-            assert_raises(ValueError, complex, a)
+            assert_raises((TypeError, ValueError), complex, a)
 
             c = np.array([(1.0, 3), (2e-3, 7)], dtype=dt)
-            assert_raises(ValueError, complex, c)
+            assert_raises((TypeError, ValueError), complex, c)
 
 
 class TestCequenceMethods(TestCase):
@@ -2986,7 +3012,7 @@ class TestBinop(TestCase):
         assert_equal(b, 3)
 
 
-@xfail  # (reason="TODO")
+@xpassIfTorchDynamo  # (reason="TODO")
 class TestSubscripting(TestCase):
     def test_test_zero_rank(self):
         x = np.array([1, 2, 3])
@@ -3081,6 +3107,7 @@ class TestArgmaxArgminCommon(TestCase):
             )
         ),
     )
+    @skipif(numpy.__version__ < "1.23", reason="keepdims is new in numpy 1.22")
     @parametrize("method", [np.argmax, np.argmin])
     def test_np_argmin_argmax_keepdims(self, size, axis, method):
         arr = np.random.normal(size=size)
@@ -3150,7 +3177,7 @@ class TestArgmaxArgminCommon(TestCase):
         with pytest.raises(ValueError):
             method(arr.T, axis=axis, out=wrong_outarray, keepdims=True)
 
-    @xfail  # (reason="TODO: implement choose")
+    @xpassIfTorchDynamo  # (reason="TODO: implement choose")
     @parametrize("method", ["max", "min"])
     def test_all(self, method):
         a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
@@ -3433,7 +3460,7 @@ class TestArgmin(TestCase):
 
 class TestMinMax(TestCase):
-    @xfail
+    @xpassIfTorchDynamo
     def test_scalar(self):
         assert_raises(np.AxisError, np.amax, 1, 1)
         assert_raises(np.AxisError, np.amin, 1, 1)
@@ -3529,7 +3556,7 @@ class TestClip(TestCase):
         assert_array_equal(result, expected)
 
 
-@xfail  # (reason="TODO")
+@xpassIfTorchDynamo  # (reason="TODO")
 class TestCompress(TestCase):
     def test_axis(self):
         tgt = [[5, 6, 7, 8, 9]]
@@ -3553,7 +3580,7 @@ class TestCompress(TestCase):
         assert_equal(out, 1)
 
 
-@xfail  # (reason="TODO")
+@xpassIfTorchDynamo  # (reason="TODO")
 @instantiate_parametrized_tests
 class TestPutmask(TestCase):
     def tst_basic(self, x, T, mask, val):
@@ -3567,7 +3594,7 @@ class TestPutmask(TestCase):
         mask = x < 40
 
         for val in [-100, 0, 15]:
-            for types in np.sctypes.values():
+            for types in "efdFDBbhil?":
                 for T in types:
                     if T not in unchecked_types:
                         if val < 0 and np.dtype(T).kind == "u":
@@ -3645,7 +3672,7 @@ class TestTake(TestCase):
     def test_ip_types(self):
         x = np.random.random(24) * 100
         x = np.reshape(x, (2, 3, 4))
-        for types in np.sctypes.values():
+        for types in "efdFDBbhil?":
             for T in types:
                 self.tst_basic(x.copy().astype(T))
@@ -3656,14 +3683,14 @@ class TestTake(TestCase):
         assert_raises(IndexError, np.take, x, [-3], axis=0)
         assert_array_equal(np.take(x, [-1], axis=0)[0], x[1])
 
-    @xfail  # (reason="XXX: take(..., mode='clip')")
+    @xpassIfTorchDynamo  # (reason="XXX: take(..., mode='clip')")
     def test_clip(self):
         x = np.random.random(24) * 100
         x = np.reshape(x, (2, 3, 4))
         assert_array_equal(np.take(x, [-1], axis=0, mode="clip")[0], x[0])
         assert_array_equal(np.take(x, [2], axis=0, mode="clip")[0], x[1])
 
-    @xfail  # (reason="XXX: take(..., mode='wrap')")
+    @xpassIfTorchDynamo  # (reason="XXX: take(..., mode='wrap')")
     def test_wrap(self):
         x = np.random.random(24) * 100
         x = np.reshape(x, (2, 3, 4))
@@ -3671,7 +3698,7 @@ class TestTake(TestCase):
         assert_array_equal(np.take(x, [2], axis=0, mode="wrap")[0], x[0])
         assert_array_equal(np.take(x, [3], axis=0, mode="wrap")[0], x[1])
 
-    @xfail  # (reason="XXX: take(mode='wrap')")
+    @xpassIfTorchDynamo  # (reason="XXX: take(mode='wrap')")
     def test_out_overlap(self):
         # gh-6272 check overlap on out
         x = np.arange(5)
@@ -3688,7 +3715,7 @@ class TestTake(TestCase):
         assert ret is out
 
 
-@xfail  # (reason="TODO")
+@xpassIfTorchDynamo  # (reason="TODO")
 @instantiate_parametrized_tests
 class TestLexsort(TestCase):
     @parametrize(
@@ -4248,7 +4275,7 @@ class TestIO(TestCase):
         assert_array_equal(res, expected)
 
 
-@xfail  # (reason="TODO")
+@xpassIfTorchDynamo  # (reason="TODO")
 @instantiate_parametrized_tests
 class TestFromBuffer(TestCase):
     @parametrize(
@@ -4261,7 +4288,10 @@ class TestFromBuffer(TestCase):
         buf = x.tobytes()
         assert_array_equal(np.frombuffer(buf, dtype=dt), x.flat)
 
-    @parametrize("obj", [np.arange(10), "12345678"])
+    # @xpassIfTorchDynamo
+    @parametrize(
+        "obj", [np.arange(10), subtest("12345678", decorators=[xfailIfTorchDynamo])]
+    )
     def test_array_base(self, obj):
         # Objects (including NumPy arrays), which do not use the
         # `release_buffer` slot should be directly used as a base object.
@@ -4275,6 +4305,7 @@ class TestFromBuffer(TestCase):
     def test_empty(self):
         assert_array_equal(np.frombuffer(b""), np.array([]))
 
+    @skip("fails on CI, we are unlikely to implement this")
     @skipif(
         IS_PYPY,
         reason="PyPy's memoryview currently does not track exports. See: "
@@ -4634,7 +4665,7 @@ class TestStats(TestCase):
         with assert_raises(np.AxisError):
             np.arange(10).mean(axis=2)
 
-    @xfail  # (reason="implement mean(..., where=...)")
+    @xpassIfTorchDynamo  # (reason="implement mean(..., where=...)")
     def test_mean_where(self):
         a = np.arange(16).reshape((4, 4))
         wh_full = np.array(
@@ -4722,7 +4753,7 @@ class TestStats(TestCase):
         with assert_raises(np.AxisError):
             np.arange(10).var(axis=2)
 
-    @xfail  # (reason="implement var(..., where=...)")
+    @xpassIfTorchDynamo  # (reason="implement var(..., where=...)")
     def test_var_where(self):
         a = np.arange(25).reshape((5, 5))
         wh_full = np.array(
@@ -4767,7 +4798,7 @@ class TestStats(TestCase):
             res = _std(mat, axis=axis)
             assert_almost_equal(res, tgt)
 
-    @xfail  # (reason="implement std(..., where=...)")
+    @xpassIfTorchDynamo  # (reason="implement std(..., where=...)")
     def test_std_where(self):
         a = np.arange(25).reshape((5, 5))[::-1]
         whf = np.array(
@@ -4837,7 +4868,7 @@ class TestVdot(TestCase):
         assert_(np.isscalar(res))
         assert_equal(np.vdot(b, b), True)
 
-    @xfail  # (reason="implement order='F'")
+    @xpassIfTorchDynamo  # (reason="implement order='F'")
     def test_vdot_array_order(self):
         a = np.array([[1, 2], [3, 4]], order="C")
         b = np.array([[1, 2], [3, 4]], order="F")
@@ -4863,7 +4894,7 @@ class TestVdot(TestCase):
         assert_equal(np.vdot(a, b.copy()), np.vdot(a.flatten(), b.flatten()))
         assert_equal(np.vdot(a.copy(), b), np.vdot(a.flatten(), b.flatten()))
 
-    @xfail  # (reason="implement order='F'")
+    @xpassIfTorchDynamo  # (reason="implement order='F'")
     def test_vdot_uncontiguous_2(self):
         # test order='F' separately
         for size in [2, 1000]:
@@ -5075,7 +5106,7 @@ class TestDot(TestCase):
         r = np.empty((1024, 32), dtype=int)
         assert_raises(ValueError, dot, f, v, r)
 
-    @xfail  # (reason="TODO order='F'")
+    @xpassIfTorchDynamo  # (reason="TODO order='F'")
     def test_dot_array_order(self):
         a = np.array([[1, 2], [3, 4]], order="C")
         b = np.array([[1, 2], [3, 4]], order="F")
@@ -5222,7 +5253,7 @@ class MatmulCommon:
             res = self.matmul(*arg)
             assert_(res.dtype == dt)
 
-    @xfail  # (reason="no scalars")
+    @xpassIfTorchDynamo  # (reason="no scalars")
     def test_result_types_2(self):
         # in numpy, vector vector returns scalars
         # we return a 0D array instead
@@ -5445,7 +5476,6 @@ class TestMatmul(MatmulCommon, TestCase):
         # test out non-contiguous
         out = np.ones((5, 2, 2), dtype=float)
         c = self.matmul(a, b, out=out[..., 0])
-        assert c.tensor._base is out.tensor
         assert_array_equal(c, tgt)
         c = self.matmul(a, v, out=out[:, 0, 0])
         assert_array_equal(c, tgt_mv)
@@ -5462,6 +5492,16 @@ class TestMatmul(MatmulCommon, TestCase):
         c = self.matmul(b.T, a.T, out=out.T)
         assert_array_equal(out, tgt)
 
+    @xfailIfTorchDynamo
+    def test_out_contiguous_2(self):
+        a = np.ones((5, 2), dtype=float)
+        b = np.array([[1, 3], [5, 7]], dtype=float)
+        # test out non-contiguous
+        out = np.ones((5, 2, 2), dtype=float)
+        c = self.matmul(a, b, out=out[..., 0])
+        assert c.tensor._base is out.tensor
+
     m1 = np.arange(15.0).reshape(5, 3)
     m2 = np.arange(21.0).reshape(3, 7)
     m3 = np.arange(30.0).reshape(5, 6)[:, ::2]  # non-contiguous
@@ -5591,7 +5631,7 @@ class TestMatmulOperator(MatmulCommon, TestCase):
     def test_matmul_raises(self):
         assert_raises((RuntimeError, TypeError), self.matmul, np.int8(5), np.int8(5))
 
-    @xfail  # (reason="torch supports inplace matmul, and so do we")
+    @xpassIfTorchDynamo  # (reason="torch supports inplace matmul, and so do we")
     def test_matmul_inplace(self):
         # It would be nice to support in-place matmul eventually, but for now
         # we don't have a working implementation, so better just to error out
@@ -5604,7 +5644,7 @@ class TestMatmulOperator(MatmulCommon, TestCase):
         assert_raises(TypeError, operator.imatmul, a, b)
         assert_raises(TypeError, exec, "a @= b", globals(), locals())
 
-    @xfail  # (reason="matmul_axes")
+    @xpassIfTorchDynamo  # (reason="matmul_axes")
     def test_matmul_axes(self):
         a = np.arange(3 * 4 * 5).reshape(3, 4, 5)
         c = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (1, 2)])
@@ -5706,6 +5746,8 @@ class TestChoose(TestCase):
         A = np.choose(self.ind, (self.x, self.y2))
         assert_equal(A, [[2, 2, 3], [2, 2, 3]])
 
+    # XXX: revisit xfails when NEP 50 lands in numpy
+    @skip(reason="XXX: revisit xfails when NEP 50 lands in numpy")
     @parametrize(
         "ops",
         [
@@ -5779,7 +5821,7 @@ class TestRepeat(TestCase):
 NEIGH_MODE = {"zero": 0, "one": 1, "constant": 2, "circular": 3, "mirror": 4}
 
 
-@xfail  # (reason="TODO")
+@xpassIfTorchDynamo  # (reason="TODO")
 class TestWarnings(TestCase):
     def test_complex_warning(self):
         x = np.array([1, 2])
@@ -5928,7 +5970,8 @@ class TestPEP3118Dtype(TestCase):
         self._check("i:f0:", [("f0", "i")])
 
 
-@xfail  # (reason="TODO")
+@skipif(numpy.__version__ < "1.23", reason="CopyMode is new in NumPy 1.22")
+@xpassIfTorchDynamo
 @instantiate_parametrized_tests
 class TestArrayCreationCopyArgument(TestCase):
     class RaiseOnBool:
@@ -6188,7 +6231,7 @@ class TestArrayAttributeDeletion(TestCase):
         assert_raises(AttributeError, delattr, a, s)
 
 
-@xfail  # (reason="TODO")
+@xpassIfTorchDynamo  # (reason="TODO")
 @instantiate_parametrized_tests
 class TestArrayInterface(TestCase):
     class Foo:
@@ -6239,7 +6282,7 @@ class TestArrayInterface(TestCase):
 
 class TestDelMisc(TestCase):
-    @xfail  # (reason="TODO")
+    @xpassIfTorchDynamo  # (reason="TODO")
     def test_flat_element_deletion(self):
         it = np.ones(3).flat
         try:
@ -6608,58 +6651,6 @@ class TestWhere(TestCase):
np.where(a, x=a, y=a) np.where(a, x=a, y=a)
if not IS_PYPY:
# sys.getsizeof() is not valid on PyPy
@xfail # (reason="TODO")
class TestSizeOf(TestCase):
def test_empty_array(self):
pytest.xpass()
x = np.array([])
assert_(sys.getsizeof(x) > 0)
def check_array(self, dtype):
pytest.xpass()
elem_size = dtype(0).itemsize
for length in [10, 50, 100, 500]:
x = np.arange(length, dtype=dtype)
assert_(sys.getsizeof(x) > length * elem_size)
def test_array_int32(self):
self.check_array(np.int32)
def test_array_int64(self):
self.check_array(np.int64)
def test_array_float32(self):
self.check_array(np.float32)
def test_array_float64(self):
self.check_array(np.float64)
def test_view(self):
d = np.ones(100)
assert_(sys.getsizeof(d[...]) < sys.getsizeof(d))
def test_reshape(self):
d = np.ones(100)
assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy()))
@_no_tracing
def test_resize(self):
d = np.ones(100)
old = sys.getsizeof(d)
d.resize(50)
assert_(old > sys.getsizeof(d))
d.resize(150)
assert_(old < sys.getsizeof(d))
def test_error(self):
pytest.xpass()
d = np.ones(100)
assert_raises(TypeError, d.__sizeof__, "a")
class TestHashing(TestCase): class TestHashing(TestCase):
def test_arrays_not_hashable(self): def test_arrays_not_hashable(self):
x = np.ones(3) x = np.ones(3)
@ -6671,7 +6662,7 @@ class TestHashing(TestCase):
class TestFormat(TestCase): class TestFormat(TestCase):
@xfail # (reason="TODO") @xpassIfTorchDynamo # (reason="TODO")
def test_0d(self): def test_0d(self):
a = np.array(np.pi) a = np.array(np.pi)
assert_equal(f"{a:0.3g}", "3.14") assert_equal(f"{a:0.3g}", "3.14")
@ -6704,7 +6695,7 @@ class TestWritebackIfCopy(TestCase):
res = np.argmin(mat, 0, out=out) res = np.argmin(mat, 0, out=out)
assert_equal(res, range(5)) assert_equal(res, range(5))
@xfail # (reason="XXX: place()") @xpassIfTorchDynamo # (reason="XXX: place()")
def test_insert_noncontiguous(self): def test_insert_noncontiguous(self):
a = np.arange(6).reshape(2, 3).T # force non-c-contiguous a = np.arange(6).reshape(2, 3).T # force non-c-contiguous
# uses arr_insert # uses arr_insert
@ -6719,7 +6710,7 @@ class TestWritebackIfCopy(TestCase):
np.put(a, [0, 2], [44, 55]) np.put(a, [0, 2], [44, 55])
assert_equal(a, np.array([[44, 3], [55, 4], [2, 5]])) assert_equal(a, np.array([[44, 3], [55, 4], [2, 5]]))
@xfail # (reason="XXX: putmask()") @xpassIfTorchDynamo # (reason="XXX: putmask()")
def test_putmask_noncontiguous(self): def test_putmask_noncontiguous(self):
a = np.arange(6).reshape(2, 3).T # force non-c-contiguous a = np.arange(6).reshape(2, 3).T # force non-c-contiguous
# uses arr_putmask # uses arr_putmask
@ -6739,7 +6730,7 @@ class TestWritebackIfCopy(TestCase):
np.choose(a, choices, out=out, mode="raise") np.choose(a, choices, out=out, mode="raise")
assert_equal(out, np.array([[10, -10, 10], [-10, 10, -10], [10, -10, 10]])) assert_equal(out, np.array([[10, -10, 10], [-10, 10, -10], [10, -10, 10]]))
@xfail # (reason="XXX: ndarray.flat") @xpassIfTorchDynamo # (reason="XXX: ndarray.flat")
def test_flatiter__array__(self): def test_flatiter__array__(self):
a = np.arange(9).reshape(3, 3) a = np.arange(9).reshape(3, 3)
b = a.T.flat b = a.T.flat
@ -6783,7 +6774,7 @@ class TestArange(TestCase):
assert_raises(TypeError, np.arange, step=3) assert_raises(TypeError, np.arange, step=3)
assert_raises(TypeError, np.arange, dtype="int64") assert_raises(TypeError, np.arange, dtype="int64")
@xfail # (reason="weird arange signature (optionals before required args)") @xpassIfTorchDynamo # (reason="weird arange signature (optionals before required args)")
def test_require_range_2(self): def test_require_range_2(self):
assert_raises(TypeError, np.arange, start=4) assert_raises(TypeError, np.arange, start=4)
@ -6829,19 +6820,13 @@ class TestArange(TestCase):
args[which] = np.float64(2.0) args[which] = np.float64(2.0)
assert np.arange(*args).dtype == np.float64 assert np.arange(*args).dtype == np.float64
# Cover stranger error path, test only to achieve code coverage!
args[which] = [None, []]
with pytest.raises((ValueError, RuntimeError)):
# Fails discovering start dtype
np.arange(*args)
@parametrize("dt", [np.float32, np.uint8, complex]) @parametrize("dt", [np.float32, np.uint8, complex])
def test_explicit_dtype(self, dt): def test_explicit_dtype(self, dt):
assert np.arange(5.0, dtype=dt).dtype == dt assert np.arange(5.0, dtype=dt).dtype == dt
class TestRichcompareScalar(TestCase): class TestRichcompareScalar(TestCase):
@xfail # (reason="comparison: builtin.bools or...?") @xpassIfTorchDynamo # (reason="comparison: builtin.bools or...?")
def test_richcompare_scalar_boolean_singleton_return(self): def test_richcompare_scalar_boolean_singleton_return(self):
# These are currently guaranteed to be the boolean singletons, but maybe # These are currently guaranteed to be the boolean singletons, but maybe
# returning NumPy booleans would also be OK: # returning NumPy booleans would also be OK:
@ -6851,7 +6836,7 @@ class TestRichcompareScalar(TestCase):
assert (np.int16(0) != "a") is True assert (np.int16(0) != "a") is True
@xfail # (reason="implement views/dtypes") @skip # (reason="implement views/dtypes")
class TestViewDtype(TestCase): class TestViewDtype(TestCase):
""" """
Verify that making a view of a non-contiguous array works as expected. Verify that making a view of a non-contiguous array works as expected.
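Throughout this diff, bare `@xfail` markers become `@xpassIfTorchDynamo`: the test stays an expected failure against eager torch._numpy, but is expected to pass once Dynamo traces real NumPy instead. A minimal sketch of how such a decorator pair can be written; the actual helpers live in torch.testing._internal.common_utils and may differ in detail:

    import os
    import unittest

    # Assumption: Dynamo mode is selected via this env var (see the test-suite docs).
    TEST_WITH_TORCHDYNAMO = os.getenv("PYTORCH_TEST_WITH_DYNAMO") == "1"

    def xpassIfTorchDynamo(func):
        # Expected pass under Dynamo (real NumPy); known failure in eager torch._numpy.
        return func if TEST_WITH_TORCHDYNAMO else unittest.expectedFailure(func)

    def xfailIfTorchDynamo(func):
        # Mirror image: expected failure only when tracing under Dynamo.
        return unittest.expectedFailure(func) if TEST_WITH_TORCHDYNAMO else func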

View File

@@ -7,19 +7,10 @@ import platform
 import sys
 import warnings

+import numpy
 import pytest

-import torch._numpy as np
-from torch._numpy.random import rand, randint, randn
-from torch._numpy.testing import (
-    assert_,
-    assert_allclose,
-    assert_almost_equal,
-    assert_array_almost_equal,
-    assert_array_equal,
-    assert_equal,
-    assert_warns,  # assert_array_max_ulp, HAS_REFCOUNT, IS_WASM
-)
 IS_WASM = False
 HAS_REFCOUNT = True
@@ -36,8 +27,38 @@ from torch.testing._internal.common_utils import (
     subtest,
     TEST_WITH_TORCHDYNAMO,
     TestCase,
+    xfailIfTorchDynamo,
+    xpassIfTorchDynamo,
 )

+# If we are going to trace through these, we should use NumPy
+# If testing on eager mode, we use torch._numpy
+if TEST_WITH_TORCHDYNAMO:
+    import numpy as np
+    from numpy.random import rand, randint, randn
+    from numpy.testing import (
+        assert_,
+        assert_allclose,
+        assert_almost_equal,
+        assert_array_almost_equal,
+        assert_array_equal,
+        assert_equal,
+        assert_warns,  # assert_array_max_ulp, HAS_REFCOUNT, IS_WASM
+    )
+else:
+    import torch._numpy as np
+    from torch._numpy.random import rand, randint, randn
+    from torch._numpy.testing import (
+        assert_,
+        assert_allclose,
+        assert_almost_equal,
+        assert_array_almost_equal,
+        assert_array_equal,
+        assert_equal,
+        assert_warns,  # assert_array_max_ulp, HAS_REFCOUNT, IS_WASM
+    )

 skip = functools.partial(skipif, True)
@@ -108,7 +129,7 @@ class TestNonarrayArgs(TestCase):
         tgt = [2, 5, 2, 3, 7, 2, 2]
         assert_equal(out, tgt)

-    @xfail  # (reason="TODO implement compress(...)")
+    @xpassIfTorchDynamo  # (reason="TODO implement compress(...)")
     def test_compress(self):
         arr = [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
         tgt = [[5, 6, 7, 8, 9]]
@@ -178,12 +199,12 @@ class TestNonarrayArgs(TestCase):
         s = np.float64(1.0)
         assert_equal(s.round(), 1.0)

-    @xfail  # (reason="scalar instances")
+    @xpassIfTorchDynamo  # (reason="scalar instances")
     def test_round_2(self):
         s = np.float64(1.0)
         assert_(isinstance(s.round(), np.float64))

-    @xfail  # (reason="scalar instances")
+    @xpassIfTorchDynamo  # (reason="scalar instances")
     @parametrize(
         "dtype",
         [
@@ -206,7 +227,6 @@ class TestNonarrayArgs(TestCase):
         assert_equal(round(s, None), 1)
         assert_equal(round(s, ndigits=None), 1)

-    @xfail  # (reason="scalar instances")
     @parametrize(
         "val, ndigits",
         [
@@ -214,8 +234,14 @@ class TestNonarrayArgs(TestCase):
             # 2**31 - 1, -1, marks=pytest.mark.xfail(reason="Out of range of int32")
             # ),
             subtest((2**31 - 1, -1), decorators=[xfail]),
-            (2**31 - 1, 1 - math.ceil(math.log10(2**31 - 1))),
-            (2**31 - 1, -math.ceil(math.log10(2**31 - 1))),
+            subtest(
+                (2**31 - 1, 1 - math.ceil(math.log10(2**31 - 1))),
+                decorators=[xpassIfTorchDynamo],
+            ),
+            subtest(
+                (2**31 - 1, -math.ceil(math.log10(2**31 - 1))),
+                decorators=[xpassIfTorchDynamo],
+            ),
         ],
     )
     def test_dunder_round_edgecases(self, val, ndigits):
@@ -318,7 +344,7 @@ class TestNonarrayArgs(TestCase):
         #     assert_(w[0].category is RuntimeWarning)

-@xfail  # (reason="TODO")
+@xpassIfTorchDynamo  # (reason="TODO")
 class TestIsscalar(TestCase):
     def test_isscalar(self):
         assert_(np.isscalar(3.1))
@@ -557,7 +583,7 @@ class TestBoolCmp(TestCase):
             assert_array_equal(np.signbit(self.signd[i:]), self.ed[i:])

-@xfail  # (reason="TODO")
+@xpassIfTorchDynamo  # (reason="TODO")
 class TestSeterr(TestCase):
     def test_default(self):
         err = np.geterr()
@@ -577,6 +603,7 @@ class TestSeterr(TestCase):
             np.seterr(**old)
             assert_(np.geterr() == old)

+    @xfail
     @skipif(IS_WASM, reason="no wasm fp exception support")
     @skipif(platform.machine() == "armv5tel", reason="See gh-413.")
     def test_divide_err(self):
@@ -821,7 +848,7 @@ class TestTypes(TestCase):
         #     assert_equal(b, [0.0, 1.5])
         #     assert_equal(b.dtype, np.dtype('f4'))

-    @xfail  # (reason="'Scalars do not upcast arrays' rule")
+    @xpassIfTorchDynamo  # (reason="'Scalars do not upcast arrays' rule")
     def test_coercion_2(self):
         def res_type(a, b):
             return np.add(a, b).dtype
@@ -872,7 +899,7 @@ class TestTypes(TestCase):
         # Also test keyword arguments
         assert_(np.can_cast(from_=np.int32, to=np.int64))

-    @xfail  # (reason="value-based casting?")
+    @xpassIfTorchDynamo  # (reason="value-based casting?")
     def test_can_cast_values(self):
         # gh-5917
         for dt in np.sctypes["int"] + np.sctypes["uint"]:
@@ -893,7 +920,8 @@ class NIterError(Exception):
     pass

-@xfail  # (reason="TODO")
+@skip(reason="NP_VER: fails on CI")
+@xpassIfTorchDynamo  # (reason="TODO")
 @instantiate_parametrized_tests
 class TestFromiter(TestCase):
     def makegen(self):
@@ -939,6 +967,7 @@ class TestFromiter(TestCase):
         with pytest.raises(NIterError):
             np.fromiter(iterable, dtype=dtype, count=count)

+    @skip(reason="NP_VER: fails on CI")
     def test_empty_result(self):
         class MyIter:
             def __length_hint__(self):
@@ -987,6 +1016,8 @@ class TestNonzeroAndCountNonzero(TestCase):
         assert_equal(np.count_nonzero(np.array([1], dtype="?")), 1)
         assert_equal(np.nonzero(np.array([1])), ([0],))

+    @xfailIfTorchDynamo  # numpy returns a python int, we return a 0D array
+    def test_nonzero_trivial_differs(self):
         assert isinstance(np.count_nonzero([]), np.ndarray)

     def test_nonzero_zerod(self):
@@ -996,6 +1027,8 @@ class TestNonzeroAndCountNonzero(TestCase):
         assert_equal(np.count_nonzero(np.array(1)), 1)
         assert_equal(np.count_nonzero(np.array(1, dtype="?")), 1)

+    @xfailIfTorchDynamo  # numpy returns a python int, we return a 0D array
+    def test_nonzero_zerod_differs(self):
         assert isinstance(np.count_nonzero(np.array(1)), np.ndarray)

     def test_nonzero_onedim(self):
@@ -1004,6 +1037,9 @@ class TestNonzeroAndCountNonzero(TestCase):
         assert_equal(np.count_nonzero(x), 4)
         assert_equal(np.nonzero(x), ([0, 2, 3, 6],))

+    @xfailIfTorchDynamo  # numpy returns a python int, we return a 0D array
+    def test_nonzero_onedim_differs(self):
+        x = np.array([1, 0, 2, -1, 0, 0, 8])
         assert isinstance(np.count_nonzero(x), np.ndarray)

     def test_nonzero_twodim(self):
@@ -1053,7 +1089,7 @@ class TestNonzeroAndCountNonzero(TestCase):
         assert_raises(np.AxisError, np.count_nonzero, m, axis=3)
         assert_raises(TypeError, np.count_nonzero, m, axis=np.array([[1], [2]]))

-    @parametrize("typecode", np.typecodes["All"])
+    @parametrize("typecode", "efdFDBbhil?")
     def test_count_nonzero_axis_all_dtypes(self, typecode):
         # More thorough test that the axis argument is respected
         # for all dtypes and responds correctly when presented with
@@ -1111,7 +1147,7 @@ class TestIndex(TestCase):
         assert_equal(c.dtype, np.dtype("int32"))

-@xfail  # (reason="TODO")
+@xpassIfTorchDynamo  # (reason="TODO")
 class TestBinaryRepr(TestCase):
     def test_zero(self):
         assert_equal(np.binary_repr(0), "0")
@@ -1149,7 +1185,7 @@ class TestBinaryRepr(TestCase):
         assert_equal(np.binary_repr(np.int64(-(2**62)), width=64), "11" + "0" * 62)

-@xfail  # (reason="TODO")
+@xpassIfTorchDynamo  # (reason="TODO")
 class TestBaseRepr(TestCase):
     def test_base3(self):
         assert_equal(np.base_repr(3**5, 3), "100000")
@@ -1361,7 +1397,7 @@ class TestClip(TestCase):
         act = self.clip(a, m, M)
         assert_array_equal(ac, act)

-    @xfail  # (reason="byteorder not supported in torch")
+    @xpassIfTorchDynamo  # (reason="byteorder not supported in torch")
     def test_simple_nonnative(self):
         # Test non native double input with scalar min/max.
         # Test native double input with non native double scalar min/max.
@@ -1381,7 +1417,7 @@ class TestClip(TestCase):
         act = self.clip(a, m, M)
         assert_array_equal(ac, act)

-    @xfail  # (reason="clamp not supported for complex")
+    @xpassIfTorchDynamo  # (reason="clamp not supported for complex")
     def test_simple_complex(self):
         # Test native complex input with native double scalar min/max.
         # Test native input with complex double scalar min/max.
@@ -1434,8 +1470,14 @@ class TestClip(TestCase):
         self.clip(a, m, M, act)
         assert_array_equal(ac, act)

-    @xfail  # (reason="casting not supported")
-    @parametrize("casting", [None, "unsafe"])
+    # @xpassIfTorchDynamo  # (reason="casting not supported")
+    @parametrize(
+        "casting",
+        [
+            subtest(None, decorators=[xfail]),
+            subtest("unsafe", decorators=[xpassIfTorchDynamo]),
+        ],
+    )
     def test_simple_int32_inout(self, casting):
         # Test native int32 input with double min/max and int32 out.
         a = self._generate_int32_data(self.nr, self.nc)
@@ -1561,7 +1603,7 @@ class TestClip(TestCase):
         act = self.clip(a, m * np.zeros(a.shape), M)
         assert_array_equal(ac, act)

-    @xfail  # (reason="newbyteorder not supported")
+    @xpassIfTorchDynamo  # (reason="newbyteorder not supported")
     def test_type_cast_06(self):
         # Test native with NON native scalar min/max.
         a = self._generate_data(self.nr, self.nc)
@@ -1572,7 +1614,7 @@ class TestClip(TestCase):
         ac = self.fastclip(a, m_s, M)
         assert_array_equal(ac, act)

-    @xfail  # (reason="newbyteorder not supported")
+    @xpassIfTorchDynamo  # (reason="newbyteorder not supported")
     def test_type_cast_07(self):
         # Test NON native with native array min/max.
         a = self._generate_data(self.nr, self.nc)
@@ -1584,7 +1626,7 @@ class TestClip(TestCase):
         ac = self.fastclip(a_s, m, M)
         assert_array_equal(ac, act)

-    @xfail  # (reason="newbyteorder not supported")
+    @xpassIfTorchDynamo  # (reason="newbyteorder not supported")
     def test_type_cast_08(self):
         # Test NON native with native scalar min/max.
         a = self._generate_data(self.nr, self.nc)
@@ -1596,7 +1638,7 @@ class TestClip(TestCase):
         act = a_s.clip(m, M)
         assert_array_equal(ac, act)

-    @xfail  # (reason="newbyteorder not supported")
+    @xpassIfTorchDynamo  # (reason="newbyteorder not supported")
     def test_type_cast_09(self):
         # Test native with NON native array min/max.
         a = self._generate_data(self.nr, self.nc)
@@ -1618,7 +1660,7 @@ class TestClip(TestCase):
         ac = self.fastclip(a, m, M, out=b)
         assert_array_equal(ac, act)

-    @xfail  # (reason="newbyteorder not supported")
+    @xpassIfTorchDynamo  # (reason="newbyteorder not supported")
     def test_type_cast_11(self):
         # Test non native with native scalar, min/max, out non native
         a = self._generate_non_native_data(self.nr, self.nc)
@@ -2091,7 +2133,8 @@ class TestCreationFuncs(TestCase):
     # Test ones, zeros, empty and full.

     def setUp(self):
-        dtypes = {np.dtype(tp) for tp in itertools.chain(*np.sctypes.values())}
+        # dtypes = {np.dtype(tp) for tp in itertools.chain(*np.sctypes.values())}
+        dtypes = {np.dtype(tp) for tp in "efdFDBbhil?"}
         self.dtypes = dtypes
         self.orders = {
             "C": "c_contiguous"
@@ -2464,7 +2507,7 @@ class TestArgwhere(TestCase):
         assert_equal(np.argwhere([4, 0, 2, 1, 3]), [[0], [2], [3], [4]])

-@xfail  # (reason="TODO")
+@xpassIfTorchDynamo  # (reason="TODO")
 class TestStringFunction(TestCase):
     def test_set_string_function(self):
         a = np.array([1])
@@ -2570,7 +2613,7 @@ class TestRollaxis(TestCase):
         assert_raises(np.AxisError, np.rollaxis, a, 4, 0)
         assert_raises(np.AxisError, np.rollaxis, a, 0, 5)

-    @xfail  # (reason="needs fancy indexing")
+    @xpassIfTorchDynamo  # (reason="needs fancy indexing")
     def test_results(self):
         a = np.arange(1 * 2 * 3 * 4).reshape(1, 2, 3, 4).copy()
         aind = np.indices(a.shape)
@@ -2771,6 +2814,7 @@ class TestCross(TestCase):
         for axisc in range(-2, 2):
             assert_equal(np.cross(u, u, axisc=axisc).shape, (3, 4))

+    @skipif(numpy.__version__ < "1.24", reason="fix landed in NumPy 1.24")
     def test_uint8_int32_mixed_dtypes(self):
         # regression test for gh-19138
         u = np.array([[195, 8, 9]], np.uint8)
@@ -2827,7 +2871,7 @@ class TestIndices(TestCase):
         assert_(arr.dtype == dtype)

-@xfail  # (reason="TODO")
+@xpassIfTorchDynamo  # (reason="TODO")
 class TestRequire(TestCase):
     flag_names = [
         "C",
@@ -2894,7 +2938,7 @@ class TestRequire(TestCase):
         assert_raises(ValueError, np.require, a, None, ["C", "F"])

-@xfail  # (reason="TODO")
+@xpassIfTorchDynamo  # (reason="TODO")
 class TestBroadcast(TestCase):
     def test_broadcast_in_args(self):
         # gh-5881
@@ -2953,7 +2997,7 @@ class TestBroadcast(TestCase):
         assert_raises(ValueError, np.broadcast, 1, **{"x": 1})

     def test_shape_mismatch_error_message(self):
-        with pytest.raises(
+        with assert_raises(
             ValueError,
             match=r"arg 0 with shape \(1, 3\) and " r"arg 2 with shape \(2,\)",
         ):
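The header rewrite above is the core mechanism of the patch: the same test body is pointed at real NumPy when Dynamo is tracing and at torch._numpy in eager mode. Reduced to its essentials (a self-contained sketch, not the exact file contents):

    from torch.testing._internal.common_utils import TEST_WITH_TORCHDYNAMO

    if TEST_WITH_TORCHDYNAMO:
        import numpy as np  # trace the real thing under Dynamo
    else:
        import torch._numpy as np  # exercise the reimplementation eagerly

    # Either binding satisfies the same assertions:
    assert np.arange(3).shape == (3,)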

View File

@@ -4,22 +4,30 @@ import functools
 import itertools
 import sys

-from unittest import expectedFailure as xfail, skipIf as skipif
+from unittest import skipIf as skipif

-import torch._numpy as np
 from pytest import raises as assert_raises
-from torch._numpy.testing import assert_

 from torch.testing._internal.common_utils import (
     instantiate_parametrized_tests,
     parametrize,
     run_tests,
+    TEST_WITH_TORCHDYNAMO,
     TestCase,
+    xpassIfTorchDynamo,
 )

+if TEST_WITH_TORCHDYNAMO:
+    import numpy as np
+    from numpy.testing import assert_
+else:
+    import torch._numpy as np
+    from torch._numpy.testing import assert_

 skip = functools.partial(skipif, True)

-@xfail  # (
+@xpassIfTorchDynamo  # (
 # reason="We do not disctinguish between scalar and array types."
 # " Thus, scalars can upcast arrays."
 # )
@@ -101,7 +109,7 @@ class TestIsSubDType(TestCase):
         assert np.issubdtype(np.float32, "f")

-@xfail  # (
+@xpassIfTorchDynamo  # (
 # reason="We do not have (or need) np.core.numerictypes."
 # " Our type aliases are in _dtypes.py."
 # )

View File

@@ -5,25 +5,33 @@ Test the scalar constructors, which also do type-coercion
 """
 import functools

-from unittest import expectedFailure as xfail, skipIf as skipif
+from unittest import skipIf as skipif

 import pytest

-import torch._numpy as np
-from torch._numpy.testing import assert_almost_equal, assert_equal
 from torch.testing._internal.common_utils import (
     instantiate_parametrized_tests,
     parametrize,
     run_tests,
     subtest,
+    TEST_WITH_TORCHDYNAMO,
     TestCase,
+    xpassIfTorchDynamo,
 )

+if TEST_WITH_TORCHDYNAMO:
+    import numpy as np
+    from numpy.testing import assert_almost_equal, assert_equal
+else:
+    import torch._numpy as np
+    from torch._numpy.testing import assert_almost_equal, assert_equal

 skip = functools.partial(skipif, True)

 class TestFromString(TestCase):
-    @xfail  # (reason="XXX: floats from strings")
+    @xpassIfTorchDynamo  # (reason="XXX: floats from strings")
     def test_floating(self):
         # Ticket #640, floats from string
         fsingle = np.single("1.234")
@@ -31,7 +39,7 @@ class TestFromString(TestCase):
         assert_almost_equal(fsingle, 1.234)
         assert_almost_equal(fdouble, 1.234)

-    @xfail  # (reason="XXX: floats from strings")
+    @xpassIfTorchDynamo  # (reason="XXX: floats from strings")
     def test_floating_overflow(self):
         """Strings containing an unrepresentable float overflow"""
         fhalf = np.half("1e10000")

View File

@@ -13,16 +13,24 @@ from unittest import skipIf as skipif, SkipTest
 import pytest

-import torch._numpy as np
 from pytest import raises as assert_raises
-from torch._numpy.testing import assert_equal

 from torch.testing._internal.common_utils import (
     instantiate_parametrized_tests,
     parametrize,
     run_tests,
+    TEST_WITH_TORCHDYNAMO,
     TestCase,
 )

+if TEST_WITH_TORCHDYNAMO:
+    import numpy as np
+    from numpy.testing import assert_equal
+else:
+    import torch._numpy as np
+    from torch._numpy.testing import assert_equal

 skip = functools.partial(skipif, True)

View File

@@ -9,32 +9,48 @@ import warnings
 from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest

+import numpy
 # from numpy._utils import _pep440
 import pytest
+from pytest import raises as assert_raises

 # from hypothesis import given, settings
 # from hypothesis.strategies import sampled_from
 # from hypothesis.extra import numpy as hynp

-import torch._numpy as np
-from pytest import raises as assert_raises
-from torch._numpy.testing import (
-    _gen_alignment_data,
-    assert_,
-    assert_almost_equal,
-    assert_equal,
-    # assert_array_equal, suppress_warnings, _gen_alignment_data,
-    # assert_warns,
-)
 from torch.testing._internal.common_utils import (
     instantiate_parametrized_tests,
     parametrize,
     run_tests,
     slowTest as slow,
     subtest,
+    TEST_WITH_TORCHDYNAMO,
     TestCase,
+    xpassIfTorchDynamo,
 )

+if TEST_WITH_TORCHDYNAMO:
+    import numpy as np
+    from numpy.testing import (
+        _gen_alignment_data,
+        assert_,
+        assert_almost_equal,
+        assert_equal,
+    )
+else:
+    import torch._numpy as np
+    from torch._numpy.testing import (
+        _gen_alignment_data,
+        assert_,
+        assert_almost_equal,
+        assert_equal,
+        # assert_array_equal, suppress_warnings, _gen_alignment_data,
+        # assert_warns,
+    )

 skip = functools.partial(skipif, True)

 IS_PYPY = False
@@ -155,7 +171,7 @@ class TestBaseMath(TestCase):
             np.add(2, inp2, out=out)
             assert_almost_equal(out, exp1 + 2, err_msg=msg)

-    @xfail  # (reason="pytorch does not have .view")
+    @xpassIfTorchDynamo  # (reason="pytorch does not have .view")
     def test_lower_align(self):
         # check data that is not aligned to element size
         # i.e doubles are aligned to 4 bytes on i386
@@ -186,7 +202,8 @@ class TestPower(TestCase):
         else:
             assert_almost_equal(b, 6765201, err_msg=msg)

-    @xfail  # (reason="Value-based casting: (2)**(-2) -> 0 in pytorch.")
+    @skip(reason="NP_VER: fails on CI on older NumPy")
+    @xpassIfTorchDynamo  # (reason="Value-based casting: (2)**(-2) -> 0 in pytorch.")
     def test_integers_to_negative_integer_power(self):
         # Note that the combination of uint64 with a signed integer
         # has common type np.float64. The other combinations should all
@@ -272,7 +289,8 @@ def _signs(dt):
 @instantiate_parametrized_tests
 class TestModulus(TestCase):
     def test_modulus_basic(self):
-        dt = np.typecodes["AllInteger"] + np.typecodes["Float"]
+        # dt = np.typecodes["AllInteger"] + np.typecodes["Float"]
+        dt = "Bbhil" + "efd"
         for op in [floordiv_and_mod, divmod]:
             for dt1, dt2 in itertools.product(dt, dt):
                 for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)):
@@ -317,7 +335,8 @@ class TestModulus(TestCase):
     def test_float_modulus_roundoff(self):
         # gh-6127
-        dt = np.typecodes["Float"]
+        # dt = np.typecodes["Float"]
+        dt = "efd"
         for op in [floordiv_and_mod, divmod]:
             for dt1, dt2 in itertools.product(dt, dt):
                 for sg1, sg2 in itertools.product((+1, -1), (+1, -1)):
@@ -333,7 +352,7 @@ class TestModulus(TestCase):
                 else:
                     assert_(b > rem >= 0, msg)

-    @parametrize("dt", np.typecodes["Float"])
+    @parametrize("dt", "efd")
     def test_float_modulus_corner_cases(self, dt):
         if dt == "e":
             # FIXME: make xfail
@@ -353,7 +372,7 @@ class TestModulus(TestCase):
             # sup.filter(RuntimeWarning, "divide by zero encountered in floor_divide")
             # sup.filter(RuntimeWarning, "divide by zero encountered in divmod")
             # sup.filter(RuntimeWarning, "invalid value encountered in divmod")
-            for dt in np.typecodes["Float"]:
+            for dt in "efd":
                 fone = np.array(1.0, dtype=dt)
                 fzer = np.array(0.0, dtype=dt)
                 finf = np.array(np.inf, dtype=dt)
@@ -451,7 +470,8 @@ class TestConversion(TestCase):
             a = np.array(l, dtype=T)
             assert_equal([int(_m) for _m in a], li)

-    @xfail  # (reason="pytorch does not emit this warning.")
+    @skipif(numpy.__version__ < "1.24", reason="NP_VER: fails on NumPy 1.23.x")
+    @xpassIfTorchDynamo  # (reason="pytorch does not emit this warning.")
     def test_iinfo_long_values_1(self):
         for code in "bBh":
             with pytest.warns(DeprecationWarning):
@@ -475,7 +495,7 @@ class TestConversion(TestCase):
             dtype(np.iinfo(dtype).max + 1)

         for code in [np.int_, np.longlong]:
-            assert_raises(RuntimeError, overflow_error_func, code)
+            assert_raises((OverflowError, RuntimeError), overflow_error_func, code)

     def test_numpy_scalar_relational_operators(self):
         # All integer
@@ -554,7 +574,7 @@ class TestConversion(TestCase):
         #     assert_equal( val, val2 )

-@xfail  # (reason="can delegate repr to pytorch")
+@xpassIfTorchDynamo  # (reason="can delegate repr to pytorch")
 class TestRepr(TestCase):
     def _test_type_repr(self, t):
         finfo = np.finfo(t)
@@ -789,7 +809,7 @@ def recursionlimit(n):
 @instantiate_parametrized_tests
 class TestScalarOpsMisc(TestCase):
     @xfail  # (reason="pytorch does not warn on overflow")
-    @parametrize("dtype", np.typecodes["AllInteger"])
+    @parametrize("dtype", "Bbhil")
     @parametrize(
         "operation",
         [
@@ -807,7 +827,7 @@ class TestScalarOpsMisc(TestCase):
             operation(min, max)

     @skip(reason="integer overflow UB: crashes pytorch under ASAN")
-    @parametrize("dtype", np.typecodes["Integer"])
+    @parametrize("dtype", "bhil")
     @parametrize(
         "operation",
         [
@@ -829,8 +849,9 @@ class TestScalarOpsMisc(TestCase):
         with pytest.warns(RuntimeWarning, match="overflow encountered"):
             operation(min, neg_1)

-    @xfail  # (reason="pytorch does not warn on overflow")
-    @parametrize("dtype", np.typecodes["UnsignedInteger"])
+    @skipif(numpy.__version__ < "1.24", reason="NP_VER: fails on NumPy 1.23.x")
+    @xpassIfTorchDynamo  # (reason="pytorch does not warn on overflow")
+    @parametrize("dtype", "B")
     def test_scalar_unsigned_integer_overflow(self, dtype):
         val = np.dtype(dtype).type(8)
         with pytest.warns(RuntimeWarning, match="overflow encountered"):
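A recurring substitution in this file replaces np.typecodes[...] lookups with literal typecode strings, since under Dynamo np is real NumPy and its full typecode set includes dtypes torch._numpy does not implement (strings, objects, datetimes, and so on). For reference, the literals decode as follows; this snippet is illustrative only, using standard NumPy one-character codes:

    import numpy as np

    for code in "Bbhil" + "efd":
        print(code, "->", np.dtype(code).name)
    # B -> uint8, b -> int8, h -> int16, i -> int32, l -> int64 (platform long),
    # e -> float16, f -> float32, d -> float64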

View File

@@ -4,30 +4,54 @@ import functools
 import itertools
 import sys

 from unittest import expectedFailure as xfail, skipIf as skipif

-import pytest
-import torch._numpy as np
+import numpy
+import pytest
 from pytest import raises as assert_raises
-from torch._numpy import (
-    array,
-    atleast_1d,
-    atleast_2d,
-    atleast_3d,
-    AxisError,
-    concatenate,
-    hstack,
-    newaxis,
-    stack,
-    vstack,
-)
-from torch._numpy.testing import assert_, assert_array_equal, assert_equal

 from torch.testing._internal.common_utils import (
     instantiate_parametrized_tests,
     parametrize,
     run_tests,
+    TEST_WITH_TORCHDYNAMO,
     TestCase,
+    xpassIfTorchDynamo,
 )

+# If we are going to trace through these, we should use NumPy
+# If testing on eager mode, we use torch._numpy
+if TEST_WITH_TORCHDYNAMO:
+    import numpy as np
+    from numpy import (
+        array,
+        atleast_1d,
+        atleast_2d,
+        atleast_3d,
+        AxisError,
+        concatenate,
+        hstack,
+        newaxis,
+        stack,
+        vstack,
+    )
+    from numpy.testing import assert_, assert_array_equal, assert_equal
+else:
+    import torch._numpy as np
+    from torch._numpy import (
+        array,
+        atleast_1d,
+        atleast_2d,
+        atleast_3d,
+        AxisError,
+        concatenate,
+        hstack,
+        newaxis,
+        stack,
+        vstack,
+    )
+    from torch._numpy.testing import assert_, assert_array_equal, assert_equal

 skip = functools.partial(skipif, True)
@@ -178,6 +202,7 @@ class TestHstack(TestCase):
             # with assert_warns(FutureWarning):
             hstack(x for x in np.ones((3, 2)))

+    @skipif(numpy.__version__ < "1.24", reason="NP_VER: fails on NumPy 1.23.x")
     def test_casting_and_dtype(self):
         a = np.array([1, 2, 3])
         b = np.array([2.5, 3.5, 4.5])
@@ -232,6 +257,7 @@ class TestVstack(TestCase):
         with pytest.raises(TypeError, match="arrays to stack must be"):
             vstack(np.arange(3) for _ in range(2))

+    @skipif(numpy.__version__ < "1.24", reason="casting kwarg is new in NumPy 1.24")
     def test_casting_and_dtype(self):
         a = np.array([1, 2, 3])
         b = np.array([2.5, 3.5, 4.5])
@@ -239,6 +265,7 @@ class TestVstack(TestCase):
         expected_res = np.array([[1, 2, 3], [2, 3, 4]])
         assert_array_equal(res, expected_res)

+    @skipif(numpy.__version__ < "1.24", reason="casting kwarg is new in NumPy 1.24")
     def test_casting_and_dtype_type_error(self):
         a = np.array([1, 2, 3])
         b = np.array([2.5, 3.5, 4.5])
@@ -332,7 +359,7 @@ class TestConcatenate(TestCase):
             assert out is rout
             assert np.all(r == rout)

-    @xfail  # (reason="concatenate(x, axis=None) relies on x being a sequence")
+    @xpassIfTorchDynamo  # (reason="concatenate(x, axis=None) relies on x being a sequence")
     def test_large_concatenate_axis_None(self):
         # When no axis is given, concatenate uses flattened versions.
         # This also had a bug with many arrays (see gh-5979).
@@ -442,6 +469,7 @@ class TestConcatenate(TestCase):
 @instantiate_parametrized_tests
 class TestStackMisc(TestCase):
+    @skipif(numpy.__version__ < "1.24", reason="NP_VER: fails on NumPy 1.23.x")
     def test_stack(self):
         # non-iterable input
         assert_raises(TypeError, stack, 1)
@@ -523,6 +551,7 @@ class TestStackMisc(TestCase):
         with assert_raises(TypeError):
             stack((a, b), dtype=np.int64, axis=1, casting="safe")

+    @skipif(numpy.__version__ < "1.24", reason="NP_VER: fails on NumPy 1.23.x")
     @parametrize("axis", [0])
     @parametrize("out_dtype", ["c8", "f4", "f8", "i8"])  # torch does not have ">f8",
     @parametrize("casting", ["no", "equiv", "safe", "same_kind", "unsafe"])

View File

@@ -5,10 +5,20 @@
 Copied from fftpack.helper by Pearu Peterson, October 2005
 """
-import torch._numpy as np
-from torch._numpy import fft, pi
-from torch._numpy.testing import assert_array_almost_equal
-from torch.testing._internal.common_utils import run_tests, TestCase
+from torch.testing._internal.common_utils import (
+    run_tests,
+    TEST_WITH_TORCHDYNAMO,
+    TestCase,
+)
+
+if TEST_WITH_TORCHDYNAMO:
+    import numpy as np
+    from numpy import fft, pi
+    from numpy.testing import assert_array_almost_equal
+else:
+    import torch._numpy as np
+    from torch._numpy import fft, pi
+    from torch._numpy.testing import assert_array_almost_equal

 class TestFFTShift(TestCase):
@@ -73,7 +83,10 @@ class TestFFTShift(TestCase):
     def test_equal_to_original(self):
         """Test that the new (>=v1.15) implementation (see #10073) is equal to the original (<=v1.14)"""
-        from torch._numpy import arange, asarray, concatenate, take
+        if TEST_WITH_TORCHDYNAMO:
+            from numpy import arange, asarray, concatenate, take
+        else:
+            from torch._numpy import arange, asarray, concatenate, take

         def original_fftshift(x, axes=None):
             """How fftshift was implemented in v1.14"""

View File

@@ -6,19 +6,26 @@ import threading
 from unittest import skipIf as skipif, SkipTest

 import pytest

-import torch._numpy as np
 from pytest import raises as assert_raises
-from torch._numpy.random import random
-from torch._numpy.testing import assert_allclose, assert_array_equal  # , IS_WASM

 from torch.testing._internal.common_utils import (
     instantiate_parametrized_tests,
     parametrize,
     run_tests,
+    TEST_WITH_TORCHDYNAMO,
     TestCase,
 )

+if TEST_WITH_TORCHDYNAMO:
+    import numpy as np
+    from numpy.random import random
+    from numpy.testing import assert_allclose  # , IS_WASM
+else:
+    import torch._numpy as np
+    from torch._numpy.random import random
+    from torch._numpy.testing import assert_allclose  # , IS_WASM

 skip = functools.partial(skipif, True)
@@ -62,10 +69,10 @@ class TestFFT1D(TestCase):
     def test_ifft(self, norm):
         x = random(30) + 1j * random(30)
         assert_allclose(x, np.fft.ifft(np.fft.fft(x, norm=norm), norm=norm), atol=1e-6)

         # Ensure we get the correct error message
-        with pytest.raises(
-            (ValueError, RuntimeError), match="Invalid number of data points"
-        ):
+        # NB: Exact wording differs slightly under Dynamo and in eager.
+        with pytest.raises((ValueError, RuntimeError), match="Invalid number of"):
             np.fft.ifft([], norm=norm)

     def test_fft2(self):
@@ -358,10 +365,13 @@ class TestFFTThreadSafe(TestCase):
         [x.join() for x in t]
         # Make sure all threads returned the correct value
         for i in range(self.threads):
-            assert_array_equal(
+            # under torch.dynamo `assert_array_equal` fails with relative errors of
+            # about 1.5e-14. Hence replace it with `assert_allclose(..., rtol=2e-14)`
+            assert_allclose(
                 q.get(timeout=5),
                 expected,
-                "Function returned wrong value in multithreaded context",
+                atol=2e-14
+                # msg="Function returned wrong value in multithreaded context",
             )

     def test_fft(self):
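The swap from assert_array_equal to assert_allclose above loosens exact equality to a small absolute tolerance: assert_allclose(actual, desired, rtol=1e-07, atol=0) passes when |actual - desired| <= atol + rtol * |desired|, so the atol=2e-14 used here absorbs the ~1.5e-14 drift observed under Dynamo (note the inline comment in the hunk mentions rtol while the call actually passes atol). A quick illustration of the semantics:

    import numpy as np
    from numpy.testing import assert_allclose

    expected = np.fft.fft(np.ones(16))
    drifted = expected + 1.5e-14  # the order of error observed under Dynamo
    assert_allclose(drifted, expected, atol=2e-14)  # passes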

View File

@@ -1,15 +1,27 @@
 # Owner(s): ["module: dynamo"]

-from unittest import expectedFailure as xfail, skipIf as skipif
+from unittest import skipIf as skipif

-import torch._numpy as np
-from torch._numpy.testing import assert_allclose, assert_array_equal
-from torch.testing._internal.common_utils import run_tests, TestCase
+from torch.testing._internal.common_utils import (
+    run_tests,
+    TEST_WITH_TORCHDYNAMO,
+    TestCase,
+    xpassIfTorchDynamo,
+)

+# If we are going to trace through these, we should use NumPy
+# If testing on eager mode, we use torch._numpy
+if TEST_WITH_TORCHDYNAMO:
+    import numpy as np
+    from numpy.testing import assert_allclose, assert_array_equal
+else:
+    import torch._numpy as np
+    from torch._numpy.testing import assert_allclose, assert_array_equal

 class TestConstant(TestCase):
-    @xfail  # (reason="tuple values")
+    @xpassIfTorchDynamo  # (reason="tuple values")
     def test_check_constant(self):
         a = np.arange(100)
         a = np.pad(a, (25, 20), "constant", constant_values=(10, 20))
@@ -357,7 +369,7 @@ class TestConstant(TestCase):
         )
         assert_allclose(test, expected)

-    @xfail  # (reason="tuple values")
+    @xpassIfTorchDynamo  # (reason="tuple values")
     def test_check_constant_float3(self):
         a = np.arange(100, dtype=float)
         a = np.pad(a, (25, 20), "constant", constant_values=(-1.1, -1.2))
@@ -528,7 +540,7 @@ class TestConstant(TestCase):
         )
         assert_allclose(test, expected)

-    @xfail  # (reason="tuple values")
+    @xpassIfTorchDynamo  # (reason="tuple values")
     def test_check_constant_pad_2d(self):
         arr = np.arange(4).reshape(2, 2)
         test = np.lib.pad(

View File

@@ -3,24 +3,39 @@
 """Test functions for 1D array set operations.
 """
-from unittest import expectedFailure as xfail
+from unittest import skipIf

+import numpy

-import torch._numpy as np
 from pytest import raises as assert_raises
-from torch._numpy import unique
-from torch._numpy.testing import assert_array_equal, assert_equal

 from torch.testing._internal.common_utils import (
     instantiate_parametrized_tests,
     parametrize,
     run_tests,
+    subtest,
+    TEST_WITH_TORCHDYNAMO,
     TestCase,
+    xfailIfTorchDynamo,
+    xpassIfTorchDynamo,
 )

-@xfail  # (reason="TODO")
+# If we are going to trace through these, we should use NumPy
+# If testing on eager mode, we use torch._numpy
+if TEST_WITH_TORCHDYNAMO:
+    import numpy as np
+    from numpy import ediff1d, in1d, intersect1d, setdiff1d, setxor1d, union1d, unique
+    from numpy.testing import assert_array_equal, assert_equal, assert_raises_regex
+else:
+    import torch._numpy as np
+    from torch._numpy import unique
+    from torch._numpy.testing import assert_array_equal, assert_equal

+@skipIf(numpy.__version__ < "1.24", reason="NP_VER: fails on NumPy 1.23.x")
+@xpassIfTorchDynamo  # (reason="TODO")
 @instantiate_parametrized_tests
 class TestSetOps(TestCase):
     def test_intersect1d(self):
@@ -145,11 +160,14 @@ class TestSetOps(TestCase):
             (np.array([1, 2, 3], dtype=np.int64), None, np.nan, "to_end"),
             # should fail because attempting
             # to downcast to int type:
-            (
-                np.array([1, 2, 3], dtype=np.int64),
-                np.array([5, 7, 2], dtype=np.float32),
-                None,
-                "to_begin",
+            subtest(
+                (
+                    np.array([1, 2, 3], dtype=np.int64),
+                    np.array([5, 7, 2], dtype=np.float32),
+                    None,
+                    "to_begin",
+                ),
+                decorators=[xfailIfTorchDynamo],
             ),
             # should fail because attempting to cast
             # two special floating point values
@@ -205,6 +223,7 @@ class TestSetOps(TestCase):
         assert_equal(actual, expected)
         assert actual.dtype == expected.dtype

+    @skipIf(True, reason="NP_VER: fails with NumPy 1.22.x")
     @parametrize("kind", [None, "sort", "table"])
     def test_isin(self, kind):
         # the tests for in1d cover most of isin's behavior
@@ -217,7 +236,7 @@ class TestSetOps(TestCase):
         isin_slow = np.vectorize(_isin_slow, otypes=[bool], excluded={1})

         def assert_isin_equal(a, b):
-            x = isin(a, b, kind=kind)
+            x = np.isin(a, b, kind=kind)
             y = isin_slow(a, b)
             assert_array_equal(x, y)
@@ -444,7 +463,7 @@ class TestSetOps(TestCase):
         a = np.array([0, 1, 2], dtype="timedelta64[s]")
         b = a
         # Make sure it raises a value error:
-        with pytest.raises(ValueError):
+        with assert_raises(ValueError):
             in1d(a, b, kind="table")

     @parametrize(
@@ -475,7 +494,7 @@ class TestSetOps(TestCase):
         )
         if expect_failure:
-            with pytest.raises(RuntimeError, match="exceed the maximum"):
+            with assert_raises(RuntimeError, match="exceed the maximum"):
                 in1d(ar1, ar2, kind=kind)
         else:
             assert_array_equal(in1d(ar1, ar2, kind=kind), expected)
@@ -744,7 +763,7 @@ class TestUnique(TestCase):
         # assert_equal(a3_idx.dtype, np.intp)
         # assert_equal(a3_inv.dtype, np.intp)

-    @xfail  # (reason="unique with nans")
+    @xpassIfTorchDynamo  # (reason="unique with nans")
     def test_unique_1d_2(self):
         # test for ticket 2111 - float
         a = [2.0, np.nan, 1.0, np.nan]
@@ -790,7 +809,7 @@ class TestUnique(TestCase):
         assert_array_equal(unique(inp, axis=0), unique(inp_arr, axis=0), msg)
         assert_array_equal(unique(inp, axis=1), unique(inp_arr, axis=1), msg)

-    @xfail  # _run_axis_tests xfails with the message
+    @xpassIfTorchDynamo  # _run_axis_tests xfails with the message
     # torch has different unique ordering behaviour"
     def test_unique_axis(self):
         types = []
@@ -816,7 +835,7 @@ class TestUnique(TestCase):
         uniq = unique(x, axis=axis)
         assert_array_equal(uniq, [1, 2, 3, 4])

-    @xfail  # (reason="unique / return_index")
+    @xpassIfTorchDynamo  # (reason="unique / return_index")
     def test_unique_axis_zeros(self):
         # issue 15559
         single_zero = np.empty(shape=(2, 0), dtype=np.int8)
@@ -923,7 +942,8 @@ class TestUnique(TestCase):
         msg = "Unique's return_counts=True failed with axis=1"
         assert_array_equal(cnt, np.array([2, 1, 1]), msg)

-    @xfail  # (reason="unique / return_index / nans")
+    @skipIf(True, reason="NP_VER: fails on CI with older NumPy")
+    @xpassIfTorchDynamo  # (reason="unique / return_index / nans")
     def test_unique_nanequals(self):
         # issue 20326
         a = np.array([1, 1, np.nan, np.nan, np.nan])
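The subtest(...) wrappers introduced above let a single @parametrize case carry its own name and decorators, so one parameter combination can be xfailed without touching its siblings. A minimal self-contained sketch using the same helpers (the class and test names here are illustrative only):

    from unittest import expectedFailure as xfail

    from torch.testing._internal.common_utils import (
        instantiate_parametrized_tests,
        parametrize,
        run_tests,
        subtest,
        TestCase,
    )

    @instantiate_parametrized_tests
    class TestExample(TestCase):
        @parametrize(
            "x",
            [subtest(1, name="one"), subtest(0, name="zero", decorators=[xfail])],
        )
        def test_truthy(self, x):
            assert x  # the "zero" case is an expected failure

    if __name__ == "__main__":
        run_tests()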

View File

@ -11,29 +11,21 @@ from unittest import expectedFailure as xfail, skipIf as skipif
import hypothesis import hypothesis
import hypothesis.strategies as st import hypothesis.strategies as st
import pytest
import torch._numpy as np import numpy
import pytest
from hypothesis.extra.numpy import arrays from hypothesis.extra.numpy import arrays
from pytest import raises as assert_raises from pytest import raises as assert_raises
from torch._numpy.testing import (
assert_,
assert_allclose, # IS_PYPY,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
assert_equal,
assert_raises_regex,
assert_warns,
suppress_warnings, # HAS_REFCOUNT, IS_WASM
)
from torch.testing._internal.common_utils import ( from torch.testing._internal.common_utils import (
instantiate_parametrized_tests, instantiate_parametrized_tests,
parametrize, parametrize,
run_tests, run_tests,
subtest, subtest,
TEST_WITH_TORCHDYNAMO,
TestCase, TestCase,
xpassIfTorchDynamo,
) )
skip = functools.partial(skipif, True) skip = functools.partial(skipif, True)
@ -47,25 +39,79 @@ IS_PYPY = False
# from numpy lib import digitize, piecewise, trapz, select, trim_zeros, interp # from numpy lib import digitize, piecewise, trapz, select, trim_zeros, interp
from numpy.lib import delete, extract, insert, msort, place, setxor1d, unwrap, vectorize from numpy.lib import delete, extract, insert, msort, place, setxor1d, unwrap, vectorize
from torch._numpy import (
angle, # If we are going to trace through these, we should use NumPy
bartlett, # If testing on eager mode, we use torch._numpy
blackman, if TEST_WITH_TORCHDYNAMO:
corrcoef, import numpy as np
cov, from numpy import (
diff, angle,
flipud, bartlett,
gradient, blackman,
hamming, corrcoef,
hanning, cov,
i0, diff,
kaiser, digitize,
meshgrid, flipud,
sinc, gradient,
unique, hamming,
) hanning,
from torch._numpy._util import normalize_axis_tuple i0,
from torch._numpy.random import rand interp,
kaiser,
meshgrid,
sinc,
trapz,
trim_zeros,
unique,
)
from numpy.core.numeric import normalize_axis_tuple
from numpy.random import rand
from numpy.testing import (
assert_,
assert_allclose, # IS_PYPY,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
assert_equal,
assert_raises_regex,
assert_warns,
suppress_warnings, # HAS_REFCOUNT, IS_WASM
)
else:
import torch._numpy as np
from torch._numpy import (
angle,
bartlett,
blackman,
corrcoef,
cov,
diff,
flipud,
gradient,
hamming,
hanning,
i0,
kaiser,
meshgrid,
sinc,
unique,
)
from torch._numpy._util import normalize_axis_tuple
from torch._numpy.random import rand
from torch._numpy.testing import (
assert_,
assert_allclose, # IS_PYPY,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
assert_equal,
assert_raises_regex,
assert_warns,
suppress_warnings, # HAS_REFCOUNT, IS_WASM
)
def get_mat(n): def get_mat(n):
@ -251,7 +297,7 @@ class TestCopy(TestCase):
assert_equal(a[0, 0], 1) assert_equal(a[0, 0], 1)
assert_equal(a_copy[0, 0], 10) assert_equal(a_copy[0, 0], 10)
@xfail # (reason="order='F' not implemented") @xpassIfTorchDynamo # (reason="order='F' not implemented")
def test_order(self): def test_order(self):
# It turns out that people rely on np.copy() preserving order by # It turns out that people rely on np.copy() preserving order by
# default; changing this broke scikit-learn: # default; changing this broke scikit-learn:
@ -477,7 +523,7 @@ class TestSelect(TestCase):
select(conditions, choices) select(conditions, choices)
@xfail # (reason="TODO: implement") @xpassIfTorchDynamo # (reason="TODO: implement")
@instantiate_parametrized_tests @instantiate_parametrized_tests
class TestInsert(TestCase): class TestInsert(TestCase):
def test_basic(self): def test_basic(self):
@ -795,7 +841,7 @@ class TestDiff(TestCase):
assert_raises(np.AxisError, diff, x, append=0, axis=3) assert_raises(np.AxisError, diff, x, append=0, axis=3)
@xfail # (reason="TODO: implement") @xpassIfTorchDynamo # (reason="TODO: implement")
@instantiate_parametrized_tests @instantiate_parametrized_tests
class TestDelete(TestCase): class TestDelete(TestCase):
def setUp(self): def setUp(self):
@ -867,7 +913,9 @@ class TestDelete(TestCase):
with pytest.raises(IndexError): with pytest.raises(IndexError):
np.delete([0, 1, 2], np.array([], dtype=float)) np.delete([0, 1, 2], np.array([], dtype=float))
@parametrize("indexer", [np.array([1]), [1]]) @parametrize(
"indexer", [subtest(np.array([1]), name="array([1])"), subtest([1], name="[1]")]
)
def test_single_item_array(self, indexer): def test_single_item_array(self, indexer):
a_del_int = delete(self.a, 1) a_del_int = delete(self.a, 1)
a_del = delete(self.a, indexer) a_del = delete(self.a, indexer)
@ -1142,7 +1190,7 @@ class TestAngle(TestCase):
assert_array_almost_equal(z, zo, 11) assert_array_almost_equal(z, zo, 11)
@xfail # (reason="trim_zeros not implemented") @xpassIfTorchDynamo
@instantiate_parametrized_tests @instantiate_parametrized_tests
class TestTrimZeros(TestCase): class TestTrimZeros(TestCase):
a = np.array([0, 0, 1, 0, 2, 3, 4, 0]) a = np.array([0, 0, 1, 0, 2, 3, 4, 0])
@ -1151,7 +1199,11 @@ class TestTrimZeros(TestCase):
# d = a.astype(object) # d = a.astype(object)
def values(self): def values(self):
attr_names = ("a", "b", "c", "d") attr_names = (
"a",
"b",
"c",
) # "d")
return (getattr(self, name) for name in attr_names) return (getattr(self, name) for name in attr_names)
def test_basic(self): def test_basic(self):
@ -1210,7 +1262,7 @@ class TestTrimZeros(TestCase):
assert isinstance(res, list) assert isinstance(res, list)
@xfail # (reason="TODO: implement") @xpassIfTorchDynamo # (reason="TODO: implement")
class TestExtins(TestCase): class TestExtins(TestCase):
def test_basic(self): def test_basic(self):
a = np.array([1, 3, 2, 1, 2, 3, 3]) a = np.array([1, 3, 2, 1, 2, 3, 3])
@ -1612,7 +1664,7 @@ class TestVectorize(TestCase):
f(x) f(x)
@xfail # (reason="TODO: implement") @xpassIfTorchDynamo # (reason="TODO: implement")
class TestDigitize(TestCase): class TestDigitize(TestCase):
def test_forward(self): def test_forward(self):
x = np.arange(-6, 5) x = np.arange(-6, 5)
@@ -1716,7 +1768,9 @@ class TestUnwrap(TestCase):
@instantiate_parametrized_tests @instantiate_parametrized_tests
class TestFilterwindows(TestCase): class TestFilterwindows(TestCase):
@parametrize("dtype", np.typecodes["AllInteger"] + np.typecodes["Float"]) @parametrize(
"dtype", "Bbhil" + "efd"
) # np.typecodes["AllInteger"] + np.typecodes["Float"])
@parametrize("M", [0, 1, 10]) @parametrize("M", [0, 1, 10])
def test_hanning(self, dtype: str, M: int) -> None: def test_hanning(self, dtype: str, M: int) -> None:
scalar = M scalar = M
@@ -1736,7 +1790,9 @@ class TestFilterwindows(TestCase):
else: else:
assert_almost_equal(np.sum(w, axis=0), 4.500, 4) assert_almost_equal(np.sum(w, axis=0), 4.500, 4)
@parametrize("dtype", np.typecodes["AllInteger"] + np.typecodes["Float"]) @parametrize(
"dtype", "Bbhil" + "efd"
) # np.typecodes["AllInteger"] + np.typecodes["Float"])
@parametrize("M", [0, 1, 10]) @parametrize("M", [0, 1, 10])
def test_hamming(self, dtype: str, M: int) -> None: def test_hamming(self, dtype: str, M: int) -> None:
scalar = M scalar = M
@@ -1756,7 +1812,9 @@ class TestFilterwindows(TestCase):
else: else:
assert_almost_equal(np.sum(w, axis=0), 4.9400, 4) assert_almost_equal(np.sum(w, axis=0), 4.9400, 4)
@parametrize("dtype", np.typecodes["AllInteger"] + np.typecodes["Float"]) @parametrize(
"dtype", "Bbhil" + "efd"
) # np.typecodes["AllInteger"] + np.typecodes["Float"])
@parametrize("M", [0, 1, 10]) @parametrize("M", [0, 1, 10])
def test_bartlett(self, dtype: str, M: int) -> None: def test_bartlett(self, dtype: str, M: int) -> None:
scalar = M scalar = M
@@ -1776,7 +1834,9 @@ class TestFilterwindows(TestCase):
else: else:
assert_almost_equal(np.sum(w, axis=0), 4.4444, 4) assert_almost_equal(np.sum(w, axis=0), 4.4444, 4)
@parametrize("dtype", np.typecodes["AllInteger"] + np.typecodes["Float"]) @parametrize(
"dtype", "Bbhil" + "efd"
) # np.typecodes["AllInteger"] + np.typecodes["Float"])
@parametrize("M", [0, 1, 10]) @parametrize("M", [0, 1, 10])
def test_blackman(self, dtype: str, M: int) -> None: def test_blackman(self, dtype: str, M: int) -> None:
scalar = M scalar = M
@@ -1796,7 +1856,9 @@ class TestFilterwindows(TestCase):
else: else:
assert_almost_equal(np.sum(w, axis=0), 3.7800, 4) assert_almost_equal(np.sum(w, axis=0), 3.7800, 4)
@parametrize("dtype", np.typecodes["AllInteger"] + np.typecodes["Float"]) @parametrize(
"dtype", "Bbhil" + "efd"
) # np.typecodes["AllInteger"] + np.typecodes["Float"])
@parametrize("M", [0, 1, 10]) @parametrize("M", [0, 1, 10])
def test_kaiser(self, dtype: str, M: int) -> None: def test_kaiser(self, dtype: str, M: int) -> None:
scalar = M scalar = M
@@ -1817,7 +1879,7 @@ class TestFilterwindows(TestCase):
assert_almost_equal(np.sum(w, axis=0), 10, 15) assert_almost_equal(np.sum(w, axis=0), 10, 15)
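The dtype parametrizations in this class replace np.typecodes lookups with literal strings because torch._numpy does not reproduce the full np.typecodes table. Assuming stock NumPy, the literals spell out the torch-supported subset:

    import numpy

    int_codes = "Bbhil"   # uint8, int8, int16, int32, int64
    float_codes = "efd"   # float16, float32, float64
    assert set(int_codes) <= set(numpy.typecodes["AllInteger"])  # "bBhHiIlLqQ"
    assert set(float_codes) <= set(numpy.typecodes["Float"])     # "efdg"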
@xfail # (reason="TODO: implement") @xpassIfTorchDynamo # (reason="TODO: implement")
class TestTrapz(TestCase): class TestTrapz(TestCase):
def test_simple(self): def test_simple(self):
x = np.arange(-10, 10, 0.1) x = np.arange(-10, 10, 0.1)
@@ -1886,13 +1948,13 @@ class TestUnique(TestCase):
assert_(unique(np.array([1, 1, 1, 1, 1])) == np.array([1])) assert_(unique(np.array([1, 1, 1, 1, 1])) == np.array([1]))
@xfail # (reason="unique not implemented for 'ComplexDouble'") @xpassIfTorchDynamo # (reason="unique not implemented for 'ComplexDouble'")
def test_simple_complex(self): def test_simple_complex(self):
x = np.array([5 + 6j, 1 + 1j, 1 + 10j, 10, 5 + 6j]) x = np.array([5 + 6j, 1 + 1j, 1 + 10j, 10, 5 + 6j])
assert_(np.all(unique(x) == [1 + 1j, 1 + 10j, 5 + 6j, 10])) assert_(np.all(unique(x) == [1 + 1j, 1 + 10j, 5 + 6j, 10]))
@xfail # (reason="TODO: implement") @xpassIfTorchDynamo # (reason="TODO: implement")
class TestCheckFinite(TestCase): class TestCheckFinite(TestCase):
def test_simple(self): def test_simple(self):
a = [1, 2, 3] a = [1, 2, 3]
@@ -2537,7 +2599,19 @@ class TestBincount(TestCase):
np.bincount(vals) np.bincount(vals)
@xfail # (reason="TODO: implement") parametrize_interp_sc = parametrize(
"sc",
[
subtest(lambda x: np.float_(x), name="real"),
subtest(lambda x: _make_complex(x, 0), name="complex-real"),
subtest(lambda x: _make_complex(0, x), name="complex-imag"),
subtest(lambda x: _make_complex(x, np.multiply(x, -2)), name="complex-both"),
],
)
@xpassIfTorchDynamo # (reason="TODO: implement")
@instantiate_parametrized_tests
class TestInterp(TestCase): class TestInterp(TestCase):
def test_exceptions(self): def test_exceptions(self):
assert_raises(ValueError, interp, 0, [], []) assert_raises(ValueError, interp, 0, [], [])
@@ -2612,19 +2686,7 @@ class TestInterp(TestCase):
fp = [1, 2, np.nan, 4] fp = [1, 2, np.nan, 4]
assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.nan, np.nan, 4]) assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.nan, np.nan, 4])
@pytest.fixture( @parametrize_interp_sc
params=[
lambda x: np.float_(x),
lambda x: _make_complex(x, 0),
lambda x: _make_complex(0, x),
lambda x: _make_complex(x, np.multiply(x, -2)),
],
ids=["real", "complex-real", "complex-imag", "complex-both"],
)
def sc(self, request):
"""scale function used by the below tests"""
return request.param
def test_non_finite_any_nan(self, sc): def test_non_finite_any_nan(self, sc):
"""test that nans are propagated""" """test that nans are propagated"""
assert_equal(np.interp(0.5, [np.nan, 1], sc([0, 10])), sc(np.nan)) assert_equal(np.interp(0.5, [np.nan, 1], sc([0, 10])), sc(np.nan))
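This hunk replaces a parametrized pytest fixture with the reusable parametrize_interp_sc decorator defined in an earlier hunk, since plain pytest fixtures do not compose with the unittest-style TestCase machinery used here. A usage sketch, with a hypothetical test name and this file's imports assumed:

    @instantiate_parametrized_tests
    class TestInterpScale(TestCase):  # hypothetical illustration
        @parametrize_interp_sc
        def test_nan_propagates(self, sc):
            # sc is one of the scale functions: real, complex-real,
            # complex-imag, complex-both
            assert_equal(np.interp(0.5, [np.nan, 1], sc([0, 10])), sc(np.nan))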
@@ -2632,6 +2694,7 @@ class TestInterp(TestCase):
assert_equal(np.interp(0.5, [0, 1], sc([np.nan, 10])), sc(np.nan)) assert_equal(np.interp(0.5, [0, 1], sc([np.nan, 10])), sc(np.nan))
assert_equal(np.interp(0.5, [0, 1], sc([0, np.nan])), sc(np.nan)) assert_equal(np.interp(0.5, [0, 1], sc([0, np.nan])), sc(np.nan))
@parametrize_interp_sc
def test_non_finite_inf(self, sc): def test_non_finite_inf(self, sc):
"""Test that interp between opposite infs gives nan""" """Test that interp between opposite infs gives nan"""
assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([0, 10])), sc(np.nan)) assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([0, 10])), sc(np.nan))
@@ -2641,6 +2704,7 @@ class TestInterp(TestCase):
# unless the y values are equal # unless the y values are equal
assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([10, 10])), sc(10)) assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([10, 10])), sc(10))
@parametrize_interp_sc
def test_non_finite_half_inf_xf(self, sc): def test_non_finite_half_inf_xf(self, sc):
"""Test that interp where both axes have a bound at inf gives nan""" """Test that interp where both axes have a bound at inf gives nan"""
assert_equal(np.interp(0.5, [-np.inf, 1], sc([-np.inf, 10])), sc(np.nan)) assert_equal(np.interp(0.5, [-np.inf, 1], sc([-np.inf, 10])), sc(np.nan))
@@ -2652,6 +2716,7 @@ class TestInterp(TestCase):
assert_equal(np.interp(0.5, [0, +np.inf], sc([0, -np.inf])), sc(np.nan)) assert_equal(np.interp(0.5, [0, +np.inf], sc([0, -np.inf])), sc(np.nan))
assert_equal(np.interp(0.5, [0, +np.inf], sc([0, +np.inf])), sc(np.nan)) assert_equal(np.interp(0.5, [0, +np.inf], sc([0, +np.inf])), sc(np.nan))
@parametrize_interp_sc
def test_non_finite_half_inf_x(self, sc): def test_non_finite_half_inf_x(self, sc):
"""Test interp where the x axis has a bound at inf""" """Test interp where the x axis has a bound at inf"""
assert_equal(np.interp(0.5, [-np.inf, -np.inf], sc([0, 10])), sc(10)) assert_equal(np.interp(0.5, [-np.inf, -np.inf], sc([0, 10])), sc(10))
@@ -2659,6 +2724,7 @@ class TestInterp(TestCase):
assert_equal(np.interp(0.5, [0, +np.inf], sc([0, 10])), sc(0)) assert_equal(np.interp(0.5, [0, +np.inf], sc([0, 10])), sc(0))
assert_equal(np.interp(0.5, [+np.inf, +np.inf], sc([0, 10])), sc(0)) assert_equal(np.interp(0.5, [+np.inf, +np.inf], sc([0, 10])), sc(0))
@parametrize_interp_sc
def test_non_finite_half_inf_f(self, sc): def test_non_finite_half_inf_f(self, sc):
"""Test interp where the f axis has a bound at inf""" """Test interp where the f axis has a bound at inf"""
assert_equal(np.interp(0.5, [0, 1], sc([0, -np.inf])), sc(-np.inf)) assert_equal(np.interp(0.5, [0, 1], sc([0, -np.inf])), sc(-np.inf))
@@ -2786,7 +2852,7 @@ class TestPercentile(TestCase):
x = np.array([[1, 1, 1], [1, 1, 1], [4, 4, 3], [1, 1, 1], [1, 1, 1]]) x = np.array([[1, 1, 1], [1, 1, 1], [4, 4, 3], [1, 1, 1], [1, 1, 1]])
assert_array_equal(np.percentile(x, 50, axis=0), [1, 1, 1]) assert_array_equal(np.percentile(x, 50, axis=0), [1, 1, 1])
@xfail # (reason="TODO: implement") @xpassIfTorchDynamo # (reason="TODO: implement")
@parametrize("dtype", np.typecodes["Float"]) @parametrize("dtype", np.typecodes["Float"])
def test_linear_nan_1D(self, dtype): def test_linear_nan_1D(self, dtype):
# METHOD 1 of H&F # METHOD 1 of H&F
@@ -2796,14 +2862,14 @@ class TestPercentile(TestCase):
np.testing.assert_equal(res.dtype, arr.dtype) np.testing.assert_equal(res.dtype, arr.dtype)
H_F_TYPE_CODES = [ H_F_TYPE_CODES = [
(int_type, np.float64) for int_type in np.typecodes["AllInteger"] (int_type, np.float64) for int_type in "Bbhil" # np.typecodes["AllInteger"]
] + [ ] + [
(np.float16, np.float16), (np.float16, np.float16),
(np.float32, np.float32), (np.float32, np.float32),
(np.float64, np.float64), (np.float64, np.float64),
] ]
@xfail # (reason="TODO: implement percentile interpolations") @skipif(numpy.__version__ < "1.24", reason="NEP 50 is new in 1.24")
@parametrize("input_dtype, expected_dtype", H_F_TYPE_CODES) @parametrize("input_dtype, expected_dtype", H_F_TYPE_CODES)
@parametrize( @parametrize(
"method, expected", "method, expected",
@@ -3076,7 +3142,7 @@ class TestPercentile(TestCase):
b = np.percentile([2, 3, 4, 1], [50], overwrite_input=True) b = np.percentile([2, 3, 4, 1], [50], overwrite_input=True)
assert_equal(b, np.array([2.5])) assert_equal(b, np.array([2.5]))
@xfail # (reason="pytorch percentile does not support tuple axes.") @xpassIfTorchDynamo # (reason="pytorch percentile does not support tuple axes.")
def test_extended_axis(self): def test_extended_axis(self):
o = np.random.normal(size=(71, 23)) o = np.random.normal(size=(71, 23))
x = np.dstack([o] * 10) x = np.dstack([o] * 10)
@@ -3165,6 +3231,7 @@ class TestPercentile(TestCase):
np.percentile(d, [1, 7], axis=(0, 3), keepdims=True).shape, (2, 1, 5, 7, 1) np.percentile(d, [1, 7], axis=(0, 3), keepdims=True).shape, (2, 1, 5, 7, 1)
) )
@skipif(numpy.__version__ < "1.24", reason="NP_VER: fails on NumPy 1.23.x")
@parametrize( @parametrize(
"q", "q",
[ [
@@ -3172,7 +3239,7 @@ class TestPercentile(TestCase):
subtest( subtest(
[1, 7], [1, 7],
decorators=[ decorators=[
xfail, xpassIfTorchDynamo,
], ],
), ),
], ],
@@ -3186,13 +3253,13 @@ class TestPercentile(TestCase):
subtest( subtest(
(0, 1), (0, 1),
decorators=[ decorators=[
xfail, xpassIfTorchDynamo,
], ],
), ),
subtest( subtest(
(-3, -1), (-3, -1),
decorators=[ decorators=[
xfail, xpassIfTorchDynamo,
], ],
), ),
], ],
@@ -3242,7 +3309,7 @@ class TestPercentile(TestCase):
assert_equal(np.percentile(d, 1, out=o), o) assert_equal(np.percentile(d, 1, out=o), o)
assert_equal(np.percentile(d, 1, method="nearest", out=o), o) assert_equal(np.percentile(d, 1, method="nearest", out=o), o)
@xfail # (reason="np.percentile undocumented nan weirdness") @xpassIfTorchDynamo # (reason="np.percentile undocumented nan weirdness")
def test_nan_behavior(self): def test_nan_behavior(self):
a = np.arange(24, dtype=float) a = np.arange(24, dtype=float)
a[2] = np.nan a[2] = np.nan
@@ -3335,7 +3402,7 @@ class TestQuantile(TestCase):
assert_equal(np.quantile(x, 1), 3.5) assert_equal(np.quantile(x, 1), 3.5)
assert_equal(np.quantile(x, 0.5), 1.75) assert_equal(np.quantile(x, 0.5), 1.75)
@xfail # (reason="quantile w/integers or bools") @xpassIfTorchDynamo # (reason="quantile w/integers or bools")
def test_correct_quantile_value(self): def test_correct_quantile_value(self):
a = np.array([True]) a = np.array([True])
tf_quant = np.quantile(True, False) tf_quant = np.quantile(True, False)
@@ -3394,8 +3461,8 @@ class TestQuantile(TestCase):
np.quantile(np.arange(100.0), p, method="midpoint") np.quantile(np.arange(100.0), p, method="midpoint")
assert_array_equal(p, p0) assert_array_equal(p, p0)
@xfail # (reason="TODO: make quantile preserve integers") @xpassIfTorchDynamo # (reason="TODO: make quantile preserve integers")
@parametrize("dtype", np.typecodes["AllInteger"]) @parametrize("dtype", "Bbhil") # np.typecodes["AllInteger"])
def test_quantile_preserve_int_type(self, dtype): def test_quantile_preserve_int_type(self, dtype):
res = np.quantile(np.array([1, 2], dtype=dtype), [0.5], method="nearest") res = np.quantile(np.array([1, 2], dtype=dtype), [0.5], method="nearest")
assert res.dtype == dtype assert res.dtype == dtype
@@ -3406,50 +3473,50 @@ class TestQuantile(TestCase):
subtest( subtest(
"inverted_cdf", "inverted_cdf",
decorators=[ decorators=[
xfail, xpassIfTorchDynamo,
], ],
), ),
subtest( subtest(
"averaged_inverted_cdf", "averaged_inverted_cdf",
decorators=[ decorators=[
xfail, xpassIfTorchDynamo,
], ],
), ),
subtest( subtest(
"closest_observation", "closest_observation",
decorators=[ decorators=[
xfail, xpassIfTorchDynamo,
], ],
), ),
subtest( subtest(
"interpolated_inverted_cdf", "interpolated_inverted_cdf",
decorators=[ decorators=[
xfail, xpassIfTorchDynamo,
], ],
), ),
subtest( subtest(
"hazen", "hazen",
decorators=[ decorators=[
xfail, xpassIfTorchDynamo,
], ],
), ),
subtest( subtest(
"weibull", "weibull",
decorators=[ decorators=[
xfail, xpassIfTorchDynamo,
], ],
), ),
"linear", "linear",
subtest( subtest(
"median_unbiased", "median_unbiased",
decorators=[ decorators=[
xfail, xpassIfTorchDynamo,
], ],
), ),
subtest( subtest(
"normal_unbiased", "normal_unbiased",
decorators=[ decorators=[
xfail, xpassIfTorchDynamo,
], ],
), ),
"nearest", "nearest",
@@ -3517,7 +3584,7 @@ class TestMedian(TestCase):
a = np.array([0.0444502, 0.141249, 0.0463301]) a = np.array([0.0444502, 0.141249, 0.0463301])
assert_equal(a[-1], np.median(a)) assert_equal(a[-1], np.median(a))
@xfail # (reason="median: scalar output vs 0-dim") @xpassIfTorchDynamo # (reason="median: scalar output vs 0-dim")
def test_basic_2(self): def test_basic_2(self):
# check array scalar result # check array scalar result
a = np.array([0.0444502, 0.141249, 0.0463301]) a = np.array([0.0444502, 0.141249, 0.0463301])
@@ -3626,7 +3693,7 @@ class TestMedian(TestCase):
b[1, 2] = np.nan b[1, 2] = np.nan
assert_equal(np.median(a, 1), b) assert_equal(np.median(a, 1), b)
@xfail # (reason="median: does not support tuple axes") @xpassIfTorchDynamo # (reason="median: does not support tuple axes")
def test_nan_behavior_2(self): def test_nan_behavior_2(self):
a = np.arange(24, dtype=float).reshape(2, 3, 4) a = np.arange(24, dtype=float).reshape(2, 3, 4)
a[1, 2, 3] = np.nan a[1, 2, 3] = np.nan
@@ -3638,7 +3705,7 @@ class TestMedian(TestCase):
b[2] = np.nan b[2] = np.nan
assert_equal(np.median(a, (0, 2)), b) assert_equal(np.median(a, (0, 2)), b)
@xfail # (reason="median: scalar vs 0-dim") @xpassIfTorchDynamo # (reason="median: scalar vs 0-dim")
def test_nan_behavior_3(self): def test_nan_behavior_3(self):
a = np.arange(24, dtype=float).reshape(2, 3, 4) a = np.arange(24, dtype=float).reshape(2, 3, 4)
a[1, 2, 3] = np.nan a[1, 2, 3] = np.nan
@@ -3647,7 +3714,7 @@ class TestMedian(TestCase):
# no axis # no axis
assert_equal(np.median(a).ndim, 0) assert_equal(np.median(a).ndim, 0)
@xfail # (reason="median: torch.quantile does not handle empty tensors") @xpassIfTorchDynamo # (reason="median: torch.quantile does not handle empty tensors")
@skipif(IS_WASM, reason="fp errors don't work correctly") @skipif(IS_WASM, reason="fp errors don't work correctly")
def test_empty(self): def test_empty(self):
# mean(empty array) emits two warnings: empty slice and divide by 0 # mean(empty array) emits two warnings: empty slice and divide by 0
@@ -3678,7 +3745,7 @@ class TestMedian(TestCase):
assert_equal(np.median(a, axis=2), b) assert_equal(np.median(a, axis=2), b)
assert_(w[0].category is RuntimeWarning) assert_(w[0].category is RuntimeWarning)
@xfail # (reason="median: tuple axes not implemented") @xpassIfTorchDynamo # (reason="median: tuple axes not implemented")
def test_extended_axis(self): def test_extended_axis(self):
o = np.random.normal(size=(71, 23)) o = np.random.normal(size=(71, 23))
x = np.dstack([o] * 10) x = np.dstack([o] * 10)
@@ -3728,7 +3795,7 @@ class TestMedian(TestCase):
d = np.ones((3, 5, 7, 11)) d = np.ones((3, 5, 7, 11))
assert_equal(np.median(d, axis=None, keepdims=True).shape, (1, 1, 1, 1)) assert_equal(np.median(d, axis=None, keepdims=True).shape, (1, 1, 1, 1))
@xfail # (reason="median: tuple axis") @xpassIfTorchDynamo # (reason="median: tuple axis")
def test_keepdims_2(self): def test_keepdims_2(self):
d = np.ones((3, 5, 7, 11)) d = np.ones((3, 5, 7, 11))
assert_equal(np.median(d, axis=(0, 1), keepdims=True).shape, (1, 1, 7, 11)) assert_equal(np.median(d, axis=(0, 1), keepdims=True).shape, (1, 1, 7, 11))
@@ -3737,6 +3804,7 @@ class TestMedian(TestCase):
assert_equal(np.median(d, axis=(0, 1, 2, 3), keepdims=True).shape, (1, 1, 1, 1)) assert_equal(np.median(d, axis=(0, 1, 2, 3), keepdims=True).shape, (1, 1, 1, 1))
assert_equal(np.median(d, axis=(0, 1, 3), keepdims=True).shape, (1, 1, 7, 1)) assert_equal(np.median(d, axis=(0, 1, 3), keepdims=True).shape, (1, 1, 7, 1))
@skipif(numpy.__version__ < "1.24", reason="NP_VER: fails on NumPy 1.23.x")
@parametrize( @parametrize(
"axis", "axis",
[ [
@@ -3746,13 +3814,13 @@ class TestMedian(TestCase):
subtest( subtest(
(0, 1), (0, 1),
decorators=[ decorators=[
xfail, xpassIfTorchDynamo,
], ],
), ),
subtest( subtest(
(-3, -1), (-3, -1),
decorators=[ decorators=[
xfail, xpassIfTorchDynamo,
], ],
), ),
], ],
@@ -3772,7 +3840,7 @@ class TestMedian(TestCase):
assert_equal(result.shape, shape_out) assert_equal(result.shape, shape_out)
@xfail # (reason="TODO: implement") @xpassIfTorchDynamo # (reason="TODO: implement")
@instantiate_parametrized_tests @instantiate_parametrized_tests
class TestSortComplex(TestCase): class TestSortComplex(TestCase):
@parametrize( @parametrize(

View File

@@ -3,32 +3,46 @@
# from numpy.testing._private.utils import requires_memory # from numpy.testing._private.utils import requires_memory
import functools import functools
from unittest import expectedFailure as xfail, skipIf from unittest import skipIf
import pytest
import torch._numpy as np
from pytest import raises as assert_raises from pytest import raises as assert_raises
from torch._numpy import histogram, histogramdd
# from numpy.lib.histograms import histogram, histogramdd, histogram_bin_edges skip = functools.partial(skipIf, True)
from torch._numpy.testing import (
assert_,
assert_allclose,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
assert_equal,
# assert_array_max_ulp, #assert_raises_regex, suppress_warnings,
)
from torch.testing._internal.common_utils import ( from torch.testing._internal.common_utils import (
instantiate_parametrized_tests, instantiate_parametrized_tests,
parametrize, parametrize,
run_tests, run_tests,
slowTest as slow, slowTest as slow,
TEST_WITH_TORCHDYNAMO,
TestCase, TestCase,
xpassIfTorchDynamo,
) )
skip = functools.partial(skipIf, True) if TEST_WITH_TORCHDYNAMO:
import numpy as np
from numpy import histogram, histogram_bin_edges, histogramdd
from numpy.testing import (
assert_,
assert_allclose,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
assert_equal,
# assert_array_max_ulp, #assert_raises_regex, suppress_warnings,
)
else:
import torch._numpy as np
from torch._numpy import histogram, histogramdd
from torch._numpy.testing import (
assert_,
assert_allclose,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
assert_equal,
# assert_array_max_ulp, #assert_raises_regex, suppress_warnings,
)
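This if/else block is the core pattern of the change: under dynamo the tests import real NumPy, which is what gets traced, while in eager mode they exercise torch._numpy directly. Condensed to its essentials:

    from torch.testing._internal.common_utils import TEST_WITH_TORCHDYNAMO

    if TEST_WITH_TORCHDYNAMO:
        import numpy as np
        from numpy.testing import assert_equal
    else:
        import torch._numpy as np
        from torch._numpy.testing import assert_equal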
class TestHistogram(TestCase): class TestHistogram(TestCase):
@@ -189,7 +203,7 @@ class TestHistogram(TestCase):
) )
assert_almost_equal(a, [0.2, 0.1, 0.1, 0.075]) assert_almost_equal(a, [0.2, 0.1, 0.1, 0.075])
@xfail # (reason="histogram complex weights") @xpassIfTorchDynamo # (reason="histogram complex weights")
def test_exotic_weights(self): def test_exotic_weights(self):
# Test the use of weights that are not integer or floats, but e.g. # Test the use of weights that are not integer or floats, but e.g.
# complex numbers or object types. # complex numbers or object types.
@@ -251,7 +265,7 @@ class TestHistogram(TestCase):
with assert_raises((RuntimeError, ValueError)): with assert_raises((RuntimeError, ValueError)):
np.histogram(vals, range=[0.1, 0.01]) np.histogram(vals, range=[0.1, 0.01])
@xfail # (reason="edge cases") @xpassIfTorchDynamo # (reason="edge cases")
def test_bin_edge_cases(self): def test_bin_edge_cases(self):
# Ensure that floating-point computations correctly place edge cases. # Ensure that floating-point computations correctly place edge cases.
arr = np.array([337, 404, 739, 806, 1007, 1811, 2012]) arr = np.array([337, 404, 739, 806, 1007, 1811, 2012])
@@ -275,7 +289,7 @@ class TestHistogram(TestCase):
with assert_raises((RuntimeError, ValueError)): with assert_raises((RuntimeError, ValueError)):
np.histogram(vals, bins=bins) np.histogram(vals, bins=bins)
@xfail # (reason="no uint64") @xpassIfTorchDynamo # (reason="no uint64")
def test_unsigned_monotonicity_check(self): def test_unsigned_monotonicity_check(self):
# Ensures ValueError is raised if bins not increasing monotonically # Ensures ValueError is raised if bins not increasing monotonically
# when bins contain unsigned values (see #9222) # when bins contain unsigned values (see #9222)
@@ -301,7 +315,7 @@ class TestHistogram(TestCase):
np.histogram([np.array(0.5) for i in range(10)] + [0.500000000000001]) np.histogram([np.array(0.5) for i in range(10)] + [0.500000000000001])
np.histogram([np.array(0.5) for i in range(10)] + [0.5]) np.histogram([np.array(0.5) for i in range(10)] + [0.5])
@xfail # (reason="bins='auto'") @xpassIfTorchDynamo # (reason="bins='auto'")
def test_some_nan_values(self): def test_some_nan_values(self):
# gh-7503 # gh-7503
one_nan = np.array([0, 1, np.nan]) one_nan = np.array([0, 1, np.nan])
@@ -339,7 +353,7 @@ class TestHistogram(TestCase):
self.do_signed_overflow_bounds(np.short) self.do_signed_overflow_bounds(np.short)
self.do_signed_overflow_bounds(np.intc) self.do_signed_overflow_bounds(np.intc)
@xfail # (reason="int->float conversin loses precision") @xpassIfTorchDynamo # (reason="int->float conversin loses precision")
def test_signed_overflow_bounds_2(self): def test_signed_overflow_bounds_2(self):
self.do_signed_overflow_bounds(np.int_) self.do_signed_overflow_bounds(np.int_)
self.do_signed_overflow_bounds(np.longlong) self.do_signed_overflow_bounds(np.longlong)
@@ -382,14 +396,14 @@ class TestHistogram(TestCase):
self.do_precision_lower_bound(float_small, float_large) self.do_precision_lower_bound(float_small, float_large)
self.do_precision_upper_bound(float_small, float_large) self.do_precision_upper_bound(float_small, float_large)
@xfail # (reason="mixed dtypes") @xpassIfTorchDynamo # (reason="mixed dtypes")
def test_precision(self): def test_precision(self):
# not looping results in a useful stack trace upon failure # not looping results in a useful stack trace upon failure
self.do_precision(np.half, np.single) self.do_precision(np.half, np.single)
self.do_precision(np.half, np.double) self.do_precision(np.half, np.double)
self.do_precision(np.single, np.double) self.do_precision(np.single, np.double)
@xfail # (reason="histogram_bin_edges") @xpassIfTorchDynamo # (reason="histogram_bin_edges")
def test_histogram_bin_edges(self): def test_histogram_bin_edges(self):
hist, e = histogram([1, 2, 3, 4], [1, 2]) hist, e = histogram([1, 2, 3, 4], [1, 2])
edges = histogram_bin_edges([1, 2, 3, 4], [1, 2]) edges = histogram_bin_edges([1, 2, 3, 4], [1, 2])
@@ -405,7 +419,7 @@ class TestHistogram(TestCase):
assert_array_equal(edges, e) assert_array_equal(edges, e)
# @requires_memory(free_bytes=1e10) # @requires_memory(free_bytes=1e10)
@xfail # (reason="pytorch does not support bins = [int, int, array]") @xpassIfTorchDynamo # (reason="pytorch does not support bins = [int, int, array]")
@slow @slow
def test_big_arrays(self): def test_big_arrays(self):
sample = np.zeros([100000000, 3]) sample = np.zeros([100000000, 3])
@@ -416,7 +430,7 @@ class TestHistogram(TestCase):
assert_equal(type(hist), type((1, 2))) assert_equal(type(hist), type((1, 2)))
@xfail # (reason="TODO") @xpassIfTorchDynamo # (reason="TODO")
@instantiate_parametrized_tests @instantiate_parametrized_tests
class TestHistogramOptimBinNums(TestCase): class TestHistogramOptimBinNums(TestCase):
""" """
@@ -698,7 +712,6 @@ class TestHistogramOptimBinNums(TestCase):
""" """
Check that weighted data raises a TypeError Check that weighted data raises a TypeError
""" """
pytest.xpass(reason="passes by chance")
estimator_list = ["fd", "scott", "rice", "sturges", "auto"] estimator_list = ["fd", "scott", "rice", "sturges", "auto"]
for estimator in estimator_list: for estimator in estimator_list:
assert_raises(TypeError, histogram, [1, 2, 3], estimator, weights=[1, 2, 3]) assert_raises(TypeError, histogram, [1, 2, 3], estimator, weights=[1, 2, 3])
@@ -840,13 +853,13 @@ class TestHistogramdd(TestCase):
(RuntimeError, ValueError), np.histogramdd, x, bins=[1, 1, 1, [1, 2, 3, -3]] (RuntimeError, ValueError), np.histogramdd, x, bins=[1, 1, 1, [1, 2, 3, -3]]
) )
@xfail # (reason="pytorch does not support bins = [int, int, array]") @xpassIfTorchDynamo # (reason="pytorch does not support bins = [int, int, array]")
def test_bins_error_2(self): def test_bins_error_2(self):
# mixing scalar (# of bins) and explicit bin arrays, ugh # mixing scalar (# of bins) and explicit bin arrays, ugh
x = np.arange(8).reshape(2, 4) x = np.arange(8).reshape(2, 4)
assert_(np.histogramdd(x, bins=[1, 1, 1, [1, 2, 3, 4]])) assert_(np.histogramdd(x, bins=[1, 1, 1, [1, 2, 3, 4]]))
@xfail # (reason="pytorch does not support bins = [int, int, array]") @xpassIfTorchDynamo # (reason="pytorch does not support bins = [int, int, array]")
def test_inf_edges(self): def test_inf_edges(self):
# Test using +/-inf bin edges works. See #1788. # Test using +/-inf bin edges works. See #1788.
x = np.arange(6).reshape(3, 2) x = np.arange(6).reshape(3, 2)
@@ -897,7 +910,7 @@ class TestHistogramdd(TestCase):
range=[[0.0, 1.0], [np.nan, 0.75], [0.25, 0.5]], range=[[0.0, 1.0], [np.nan, 0.75], [0.25, 0.5]],
) )
@xfail # (reason="pytorch does not allow equal entries") @xpassIfTorchDynamo # (reason="pytorch does not allow equal entries")
def test_equal_edges(self): def test_equal_edges(self):
"""Test that adjacent entries in an edge array can be equal""" """Test that adjacent entries in an edge array can be equal"""
x = np.array([0, 1, 2]) x = np.array([0, 1, 2])
@@ -928,7 +941,7 @@ class TestHistogramdd(TestCase):
def test_large_integers(self): def test_large_integers(self):
big = 2**60 # Too large to represent with a full precision float big = 2**60 # Too large to represent with a full precision float
x = np.array([0], np.int64) x = np.asarray([0], dtype=np.int64)
x_edges = np.array([-1, +1], np.int64) x_edges = np.array([-1, +1], np.int64)
y = big + x y = big + x
y_edges = big + x_edges y_edges = big + x_edges

View File

@@ -4,29 +4,52 @@ import functools
from unittest import expectedFailure as xfail, skipIf from unittest import expectedFailure as xfail, skipIf
import torch._numpy as np
from pytest import raises as assert_raises # , assert_raises_regex, from pytest import raises as assert_raises # , assert_raises_regex,
from torch._numpy import diag_indices, diag_indices_from, fill_diagonal, index_exp, s_
from torch._numpy.testing import (
assert_,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
assert_equal,
)
from torch.testing._internal.common_utils import ( from torch.testing._internal.common_utils import (
instantiate_parametrized_tests, instantiate_parametrized_tests,
parametrize, parametrize,
run_tests, run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase, TestCase,
xpassIfTorchDynamo,
) )
skip = functools.partial(skipIf, True) skip = functools.partial(skipIf, True)
@xfail # (reason="unravel_index not implemented") # If we are going to trace through these, we should use NumPy
# If testing on eager mode, we use torch._numpy
if TEST_WITH_TORCHDYNAMO:
import numpy as np
from numpy import diag_indices, diag_indices_from, fill_diagonal, index_exp, s_
from numpy.testing import (
assert_,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
assert_equal,
assert_raises_regex,
)
else:
import torch._numpy as np
from torch._numpy import (
diag_indices,
diag_indices_from,
fill_diagonal,
index_exp,
s_,
)
from torch._numpy.testing import (
assert_,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
assert_equal,
)
@xpassIfTorchDynamo # (reason="unravel_index not implemented")
@instantiate_parametrized_tests @instantiate_parametrized_tests
class TestRavelUnravelIndex(TestCase): class TestRavelUnravelIndex(TestCase):
def test_basic(self): def test_basic(self):
@@ -428,7 +451,7 @@ class TestIx_(TestCase):
class TestC(TestCase): class TestC(TestCase):
@xfail # (reason="c_ not implemented") @xpassIfTorchDynamo # (reason="c_ not implemented")
def test_c_(self): def test_c_(self):
a = np.c_[np.array([[1, 2, 3]]), 0, 0, np.array([[4, 5, 6]])] a = np.c_[np.array([[1, 2, 3]]), 0, 0, np.array([[4, 5, 6]])]
assert_equal(a, [[1, 2, 3, 0, 0, 4, 5, 6]]) assert_equal(a, [[1, 2, 3, 0, 0, 4, 5, 6]])

View File

@@ -5,34 +5,62 @@ import sys
from unittest import expectedFailure as xfail, skipIf as skipif from unittest import expectedFailure as xfail, skipIf as skipif
import torch._numpy as np
from pytest import raises as assert_raises from pytest import raises as assert_raises
from torch._numpy import (
array_split,
column_stack,
dsplit,
dstack,
expand_dims,
hsplit,
kron,
put_along_axis,
split,
take_along_axis,
tile,
vsplit,
)
from torch._numpy.random import rand, randint
from torch._numpy.testing import assert_, assert_array_equal, assert_equal
from torch.testing._internal.common_utils import ( from torch.testing._internal.common_utils import (
instantiate_parametrized_tests, instantiate_parametrized_tests,
parametrize, parametrize,
run_tests, run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase, TestCase,
xfailIfTorchDynamo,
xpassIfTorchDynamo,
) )
# If we are going to trace through these, we should use NumPy
# If testing on eager mode, we use torch._numpy
if TEST_WITH_TORCHDYNAMO:
import numpy as np
from numpy import (
apply_along_axis,
array_split,
column_stack,
dsplit,
dstack,
expand_dims,
hsplit,
kron,
put_along_axis,
split,
take_along_axis,
tile,
vsplit,
)
from numpy.random import rand, randint
from numpy.testing import assert_, assert_array_equal, assert_equal
else:
import torch._numpy as np
from torch._numpy import (
array_split,
column_stack,
dsplit,
dstack,
expand_dims,
hsplit,
kron,
put_along_axis,
split,
take_along_axis,
tile,
vsplit,
)
from torch._numpy.random import rand, randint
from torch._numpy.testing import assert_, assert_array_equal, assert_equal
skip = functools.partial(skipif, True) skip = functools.partial(skipif, True)
@@ -126,7 +154,7 @@ class TestPutAlongAxis(TestCase):
assert_equal(i_min, i_max) assert_equal(i_min, i_max)
@xfail # ( @xpassIfTorchDynamo # (
# reason="RuntimeError: Expected index [1, 2, 5] to be smaller than self [3, 4, 1] apart from dimension 1") # reason="RuntimeError: Expected index [1, 2, 5] to be smaller than self [3, 4, 1] apart from dimension 1")
def test_broadcast(self): def test_broadcast(self):
"""Test that non-indexing dimensions are broadcast in both directions""" """Test that non-indexing dimensions are broadcast in both directions"""
@@ -136,7 +164,7 @@ class TestPutAlongAxis(TestCase):
assert_equal(take_along_axis(a, ai, axis=1), 20) assert_equal(take_along_axis(a, ai, axis=1), 20)
@xfail # (reason="apply_along_axis not implemented") @xpassIfTorchDynamo # (reason="apply_along_axis not implemented")
class TestApplyAlongAxis(TestCase): class TestApplyAlongAxis(TestCase):
def test_simple(self): def test_simple(self):
a = np.ones((20, 10), "d") a = np.ones((20, 10), "d")
@@ -679,6 +707,8 @@ class TestSqueeze(TestCase):
assert_equal(res.ndim, 0) assert_equal(res.ndim, 0)
assert type(res) is np.ndarray assert type(res) is np.ndarray
@xfailIfTorchDynamo
def test_basic_2(self):
aa = np.ones((3, 1, 4, 1, 1)) aa = np.ones((3, 1, 4, 1, 1))
assert aa.squeeze().tensor._base is aa.tensor assert aa.squeeze().tensor._base is aa.tensor
@@ -712,7 +742,7 @@ class TestSqueeze(TestCase):
assert_(a.flags.f_contiguous) assert_(a.flags.f_contiguous)
assert_(b.flags.f_contiguous) assert_(b.flags.f_contiguous)
@xfail # (reason="XXX: noop in torch, while numpy raises") @xpassIfTorchDynamo # (reason="XXX: noop in torch, while numpy raises")
def test_squeeze_axis_handling(self): def test_squeeze_axis_handling(self):
with assert_raises(ValueError): with assert_raises(ValueError):
np.squeeze(np.array([[1], [2], [3]]), axis=0) np.squeeze(np.array([[1], [2], [3]]), axis=0)
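The xpass above marks a real behavioral divergence: NumPy raises when the requested axis does not have length 1, while torch treats the squeeze as a no-op. For illustration:

    import numpy
    import torch

    try:
        numpy.squeeze(numpy.ones((3, 1)), axis=0)  # NumPy: ValueError
    except ValueError:
        pass
    assert torch.ones(3, 1).squeeze(0).shape == (3, 1)  # torch: silent no-op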
@@ -810,7 +840,7 @@ class TestTile(TestCase):
assert_equal(large, klarge) assert_equal(large, klarge)
@xfail # (reason="TODO: implement") @xpassIfTorchDynamo # (reason="TODO: implement")
class TestMayShareMemory(TestCase): class TestMayShareMemory(TestCase):
def test_basic(self): def test_basic(self):
d = np.ones((50, 60)) d = np.ones((50, 60))

View File

@@ -8,40 +8,72 @@ import functools
from unittest import expectedFailure as xfail, skipIf as skipif from unittest import expectedFailure as xfail, skipIf as skipif
import pytest import pytest
import torch._numpy as np
from pytest import raises as assert_raises from pytest import raises as assert_raises
from torch._numpy import (
arange,
array,
diag,
eye,
fliplr,
flipud,
histogram2d,
ones,
tri, # mask_indices,
tril_indices,
tril_indices_from,
triu_indices,
triu_indices_from,
vander,
zeros,
)
from torch._numpy.testing import (
assert_allclose,
assert_array_almost_equal,
assert_array_equal, # assert_array_max_ulp,
assert_equal,
)
from torch.testing._internal.common_utils import ( from torch.testing._internal.common_utils import (
instantiate_parametrized_tests, instantiate_parametrized_tests,
parametrize, parametrize,
run_tests, run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase, TestCase,
xpassIfTorchDynamo,
) )
# If we are going to trace through these, we should use NumPy
# If testing on eager mode, we use torch._numpy
if TEST_WITH_TORCHDYNAMO:
import numpy as np
from numpy import (
arange,
array,
diag,
eye,
fliplr,
flipud,
histogram2d,
ones,
tri, # mask_indices,
tril_indices,
tril_indices_from,
triu_indices,
triu_indices_from,
vander,
zeros,
)
from numpy.testing import (
assert_allclose,
assert_array_almost_equal,
assert_array_equal, # assert_array_max_ulp,
assert_equal,
)
else:
import torch._numpy as np
from torch._numpy import (
arange,
array,
diag,
eye,
fliplr,
flipud,
histogram2d,
ones,
tri, # mask_indices,
tril_indices,
tril_indices_from,
triu_indices,
triu_indices_from,
vander,
zeros,
)
from torch._numpy.testing import (
assert_allclose,
assert_array_almost_equal,
assert_array_equal, # assert_array_max_ulp,
assert_equal,
)
skip = functools.partial(skipif, True) skip = functools.partial(skipif, True)
@@ -101,7 +133,7 @@ class TestEye(TestCase):
def test_bool(self): def test_bool(self):
assert_equal(eye(2, 2, dtype=bool), [[True, False], [False, True]]) assert_equal(eye(2, 2, dtype=bool), [[True, False], [False, True]])
@xfail # (reason="TODO: implement order=non-default") @xpassIfTorchDynamo # (reason="TODO: implement order=non-default")
def test_order(self): def test_order(self):
mat_c = eye(4, 3, k=-1) mat_c = eye(4, 3, k=-1)
mat_f = eye(4, 3, k=-1, order="F") mat_f = eye(4, 3, k=-1, order="F")
@@ -127,9 +159,10 @@ class TestDiag(TestCase):
assert_equal(diag(vals, k=2), b) assert_equal(diag(vals, k=2), b)
assert_equal(diag(vals, k=-2), c) assert_equal(diag(vals, k=-2), c)
def test_matrix(self, vals=None): def test_matrix(self):
if vals is None: self.check_matrix(vals=(100 * get_mat(5) + 1).astype("l"))
vals = (100 * get_mat(5) + 1).astype("l")
def check_matrix(self, vals):
b = zeros((5,)) b = zeros((5,))
for k in range(5): for k in range(5):
b[k] = vals[k, k] b[k] = vals[k, k]
@@ -142,10 +175,10 @@ class TestDiag(TestCase):
b[k] = vals[k + 2, k] b[k] = vals[k + 2, k]
assert_equal(diag(vals, -2), b[:3]) assert_equal(diag(vals, -2), b[:3])
@xfail # (reason="TODO implement orders") @xpassIfTorchDynamo # (reason="TODO implement orders")
def test_fortran_order(self): def test_fortran_order(self):
vals = array((100 * get_mat(5) + 1), order="F", dtype="l") vals = array((100 * get_mat(5) + 1), order="F", dtype="l")
self.test_matrix(vals) self.check_matrix(vals)
def test_diag_bounds(self): def test_diag_bounds(self):
A = [[1, 2], [3, 4], [5, 6]] A = [[1, 2], [3, 4], [5, 6]]
@@ -251,7 +284,7 @@ class TestHistogram2d(TestCase):
# assert_array_max_ulp(a, np.zeros((4, 4))) # assert_array_max_ulp(a, np.zeros((4, 4)))
assert_allclose(a, np.zeros((4, 4)), atol=1e-15) assert_allclose(a, np.zeros((4, 4)), atol=1e-15)
@xfail # (reason="pytorch does not support bins = [int, array]") @xpassIfTorchDynamo # (reason="pytorch does not support bins = [int, array]")
def test_binparameter_combination(self): def test_binparameter_combination(self):
x = array([0, 0.09207008, 0.64575234, 0.12875982, 0.47390599, 0.59944483, 1]) x = array([0, 0.09207008, 0.64575234, 0.12875982, 0.47390599, 0.59944483, 1])
y = array([0, 0.14344267, 0.48988575, 0.30558665, 0.44700682, 0.15886423, 1]) y = array([0, 0.14344267, 0.48988575, 0.30558665, 0.44700682, 0.15886423, 1])
@@ -285,6 +318,7 @@ class TestHistogram2d(TestCase):
assert_array_equal(H, answer) assert_array_equal(H, answer)
assert_array_equal(xe, array([0.0, 0.25, 0.5, 0.75, 1])) assert_array_equal(xe, array([0.0, 0.25, 0.5, 0.75, 1]))
@skip(reason="NP_VER: fails on CI with older NumPy")
@parametrize("x_len, y_len", [(10, 11), (20, 19)]) @parametrize("x_len, y_len", [(10, 11), (20, 19)])
def test_bad_length(self, x_len, y_len): def test_bad_length(self, x_len, y_len):
x, y = np.ones(x_len), np.ones(y_len) x, y = np.ones(x_len), np.ones(y_len)
@@ -368,7 +402,7 @@ class TestTri(TestCase):
iu1 = mask_indices(3, np.triu, 1) iu1 = mask_indices(3, np.triu, 1)
assert_array_equal(a[iu1], array([1, 2, 5])) assert_array_equal(a[iu1], array([1, 2, 5]))
@xfail # (reason="np.tril_indices == our tuple(tril_indices)") @xpassIfTorchDynamo # (reason="np.tril_indices == our tuple(tril_indices)")
def test_tril_indices(self): def test_tril_indices(self):
# indices without and with offset # indices without and with offset
il1 = tril_indices(4) il1 = tril_indices(4)
@@ -428,7 +462,7 @@ class TestTri(TestCase):
) )
@xfail # (reason="np.triu_indices == our tuple(triu_indices)") @xpassIfTorchDynamo # (reason="np.triu_indices == our tuple(triu_indices)")
class TestTriuIndices(TestCase): class TestTriuIndices(TestCase):
def test_triu_indices(self): def test_triu_indices(self):
iu1 = triu_indices(4) iu1 = triu_indices(4)

View File

@@ -5,22 +5,44 @@ import functools
from unittest import expectedFailure as xfail, skipIf as skipif from unittest import expectedFailure as xfail, skipIf as skipif
import torch._numpy as np
from pytest import raises as assert_raises from pytest import raises as assert_raises
from torch.testing._internal.common_utils import (
from torch._numpy import ( run_tests,
common_type, TEST_WITH_TORCHDYNAMO,
iscomplex, TestCase,
iscomplexobj, xpassIfTorchDynamo,
isneginf,
isposinf,
isreal,
isrealobj,
nan_to_num,
real_if_close,
) )
from torch._numpy.testing import assert_, assert_array_equal, assert_equal
from torch.testing._internal.common_utils import run_tests, TestCase
if TEST_WITH_TORCHDYNAMO:
import numpy as np
from numpy import (
common_type,
iscomplex,
iscomplexobj,
isneginf,
isposinf,
isreal,
isrealobj,
nan_to_num,
real_if_close,
)
from numpy.testing import assert_, assert_array_equal, assert_equal
else:
import torch._numpy as np
from torch._numpy import (
common_type,
iscomplex,
iscomplexobj,
isneginf,
isposinf,
isreal,
isrealobj,
nan_to_num,
real_if_close,
)
from torch._numpy.testing import assert_, assert_array_equal, assert_equal
skip = functools.partial(skipif, True) skip = functools.partial(skipif, True)
@@ -29,7 +51,7 @@ def assert_all(x):
assert_(np.all(x), x) assert_(np.all(x), x)
@xfail # (reason="common_type not implemented") @xpassIfTorchDynamo # (reason="common_type not implemented")
class TestCommonType(TestCase): class TestCommonType(TestCase):
def test_basic(self): def test_basic(self):
ai32 = np.array([[1, 2], [3, 4]], dtype=np.int32) ai32 = np.array([[1, 2], [3, 4]], dtype=np.int32)
@@ -96,7 +118,7 @@ class TestMintypecode(TestCase):
assert_equal(mintypecode("idD"), "D") assert_equal(mintypecode("idD"), "D")
@xfail # (reason="TODO: decide on if [1] is a scalar or not") @xpassIfTorchDynamo # (reason="TODO: decide on if [1] is a scalar or not")
class TestIsscalar(TestCase): class TestIsscalar(TestCase):
def test_basic(self): def test_basic(self):
assert_(np.isscalar(3)) assert_(np.isscalar(3))

View File

@@ -15,45 +15,85 @@ from unittest import expectedFailure as xfail, skipIf as skipif, SkipTest
import pytest import pytest
import torch._numpy as np
from numpy.linalg.linalg import _multi_dot_matrix_chain_order from numpy.linalg.linalg import _multi_dot_matrix_chain_order
from pytest import raises as assert_raises from pytest import raises as assert_raises
from torch._numpy import (
array,
asarray,
atleast_2d,
cdouble,
csingle,
dot,
double,
identity,
inf,
linalg,
matmul,
single,
swapaxes,
)
from torch._numpy.linalg import LinAlgError, matrix_power, matrix_rank, multi_dot, norm
from torch._numpy.testing import (
assert_,
assert_allclose,
assert_almost_equal,
assert_array_equal,
assert_equal,
suppress_warnings,
# assert_raises_regex, HAS_LAPACK64, IS_WASM
)
from torch.testing._internal.common_utils import ( from torch.testing._internal.common_utils import (
instantiate_parametrized_tests, instantiate_parametrized_tests,
parametrize, parametrize,
run_tests, run_tests,
slowTest as slow,
TEST_WITH_TORCHDYNAMO,
TestCase, TestCase,
xpassIfTorchDynamo,
) )
skip = functools.partial(skipif, True)
# FIXME: slow tests have never run (= are broken) # If we are going to trace through these, we should use NumPy
slow = skip # If testing on eager mode, we use torch._numpy
if TEST_WITH_TORCHDYNAMO:
import numpy as np
from numpy import (
array,
asarray,
atleast_2d,
cdouble,
csingle,
dot,
double,
identity,
inf,
linalg,
matmul,
single,
swapaxes,
)
from numpy.linalg import LinAlgError, matrix_power, matrix_rank, multi_dot, norm
from numpy.testing import (
assert_,
assert_allclose,
assert_almost_equal,
assert_array_equal,
assert_equal,
suppress_warnings,
# assert_raises_regex, HAS_LAPACK64, IS_WASM
)
else:
import torch._numpy as np
from torch._numpy import (
array,
asarray,
atleast_2d,
cdouble,
csingle,
dot,
double,
identity,
inf,
linalg,
matmul,
single,
swapaxes,
)
from torch._numpy.linalg import (
LinAlgError,
matrix_power,
matrix_rank,
multi_dot,
norm,
)
from torch._numpy.testing import (
assert_,
assert_allclose,
assert_almost_equal,
assert_array_equal,
assert_equal,
suppress_warnings,
# assert_raises_regex, HAS_LAPACK64, IS_WASM
)
skip = functools.partial(skipif, True)
IS_WASM = False IS_WASM = False
HAS_LAPACK64 = False HAS_LAPACK64 = False
@@ -307,11 +347,11 @@ def _make_generalized_cases():
if not isinstance(case.a, np.ndarray): if not isinstance(case.a, np.ndarray):
continue continue
a = np.array([case.a, 2 * case.a, 3 * case.a]) a = np.stack([case.a, 2 * case.a, 3 * case.a])
if case.b is None: if case.b is None:
b = None b = None
else: else:
b = np.array([case.b, 7 * case.b, 6 * case.b]) b = np.stack([case.b, 7 * case.b, 6 * case.b])
new_case = LinalgCase( new_case = LinalgCase(
case.name + "_tile3", a, b, tags=case.tags | {"generalized"} case.name + "_tile3", a, b, tags=case.tags | {"generalized"}
) )
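np.array over a list of arrays leans on NumPy's sequence auto-packing; np.stack states the batching intent explicitly and should behave identically in both backends. For example:

    import numpy as np  # the same holds with torch._numpy as np

    a = np.eye(3)
    batched = np.stack([a, 2 * a, 3 * a])  # explicit new leading axis
    assert batched.shape == (3, 3, 3)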
@@ -408,7 +448,6 @@ class LinalgGeneralizedNonsquareTestCase(LinalgTestCase):
class HermitianGeneralizedTestCase(LinalgTestCase): class HermitianGeneralizedTestCase(LinalgTestCase):
@xfail # (reason="sort complex")
@slow @slow
def test_generalized_herm_cases(self): def test_generalized_herm_cases(self):
self.check_cases(require={"generalized", "hermitian"}, exclude={"size-0"}) self.check_cases(require={"generalized", "hermitian"}, exclude={"size-0"})
@@ -802,7 +841,7 @@ class TestCond(CondCases, TestCase):
for A, p in itertools.product(As, p_neg): for A, p in itertools.product(As, p_neg):
linalg.cond(A, p) linalg.cond(A, p)
@xfail # ( @skip(reason="NP_VER: fails on CI") # (
# True, run=False, reason="Platform/LAPACK-dependent failure, see gh-18914" # True, run=False, reason="Platform/LAPACK-dependent failure, see gh-18914"
# ) # )
def test_nan(self): def test_nan(self):
@@ -890,7 +929,7 @@ class DetCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
ad = asarray(a).astype(cdouble) ad = asarray(a).astype(cdouble)
ev = linalg.eigvals(ad) ev = linalg.eigvals(ad)
assert_almost_equal(d, np.prod(ev, axis=-1)) assert_almost_equal(d, np.prod(ev, axis=-1))
assert_almost_equal(s * np.exp(ld), np.prod(ev, axis=-1)) assert_almost_equal(s * np.exp(ld), np.prod(ev, axis=-1), single_decimal=5)
s = np.atleast_1d(s) s = np.atleast_1d(s)
ld = np.atleast_1d(ld) ld = np.atleast_1d(ld)
@@ -976,7 +1015,7 @@ class LstsqCases(LinalgSquareTestCase, LinalgNonsquareTestCase):
@instantiate_parametrized_tests @instantiate_parametrized_tests
class TestLstsq(LstsqCases, TestCase): class TestLstsq(LstsqCases, TestCase):
@xfail # (reason="Lstsq: we use the future default =None") @xpassIfTorchDynamo # (reason="Lstsq: we use the future default =None")
def test_future_rcond(self): def test_future_rcond(self):
a = np.array( a = np.array(
[ [
@@ -1306,8 +1345,8 @@ class _TestNormGeneral(_TestNormBase):
def test_vector_return_type(self): def test_vector_return_type(self):
a = np.array([1, 0, 1]) a = np.array([1, 0, 1])
exact_types = np.typecodes["AllInteger"] exact_types = "Bbhil" # np.typecodes["AllInteger"]
inexact_types = np.typecodes["AllFloat"] inexact_types = "efdFD" # np.typecodes["AllFloat"]
all_types = exact_types + inexact_types all_types = exact_types + inexact_types
@@ -1485,7 +1524,7 @@ class _TestNorm2D(_TestNormBase):
def test_matrix_return_type(self): def test_matrix_return_type(self):
a = np.array([[1, 0, 1], [0, 1, 1]]) a = np.array([[1, 0, 1], [0, 1, 1]])
exact_types = np.typecodes["AllInteger"] exact_types = "Bbhil" # np.typecodes["AllInteger"]
# float32, complex64, float64, complex128 types are the only types # float32, complex64, float64, complex128 types are the only types
# allowed by `linalg`, which performs the matrix operations used # allowed by `linalg`, which performs the matrix operations used
@@ -1721,7 +1760,7 @@ class TestQR(TestCase):
assert_(isinstance(r2, a_type)) assert_(isinstance(r2, a_type))
assert_almost_equal(r2, r1) assert_almost_equal(r2, r1)
@xfail # (reason="torch does not allow qr(..., mode='raw'") @xpassIfTorchDynamo # (reason="torch does not allow qr(..., mode='raw'")
@parametrize("m, n", [(3, 0), (0, 3), (0, 0)]) @parametrize("m, n", [(3, 0), (0, 3), (0, 0)])
def test_qr_empty(self, m, n): def test_qr_empty(self, m, n):
k = min(m, n) k = min(m, n)
@@ -1735,7 +1774,7 @@ class TestQR(TestCase):
assert_equal(h.shape, (n, m)) assert_equal(h.shape, (n, m))
assert_equal(tau.shape, (k,)) assert_equal(tau.shape, (k,))
@xfail # (reason="torch does not allow qr(..., mode='raw'") @xpassIfTorchDynamo # (reason="torch does not allow qr(..., mode='raw'")
def test_mode_raw(self): def test_mode_raw(self):
# The factorization is not unique and varies between libraries, # The factorization is not unique and varies between libraries,
# so it is not possible to check against known values. Functional # so it is not possible to check against known values. Functional
@@ -1870,7 +1909,7 @@ class TestCholesky(TestCase):
class TestMisc(TestCase): class TestMisc(TestCase):
@xfail # (reason="endianness") @xpassIfTorchDynamo # (reason="endianness")
def test_byteorder_check(self): def test_byteorder_check(self):
# Byte order check should pass for native order # Byte order check should pass for native order
if sys.byteorder == "little": if sys.byteorder == "little":
@@ -2205,7 +2244,7 @@ class TestTensorsolve(TestCase):
class TestMisc2(TestCase): class TestMisc2(TestCase):
@xfail # (reason="TODO") @xpassIfTorchDynamo # (reason="TODO")
def test_unsupported_commontype(self): def test_unsupported_commontype(self):
# linalg gracefully handles unsupported type # linalg gracefully handles unsupported type
arr = np.array([[1, -2], [2, 5]], dtype="float16") arr = np.array([[1, -2], [2, 5]], dtype="float16")
@@ -2213,7 +2252,6 @@ class TestMisc2(TestCase):
with assert_raises(TypeError): with assert_raises(TypeError):
linalg.cholesky(arr) linalg.cholesky(arr)
@xfail # (reason="TODO")
# @slow # @slow
# @pytest.mark.xfail(not HAS_LAPACK64, run=False, # @pytest.mark.xfail(not HAS_LAPACK64, run=False,
# reason="Numpy not compiled with 64-bit BLAS/LAPACK") # reason="Numpy not compiled with 64-bit BLAS/LAPACK")

View File

@@ -3,10 +3,20 @@
import pytest import pytest
import torch._numpy as np from torch.testing._internal.common_utils import (
from torch._numpy.testing import assert_equal run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
from torch.testing._internal.common_utils import run_tests, TestCase # If we are going to trace through these, we should use NumPy
# If testing on eager mode, we use torch._numpy
if TEST_WITH_TORCHDYNAMO:
import numpy as np
from numpy.testing import assert_equal
else:
import torch._numpy as np
from torch._numpy.testing import assert_equal
class TestAppend(TestCase): class TestAppend(TestCase):

View File

@@ -1,26 +1,34 @@
# Owner(s): ["module: dynamo"] # Owner(s): ["module: dynamo"]
import itertools import itertools
from unittest import expectedFailure as xfail, skipIf as skip from unittest import expectedFailure as xfail, skipIf as skipif
import numpy
import pytest import pytest
import torch._numpy as np
# import numpy as np
from pytest import raises as assert_raises from pytest import raises as assert_raises
from torch._numpy.testing import assert_equal
from torch.testing._internal.common_utils import ( from torch.testing._internal.common_utils import (
instantiate_parametrized_tests, instantiate_parametrized_tests,
parametrize, parametrize,
run_tests, run_tests,
skipIfTorchDynamo,
subtest, subtest,
TEST_WITH_TORCHDYNAMO,
TestCase, TestCase,
xpassIfTorchDynamo,
) )
if TEST_WITH_TORCHDYNAMO:
import numpy as np
from numpy.testing import assert_equal
else:
import torch._numpy as np
from torch._numpy.testing import assert_equal
class TestIndexing(TestCase): class TestIndexing(TestCase):
@skipif(TEST_WITH_TORCHDYNAMO, reason=".tensor attr, type of a[0, 0]")
def test_indexing_simple(self): def test_indexing_simple(self):
a = np.array([[1, 2, 3], [4, 5, 6]]) a = np.array([[1, 2, 3], [4, 5, 6]])
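The skips added in this file guard tests that poke at torch._numpy internals: a torch._numpy ndarray wraps a Tensor exposed as .tensor, an attribute that real NumPy arrays (used when tracing under dynamo) do not have. In eager mode only:

    import torch._numpy as tnp

    arr = tnp.asarray([[1, 2, 3], [4, 5, 6]])
    assert tnp.ravel(arr).tensor._base is arr.tensor  # the view shares storage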
@@ -36,6 +44,7 @@ class TestIndexing(TestCase):
class TestReshape(TestCase): class TestReshape(TestCase):
@skipif(TEST_WITH_TORCHDYNAMO, reason=".tensor attribute")
def test_reshape_function(self): def test_reshape_function(self):
arr = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]] arr = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]] tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
@@ -44,6 +53,7 @@ class TestReshape(TestCase):
arr = np.asarray(arr) arr = np.asarray(arr)
assert np.transpose(arr, (1, 0)).tensor._base is arr.tensor assert np.transpose(arr, (1, 0)).tensor._base is arr.tensor
@skipif(TEST_WITH_TORCHDYNAMO, reason=".tensor attribute")
def test_reshape_method(self): def test_reshape_method(self):
arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]) arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
arr_shape = arr.shape arr_shape = arr.shape
@@ -85,6 +95,7 @@ class TestReshape(TestCase):
class TestTranspose(TestCase): class TestTranspose(TestCase):
@skipif(TEST_WITH_TORCHDYNAMO, reason=".tensor attribute")
def test_transpose_function(self): def test_transpose_function(self):
arr = [[1, 2], [3, 4], [5, 6]] arr = [[1, 2], [3, 4], [5, 6]]
tgt = [[1, 3, 5], [2, 4, 6]] tgt = [[1, 3, 5], [2, 4, 6]]
@@ -93,6 +104,7 @@ class TestTranspose(TestCase):
arr = np.asarray(arr) arr = np.asarray(arr)
assert np.transpose(arr, (1, 0)).tensor._base is arr.tensor assert np.transpose(arr, (1, 0)).tensor._base is arr.tensor
@skipif(TEST_WITH_TORCHDYNAMO, reason=".tensor attribute")
def test_transpose_method(self): def test_transpose_method(self):
a = np.array([[1, 2], [3, 4]]) a = np.array([[1, 2], [3, 4]])
assert_equal(a.transpose(), [[1, 3], [2, 4]]) assert_equal(a.transpose(), [[1, 3], [2, 4]])
@@ -105,6 +117,7 @@ class TestTranspose(TestCase):
class TestRavel(TestCase): class TestRavel(TestCase):
@skipif(TEST_WITH_TORCHDYNAMO, reason=".tensor attribute")
def test_ravel_function(self): def test_ravel_function(self):
a = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]] a = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tgt = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] tgt = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
@@ -113,6 +126,7 @@ class TestRavel(TestCase):
arr = np.asarray(a) arr = np.asarray(a)
assert np.ravel(arr).tensor._base is arr.tensor assert np.ravel(arr).tensor._base is arr.tensor
@skipif(TEST_WITH_TORCHDYNAMO, reason=".tensor attribute")
def test_ravel_method(self): def test_ravel_method(self):
a = np.array([[0, 1], [2, 3]]) a = np.array([[0, 1], [2, 3]])
assert_equal(a.ravel(), [0, 1, 2, 3]) assert_equal(a.ravel(), [0, 1, 2, 3])
@@ -189,6 +203,7 @@ class TestArgmaxArgminCommon(TestCase):
(256,), (256,),
] ]
@skipif(numpy.__version__ < "1.22", reason="NP_VER: fails on NumPy 1.21.x")
@parametrize( @parametrize(
"size, axis", "size, axis",
list( list(
@@ -272,7 +287,7 @@ class TestArgmaxArgminCommon(TestCase):
with pytest.raises(ValueError): with pytest.raises(ValueError):
method(arr.T, axis=axis, out=wrong_outarray, keepdims=True) method(arr.T, axis=axis, out=wrong_outarray, keepdims=True)
@skip(True, reason="XXX: need ndarray.chooses") @skipif(True, reason="XXX: need ndarray.chooses")
@parametrize("method", ["max", "min"]) @parametrize("method", ["max", "min"])
def test_all(self, method): def test_all(self, method):
# a = np.random.normal(0, 1, (4, 5, 6, 7, 8)) # a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
@@ -396,27 +411,38 @@ class TestArgmax(TestCase):
) )
] ]
nan_arr = darr + [ nan_arr = darr + [
subtest(([0, 1, 2, 3, complex(0, np.nan)], 4), decorators=[xfail]), subtest(([0, 1, 2, 3, complex(0, np.nan)], 4), decorators=[xpassIfTorchDynamo]),
subtest(([0, 1, 2, 3, complex(np.nan, 0)], 4), decorators=[xfail]), subtest(([0, 1, 2, 3, complex(np.nan, 0)], 4), decorators=[xpassIfTorchDynamo]),
subtest(([0, 1, 2, complex(np.nan, 0), 3], 3), decorators=[xfail]), subtest(([0, 1, 2, complex(np.nan, 0), 3], 3), decorators=[xpassIfTorchDynamo]),
subtest(([0, 1, 2, complex(0, np.nan), 3], 3), decorators=[xfail]), subtest(([0, 1, 2, complex(0, np.nan), 3], 3), decorators=[xpassIfTorchDynamo]),
subtest(([complex(0, np.nan), 0, 1, 2, 3], 0), decorators=[xfail]), subtest(([complex(0, np.nan), 0, 1, 2, 3], 0), decorators=[xpassIfTorchDynamo]),
subtest(([complex(np.nan, np.nan), 0, 1, 2, 3], 0), decorators=[xfail]), subtest(
([complex(np.nan, np.nan), 0, 1, 2, 3], 0), decorators=[xpassIfTorchDynamo]
),
subtest( subtest(
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0), ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
decorators=[xfail], decorators=[xpassIfTorchDynamo],
), ),
subtest( subtest(
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0), ([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
decorators=[xfail], decorators=[xpassIfTorchDynamo],
), ),
subtest( subtest(
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0), ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
decorators=[xfail], decorators=[xpassIfTorchDynamo],
),
subtest(
([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
decorators=[xpassIfTorchDynamo],
),
subtest(
([complex(1, 0), complex(0, 2), complex(0, 1)], 0),
decorators=[xpassIfTorchDynamo],
),
subtest(
([complex(1, 0), complex(0, 2), complex(1, 1)], 2),
decorators=[xpassIfTorchDynamo],
), ),
subtest(([complex(0, 0), complex(0, 2), complex(0, 1)], 1), decorators=[xfail]),
subtest(([complex(1, 0), complex(0, 2), complex(0, 1)], 0), decorators=[xfail]),
subtest(([complex(1, 0), complex(0, 2), complex(1, 1)], 2), decorators=[xfail]),
([False, False, False, False, True], 4), ([False, False, False, False, True], 4),
([False, False, False, True, False], 3), ([False, False, False, True, False], 3),
([True, False, False, False, False], 0), ([True, False, False, False, False], 0),
@@ -619,11 +645,12 @@ class TestNoExtraMethods(TestCase):
class TestIter(TestCase): class TestIter(TestCase):
@skipIfTorchDynamo
def test_iter_1d(self): def test_iter_1d(self):
# numpy generates array scalars, we do 0D arrays # numpy generates array scalars, we do 0D arrays
a = np.arange(5) a = np.arange(5)
lst = list(a) lst = list(a)
assert all(type(x) == np.ndarray for x in lst) assert all(type(x) == np.ndarray for x in lst), f"{[type(x) for x in lst]}"
assert all(x.ndim == 0 for x in lst) assert all(x.ndim == 0 for x in lst)
def test_iter_2d(self): def test_iter_2d(self):
View File
@@ -1,28 +1,44 @@
 # Owner(s): ["module: dynamo"]

-from unittest import expectedFailure as xfail, SkipTest
+from unittest import skipIf, SkipTest

+import numpy
 import pytest

-import torch._numpy as np
 from pytest import raises as assert_raises

-from torch._numpy import _util
-from torch._numpy.testing import (
-    assert_allclose,
-    assert_almost_equal,
-    assert_array_equal,
-    assert_equal,
-)
 from torch.testing._internal.common_utils import (
     instantiate_parametrized_tests,
     parametrize,
     run_tests,
+    TEST_WITH_TORCHDYNAMO,
     TestCase,
+    xpassIfTorchDynamo,
 )

+# If we are going to trace through these, we should use NumPy
+# If testing on eager mode, we use torch._numpy
+if TEST_WITH_TORCHDYNAMO:
+    import numpy as np
+    import numpy.core.numeric as _util  # for normalize_axis_tuple
+    from numpy.testing import (
+        assert_allclose,
+        assert_almost_equal,
+        assert_array_equal,
+        assert_equal,
+    )
+else:
+    import torch._numpy as np
+    from torch._numpy import _util
+    from torch._numpy.testing import (
+        assert_allclose,
+        assert_almost_equal,
+        assert_array_equal,
+        assert_equal,
+    )

 class TestFlatnonzero(TestCase):
     def test_basic(self):
         x = np.arange(-2, 3)
@@ -112,7 +128,7 @@ class TestMean(TestCase):
         # of float32.
         assert np.mean(np.ones(100000, dtype="float16")) == 1

-    @xfail  # (reason="XXX: mean(..., where=...) not implemented")
+    @xpassIfTorchDynamo  # (reason="XXX: mean(..., where=...) not implemented")
     def test_mean_where(self):
         a = np.arange(16).reshape((4, 4))
         wh_full = np.array(
@@ -178,7 +194,8 @@ class TestSum(TestCase):
         assert_allclose(res_float, 4.0, atol=1e-15)
         assert res_float.dtype == "float64"

-    @xfail  # (reason="sum: does not warn on overflow")
+    @skipIf(numpy.__version__ < "1.24", reason="NP_VER: fails on NumPy 1.23.x")
+    @xpassIfTorchDynamo  # (reason="sum: does not warn on overflow")
     def test_sum_dtypes_warnings(self):
         for dt in (int, np.float16, np.float32, np.float64):
             for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127, 128, 1024, 1235):
@@ -245,7 +262,7 @@ class TestSum(TestCase):
             d += d
         assert_allclose(d, 2.0 + 2j, atol=1.5e-7)

-    @xfail  # (reason="initial=... need implementing")
+    @xpassIfTorchDynamo  # (reason="initial=... need implementing")
     def test_sum_initial(self):
         # Integer, single axis
         assert_equal(np.sum([3], initial=2), 5)
@@ -259,7 +276,7 @@ class TestSum(TestCase):
             [12, 12, 12],
         )

-    @xfail  # (reason="where=... need implementing")
+    @xpassIfTorchDynamo  # (reason="where=... need implementing")
     def test_sum_where(self):
         # More extensive tests done in test_reduction_with_where.
         assert_equal(np.sum([[1.0, 2.0], [3.0, 4.0]], where=[True, False]), 4.0)
@@ -302,6 +319,10 @@ fails_out_arg = {
     np.count_nonzero,
 }

+restricts_dtype_casts = {np.var, np.std}
+
+fails_empty_tuple = {np.argmin, np.argmax}
+

 @instantiate_parametrized_tests
 class TestGenericReductions(TestCase):
@@ -336,6 +357,9 @@ class TestGenericReductions(TestCase):
     @parametrize_func
     def test_axis_empty_generic(self, func):
+        if func in fails_empty_tuple:
+            raise SkipTest("func(..., axis=()) is not valid")
+
         a = np.array([[0, 0, 1], [1, 0, 1]])
         assert_array_equal(func(a, axis=()), func(np.expand_dims(a, axis=0), axis=0))
@@ -361,6 +385,7 @@ class TestGenericReductions(TestCase):
         expanded = np.expand_dims(func(a, axis=axis), axis=axis)
         assert_array_equal(with_keepdims, expanded)

+    @skipIf(numpy.__version__ < "1.24", reason="NP_VER: fails on CI w/old numpy")
     @parametrize_func
     def test_keepdims_generic_axis_none(self, func):
         a = np.arange(2 * 3 * 4).reshape((2, 3, 4))
@@ -405,7 +430,7 @@ class TestGenericReductions(TestCase):
     # Here we follow pytorch, since the result is a superset
     # of the numpy functionality

-    @parametrize("keepdims", [True, False, None])
+    @parametrize("keepdims", [True, False])
     @parametrize("dtype", [bool, "int32", "float64"])
     @parametrize_func
     @parametrize_axis
@@ -415,6 +440,8 @@ class TestGenericReductions(TestCase):
             raise SkipTest(f"{func.__name__} does not have out= arg.")
         if func in fails_axes_tuples:
             raise SkipTest(f"{func.__name__} does not hangle tuple axis.")
+        if func in restricts_dtype_casts:
+            raise SkipTest(f"{func.__name__}: test implies float->int casts")
         a = np.arange(2 * 3 * 4).reshape((2, 3, 4))
         result = func(a, axis=axis, keepdims=keepdims).astype(dtype)
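The conditional import block above is the core mechanism of this change: the same test body runs against real NumPy when Dynamo traces it, and against torch._numpy in eager mode. A minimal self-contained sketch of the pattern (the env-var check mirrors PYTORCH_TEST_WITH_DYNAMO; the test function itself is hypothetical, not part of the diff):

import os

TEST_WITH_TORCHDYNAMO = os.getenv("PYTORCH_TEST_WITH_DYNAMO") == "1"

if TEST_WITH_TORCHDYNAMO:
    import numpy as np  # trace the real NumPy, not the reimplementation
else:
    import torch._numpy as np  # exercise the torch._numpy layer directly

def test_mean_keepdims():
    # identical assertion, two backends, depending on how the suite is run
    a = np.arange(6).reshape((2, 3))
    assert np.mean(a, axis=0, keepdims=True).shape == (1, 3)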
View File
@@ -9,18 +9,25 @@ Extensive tests of this sort of functionality is in numpy_tests/core/*scalar*
 Also test the isscalar function (which is deliberately a bit more lax).
 """

-import torch._numpy as np
-from torch._numpy.testing import assert_equal
 from torch.testing._internal.common_utils import (
     instantiate_parametrized_tests,
     parametrize,
     run_tests,
     subtest,
+    TEST_WITH_TORCHDYNAMO,
     TestCase,
+    xfailIfTorchDynamo,
 )

+if TEST_WITH_TORCHDYNAMO:
+    import numpy as np
+    from numpy.testing import assert_equal
+else:
+    import torch._numpy as np
+    from torch._numpy.testing import assert_equal

 parametrize_value = parametrize(
     "value",
     [
@@ -79,6 +86,7 @@ class TestArrayScalars(TestCase):
         assert arr == 42

+# @xfailIfTorchDynamo
 @instantiate_parametrized_tests
 class TestIsScalar(TestCase):
     #
@@ -89,12 +97,12 @@ class TestIsScalar(TestCase):
     scalars = [
         subtest(42, "literal"),
         subtest(int(42.0), "int"),
-        np.float32(42),
+        subtest(np.float32(42), "float32"),
-        np.array(42),
+        subtest(np.array(42), "array_0D", decorators=[xfailIfTorchDynamo]),
-        [42],
+        subtest([42], "list", decorators=[xfailIfTorchDynamo]),
-        [[42]],
+        subtest([[42]], "list-list", decorators=[xfailIfTorchDynamo]),
-        np.array([42]),
+        subtest(np.array([42]), "array_1D", decorators=[xfailIfTorchDynamo]),
-        np.array([[42]]),
+        subtest(np.array([[42]]), "array_2D", decorators=[xfailIfTorchDynamo]),
     ]

     import math
@@ -102,8 +110,8 @@ class TestIsScalar(TestCase):
     not_scalars = [
         int,
         np.float32,
-        "s",
+        subtest("s", decorators=[xfailIfTorchDynamo]),
-        "string",
+        subtest("string", decorators=[xfailIfTorchDynamo]),
         (),
         [],
         math.sin,
View File
@@ -13,16 +13,22 @@ import operator
 from unittest import skipIf as skip, SkipTest

-import torch._numpy as np
 from pytest import raises as assert_raises

-from torch._numpy.testing import assert_equal
 from torch.testing._internal.common_utils import (
     instantiate_parametrized_tests,
     parametrize,
     run_tests,
+    TEST_WITH_TORCHDYNAMO,
     TestCase,
 )

+if TEST_WITH_TORCHDYNAMO:
+    import numpy as np
+    from numpy.testing import assert_equal
+else:
+    import torch._numpy as np
+    from torch._numpy.testing import assert_equal

 parametrize_unary_ufuncs = parametrize("ufunc", [np.sin])
 parametrize_casting = parametrize(
View File
@@ -914,6 +914,8 @@ class NumpyNdarrayVariable(TensorVariable):
             return insert_into_graph()
         elif name in ["base", "flags", "dtype"]:
             unimplemented(f"TODO: add support for ndarray.{name}")
+        elif name in ["__version__"]:
+            unimplemented("delegate np.__version__ to NumPy")

         if result is None:
             raise NotImplementedError()
         return result
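For context: unimplemented(...) in Dynamo raises an internal Unsupported exception, which produces a graph break, so the attribute access runs in plain eager Python while the rest of the function is still compiled. A rough sketch of the observable behavior (the function and inputs here are illustrative, not from this PR):

import torch
import numpy as np

def f(x):
    # Dynamo graph-breaks on np.__version__ and resolves it in eager mode,
    # then resumes compiling the remainder of the function.
    major = int(np.__version__.split(".")[0])
    return x * major

compiled = torch.compile(f)
assert torch.equal(compiled(torch.ones(2)), f(torch.ones(2)))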
View File
@@ -585,7 +585,6 @@ def _conv_corr_impl(a, v, mode):
     v = _util.cast_if_needed(v, dt)

     padding = v.shape[0] - 1 if mode == "full" else mode
-
     if padding == "same" and v.shape[0] % 2 == 0:
         # UserWarning: Using padding='same' with even kernel lengths and odd
         # dilation may require a zero-padded copy of the input be created
View File
@@ -69,7 +69,6 @@ def normalize_seq_array_like(x, parm=None):

 def normalize_dtype(dtype, parm=None):
-    # cf _decorators.dtype_to_torch
     torch_dtype = None
     if dtype is not None:
         dtype = _dtypes.dtype(dtype)
View File
@@ -1189,6 +1189,14 @@ if TEST_WITH_TORCHDYNAMO:
     torch._inductor.config.fallback_random = True


+def xpassIfTorchDynamo(func):
+    return func if TEST_WITH_TORCHDYNAMO else unittest.expectedFailure(func)
+
+
+def xfailIfTorchDynamo(func):
+    return unittest.expectedFailure(func) if TEST_WITH_TORCHDYNAMO else func
+
+
 def skipIfTorchDynamo(msg="test doesn't currently work with dynamo"):
     def decorator(fn):
         if not isinstance(fn, type):
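The two new helpers are mirror images; it can help to read them as a truth table over the dynamo flag. A standalone sketch with the flag hard-coded purely for illustration (in the suite it is derived from the test environment):

import unittest

TEST_WITH_TORCHDYNAMO = False  # illustration only; the real flag is env-driven

def xpassIfTorchDynamo(func):
    # dynamo: run normally (falls back to real NumPy, so the test passes)
    # eager:  expectedFailure (feature not implemented in torch._numpy)
    return func if TEST_WITH_TORCHDYNAMO else unittest.expectedFailure(func)

def xfailIfTorchDynamo(func):
    # dynamo: expectedFailure (tracing breaks this case)
    # eager:  run normally
    return unittest.expectedFailure(func) if TEST_WITH_TORCHDYNAMO else func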