Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-20 21:14:14 +08:00

[Caffe2] [2/N] Remove Caffe2 from tests (#128911)

Follows #128675. Pull Request resolved: https://github.com/pytorch/pytorch/pull/128911
Approved by: https://github.com/titaiwangms, https://github.com/r-barnes
@@ -911,51 +911,6 @@ class TestTracer(JitTestCase):
         self.assertEqual(len(list(g.inputs())), 2)
         FileCheck().check("mul").check("add").run(str(g))

-    def test_trace_c10_ops(self):
-        try:
-            _ = torch.ops._caffe2.GenerateProposals
-        except AttributeError:
-            self.skipTest("Skip the test since c2 ops are not registered.")
-
-        class MyModel(torch.nn.Module):
-            def forward(self, scores, bbox_deltas, im_info, anchors):
-                a, b = torch.ops._caffe2.GenerateProposals(
-                    (scores),
-                    (bbox_deltas),
-                    (im_info),
-                    (anchors),
-                    2.0,
-                    6000,
-                    300,
-                    0.7,
-                    16,
-                    True,
-                    -90,
-                    90,
-                    1.0,
-                    True,
-                )
-                return a, b
-
-        model = MyModel()
-        A = 4
-        H = 10
-        W = 8
-        img_count = 3
-        scores = torch.ones(img_count, A, H, W, dtype=torch.float32)
-        bbox_deltas = torch.linspace(
-            0, 10, steps=img_count * 4 * A * H * W, dtype=torch.float32
-        )
-        bbox_deltas = bbox_deltas.view(img_count, 4 * A, H, W)
-        im_info = torch.ones(img_count, 3, dtype=torch.float32)
-        anchors = torch.ones(A, 4, dtype=torch.float32)
-        inputs = (scores, bbox_deltas, im_info, anchors)
-        traced_model = torch.jit.trace(model, inputs)
-        self.assertEqual(traced_model(*inputs), model(*inputs))
-        self.assertExportImportModule(
-            traced_model, (scores, bbox_deltas, im_info, anchors)
-        )
-
     def run_ge_tests(self, optimize, use_cuda):
         with enable_profiling_mode_for_profiling_tests():
             with torch.jit.optimized_execution(optimize):
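For context, the trace-then-compare and export/import round-trip pattern that test_trace_c10_ops exercised does not need a Caffe2 operator. Below is a minimal sketch, not taken from the PR: torch.jit.trace, torch.jit.save, and torch.jit.load are the real APIs, while the TinyModel module, shapes, and tolerances are illustrative assumptions, and the save/load round trip only roughly mirrors what assertExportImportModule verifies.

import io

import torch


class TinyModel(torch.nn.Module):
    def forward(self, x, y):
        return x * y + y


model = TinyModel()
inputs = (torch.randn(3, 4), torch.randn(3, 4))

# Trace the module and check the traced graph matches eager execution.
traced = torch.jit.trace(model, inputs)
assert torch.allclose(traced(*inputs), model(*inputs))

# Save/load round trip through an in-memory buffer.
buffer = io.BytesIO()
torch.jit.save(traced, buffer)
buffer.seek(0)
reloaded = torch.jit.load(buffer)
assert torch.allclose(reloaded(*inputs), model(*inputs))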
@@ -340,8 +340,8 @@ def xfail(error_message: str, reason: Optional[str] = None):


 # skips tests for opset_versions listed in unsupported_opset_versions.
-# if the caffe2 test cannot be run for a specific version, add this wrapper
-# (for example, an op was modified but the change is not supported in caffe2)
+# if the PyTorch test cannot be run for a specific version, add this wrapper
+# (for example, an op was modified but the change is not supported in PyTorch)
 def skipIfUnsupportedOpsetVersion(unsupported_opset_versions):
     def skip_dec(func):
         @functools.wraps(func)
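The hunk above only shows the first lines of skipIfUnsupportedOpsetVersion. For readers unfamiliar with this style of decorator, here is a minimal sketch of how such an opset-version skip wrapper is typically written; it assumes the test class exposes self.opset_version and is not necessarily the exact upstream implementation.

import functools
import unittest


def skipIfUnsupportedOpsetVersion(unsupported_opset_versions):
    # Skip the wrapped test when the test class's opset_version is unsupported.
    def skip_dec(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            if self.opset_version in unsupported_opset_versions:
                raise unittest.SkipTest(
                    f"Skipped for unsupported opset_version {self.opset_version}"
                )
            return func(self, *args, **kwargs)

        return wrapper

    return skip_dec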
@@ -873,33 +873,6 @@ class TestOperators(common_utils.TestCase):
         x = torch.randn(2, 3, 4, requires_grad=True)
         self.assertONNX(lambda x: torch.cumsum(x, dim=1), x, opset_version=11)

-    # Github Issue: https://github.com/pytorch/pytorch/issues/71095
-    # def test_c2_op(self):
-    #     class MyModel(torch.nn.Module):
-    #         def __init__(self):
-    #             super().__init__()
-    #
-    #         def forward(self, scores, bbox_deltas, im_info, anchors):
-    #             a, b = torch.ops._caffe2.GenerateProposals(
-    #                 (scores), (bbox_deltas), (im_info), (anchors),
-    #                 2.0, 6000, 300, 0.7, 16, True, -90, 90, 1.0, True,
-    #             )
-    #             return a, b
-    #
-    #     model = MyModel()
-    #     A = 4
-    #     H = 10
-    #     W = 8
-    #     img_count = 3
-    #     scores = torch.ones(img_count, A, H, W, dtype=torch.float32)
-    #     bbox_deltas = torch.linspace(0, 10, steps=img_count * 4 * A * H * W,
-    #                                  dtype=torch.float32)
-    #     bbox_deltas = bbox_deltas.view(img_count, 4 * A, H, W)
-    #     im_info = torch.ones(img_count, 3, dtype=torch.float32)
-    #     anchors = torch.ones(A, 4, dtype=torch.float32)
-    #     inputs = (scores, bbox_deltas, im_info, anchors)
-    #     self.assertONNX(model, inputs, custom_opsets={"org.pytorch._caffe2": 0})
-
     def test_dict(self):
         class MyModel(torch.nn.Module):
             def forward(self, x_in):
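The commented-out test_c2_op exported a model containing a Caffe2 custom operator through assertONNX with a custom opset domain. As a rough illustration of the underlying export path (not part of this PR), a plain module can be exported directly with torch.onnx.export; the SmallModel module and the in-memory buffer below are assumptions made for the example.

import io

import torch


class SmallModel(torch.nn.Module):
    def forward(self, x):
        return torch.cumsum(x, dim=1)


x = torch.randn(2, 3, 4)
buffer = io.BytesIO()
# Export to an in-memory ONNX graph at a specific opset version.
torch.onnx.export(SmallModel(), (x,), buffer, opset_version=11)
print(f"exported {buffer.getbuffer().nbytes} bytes of ONNX")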
@@ -4457,54 +4457,7 @@ class TestQuantizedEmbeddingOps(TestCase):
         self.assertEqual(unpacked_weight.q_per_channel_scales(), qweight.q_per_channel_scales())
         self.assertEqual(unpacked_weight.q_per_channel_zero_points(), qweight.q_per_channel_zero_points())

-        # compare against C2 to ensure numerical equivalency.
-        from caffe2.python import core, workspace
-        conversion_op = "FloatToFused8BitRowwiseQuantized" if data_type == torch.float32 else "HalfFloatToFused8BitRowwiseQuantized"
-        reverse_conversion_op = None
-        if bit_rate == 4:
-            conversion_op = "FloatToFused4BitRowwiseQuantized" if data_type == torch.float32 else "HalfToFused4BitRowwiseQuantized"
-            reverse_conversion_op = "Fused4BitRowwiseQuantizedToFloat"
-        elif bit_rate == 2:
-            conversion_op = "FloatToFused2BitRowwiseQuantized" if data_type == torch.float32 else "HalfToFused2BitRowwiseQuantized"
-            reverse_conversion_op = "Fused2BitRowwiseQuantizedToFloat"
-
-        def get_c2_weights(weights, engine_str):
-            workspace.ResetWorkspace()
-
-            workspace.FeedBlob("weights", weights)
-            workspace.RunOperatorOnce(
-                core.CreateOperator(
-                    conversion_op, ["weights"], ["quantized_weights"], engine=engine_str
-                )
-            )
-            emb_q = workspace.FetchBlob("quantized_weights")
-            if bit_rate == 4 or bit_rate == 2:
-                workspace.RunOperatorOnce(
-                    core.CreateOperator(
-                        reverse_conversion_op, ["quantized_weights"], ["dequantized_weights"]
-                    )
-                )
-                dequantized_data = torch.from_numpy(workspace.FetchBlob("dequantized_weights"))
-            else:
-                dequantized_data = torch.ops._caffe2.Fused8BitRowwiseQuantizedToFloat(
-                    torch.tensor(emb_q)
-                )
-            return torch.from_numpy(emb_q), dequantized_data
-
-        if optimized_qparams:
-            engine = "GREEDY"
-        else:
-            engine = ""
-
-        # C2 quantization needs the memory format of Tensor to be `continuous`, otherwise it will
-        # throw exceptions. torch.clone() will make the memory format to be `continuous`
-        c2_copy = torch.clone(weights)
-        w_packed_c2, w_unpacked_c2 = get_c2_weights(c2_copy, engine)
-
-        # Compare packed weights against C2.
-        np.testing.assert_allclose(w_packed.numpy(), w_packed_c2.numpy(), atol=1e-6, rtol=1e-6)
-        # Compare unpacked weights against C2
-        np.testing.assert_allclose(w_unpacked.numpy(), w_unpacked_c2.numpy(), atol=1e-6, rtol=1e-6)


     def _test_embedding_bag_unpack_fn(self, pack_fn, unpack_fn, num_embeddings, embedding_dim, bit_rate,
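The removed block cross-checked PyTorch's fused row-wise quantized embedding packing against the Caffe2 FloatToFused*BitRowwiseQuantized operators. The per-row affine scheme those operators implement for 8-bit (scale = (max - min) / 255, bias = row minimum) can be sketched without Caffe2; the NumPy code below is an illustrative reconstruction of that arithmetic only, not the packed byte layout and not code from the PR.

import numpy as np

rows, cols = 10, 16
weights = np.random.rand(rows, cols).astype(np.float32)

mins = weights.min(axis=1, keepdims=True)
maxs = weights.max(axis=1, keepdims=True)
# Guard against constant rows to avoid division by zero (assumed detail).
scales = np.maximum((maxs - mins) / 255.0, 1e-8)

# Quantize: map each row to uint8 codes relative to its own min and scale.
codes = np.clip(np.round((weights - mins) / scales), 0, 255).astype(np.uint8)

# Dequantize and check the reconstruction error is bounded by half a quantization step.
dequant = codes.astype(np.float32) * scales + mins
assert np.all(np.abs(dequant - weights) <= scales / 2 + 1e-5)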
@@ -121,13 +121,6 @@ class DeterminationTest(TestCase):
             ],
         )

-    def test_caffe2_file(self):
-        """Caffe2 files trigger dependent tests"""
-        self.assertEqual(self.determined_tests(["caffe2/python/brew_test.py"]), [])
-        self.assertEqual(
-            self.determined_tests(["caffe2/python/context.py"]), self.TESTS
-        )
-
     def test_new_folder(self):
         """New top-level Python folder triggers all tests"""
         self.assertEqual(self.determined_tests(["new_module/file.py"]), self.TESTS)
@@ -342,7 +342,6 @@ class TestPublicBindings(TestCase):
             "torch.testing._internal.distributed.rpc.rpc_test",
             "torch.testing._internal.distributed.rpc.tensorpipe_rpc_agent_test_fixture",
             "torch.testing._internal.distributed.rpc_utils",
-            "torch.utils.tensorboard._caffe2_graph",
             "torch._inductor.codegen.cuda.cuda_template",
             "torch._inductor.codegen.cuda.gemm_template",
             "torch._inductor.runtime.triton_helpers",
@@ -23,15 +23,6 @@ except ImportError:
     HAS_TORCHVISION = False
 skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")

-TEST_CAFFE2 = True
-try:
-    import caffe2.python.caffe2_pybind11_state as _caffe2_pybind11_state  # noqa: F401
-    from caffe2.python import brew, cnn, core, workspace
-    from caffe2.python.model_helper import ModelHelper
-except ImportError:
-    TEST_CAFFE2 = False
-skipIfNoCaffe2 = unittest.skipIf(not TEST_CAFFE2, "no caffe2")
-
 TEST_MATPLOTLIB = True
 try:
     import matplotlib
@@ -48,7 +39,6 @@ from torch.testing._internal.common_utils import (
     parametrize,
     TestCase,
     run_tests,
-    TEST_WITH_ASAN,
     TEST_WITH_CROSSREF,
     IS_WINDOWS,
     IS_MACOS,
@@ -94,8 +84,6 @@ if TEST_TENSORBOARD:
     from torch.utils.tensorboard._pytorch_graph import graph
     from google.protobuf import text_format
     from PIL import Image
-if TEST_TENSORBOARD and TEST_CAFFE2:
-    from torch.utils.tensorboard import _caffe2_graph as c2_graph

 class TestTensorBoardPyTorchNumpy(BaseTestCase):
     def test_pytorch_np(self):
@@ -754,80 +742,11 @@ class TestTensorBoardNumpy(BaseTestCase):
         res = make_np(np.int64(100000000000))
         self.assertIsInstance(res, np.ndarray) and self.assertEqual(res.shape, (1,))

-    @skipIfNoCaffe2
-    def test_caffe2_np(self):
-        workspace.FeedBlob("testBlob", tensor_N(shape=(1, 3, 64, 64)))
-        self.assertIsInstance(make_np('testBlob'), np.ndarray)
-
-    @skipIfNoCaffe2
-    def test_caffe2_np_expect_fail(self):
-        with self.assertRaises(RuntimeError):
-            res = make_np('This_blob_does_not_exist')

     def test_pytorch_np_expect_fail(self):
         with self.assertRaises(NotImplementedError):
             res = make_np({'pytorch': 1.0})

-    @skipIfNoCaffe2
-    @unittest.skipIf(TEST_WITH_ASAN, "Caffe2 failure with ASAN")
-    def test_caffe2_simple_model(self):
-        model = ModelHelper(name="mnist")
-        # how come those inputs don't break the forward pass =.=a
-        workspace.FeedBlob("data", np.random.randn(1, 3, 64, 64).astype(np.float32))
-        workspace.FeedBlob("label", np.random.randn(1, 1000).astype(int))
-
-        with core.NameScope("conv1"):
-            conv1 = brew.conv(model, "data", 'conv1', dim_in=1, dim_out=20, kernel=5)
-            # Image size: 24 x 24 -> 12 x 12
-            pool1 = brew.max_pool(model, conv1, 'pool1', kernel=2, stride=2)
-            # Image size: 12 x 12 -> 8 x 8
-            conv2 = brew.conv(model, pool1, 'conv2', dim_in=20, dim_out=100, kernel=5)
-            # Image size: 8 x 8 -> 4 x 4
-            pool2 = brew.max_pool(model, conv2, 'pool2', kernel=2, stride=2)
-        with core.NameScope("classifier"):
-            # 50 * 4 * 4 stands for dim_out from previous layer multiplied by the image size
-            fc3 = brew.fc(model, pool2, 'fc3', dim_in=100 * 4 * 4, dim_out=500)
-            relu = brew.relu(model, fc3, fc3)
-            pred = brew.fc(model, relu, 'pred', 500, 10)
-            softmax = brew.softmax(model, pred, 'softmax')
-        xent = model.LabelCrossEntropy([softmax, "label"], 'xent')
-        # compute the expected loss
-        loss = model.AveragedLoss(xent, "loss")
-        model.net.RunAllOnMKL()
-        model.param_init_net.RunAllOnMKL()
-        model.AddGradientOperators([loss], skip=1)
-        blob_name_tracker = {}
-        graph = c2_graph.model_to_graph_def(
-            model,
-            blob_name_tracker=blob_name_tracker,
-            shapes={},
-            show_simplified=False,
-        )
-        compare_proto(graph, self)
-
-    @skipIfNoCaffe2
-    def test_caffe2_simple_cnnmodel(self):
-        model = cnn.CNNModelHelper("NCHW", name="overfeat")
-        workspace.FeedBlob("data", np.random.randn(1, 3, 64, 64).astype(np.float32))
-        workspace.FeedBlob("label", np.random.randn(1, 1000).astype(int))
-        with core.NameScope("conv1"):
-            conv1 = model.Conv("data", "conv1", 3, 96, 11, stride=4)
-            relu1 = model.Relu(conv1, conv1)
-            pool1 = model.MaxPool(relu1, "pool1", kernel=2, stride=2)
-        with core.NameScope("classifier"):
-            fc = model.FC(pool1, "fc", 4096, 1000)
-            pred = model.Softmax(fc, "pred")
-        xent = model.LabelCrossEntropy([pred, "label"], "xent")
-        loss = model.AveragedLoss(xent, "loss")
-
-        blob_name_tracker = {}
-        graph = c2_graph.model_to_graph_def(
-            model,
-            blob_name_tracker=blob_name_tracker,
-            shapes={},
-            show_simplified=False,
-        )
-        compare_proto(graph, self)

 class TestTensorProtoSummary(BaseTestCase):
     @parametrize(
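The deleted tests built TensorBoard GraphDefs from Caffe2 models via _caffe2_graph. The PyTorch-native path that remains is SummaryWriter.add_graph, which traces a module and logs its graph; the sketch below is not from the PR, and the SmallNet module and log directory name are assumptions for the example.

import torch
from torch.utils.tensorboard import SummaryWriter


class SmallNet(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv2d(3, 8, kernel_size=3)
        self.fc = torch.nn.Linear(8 * 62 * 62, 10)

    def forward(self, x):
        x = torch.relu(self.conv(x))
        return self.fc(x.flatten(start_dim=1))


with SummaryWriter(log_dir="runs/graph_demo") as writer:
    # add_graph traces the module and writes its graph proto for TensorBoard.
    writer.add_graph(SmallNet(), torch.randn(1, 3, 64, 64))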
@@ -41,7 +41,7 @@ from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
     skipCUDAMemoryLeakCheckIf, BytesIOContext,
     skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName,
     wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard,
-    skipIfNotRegistered, bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
+    bytes_to_scalar, parametrize, skipIfMps, noncontiguous_like,
     AlwaysWarnTypedStorageRemoval, TEST_WITH_TORCHDYNAMO, xfailIfTorchDynamo)
 from multiprocessing.reduction import ForkingPickler
 from torch.testing._internal.common_device_type import (

@@ -8632,21 +8632,6 @@ tensor([[[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j],
         a = torch.ones(2, 3)
         # Metadata changes are allowed on view tensors that are created from detach().

-    @skipIfNotRegistered("LayerNorm", "Skipping as LayerNorm is not registered")
-    def test_c10_layer_norm(self):
-        # test that we can call c10 ops and they return a reasonable result
-        X = torch.rand(5, 5, dtype=torch.float)
-        weight = torch.rand(*X.size()[1:], dtype=torch.float)
-        bias = torch.rand(*X.size()[1:], dtype=torch.float)
-        epsilon = 1e-4
-
-        expected_norm = torch.nn.functional.layer_norm(
-            X, X.size()[1:], weight=weight, bias=bias, eps=epsilon)
-        actual_norm, actual_mean, actual_stdev = \
-            torch.ops._caffe2.LayerNorm(torch.tensor(X), torch.tensor(
-                weight), torch.tensor(bias), 1, epsilon, True)
-        torch.testing.assert_close(expected_norm, actual_norm)
-
     def test_memory_format(self):
         def test_helper(x, memory_format):
             y = x.contiguous(memory_format=memory_format)
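test_c10_layer_norm validated torch.nn.functional.layer_norm against the Caffe2 LayerNorm operator. The same numerical check can be made against a manual computation of the layer-norm formula; the sketch below is illustrative and not part of the PR.

import torch

X = torch.rand(5, 5)
weight = torch.rand(5)
bias = torch.rand(5)
eps = 1e-4

expected = torch.nn.functional.layer_norm(X, X.size()[1:], weight=weight, bias=bias, eps=eps)

# Manual layer norm over the last dimension: (x - mean) / sqrt(var + eps) * weight + bias,
# using the biased variance, which is what layer_norm computes internally.
mean = X.mean(dim=-1, keepdim=True)
var = X.var(dim=-1, unbiased=False, keepdim=True)
manual = (X - mean) / torch.sqrt(var + eps) * weight + bias

torch.testing.assert_close(expected, manual)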