Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-20 21:14:14 +08:00
[BE] remove torch deploy - conditionals (#158288)
This PR is part of the work to deprecate torch::deploy in OSS. Effectively, it does three things to get started:

1. Remove test_deploy_interaction, as we no longer need to worry about this.
2. Remove all torch._running_with_deploy checks and always take the False path (this surfaced item 1; see the sketch after this list).
3. Remove `USE_DEPLOY` and always take the default path.

Note: MyPy does fail on a number of things here, since several older files are touched. It may be better to fix those in a separate PR.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/158288
Approved by: https://github.com/albanD
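As a rough illustration of item 2, here is a minimal sketch. Only the `torch._running_with_deploy()` guard pattern comes from this PR; the `feature_before`/`feature_after` call sites and the `_running_with_deploy` fallback helper are hypothetical, added purely for illustration.

```python
# Minimal sketch of the refactor in item 2: every torch._running_with_deploy()
# guard is resolved as if it returned False, and the deploy-only branch is dropped.
# feature_before/feature_after are hypothetical call sites, not code from this PR.
import torch


def _running_with_deploy() -> bool:
    # On builds where the helper has already been removed, treat it as False --
    # the exact behavior this PR hard-codes at every former call site.
    fn = getattr(torch, "_running_with_deploy", None)
    return bool(fn()) if callable(fn) else False


def feature_before() -> str:
    # Old pattern: call sites branched on the torch::deploy runtime.
    if _running_with_deploy():
        return "deploy-specific path"
    return "default path"


def feature_after() -> str:
    # New pattern: the check is deleted and the default path is taken unconditionally.
    return "default path"


assert feature_before() == feature_after() == "default path"
```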
@@ -3603,8 +3603,8 @@ class TestSparseCompressedTritonKernels(TestCase):
     @onlyCUDA
     @dtypes(torch.half, torch.bfloat16, torch.float)
     @dtypesIfCUDA(torch.half, *[torch.bfloat16] if SM80OrLater else [], torch.float)
-    @unittest.skipIf((not TEST_WITH_TORCHINDUCTOR) or (IS_FBCODE and IS_REMOTE_GPU) or torch._running_with_deploy(),
-                     "Skipped for deploy and internal with remote GPUs")
+    @unittest.skipIf((not TEST_WITH_TORCHINDUCTOR) or (IS_FBCODE and IS_REMOTE_GPU),
+                     "Skipped for internal with remote GPUs")
     def test_triton_bsr_dense_bmm(self, device, dtype, index_dtype, block_size):
         from functools import partial
         from torch.sparse._triton_ops import bsr_dense_mm
@@ -3680,8 +3680,8 @@ class TestSparseCompressedTritonKernels(TestCase):
 
     @onlyCUDA
     @dtypes(torch.half)
-    @unittest.skipIf(IS_FBCODE and IS_REMOTE_GPU or torch._running_with_deploy(),
-                     "Skipped for deploy and internal with remote GPUs")
+    @unittest.skipIf(IS_FBCODE and IS_REMOTE_GPU,
+                     "Skipped for internal with remote GPUs")
     def test_triton_bsr_dense_bmm_error_messages(self, device, dtype):
         from torch.sparse._triton_ops import bsr_dense_mm
 
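For readers unfamiliar with the skip pattern in the hunks above, here is a self-contained sketch of conditional test skipping with `unittest.skipIf`. The module-level flags are hypothetical stand-ins for PyTorch's env-driven TEST_WITH_TORCHINDUCTOR / IS_FBCODE / IS_REMOTE_GPU constants, not the real definitions.

```python
# Self-contained sketch of the decorator pattern used in the diff above.
# The flags below are hypothetical stand-ins for PyTorch's test-config constants.
import unittest

TEST_WITH_TORCHINDUCTOR = False
IS_FBCODE = False
IS_REMOTE_GPU = False


class ExampleTest(unittest.TestCase):
    # skipIf evaluates its condition once, at class-definition time; when the
    # condition is True, the test is reported as skipped with the given reason.
    @unittest.skipIf((not TEST_WITH_TORCHINDUCTOR) or (IS_FBCODE and IS_REMOTE_GPU),
                     "Skipped for internal with remote GPUs")
    def test_guarded(self):
        self.assertTrue(True)


if __name__ == "__main__":
    unittest.main()
```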