remove hasPrimaryContext workaround on ROCm (#71146)

Summary:
Since issue https://github.com/pytorch/pytorch/issues/59750 has been fixed, this PR removes the workaround implemented for it on ROCm.

It also enables the hasPrimaryContext()-related PyTorch unit tests on ROCm.
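
For reference, a minimal sketch of the invariant those tests exercise, assuming the private binding `torch._C._cuda_hasPrimaryContext` that `test_cuda_primary_ctx.py` queries: work submitted to one device should create a primary context on that device only.

```python
import torch

# Sketch only: torch._C._cuda_hasPrimaryContext is the private helper the
# primary-context tests query; it is not public API and may change.
if torch.cuda.is_available():
    for device in range(torch.cuda.device_count()):
        # No primary context should exist before any work runs on the device.
        assert not torch._C._cuda_hasPrimaryContext(device)

    # Allocating a tensor on cuda:0 should create a primary context on
    # device 0 only, leaving all other devices untouched.
    x = torch.ones(1, device="cuda:0")
    assert torch._C._cuda_hasPrimaryContext(0)
    for device in range(1, torch.cuda.device_count()):
        assert not torch._C._cuda_hasPrimaryContext(device)
```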

cc: amathews-amd, jithunnair-amd

cc jeffdaily sunway513 jithunnair-amd ROCmSupport KyleCZH

Pull Request resolved: https://github.com/pytorch/pytorch/pull/71146

Reviewed By: anjali411

Differential Revision: D33754615

Pulled By: albanD

fbshipit-source-id: b3a5c65a20c6d52d5f2ffc9e6f9628c819329b5d
(cherry picked from commit cfdd12166cfd1365de0ebe5a75ce40ac7fde15cc)
Author: rraminen
Date: 2022-01-25 12:19:32 -08:00
Committed by: PyTorch MergeBot
Parent: 22a77d7b92
Commit: 07ca1fc88b
2 changed files with 3 additions and 3 deletions


@@ -1,7 +1,7 @@
# Owner(s): ["module: cuda"]
import torch
-from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm
+from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocmVersionLessThan
import sys
import unittest
@@ -25,7 +25,7 @@ class TestCudaPrimaryCtx(TestCase):
"where CUDA contexts are never created. Use either run_test.py or add "
"--subprocess to run each test in a different subprocess.")
-@skipIfRocm
+@skipIfRocmVersionLessThan((4, 4, 21504))
def setUp(self):
for device in range(torch.cuda.device_count()):
# Ensure context has not been created beforehand


@@ -1262,7 +1262,7 @@ void GraphTask::stash_current_streams() {
caller_current_streams_.resize(num_gpus);
if (num_gpus > 0) {
for (c10::DeviceIndex idx = 0; idx < num_gpus; idx++) {
-#if defined(USE_ROCM)
+#if defined(USE_ROCM) && (ROCM_VERSION < 50000)
// If the build targets ROCM, stash streams for all visible devices unconditionally, to work around
// https://github.com/pytorch/pytorch/issues/59750.
// TODO: Remove ROCM-specific behavior when https://github.com/pytorch/pytorch/issues/59750 is fixed.