diff --git a/torch/csrc/dynamo/compiled_autograd.h b/torch/csrc/dynamo/compiled_autograd.h
index 87689f34dfae..cba8158213c6 100644
--- a/torch/csrc/dynamo/compiled_autograd.h
+++ b/torch/csrc/dynamo/compiled_autograd.h
@@ -1106,16 +1106,14 @@ struct IValuePacker {
   // That's what the TypePtr is for: it contains the information to do the
   // parsing. See torch::jit::toIValue for more information.
   static at::TypePtr packed_type() {
-#if defined(_WIN32)
-#if defined(USE_CUDA) || defined(USE_ROCM)
+#ifdef _WIN32
     // NB: the if-constexpr usage triggers compilation errors on Windows
     // with certain compiler settings
     // (see https://github.com/pytorch/pytorch/pull/144707 for examples).
     // It's not clear what the problem is, so we're going to ignore it for now.
     TORCH_CHECK_NOT_IMPLEMENTED(
-        false, "torch.compile not supported on Windows GPU");
-#endif
-#endif
+        false, "torch.compile not supported on Windows");
+#else
     if constexpr (::std::is_same_v<T, at::Tensor>) {
       return at::TensorType::get();
     } else if constexpr (::std::is_same_v<T, int64_t>) {
@@ -1155,6 +1153,7 @@ struct IValuePacker {
           false, "IValuePacker not implemented for type");
       return at::NoneType::get();
     }
+#endif
   }
 };
 