Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-20 21:14:14 +08:00)
Add C10_NODEPRECATED check for xpu (#153935)
# Motivation
Add a `C10_NODEPRECATED` check for XPU. This prevents the XPU codebase from using `c10::optional`. What does the torch-xpu-ops commit pin update change? It deprecates `c10::optional`, `c10::nullopt`, and `c10::make_optional`, and uses the `std` counterparts instead.

# Additional Context
This PR depends on:
https://github.com/intel/torch-xpu-ops/pull/1683
https://github.com/intel/torch-xpu-ops/pull/1690

Pull Request resolved: https://github.com/pytorch/pytorch/pull/153935
Approved by: https://github.com/Skylion007, https://github.com/cyyever
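To make the migration concrete, here is a minimal, hypothetical call site (not code from this PR) showing the spellings that `-DC10_NODEPRECATED` rejects and their `std` replacements:

#include <cstdint>
#include <optional>

// Before (rejected once -DC10_NODEPRECATED is set):
//   c10::optional<int64_t> dim = c10::make_optional<int64_t>(1);
//   if (dim == c10::nullopt) { /* no dim given */ }

// After: the std counterparts behave identically.
std::optional<int64_t> pick_dim(bool has_dim) {
  if (!has_dim) {
    return std::nullopt;                  // replaces c10::nullopt
  }
  return std::make_optional<int64_t>(1);  // replaces c10::make_optional
}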
committed by: PyTorch MergeBot
parent: 482e5b6660
commit: a664cfdf95
@@ -985,12 +985,11 @@ endif()
 include(cmake/public/utils.cmake)
 if(NOT MSVC)
   string(APPEND CMAKE_CXX_FLAGS " -O2 -fPIC")
-  if(NOT USE_XPU)
-    # This prevents use of `c10::optional`, `c10::nullopt` etc within the codebase
-    string(APPEND CMAKE_CXX_FLAGS " -DC10_NODEPRECATED")
-    string(APPEND CMAKE_CUDA_FLAGS " -DC10_NODEPRECATED")
-    string(APPEND CMAKE_OBJCXX_FLAGS " -DC10_NODEPRECATED")
-  endif()
+
+  # This prevents use of `c10::optional`, `c10::nullopt` etc within the codebase
+  string(APPEND CMAKE_CXX_FLAGS " -DC10_NODEPRECATED")
+  string(APPEND CMAKE_CUDA_FLAGS " -DC10_NODEPRECATED")
+  string(APPEND CMAKE_OBJCXX_FLAGS " -DC10_NODEPRECATED")

   # Eigen fails to build with some versions, so convert this to a warning
   # Details at http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1459
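The define only has an effect because the c10 headers expose the legacy aliases behind a preprocessor guard. The following is an illustrative sketch of such a guard, not the actual contents of the c10 headers:

#include <optional>

namespace c10 {

#ifndef C10_NODEPRECATED
// Legacy spellings kept only for backward compatibility. Builds that pass
// -DC10_NODEPRECATED never see these declarations, so any remaining use of
// c10::optional / c10::nullopt / c10::make_optional fails to compile.
template <typename T>
using optional = std::optional<T>;

using std::make_optional;
using std::nullopt;
using std::nullopt_t;
#endif

} // namespace c10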
@@ -273,7 +273,7 @@ void quantized_matmul(

   int scratchpad_size = matmul_pd.scratchpad_desc().get_size();
   at::Tensor scratchpad_tensor =
-      at::empty({scratchpad_size}, m1.options().dtype(at::kByte), c10::nullopt);
+      at::empty({scratchpad_size}, m1.options().dtype(at::kByte), std::nullopt);
   auto scratchpad_memory = make_onednn_memory(
       matmul_pd.scratchpad_desc(), engine, scratchpad_tensor.data_ptr());
   args.insert({DNNL_ARG_SCRATCHPAD, scratchpad_memory});
@@ -164,7 +164,7 @@ void woq_matmul_int4(

   int scratchpad_size = matmul_pd.scratchpad_desc().get_size();
   Tensor scratchpad_tensor =
-      at::empty({scratchpad_size}, m1.options().dtype(at::kByte), c10::nullopt);
+      at::empty({scratchpad_size}, m1.options().dtype(at::kByte), std::nullopt);
   auto scratchpad_memory = make_onednn_memory(
       matmul_pd.scratchpad_desc(), engine, scratchpad_tensor.data_ptr());
   args.insert({DNNL_ARG_SCRATCHPAD, scratchpad_memory});
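Both hunks above make the same substitution. The third argument of this `at::empty` overload is an optional memory format; a small, self-contained sketch of the pattern (the helper name and inputs below are hypothetical, not from this PR):

#include <ATen/ATen.h>
#include <cstdint>
#include <optional>

// Hypothetical helper mirroring the scratchpad pattern above: allocate a raw
// byte buffer whose size comes from a oneDNN primitive descriptor, so its
// storage can be handed to the primitive via the execution args.
at::Tensor allocate_scratchpad(int64_t scratchpad_size, const at::Tensor& src) {
  // The third argument is std::optional<at::MemoryFormat>; std::nullopt keeps
  // the default contiguous layout for this 1-D byte buffer.
  return at::empty({scratchpad_size}, src.options().dtype(at::kByte), std::nullopt);
}

Because `c10::nullopt` already forwards to the `std` entity in current PyTorch, the substitution in these hunks changes only the spelling, not the runtime behavior.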
third_party/xpu.txt (vendored)
@@ -1 +1 @@
-defce46ae775cc14dcfddd4cb1e8f5220a36eb76
+4e027f1e1c560d7dc7db7eb41e48bdee5fc00707