Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-21 05:34:18 +08:00
Example:
```python
  File "/home/xmfan/core/a/pytorch/torch/autograd/graph.py", line 829, in _engine_run_backward
    return Variable._execution_engine.run_backward(  # Calls into the C++ engine to run the backward pass
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
NotImplementedError: TorchDispatchMode not yet implemented for compiled autograd. You can disable compiled autograd for this operation by:
  1. Relocating the unsupported autograd call outside the compiled region.
  2. Wrapping the unsupported autograd call within a scope that disables compiled autograd.
  3. Configuring the specific compilation unit to disable compiled autograd.
  4. Globally disabling compiled autograd at the application's initialization.
```

No duplicate error messages for python side trace-time errors:
```python
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/xmfan/core/a/pytorch/torch/_dynamo/compiled_autograd.py", line 344, in begin_capture
    raise NotImplementedError(
NotImplementedError: Found tensor of type <class 'torch.nn.utils._expanded_weights.expanded_weights_impl.ExpandedWeight'>, which is not supported by FakeTensorMode. You can turn off compiled autograd by either:
  1. Moving the unsupported autograd call outside of the torch.compile'd region.
  2. Wrapping the unsupported autograd call in the torch._dynamo.compiled_autograd._disable() context manager.
  3. Setting torch._dynamo.config.compiled_autograd=False for the torch.compile call containing the unsupported autograd call.
  4. Setting torch._dynamo.config.compiled_autograd=False at the start of the program.
```

Pull Request resolved: https://github.com/pytorch/pytorch/pull/156509
Approved by: https://github.com/jansel
ghstack dependencies: #156374
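As a rough illustration of the disable options listed in the second error message (a minimal sketch, not part of the PR; the config flag and the `_disable()` context manager are the ones named in the message, and the toy model is made up for the example):
```python
import torch

# Option 4 from the message: globally disable compiled autograd at program
# start by setting the same config flag that is used to opt in.
# torch._dynamo.config.compiled_autograd = False

model = torch.nn.Linear(4, 4)  # hypothetical toy model for illustration
opt_model = torch.compile(model)

loss = opt_model(torch.randn(2, 4)).sum()

# Option 2 from the message: wrap only the unsupported autograd call in the
# _disable() context manager, so the rest of the program keeps compiled autograd.
with torch._dynamo.compiled_autograd._disable():
    loss.backward()
```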
73 lines
2.2 KiB
C++
#pragma once

#include <ATen/Tensor.h>
#include <torch/csrc/Export.h>
#include <string>
#include <vector>

namespace torch::dynamo::autograd {
class CompiledNodeArgs;
class SwapSavedVariables;
struct PackedArgs;
} // namespace torch::dynamo::autograd

// A hook that's called on gradients

namespace torch::autograd {

using Variable = at::Tensor;
using variable_list = std::vector<Variable>;

struct TORCH_API FunctionPreHook {
  virtual ~FunctionPreHook() = default;
  virtual variable_list operator()(const variable_list& grads) = 0;
  // only implemented for python hooks, registers hook with compiled autograd
  virtual void compiled_args(
      torch::dynamo::autograd::CompiledNodeArgs& args) const {
    TORCH_CHECK_NOT_IMPLEMENTED(
        false,
        std::string("compiled_args nyi, see [Note: Compiled Autograd] ") +
            typeid(*this).name());
  }
};

struct TORCH_API FunctionPostHook {
  virtual ~FunctionPostHook() = default;
  virtual variable_list operator()(
      const variable_list& outputs /* grad_inputs */,
      const variable_list& inputs /* grad_outputs */) = 0;
  // only implemented for python hooks, registers hook with compiled autograd
  virtual void compiled_args(
      torch::dynamo::autograd::CompiledNodeArgs& args) const {
    TORCH_CHECK_NOT_IMPLEMENTED(
        false,
        std::string("compiled_args nyi, see [Note: Compiled Autograd] ") +
            typeid(*this).name());
  }
};

struct TORCH_API PostAccumulateGradHook {
  virtual ~PostAccumulateGradHook() = default;
  virtual void operator()(const Variable& tensor) = 0;
  // only implemented for python hooks on nodes, registers hook with compiled
  // autograd
  virtual void compiled_args(
      torch::dynamo::autograd::CompiledNodeArgs& args) const {
    TORCH_CHECK_NOT_IMPLEMENTED(
        false,
        std::string("compiled_args nyi, see [Note: Compiled Autograd] ") +
            typeid(*this).name());
  }

  virtual void apply_with_saved(
      Variable&,
      torch::dynamo::autograd::SwapSavedVariables&) {
    TORCH_CHECK_NOT_IMPLEMENTED(
        false,
        std::string("compiled_args nyi, see [Note: Compiled Autograd] ") +
            typeid(*this).name());
  }
};

} // namespace torch::autograd
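For context on the `compiled_args` overrides above (which the header says are only implemented for Python hooks), the Python-level registration APIs that these three C++ interfaces roughly back are `Tensor.register_hook`, `torch.autograd.graph.Node.register_hook`, and `Tensor.register_post_accumulate_grad_hook`. A minimal eager-mode sketch using those public APIs; the mapping to the structs is approximate and nothing here is taken from this header:
```python
import torch

t = torch.randn(3, requires_grad=True)
loss = (t * t).sum()

# Roughly FunctionPreHook: called with the gradient w.r.t. `t`, may return a
# modified gradient.
t.register_hook(lambda grad: grad * 2)

# Roughly PostAccumulateGradHook: called with the tensor itself after its
# .grad field has been accumulated.
t.register_post_accumulate_grad_hook(lambda tensor: print(tensor.grad.norm()))

# Roughly FunctionPostHook: registered on an autograd Node, called with
# (grad_inputs, grad_outputs); returning None leaves them unchanged.
loss.grad_fn.register_hook(lambda grad_inputs, grad_outputs: None)

loss.backward()
```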