This reverts commit 515c19a3856e953c0fe23a0ed4fa844f8eea34d8. Reverted https://github.com/pytorch/pytorch/pull/154165 on behalf of https://github.com/seemethere due to This is failing when attempting to test against executorch main internally, author has acknowledged that this should be reverted ([comment](https://github.com/pytorch/pytorch/pull/154165#issuecomment-2931489616))
45 lines
1.3 KiB
C++
#pragma once

#include "event_tracer.h"

namespace torch {
namespace executor {

/**
 * Bucket type abstraction that contains many elements of runtime state that
 * a kernel author may want available, but would otherwise be unable to access.
 *
 * Forwarded along to all operators when running in lean mode. NOTE: Will not be
 * forwarded to operators if running in ATen mode as those operators do not
 * expect to receive a KernelRuntimeContext and would not use it.
 *
 * This includes things like setting an error state, a scratch allocator for
 * operators that need more than constant space, and a TensorResizer for dynamic
 * shape tensors allowing programs to be more flexible with Tensor shape.
 */
class KernelRuntimeContext {
 public:
  /**
   * Construct a new kernel runtime context along with an optional event tracer.
   */
  KernelRuntimeContext(EventTracer* event_tracer = nullptr)
      : event_tracer_(event_tracer) {}

  /**
   * INTERNAL ONLY
   *
   * Returns a pointer to an instance of EventTracer to do profiling/debugging
   * logging inside the codegen layer. This is only for internal usage inside
   * the codegen layer and users should not be accessing this.
   */
  EventTracer* internal_event_tracer() {
    return event_tracer_;
  }

 private:
  EventTracer* event_tracer_;
};

} // namespace executor
} // namespace torch
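A minimal usage sketch of how this context might be consumed, using only the APIs declared in the header above. The include path, the kernel name my_kernel, and the caller-side objects are hypothetical placeholders, not part of this file; this is an illustration, not the project's actual kernel registration code.

#include "kernel_runtime_context.h"  // assumed include path for the header above

namespace torch {
namespace executor {

// A hypothetical lean-mode kernel. As the class comment notes, the runtime
// forwards a KernelRuntimeContext to every operator in lean mode.
void my_kernel(KernelRuntimeContext& context) {
  // The codegen layer may fetch the tracer for profiling/debug logging.
  // It must tolerate a null tracer, since the constructor defaults to nullptr.
  EventTracer* tracer = context.internal_event_tracer();
  if (tracer == nullptr) {
    // No tracer attached; run without profiling/debug logging.
  }
}

} // namespace executor
} // namespace torch

// Caller side: construct the context with or without an event tracer
// (my_tracer would be some EventTracer implementation provided elsewhere).
//
//   torch::executor::KernelRuntimeContext plain_context;
//   torch::executor::KernelRuntimeContext traced_context(&my_tracer);
//   my_kernel(traced_context);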