Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-20 21:14:14 +08:00
This PR is the first step towards refactoring the nvfuser build so that the codegen becomes a standalone library. Contents of this PR:

1. The nvfuser code base has been moved to `./nvfuser`, from `./torch/csrc/jit/codegen/cuda/`, except for the registration code used for integration (interface.h/interface.cpp).
2. The build system is split so that nvfuser generates its own `.so` files. Currently these are:
   - `libnvfuser_codegen.so`, which contains the integration, codegen and runtime system of nvfuser
   - `nvfuser.so`, which is nvfuser's Python API via pybind. The Python frontend is now exposed via `nvfuser._C.XXX` instead of `torch._C._nvfuser`
3. The nvfuser C++ tests are now compiled into `nvfuser_tests`.
4. CMake is refactored so that:
   - nvfuser now has its own `CMakeLists.txt`, which is under `torch/csrc/jit/codegen/cuda/`
   - nvfuser backend code is no longer compiled inside `libtorch_cuda_xxx`
   - nvfuser is added as a subdirectory under `./CMakeLists.txt` at the very end, after torch is built
   - since nvfuser has a dependency on torch, nvfuser is registered at runtime via dlopen (`at::DynamicLibrary`). This avoids a circular dependency in CMake, which would be a nightmare to handle. For details, look at `torch/csrc/jit/codegen/cuda/interface.cpp::LoadingNvfuserLibrary`; a minimal sketch of this loading pattern is included at the end of this description.

Future work scoped for follow-up PRs:

- Currently the nvfuser codegen has a dependency on torch; we need to refactor that out so we can move nvfuser into a submodule and not rely on dlopen to load the library. @malfet
- Since we moved nvfuser into a CMake build, we effectively disabled the Bazel build for nvfuser. This could impact internal workloads at Meta, so we need to put that support back. cc'ing @vors

Pull Request resolved: https://github.com/pytorch/pytorch/pull/89621
Approved by: https://github.com/davidberard98
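For readers unfamiliar with the dlopen-based registration, here is a minimal sketch of the loading pattern, assuming `at::DynamicLibrary` from `ATen/DynamicLibrary.h`; the function name, library handling, and error handling are illustrative and do not reproduce the actual `LoadingNvfuserLibrary` implementation:

```cpp
// Illustrative sketch only -- not the actual interface.cpp implementation.
#include <ATen/DynamicLibrary.h>

#include <iostream>
#include <memory>

namespace {

// Opening libnvfuser_codegen.so is assumed to be sufficient for registration:
// the library registers the nvfuser backend from its own initialization code
// once loaded. The handle is kept alive for the lifetime of the process.
void loadNvfuserLibrary() {
  static std::unique_ptr<at::DynamicLibrary> nvfuser_lib;
  try {
    nvfuser_lib =
        std::make_unique<at::DynamicLibrary>("libnvfuser_codegen.so");
  } catch (const std::exception& e) {
    // Best-effort: torch still works without the nvfuser backend, so a
    // failed load is reported rather than treated as fatal.
    std::cerr << "Loading nvfuser library failed: " << e.what() << std::endl;
  }
}

} // namespace
```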
77 lines · 1.8 KiB · C++
#include <instrumentation.h>

#include <c10/macros/Export.h>

#ifdef _WIN32
#include <c10/util/win32-headers.h>
#else
#include <pthread.h>
#include <unistd.h>
#endif

namespace torch {
namespace jit {
namespace fuser {
namespace cuda {
namespace inst {

Trace::Trace() {
  const char* trace_filename = getenv("PYTORCH_NVFUSER_TRACE");
  if (trace_filename != nullptr) {
    log_file_ = fopen(trace_filename, "w");
    TORCH_CHECK(log_file_ != nullptr, "Can't open trace file");

    // Disable the file stream buffering, since it may result
    // in torn writes in multi-threaded tracing
    setbuf(log_file_, nullptr);

    // Print the trace prologue
    // (including a dummy TRACE_START event)
    fprintf(log_file_, "{\n\"traceEvents\": [\n");
    start_timestamp_ = Clock::now();
    logEvent('I', "TRACE_START");
  }

  if (isOptionDisabled(DisableOption::Nvtx)) {
    record_nvtx_range_ = false;
  }
}

Trace::~Trace() {
  if (log_file_ != nullptr) {
    // Print trace epilogue
    logEvent('I', "TRACE_END", ' ');
    fprintf(log_file_, "],\n\"displayTimeUnit\": \"ms\"\n}\n");
    fclose(log_file_);
  }
}

void Trace::logEvent(char ph, const char* name, char sep) {
  const std::chrono::duration<double> d = Clock::now() - start_timestamp_;
  const double elapsed = d.count() * 1e6;

#ifdef _WIN32
  const unsigned int pid = GetCurrentProcessId();
  const unsigned int tid = GetCurrentThreadId();
#else
  const unsigned int pid = getpid();
  const unsigned int tid = std::hash<pthread_t>{}(pthread_self());
#endif // _WIN32

  fprintf(
      log_file_,
      "{ \"name\": \"%s\", \"ph\": \"%c\", \"pid\": %u, \"tid\": %u, \"ts\": %.0f }%c\n",
      name,
      ph,
      pid,
      tid,
      elapsed,
      sep);
}

} // namespace inst
} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch
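The prologue, per-event records, and epilogue above together produce Chrome trace-event JSON that `chrome://tracing` or Perfetto can load. As context, a minimal, self-contained sketch of the same technique (standalone code, not part of nvfuser; the file name and event names are illustrative):

```cpp
#include <chrono>
#include <cstdio>

int main() {
  using Clock = std::chrono::steady_clock;
  const auto start = Clock::now();

  std::FILE* f = std::fopen("trace.json", "w");
  if (f == nullptr) {
    return 1;
  }

  // Prologue of the trace file, as in Trace::Trace().
  std::fprintf(f, "{\n\"traceEvents\": [\n");

  // One JSON object per event, mirroring Trace::logEvent();
  // 'ph' is the Chrome trace-event phase ('I' = instant event).
  auto log_event = [&](char ph, const char* name, char sep) {
    const std::chrono::duration<double> d = Clock::now() - start;
    std::fprintf(
        f,
        "{ \"name\": \"%s\", \"ph\": \"%c\", \"ts\": %.0f }%c\n",
        name,
        ph,
        d.count() * 1e6, // microseconds, as in the original
        sep);
  };

  log_event('I', "TRACE_START", ',');
  log_event('I', "TRACE_END", ' ');

  // Epilogue, as in Trace::~Trace().
  std::fprintf(f, "],\n\"displayTimeUnit\": \"ms\"\n}\n");
  std::fclose(f);
  return 0;
}
```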