Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-20 12:54:11 +08:00
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/65505

Generated with `fastmod -m 'toTuple\(\)(\s*)->' 'toTupleRef()${1}.'`, followed by `fastmod '(std::move\(.*)toTupleRef\(\).' '${1}toTuple()->'` to unbreak 2 callsites.

ghstack-source-id: 142065835
Test Plan: CI
Reviewed By: gchanan
Differential Revision: D31131025
fbshipit-source-id: 54457ae5bbeb38db9c7f196d469b98521c3d3f34
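For context: the codemod replaces an owning accessor with a borrowing one. `IValue::toTuple()` returns a `c10::intrusive_ptr<c10::ivalue::Tuple>` and therefore bumps the refcount on every call, while `toTupleRef()` returns a reference to the same tuple with no refcount traffic. A minimal sketch of the two styles (the `inspect` helper is illustrative, not part of this change):

#include <torch/script.h>

// Illustrative helper: reads the same tuple through both accessors.
void inspect(const torch::jit::IValue& value) {
  // Old style: toTuple() hands back an owning
  // c10::intrusive_ptr<c10::ivalue::Tuple>, incrementing the refcount.
  auto owned = value.toTuple();

  // New style: toTupleRef() borrows a reference to the tuple; cheaper,
  // but only valid while `value` stays alive.
  const auto& borrowed = value.toTupleRef();

  AT_ASSERT(owned->elements().size() == borrowed.elements().size());
}

The second `fastmod` reverts the rewrite at `std::move(...)` callsites: the rvalue overload of `toTuple()` moves the tuple pointer out of the `IValue`, which a borrowed reference cannot replace.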
40 lines · 1.3 KiB · C++
#include <torch/cuda.h>
#include <torch/script.h>

#include <iostream>
#include <string>

#include "custom_backend.h"

// Load a module lowered for the custom backend from \p path and test that
// it can be executed and produces correct results.
void load_serialized_lowered_module_and_execute(const std::string& path) {
  torch::jit::Module module = torch::jit::load(path);
  // The custom backend is hardcoded to compute f(a, b) = (a + b, a - b).
  auto tensor = torch::ones(5);
  std::vector<torch::jit::IValue> inputs{tensor, tensor};
  auto output = module.forward(inputs);
  AT_ASSERT(output.isTuple());
  auto output_elements = output.toTupleRef().elements();
  for (auto& e : output_elements) {
    AT_ASSERT(e.isTensor());
  }
  // The tuple should contain exactly the two outputs of f(a, b).
  AT_ASSERT(output_elements.size() == 2);
  AT_ASSERT(output_elements[0].toTensor().allclose(tensor + tensor));
  AT_ASSERT(output_elements[1].toTensor().allclose(tensor - tensor));
}

int main(int argc, const char* argv[]) {
  if (argc != 2) {
    std::cerr
        << "usage: test_custom_backend <path-to-exported-script-module>\n";
    return -1;
  }
  const std::string path_to_exported_script_module = argv[1];

  std::cout << "Testing " << torch::custom_backend::getBackendName() << "\n";
  load_serialized_lowered_module_and_execute(path_to_exported_script_module);

  std::cout << "OK\n";
  return 0;
}