Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-20 21:14:14 +08:00)
Summary: VariadicOpConverter and FuseListUnpackConverter introduce ops that only have CPU kernels. Currently, these graph passes run only if static_dispatch is enabled. As we plan to enable static_dispatch by default, this diff adds a check so the passes only transform nodes whose input/output tensors are all on CPU.

Test Plan: CI

Rollback Plan:

Differential Revision: D79295640

Pull Request resolved: https://github.com/pytorch/pytorch/pull/159519
Approved by: https://github.com/dolpm, https://github.com/henryoier
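To make the gating concrete, here is a minimal, self-contained sketch of the idea under a simplified, hypothetical node model; the Tensor and SimpleNode types and the pass skeleton below are illustrations only, not the torch::nativert API.

#include <vector>

// Hypothetical stand-ins for illustration; these are not torch::nativert types.
struct Tensor {
  bool is_cpu;  // true if the tensor lives on the CPU
};

struct SimpleNode {
  std::vector<Tensor> inputs;
  std::vector<Tensor> outputs;
};

// A node may be rewritten to a CPU-only replacement op only if every input
// and output tensor is on the CPU.
bool canUseCpuOnlyKernel(const SimpleNode& node) {
  for (const Tensor& t : node.inputs) {
    if (!t.is_cpu) return false;
  }
  for (const Tensor& t : node.outputs) {
    if (!t.is_cpu) return false;
  }
  return true;
}

// Skeleton of a pass that rewrites only the nodes that pass the check.
void runCpuOnlyPass(std::vector<SimpleNode>& graph) {
  for (SimpleNode& node : graph) {
    if (!canUseCpuOnlyKernel(node)) {
      continue;  // non-CPU node: leave it untouched
    }
    // ... apply the CPU-only conversion to `node` here ...
  }
}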
23 lines · 536 B · C++
#pragma once

namespace torch::nativert {

class Node;

/**
 * Utility functions for working with Graph nodes and values.
 */

/**
 * Check if all input/output tensors are on CPU and all device-type attributes
 * have the value of 'cpu'. This is a util function to check if a Node can use
 * static dispatch CPU kernels.
 *
 * @param node The node to check
 * @return true if all I/O tensors and device attributes are on CPU, false
 * otherwise
 */
bool areAllIOTensorsAttributesOnCpu(const Node& node);

} // namespace torch::nativert
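As a hedged usage sketch, a call site in one of these passes might look roughly like the following. The maybeApplyCpuOnlyRewrite wrapper is an assumption made for illustration; only areAllIOTensorsAttributesOnCpu comes from the header above, and it is redeclared here because the header's include path is not shown on this page.

namespace torch::nativert {

// Mirrors the declarations in the header above.
class Node;
bool areAllIOTensorsAttributesOnCpu(const Node& node);

// Hypothetical wrapper (not part of the header): apply a CPU-only rewrite,
// such as a variadic-op conversion or list-unpack fusion, only when the
// node passes the CPU check.
void maybeApplyCpuOnlyRewrite(Node& node) {
  if (!areAllIOTensorsAttributesOnCpu(node)) {
    return;  // the replacement op only has a CPU kernel; skip this node
  }
  // ... rewrite `node` here ...
}

} // namespace torch::nativert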