Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-20 21:14:14 +08:00)
Summary: The goal of this PR is to add infrastructure to convert (hipify) CUDA ops into [HIP](https://github.com/ROCm-Developer-Tools/HIP) ops at **compile** time. Note that HIP ops, which are portable C++ code, can run on both AMD and NVIDIA platforms.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/9322
Differential Revision: D8884707
Pulled By: bddppq
fbshipit-source-id: dabc6319546002c308c10528238e6684f7aef0f8
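To make the idea concrete, here is a rough before/after sketch of the kind of source-to-source translation hipify performs. The names (`scale_kernel`, `run`, `n`) are hypothetical and the snippet is illustrative only, not actual output of the hipify tooling. A CUDA translation unit such as:

```cpp
#include <cuda_runtime.h>

__global__ void scale_kernel(float* x, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= 2.0f;
}

void run(int n) {
  float* d_buf = nullptr;
  cudaMalloc(&d_buf, n * sizeof(float));  // error checking omitted for brevity
  scale_kernel<<<(n + 255) / 256, 256>>>(d_buf, n);
  cudaFree(d_buf);
}
```

is rewritten into the equivalent HIP code, largely by systematic renaming of the runtime API plus the portable kernel-launch macro:

```cpp
#include <hip/hip_runtime.h>

__global__ void scale_kernel(float* x, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= 2.0f;
}

void run(int n) {
  float* d_buf = nullptr;
  hipMalloc(&d_buf, n * sizeof(float));  // error checking omitted for brevity
  // hipLaunchKernelGGL replaces the <<<...>>> launch syntax.
  hipLaunchKernelGGL(scale_kernel, dim3((n + 255) / 256), dim3(256), 0, 0,
                     d_buf, n);
  hipFree(d_buf);
}
```

Because the HIP runtime mirrors the CUDA runtime API, the hipified code compiles for AMD GPUs under ROCm while remaining buildable for NVIDIA GPUs.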
47 lines · 1.1 KiB · C++
#pragma once

#include <cfloat>

#include "caffe2/core/context.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/operators/conv_pool_op_base.h"
#include "caffe2/operators/pool_op.h"
#include "caffe2/utils/math.h"

namespace caffe2 {

class MaxPoolWithIndexOp final : public ConvPoolOpBase<CUDAContext> {
 public:
  USE_CONV_POOL_BASE_FUNCTIONS(CUDAContext);
  MaxPoolWithIndexOp(const OperatorDef& operator_def, Workspace* ws)
      : ConvPoolOpBase<CUDAContext>(operator_def, ws) {}
  ~MaxPoolWithIndexOp() {}

  template <typename T>
  bool DoRunWithType();

  bool RunOnDevice() override;

  // Input: X
  // Output: Y, mask
};

class MaxPoolWithIndexGradientOp final : public ConvPoolOpBase<CUDAContext> {
 public:
  USE_CONV_POOL_BASE_FUNCTIONS(CUDAContext);
  MaxPoolWithIndexGradientOp(const OperatorDef& operator_def, Workspace* ws)
      : ConvPoolOpBase<CUDAContext>(operator_def, ws) {}
  ~MaxPoolWithIndexGradientOp() {}

  template <typename T>
  bool DoRunWithType();

  bool RunOnDevice() override;

  // Input: X, dY, mask
  // Output: dX
};

} // namespace caffe2
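For context, the header only declares `RunOnDevice()` and `DoRunWithType<T>()`; the definitions live in the matching .cu file. The sketch below shows how such declarations are typically completed using standard Caffe2 conventions (type dispatch via `DispatchHelper` and registration via `REGISTER_CUDA_OPERATOR`). It is illustrative, not the actual implementation: the real file also defines the pooling kernels, which are omitted here.

```cpp
namespace caffe2 {

bool MaxPoolWithIndexOp::RunOnDevice() {
  // Dispatch to DoRunWithType<T>() based on the element type of input X;
  // only float is shown in this sketch.
  return DispatchHelper<TensorTypes<float>>::call(this, Input(0));
}

bool MaxPoolWithIndexGradientOp::RunOnDevice() {
  return DispatchHelper<TensorTypes<float>>::call(this, Input(0));
}

// Register both ops so nets can refer to them by name.
REGISTER_CUDA_OPERATOR(MaxPoolWithIndex, MaxPoolWithIndexOp);
REGISTER_CUDA_OPERATOR(MaxPoolWithIndexGradient, MaxPoolWithIndexGradientOp);

} // namespace caffe2
```

Because the classes are written against `CUDAContext`, the compile-time hipify step added by this PR can translate this header and its .cu counterpart so the same operator also builds for the ROCm/HIP backend.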