Files
pytorch/caffe2/operators/max_pool_with_index_gpu.h
Peter Yeh 54db14e390 HIP Operators Generator--> HipOpG (#9322)
Summary:
The goal of this PR is to add infrastructure to convert (hipify) CUDA ops into [HIP](https://github.com/ROCm-Developer-Tools/HIP) ops at **compile** time.

Note that HIP ops are portable C++ code that can run on both AMD and NVIDIA platforms.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/9322

Differential Revision: D8884707

Pulled By: bddppq

fbshipit-source-id: dabc6319546002c308c10528238e6684f7aef0f8
2018-07-19 00:26:06 -07:00

47 lines
1.1 KiB
C++

#pragma once
#include <cfloat>
#include "caffe2/core/context.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/operators/conv_pool_op_base.h"
#include "caffe2/operators/pool_op.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
// Max-pooling operator (CUDA) that, in addition to the pooled output Y,
// emits a "mask" tensor — presumably the flat indices of the max elements,
// so the gradient op can route gradients without recomputing the argmax
// (TODO confirm against the .cu implementation).
class MaxPoolWithIndexOp final : public ConvPoolOpBase<CUDAContext> {
 public:
  USE_CONV_POOL_BASE_FUNCTIONS(CUDAContext);

  // Pooling geometry (kernel/stride/pad) is parsed by the base class.
  MaxPoolWithIndexOp(const OperatorDef& operator_def, Workspace* ws)
      : ConvPoolOpBase<CUDAContext>(operator_def, ws) {}

  // No resources beyond what the base class manages; prefer `= default`
  // over an empty user-provided body (modernize-use-equals-default).
  ~MaxPoolWithIndexOp() = default;

  // Type-dispatched implementation (defined in the .cu file);
  // T is the element type of X.
  template <typename T>
  bool DoRunWithType();

  // Entry point; dispatches to DoRunWithType<T>. Returns false on failure.
  bool RunOnDevice() override;

  // Input: X
  // Output: Y, mask
};
// Gradient of MaxPoolWithIndexOp: scatters dY back into dX using the
// saved "mask" indices, avoiding a second argmax pass
// (TODO confirm against the .cu implementation).
class MaxPoolWithIndexGradientOp final : public ConvPoolOpBase<CUDAContext> {
 public:
  USE_CONV_POOL_BASE_FUNCTIONS(CUDAContext);

  // Pooling geometry (kernel/stride/pad) is parsed by the base class.
  MaxPoolWithIndexGradientOp(const OperatorDef& operator_def, Workspace* ws)
      : ConvPoolOpBase<CUDAContext>(operator_def, ws) {}

  // No resources beyond what the base class manages; prefer `= default`
  // over an empty user-provided body (modernize-use-equals-default).
  ~MaxPoolWithIndexGradientOp() = default;

  // Type-dispatched implementation (defined in the .cu file);
  // T is the element type of X / dY.
  template <typename T>
  bool DoRunWithType();

  // Entry point; dispatches to DoRunWithType<T>. Returns false on failure.
  bool RunOnDevice() override;

  // Input: X, dY, mask
  // Output: dX
};
}; // namespace caffe2