// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/extension.h"

#include <algorithm>
#include <vector>

#define CHECK_CPU_INPUT(x) PD_CHECK(x.place() == paddle::PlaceType::kCPU, #x " must be a CPU Tensor.")


// Element-wise clip on a raw buffer: out[i] = min(max(x[i], min), max).
//
// @tparam data_t  floating-point element type selected by the dispatcher
// @param x_data   input buffer of x_numel elements
// @param out_data output buffer of x_numel elements (may not alias x_data)
// @param min      lower clip bound
// @param max      upper clip bound
// @param x_numel  number of elements to process
template <typename data_t>
void clip_cpu_forward_kernel(const data_t* x_data,
                             data_t* out_data,
                             float min, float max,
                             int64_t x_numel) {
    // Cast the bounds once instead of per element.
    const data_t lo = static_cast<data_t>(min);
    const data_t hi = static_cast<data_t>(max);
    // int64_t index: an int counter would truncate/overflow for tensors
    // with more than INT_MAX elements.
    for (int64_t i = 0; i < x_numel; ++i) {
        out_data[i] = std::min(std::max(x_data[i], lo), hi);
    }
}

// Backward of clip: the upstream gradient flows through only where the
// forward output was strictly inside (min, max); clamped positions get 0.
//
// @tparam data_t       floating-point element type selected by the dispatcher
// @param grad_out_data upstream gradient buffer of out_numel elements
// @param out_data      forward output buffer of out_numel elements
// @param grad_x_data   result gradient buffer of out_numel elements
// @param min           lower clip bound used in the forward pass
// @param max           upper clip bound used in the forward pass
// @param out_numel     number of elements to process
template <typename data_t>
void clip_cpu_backward_kernel(const data_t* grad_out_data,
                              const data_t* out_data,
                              data_t* grad_x_data,
                              float min, float max,
                              int64_t out_numel) {
    // int64_t index: an int counter would truncate/overflow for tensors
    // with more than INT_MAX elements.
    for (int64_t i = 0; i < out_numel; ++i) {
        const bool inside = out_data[i] > min && out_data[i] < max;
        // Select directly instead of multiplying by 1/0.
        grad_x_data[i] = inside ? grad_out_data[i] : static_cast<data_t>(0);
    }
}

// Forward op: clips every element of `x` into [min, max] on CPU and
// returns the result as a single-element tensor vector.
std::vector<paddle::Tensor> clip_cpu_forward(const paddle::Tensor& x, float min, float max) {
    CHECK_CPU_INPUT(x);
    // Tensor API reference:
    // https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/extension/include/ext_tensor.h
    auto out = paddle::Tensor(paddle::PlaceType::kCPU, x.shape());
    PD_DISPATCH_FLOATING_TYPES(
        x.type(), "clip_cpu_forward_kernel", ([&] {
            const data_t* x_ptr = x.data<data_t>();
            data_t* out_ptr = out.mutable_data<data_t>(x.place());
            clip_cpu_forward_kernel<data_t>(x_ptr, out_ptr, min, max, x.size());
            }));
    return { out };
}

// Backward op: routes grad_out through to grad_x wherever the forward
// output was not clamped, and zeroes the gradient elsewhere.
std::vector<paddle::Tensor> clip_cpu_backward(const paddle::Tensor& x,
                                              const paddle::Tensor& out,
                                              const paddle::Tensor& grad_out,
                                              float min, float max) {
    // Validate tensor placement, consistent with clip_cpu_forward; the
    // original backward path skipped these checks.
    CHECK_CPU_INPUT(x);
    CHECK_CPU_INPUT(out);
    CHECK_CPU_INPUT(grad_out);
    auto grad_x = paddle::Tensor(paddle::PlaceType::kCPU, x.shape());
    PD_DISPATCH_FLOATING_TYPES(
        out.type(), "clip_cpu_backward_kernel", ([&] {
            clip_cpu_backward_kernel<data_t>(
                grad_out.data<data_t>(),
                out.data<data_t>(),
                grad_x.mutable_data<data_t>(x.place()),
                min, max, out.size());
            }));
    return { grad_x };
}

// Shape inference: clip is element-wise, so the output shape equals the
// input shape.
std::vector<std::vector<int64_t>> clipInferShape(std::vector<int64_t> x_shape) {
    std::vector<std::vector<int64_t>> shapes;
    shapes.push_back(std::move(x_shape));
    return shapes;
}

// Dtype inference: clip does not change element type, so the output keeps
// the input's dtype.
std::vector<paddle::DataType> clipInferDtype(paddle::DataType x_dtype) {
    std::vector<paddle::DataType> dtypes{ x_dtype };
    return dtypes;
}

// Register the forward op "custom_clip_cpu": Out = clip(X, min, max),
// with shape/dtype inference delegated to the helpers above.
PD_BUILD_OP(custom_clip_cpu)
    .Inputs({ "X" })
    .Outputs({ "Out" })
    .Attrs({ "min: float", "max: float" })
    .SetKernelFn(PD_KERNEL(clip_cpu_forward))
    .SetInferShapeFn(PD_INFER_SHAPE(clipInferShape))
    .SetInferDtypeFn(PD_INFER_DTYPE(clipInferDtype));

// Register the gradient op: consumes X, Out and Grad(Out) and produces
// Grad(X) via clip_cpu_backward.
PD_BUILD_GRAD_OP(custom_clip_cpu)
    .Inputs({ "X", "Out", paddle::Grad("Out") })
    .Outputs({ paddle::Grad("X") })
    .Attrs({ "min: float", "max: float" })
    .SetKernelFn(PD_KERNEL(clip_cpu_backward));