/**
*
* Copyright (C) 2024. Huawei Technologies Co., Ltd. All rights reserved.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*/
#include <torch/extension.h>
#include <torch/csrc/autograd/custom_function.h>
#include "../common/pytorch_npu_helper.hpp"
using torch::autograd::Function;
using torch::autograd::AutogradContext;
using tensor_list = std::vector<at::Tensor>;
using namespace at;


/**
 * NPU implementation of myops::my_op (a TensorFlow-style gather dispatched
 * to the aclnnGatherV3 kernel).
 *
 * @param x                      input tensor to gather from
 * @param indices                index tensor
 * @param axis                   axis tensor passed through to aclnnGatherV3
 * @param batch_dims             number of leading batch dimensions shared by
 *                               x and indices
 * @param negative_index_support forwarded to aclnnGatherV3
 * @param axis_value             host-side copy of the gather axis, used only
 *                               to compute the output shape
 * @param caseNum                unused here; kept for schema compatibility
 * @return a freshly allocated tensor holding the gather result
 */
at::Tensor my_op_impl_npu(const at::Tensor& x, const at::Tensor& indices, const at::Tensor& axis, int64_t batch_dims, bool negative_index_support, int64_t axis_value, [[maybe_unused]] int64_t caseNum) {
    // Output shape: x's dims, with dim `axis_value` replaced by indices'
    // dims after the leading `batch_dims` batch dimensions.
    const auto xSizes = x.sizes();
    const auto idxSizes = indices.sizes();
    const int64_t xRank = static_cast<int64_t>(xSizes.size());
    const int64_t idxRank = static_cast<int64_t>(idxSizes.size());
    // Clamp so a negative batch_dims keeps all index dims, matching the
    // original `j >= batch_dims` check, and never indexes out of range.
    const int64_t firstIdxDim = batch_dims > 0 ? batch_dims : 0;

    std::vector<int64_t> outShape;
    outShape.reserve(static_cast<size_t>(xRank + idxRank));
    for (int64_t i = 0; i < xRank; ++i) {
        if (i == axis_value) {
            for (int64_t j = firstIdxDim; j < idxRank; ++j) {
                outShape.push_back(idxSizes[j]);
            }
        } else {
            outShape.push_back(xSizes[i]);
        }
    }

    at::Tensor result = at::empty(outShape, x.options());
    // NOTE(review): the kernel is launched 50 times into the same output —
    // presumably a warm-up/benchmark loop; confirm this is intended before
    // using this op in production.
    constexpr int64_t kLaunchRounds = 50;
    for (int64_t round = 0; round < kLaunchRounds; ++round) {
        EXEC_NPU_CMD(aclnnGatherV3, x, indices, axis, batch_dims, negative_index_support, result);
    }
    return result;
}



// Registers the schema for myops::my_op (edit this schema when changing
// my_op's inputs/outputs): x/indices/axis tensors plus host-side ints and
// a flag, returning a single Tensor.
TORCH_LIBRARY(myops, m) {
		m.def("my_op(Tensor x, Tensor indices, Tensor axis, int batch_dims, bool negative_index_support,int axis_value, int caseNum) -> Tensor");
}

// Do not modify: binds the NPU (PrivateUse1 dispatch key) implementation
// of myops::my_op to my_op_impl_npu.
TORCH_LIBRARY_IMPL(myops, PrivateUse1, m) {
		m.impl("my_op", &my_op_impl_npu);
}

// Do not modify: exposes the implementation directly to Python as
// `custom_op` ("tf gather") via pybind11, in addition to the
// torch.ops.myops.my_op registration above.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
		m.def("custom_op", &my_op_impl_npu, "tf gather");
}
