/**
 * Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
 *
 * This file is a part of the CANN Open Software.
 * Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
 * Please refer to the License for details. You may not use this file except in compliance with the License.
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
 * See LICENSE in the root of the software repository for the full text of the License.
 */

#include "pool/kernel/pool_kernel.h"
namespace {
// Operator traits: one float input, one float output, one int32_t temp (scratch) buffer.
using PoolOpTraits = ATVC::OpTraits<ATVC::OpInputs<float>, ATVC::OpOutputs<float>, ATVC::OpTemps<int32_t>>;

template<typename Traits>
struct Edge2C3ComputeFunc {
    static constexpr ATVC::Layout2Dim TILE_LAYOUT{16, 16};     // Basic tile width/height before clipping; width must be 32B aligned
    static constexpr ATVC::PoolTilePadding TILE_PADDING{8, 8, 1, 1};  // Tile top/bottom/left/right padding base values before clipping; left/right must be 32B aligned
    /*
    Purpose: 3x3 edge filter over a padded tile, computed in three stages:
        1) horizontal central difference via byte-offset Gather (x[right] - x[left])
        2) vertical sum of three adjacent rows of the differences, divided by 3
        3) abs() of the average, clamped to 255
    Parameters:
        a    : input tile; also used as scratch and overwritten during computation
        c    : output tile (result written here)
        temp : int32_t scratch tensor used to build byte offsets for Gather
    */
    template<typename T,  typename U>
    // operator() overload; invoked by the operator template class (PoolOpTemplate)
    __aicore__ inline void operator()(AscendC::LocalTensor<T> a, AscendC::LocalTensor<T> c, AscendC::LocalTensor<U> temp) {
        AscendC::DumpTensor(a, 111, 96);  // debug dump of the input tile
        uint32_t calcSize = c.GetSize();
        uint32_t sizeT = sizeof(T);
        // Full padded row width in elements: left padding + tile width + right padding.
        static constexpr uint32_t TENSOR_WIDTH = TILE_PADDING.left + TILE_LAYOUT.width + TILE_PADDING.right;
        // 3x3 neighborhood index layout:
        // 0 1 2
        // 3 4 5
        // 6 7 8
        // Compute: x[1,4,7] = x[2,5,8] - x[0,3,6]  (horizontal central difference)
        AscendC::CreateVecIndex<U>(temp, (int32_t)2, calcSize);   // temp[i] = 2*i (stride-2 element indices)
        AscendC::Muls<U>(temp, temp, sizeT, calcSize);            // temp[i] = 2*i*sizeof(T): byte offsets for Gather
        AscendC::LocalTensor<uint32_t> tempRef = temp.template ReinterpretCast<uint32_t>();
        AscendC::Gather(c, a, tempRef, 0, calcSize - 2);          // c[i] = a[2*i]
        AscendC::Sub(a, c, a, calcSize);                          // a[i] = a[2*i] - a[i]
        // Shift offsets back by 3 elements: temp[i] = (2*i - 3)*sizeof(T).
        // NOTE(review): sizeT is uint32_t, so (sizeT * -3) wraps in unsigned arithmetic
        // before narrowing to U (int32_t); on two's-complement targets this produces the
        // intended negative byte offset — confirm this wrap is deliberate.
        AscendC::Adds<U>(temp, temp, (sizeT * -3), calcSize);
        // Clamp to zero with count 1: only temp[0] is rectified.
        // NOTE(review): temp[1] ((2*1-3)*sizeof(T)) is also negative but is not clamped —
        // verify the following Gather never reads that entry out of range.
        AscendC::Relu(temp, temp, 1);
        AscendC::Gather(c, a, tempRef, 0, calcSize - 2);          // regather using the shifted offsets
        // Compute: x[4] = min(abs((x[1] + x[4] + x[7]) / 3), 255)  (vertical 3-row average + clamp)
        AscendC::Add(a[TENSOR_WIDTH], c, c[TENSOR_WIDTH * 2], calcSize - TENSOR_WIDTH * 2);  // row above + row below
        AscendC::Add(c, a, c, calcSize);                          // + center row
        AscendC::Muls(c, c, 1/3.0f, calcSize);                    // average of the three rows
        AscendC::Abs(c, c, calcSize);
        AscendC::Mins(c, c, 255.0f, calcSize);                    // clamp magnitude to 255
        AscendC::DumpTensor(c, 222, 96);  // debug dump of the result tile
    }

};
}
/*
 * Device kernel entry point for the edge filter (AIV-only task type).
 * Parameters:
 *   a     : input tensor in global memory
 *   c     : output tensor in global memory
 *   param : pool scheduling/tiling parameters, forwarded to PoolOpTemplate::Run
 * Template parameters:
 *   Traits      : ATVC operator traits (input/output/temp dtypes)
 *   totalLayout : total 2D layout of the input, bound at compile time
 */
template<class Traits, const auto& totalLayout>
__global__ __aicore__ void EdgeCustom(GM_ADDR a, GM_ADDR c, ATVC::PoolParam param)
{
    KERNEL_TASK_TYPE_DEFAULT(KERNEL_TYPE_AIV_ONLY);
    // Fixed debug-log typo: "total h ;%u" -> "total h:%u" (stray space + semicolon).
    AscendC::printf("total h:%u total w:%u\n", totalLayout.height, totalLayout.width);
    // Pass the Edge2C3ComputeFunc functor as a template parameter to instantiate PoolOpTemplate.
    auto op = ATVC::Kernel::PoolOpTemplate<Edge2C3ComputeFunc<Traits>, totalLayout>();
    op.Run(a, c, &param); // Pass input, output, then param into Run to perform the GM->GM computation
}

static constexpr ATVC::Layout2Dim totalLayout_512_750{512, 750};  // w h
// Host-side launcher for the 512x750 float variant: launches the EdgeCustom kernel
// on <blockDim> cores via <stream>, forwarding input a, output c, and the pool params.
void EdgeCustomFloat_512_750(uint32_t blockDim, void* stream, uint8_t* a, uint8_t* c, ATVC::PoolParam param) {
    EdgeCustom<PoolOpTraits, totalLayout_512_750><<<blockDim, nullptr, stream>>>(a, c, param);
}
static constexpr ATVC::Layout2Dim totalLayout_96_640{96, 640}; // w h
// Host-side launcher for the 96x640 float variant: launches the EdgeCustom kernel
// on <blockDim> cores via <stream>, forwarding input a, output c, and the pool params.
void EdgeCustomFloat_96_640(uint32_t blockDim, void* stream, uint8_t* a, uint8_t* c, ATVC::PoolParam param) {
    EdgeCustom<PoolOpTraits, totalLayout_96_640><<<blockDim, nullptr, stream>>>(a, c, param);
}