#include "../data_utils.h"
#include "acl/acl.h"
#include "../arch.h"
#include "kernel_operator.h"
#include <type_traits>
#include<iostream>

// Bytes reserved per core for the debug/"show" buffer (100 uint32 slots).
constexpr uint32_t show_size = 100 * sizeof(uint32_t);
constexpr uint32_t BLOCK_SIZE = 32; // NOTE: this is NOT a launch/core count — it is the byte-alignment granularity used by the Align* helpers below.

template<
    typename ElementType
>
uint32_t h_AlignUp(uint32_t element_num)
{
    // Host-side helper: round `element_num` elements of ElementType up so
    // that the total byte size is a multiple of BLOCK_SIZE, then return the
    // corresponding (padded) element count.
    const uint32_t total_bytes = element_num * sizeof(ElementType);
    const uint32_t whole_blocks = (total_bytes + BLOCK_SIZE - 1) / BLOCK_SIZE;
    return whole_blocks * BLOCK_SIZE / sizeof(ElementType);
}


template<
    typename ElementType
>
__aicore__ uint32_t AlignUp(uint32_t element_num)
{
    // Device-side helper: round `element_num` elements up to the next
    // BLOCK_SIZE-byte boundary and return the padded element count.
    const uint32_t total_bytes = element_num * sizeof(ElementType);
    const uint32_t whole_blocks = (total_bytes + BLOCK_SIZE - 1) / BLOCK_SIZE;
    return whole_blocks * BLOCK_SIZE / sizeof(ElementType);
}

template<
    typename ElementType
>
__aicore__ uint32_t AlignDown(uint32_t element_num)
{
    // Device-side helper: round `element_num` elements DOWN to the previous
    // BLOCK_SIZE-byte boundary and return the truncated element count.
    // (The original comment said "round up"; the code truncates.)
    const uint32_t total_bytes = element_num * sizeof(ElementType);
    const uint32_t whole_blocks = total_bytes / BLOCK_SIZE;
    return whole_blocks * BLOCK_SIZE / sizeof(ElementType);
}


// Assumed maximum length of one LocalTensor, in bytes (16 KiB).
constexpr int32_t LOCTensorLen = 1 << 14; // NOTE: in principle this capacity is shared by the A, B and C tensors, so it should be divided by 3; it is simply kept small instead.
// Reference code used 1 << 18; same /3 caveat as above.
// NOTE(review): L1Size appears unused in this file — confirm before removing.
constexpr int32_t L1Size = 1 << 16;

template<
    typename ElementType,
    uint32_t maxLenBlock
>
class MatAdd{
// Element-wise addition C = A + B over a flat buffer of `total_num`
// elements. Work is strided across AI cores: core `Block_Idx` processes
// tiles of `maxLenBlock` elements starting at Block_Idx * maxLenBlock and
// stepping by Block_Num * maxLenBlock.

public:
    // Stores the problem size and the global-memory tensor views.
    // `total_num` is the TOTAL element count across all cores, not per-core.
    __aicore__ inline MatAdd(
            uint64_t total_num,
            AscendC::GlobalTensor<ElementType> Agm,
            AscendC::GlobalTensor<ElementType> Bgm,
            AscendC::GlobalTensor<ElementType> Cgm
        ):
        total_num(total_num), 
        Block_Idx(AscendC::GetBlockIdx()),
        Block_Num(AscendC::GetBlockNum()),
        Agm(Agm), Bgm(Bgm), Cgm(Cgm)
        {
        };

    // Allocates the queue buffers: one slot each, LOCTensorLen bytes.
    __aicore__ inline void Init()
    {
        pipe.InitBuffer(inQueueA, 1, LOCTensorLen);
        pipe.InitBuffer(inQueueB, 1, LOCTensorLen);
        pipe.InitBuffer(outQueueC, 1, LOCTensorLen);
    }

    // Tile loop: each iteration pushes one tile through the
    // CopyIn -> Compute -> CopyOut pipeline.
    __aicore__ inline void Process()
    {
        for(uint32_t i = Block_Idx * maxLenBlock; i < total_num; i += Block_Num * maxLenBlock)
        {
            CopyIn(i);
            Compute(i);
            CopyOut(i);
        }
    }

private:
    // Element count of the tile starting at `progress`: clipped at the tail,
    // then rounded UP to a BLOCK_SIZE-byte boundary so the DataCopy/Add
    // lengths stay aligned. (Previously this expression was duplicated in
    // CopyIn/Compute/CopyOut.)
    // NOTE(review): the tail round-up means the last tile may access slightly
    // past total_num elements; the host pads its allocations via h_AlignUp
    // to absorb this — confirm if allocation code changes.
    __aicore__ inline uint32_t TileElements(uint32_t progress)
    {
        return AlignUp<ElementType>(progress + maxLenBlock > total_num ? total_num - progress : maxLenBlock);
    }

    // GM -> local: stage one tile of A and B into the input queues.
    __aicore__ inline void CopyIn(uint32_t progress)
    {
        uint32_t current_elements = TileElements(progress);

        AscendC::LocalTensor<ElementType> aLocal = inQueueA.AllocTensor<ElementType>();
        AscendC::LocalTensor<ElementType> bLocal = inQueueB.AllocTensor<ElementType>();
        AscendC::DataCopy(aLocal, Agm[progress], current_elements);
        AscendC::DataCopy(bLocal, Bgm[progress], current_elements);
        inQueueA.EnQue<ElementType>(aLocal);
        inQueueB.EnQue<ElementType>(bLocal);
    }

    // Local compute: c = a + b on the staged tile.
    __aicore__ inline void Compute(uint32_t progress)
    {
        uint32_t current_elements = TileElements(progress);

        AscendC::LocalTensor<ElementType> aLocal = inQueueA.DeQue<ElementType>();
        AscendC::LocalTensor<ElementType> bLocal = inQueueB.DeQue<ElementType>();
        AscendC::LocalTensor<ElementType> cLocal = outQueueC.AllocTensor<ElementType>();
        AscendC::Add(cLocal, aLocal, bLocal, current_elements);
        outQueueC.EnQue<ElementType>(cLocal);
        inQueueA.FreeTensor<ElementType>(aLocal);
        inQueueB.FreeTensor<ElementType>(bLocal);
    }

    // Local -> GM: write the result tile back. The copy length must be the
    // same aligned count used in CopyIn — an unaligned length here produced
    // all-zero output in earlier experiments (see original author's note).
    __aicore__ inline void CopyOut(uint32_t progress)
    {
        uint32_t current_elements = TileElements(progress);

        AscendC::LocalTensor<ElementType> cLocal = outQueueC.DeQue<ElementType>();
        AscendC::DataCopy(Cgm[progress], cLocal, current_elements);
        outQueueC.FreeTensor<ElementType>(cLocal);
    }

    AscendC::TPipe pipe;
    AscendC::TQue<AscendC::TPosition::VECIN, 1> inQueueA, inQueueB;
    AscendC::TQue<AscendC::TPosition::VECOUT, 1> outQueueC;
    AscendC::GlobalTensor<ElementType> Agm;
    AscendC::GlobalTensor<ElementType> Bgm;
    AscendC::GlobalTensor<ElementType> Cgm;

    uint64_t total_num; // TOTAL number of elements across all cores (the old comment wrongly said per-core)
    uint32_t Block_Idx; // this core's index
    uint32_t Block_Num; // number of cores launched

};



template<
    typename ElementA,
    typename ElementB,
    typename ElementC
>
__global__ __aicore__ void kernel_do(
    __gm__ ElementA * ptr_A,
    __gm__ ElementB * ptr_B,
    __gm__ ElementC * ptr_C,
    uint32_t M,
    uint32_t N,
    uint32_t strideA,
    uint32_t strideB,
    uint32_t strideC,
    __gm__ uint32_t * ptr_show_v
)
{
    // Wrap the raw global-memory pointers in GlobalTensor views.
    AscendC::GlobalTensor<ElementA> tensorA;
    AscendC::GlobalTensor<ElementB> tensorB;
    AscendC::GlobalTensor<ElementC> tensorC;
    tensorA.SetGlobalBuffer((__gm__ ElementA *)ptr_A);
    tensorB.SetGlobalBuffer((__gm__ ElementB *)ptr_B);
    tensorC.SetGlobalBuffer((__gm__ ElementC *)ptr_C);

    // Run element-wise addition over the flat M*N buffer with 512-element
    // tiles. The stride and debug-buffer parameters are currently unused.
    MatAdd<ElementA, 512> matAddOp(M * N, tensorA, tensorB, tensorC);
    matAddOp.Init();
    matAddOp.Process();
}

// __global__ __aicore__ void test(
//     __gm__ uint32_t * ptr_show_v
// )
// {
//     ptr_show_v[0] = 1;
// }


void run(uint32_t M, uint32_t N, uint32_t strideA, uint32_t strideB, uint32_t strideC)
{
    aclrtStream stream;
    const char *aclConfigPath = "../acl.json";
    ACL_CHECK(aclInit(nullptr));
    ACL_CHECK(aclrtSetDevice(DEVICE))
    ACL_CHECK(aclrtCreateStream(&stream));

    uint64_t A_size = h_AlignUp<__fp16>(M * strideA) * sizeof(__fp16);
    uint64_t B_size = h_AlignUp<__fp16>(M * strideB) * sizeof(__fp16);
    uint64_t C_size = h_AlignUp<__fp16>(M * strideC) * sizeof(__fp16);

    __fp16 *h_C;
    ACL_CHECK(aclrtMallocHost((void**)(&h_C), C_size));

    std::vector<__fp16> h_A(A_size / sizeof(__fp16), (__fp16)1.0);
    std::vector<__fp16> h_B(B_size / sizeof(__fp16), (__fp16)1.0);

    ReadFile("./input/A.bin", A_size, h_A.data(), A_size);
    ReadFile("./input/B.bin", B_size, h_B.data(), B_size);


    __fp16* d_A, *d_B, *d_C;
    ACL_CHECK(aclrtMalloc((void**)&d_A, A_size, ACL_MEM_MALLOC_HUGE_FIRST));
    ACL_CHECK(aclrtMalloc((void**)&d_B, B_size, ACL_MEM_MALLOC_HUGE_FIRST));
    ACL_CHECK(aclrtMalloc((void**)&d_C, C_size, ACL_MEM_MALLOC_HUGE_FIRST));

    ACL_CHECK(aclrtMemcpy(d_A, A_size, h_A.data(), A_size, ACL_MEMCPY_HOST_TO_DEVICE));
    ACL_CHECK(aclrtMemcpy(d_B, B_size, h_B.data(), B_size, ACL_MEMCPY_HOST_TO_DEVICE));



    uint32_t BlockNum = 20;

    uint32_t* d_show_v, *h_show_v;
    ACL_CHECK(aclrtMalloc((void**)&d_show_v, BlockNum * show_size, ACL_MEM_MALLOC_HUGE_FIRST));
    ACL_CHECK(aclrtMallocHost((void**)(&h_show_v), BlockNum * show_size));
    


    kernel_do<__fp16, __fp16, __fp16><<<BlockNum, nullptr, stream>>>(
        d_A, 
        d_B, 
        d_C, 
        M,
        N,
        strideA,
        strideB,
        strideC,
        d_show_v
    );
    

    ACL_CHECK(aclrtSynchronizeStream(stream));
    ACL_CHECK(aclrtMemcpy(h_C, C_size, d_C, C_size, ACL_MEMCPY_DEVICE_TO_HOST));

    __fp16 *reference_C;
    ACL_CHECK(aclrtMallocHost((void**)&reference_C, C_size));
    ReadFile("./output/golden.bin", C_size, reference_C, C_size);




    
    ACL_CHECK(aclrtMemcpy(h_show_v, BlockNum * show_size, d_show_v, BlockNum * show_size, ACL_MEMCPY_DEVICE_TO_HOST));
    for(int b = 0; b < BlockNum; b++)
    {
        for(int i = 0; i < 10; i++)
        {
            std::cout << h_show_v[b * show_size / sizeof(uint32_t) + i] << " ";
        }
        std::cout << "\n";
    }

    ACL_CHECK(aclrtFreeHost(h_show_v));
    ACL_CHECK(aclrtFree(d_show_v));


    // int print_num = 10;//M * N;
    // for(int i = 0; i < print_num; i++)
    // {
    //     std::cout<< static_cast<float>(h_A[i]) <<" ";
    // }
    // std::cout<< "\n";
    // for(int i = 0; i < print_num; i++)
    // {
    //     std::cout<< static_cast<float>(h_B[i]) <<" ";
    // }
    // std::cout<< "\n";
    // for(int i = 0; i < print_num; i++)
    // {
    //     std::cout<< static_cast<float>(h_C[i]) <<" ";
    // }
    // std::cout<< "\n";











    int errorCount = 0;
    int printCount = 0;
    float abs_tolerance = 1e-5f;  // 绝对误差容限
    for (int i = 0; i < M * strideC; i++) {
        float actual = (float)(*((__fp16 *)h_C + i));
        float expected = (float)(*((__fp16 *)reference_C + i));
        //float expected = (float)(*((__fp16 *)h_A.data() + i));
        float abs_diff = std::abs(actual - expected);
        
        //std::cout << actual <<" " << expected << " \n";
        if (abs_diff > abs_tolerance) {
            errorCount++;
            
            if (errorCount > M * strideC * 0.001 && printCount < 32) {
                std::cout << "Element[" << i << "]: " << actual 
                        << " vs " << expected << " (diff: " << abs_diff << ")" << std::endl;
                printCount++;
            }
        }
    }

    std::cout << "M: " << M << ", N: " << N << " " <<  "errorCount: " <<  errorCount << std::endl;
    if (errorCount < M * N * 0.0001) {
        std::cout << "[Compare success]" << std::endl;
    } else {
        std::cout << "[Compare failed]" << std::endl;
    }


    ACL_CHECK(aclrtFreeHost(h_C));
    ACL_CHECK(aclrtFree(d_A));
    ACL_CHECK(aclrtFree(d_B));
    ACL_CHECK(aclrtFree(d_C));

    ACL_CHECK(aclrtDestroyStream(stream));
    ACL_CHECK(aclrtResetDevice(DEVICE));
    ACL_CHECK(aclFinalize());




}

int main (int argc, char** argv) {
    // Problem size defaults to 1024 x 1024; argv[1]/argv[2] override M/N.
    int M = (argc > 1) ? std::stoi(argv[1]) : 1024;
    int N = (argc > 2) ? std::stoi(argv[2]) : 1024;

    // All three row strides equal the column count (dense row-major layout).
    run(M, N, N, N, N);

    return 0;
}




/*

template<
    typename ElementA,
    typename ElementB,
    typename ElementC
>
__global__ __aicore__ void add(
    __gm__ ElementA * ptr_A,
    __gm__ ElementB * ptr_B,
    __gm__ ElementC * ptr_C,
    uint32_t M,
    uint32_t N,
    uint32_t strideA,
    uint32_t strideB,
    uint32_t strideC
)
{
    //AscendC::printf("%d\n", e_num_per_block);
    //AscendC::DumpTensor(Agm, 5, 100);

    
    AscendC::GlobalTensor<ElementA> Agm;
    AscendC::GlobalTensor<ElementB> Bgm;
    AscendC::GlobalTensor<ElementC> Cgm;

    uint32_t BlockNum = AscendC::GetBlockNum();
    uint32_t Block_Idx = AscendC::GetBlockIdx();
    uint32_t total_num = M * N;
    uint32_t elements_per_block = (total_num + BlockNum - 1) / BlockNum;
    uint32_t start_pos = Block_Idx * elements_per_block;
    // uint32_t actual_length = (start_pos + elements_per_block > total_num) 
    //                         ? (total_num - start_pos) 
    //                         : elements_per_block;
    

    //这里的第二个参数是元素个数，不是数据长度。
    //这里只是设置缓冲区大小，不是从ptr中读取数据，所以elements_per_block而不是actual_length
    Agm.SetGlobalBuffer((__gm__ ElementA *)ptr_A + start_pos, elements_per_block);
    Bgm.SetGlobalBuffer((__gm__ ElementB *)ptr_B + start_pos, elements_per_block);
    Cgm.SetGlobalBuffer((__gm__ ElementC *)ptr_C + start_pos, elements_per_block);

    uint32_t e_per_run = BUFFER / sizeof(ElementA);// 一次能处理多少个元素
    int loop_count = (elements_per_block + e_per_run - 1) / e_per_run;


    AscendC::LocalTensor<ElementA> aLocal;// = inQueueA.AllocTensor<ElementA>();
    AscendC::LocalTensor<ElementB> bLocal;// = inQueueA.AllocTensor<ElementB>();
    AscendC::LocalTensor<ElementC> cLocal;// = outQueueC.AllocTensor<ElementC>();
    aLocal.SetBufferLen(BUFFER);
    bLocal.SetBufferLen(BUFFER);
    cLocal.SetBufferLen(BUFFER);

    
    for(int i = 0; i < loop_count; i++)
    {
        DataCopy(aLocal, Agm[i * e_per_run], BUFFER);
        DataCopy(bLocal, Bgm[i * e_per_run], BUFFER);  
        Add(cLocal, aLocal, bLocal, e_per_run);
        DataCopy(Cgm[i * e_per_run], cLocal, BUFFER);
    }


}
*/