#include "../data_utils.h"
#include "acl/acl.h"
#include "../arch.h"
#include "kernel_operator.h"
#include <type_traits>
#include<iostream>
#include <iomanip>

// Number of uint32 debug slots each core gets in the "show" buffer, and its byte size.
constexpr uint32_t show_num = 100;
constexpr uint32_t show_size = show_num * sizeof(uint32_t);
constexpr int32_t LOCTensorLen = 1 << 14; // Assumed maximum length of one LocalTensor, in bytes.


// Host-side switches: dump the per-core debug buffer / compare against golden output.
constexpr bool need_show = true;
constexpr bool need_compare = true;
// Number of AI cores to launch. NOTE(review): mutable global, not constexpr — presumably
// intentional so it could be made configurable; confirm.
uint32_t ThreadNum = 8;



// Element-wise addition of two flat GM (global memory) buffers: C = A + B.
// The assigned range of `total_elements` is processed in chunks of
// `maxElementNum` elements through a depth-1 VECIN/VECOUT queue pipeline
// (CopyIn -> Compute -> CopyOut per chunk).
//
// NOTE(review): CopyIn and CopyOut only do work when `need_pad` is true, but
// Compute() unconditionally calls DeQue. If constructed with need_pad == false,
// Compute would wait on an empty queue — presumably a hang. Confirm callers
// always pass need_pad = true (kernel_do currently does).
template<
    typename ElementType
>
class MatAdd
{
public:
    // total_elements: number of elements this instance is responsible for.
    // need_pad: selects the DataCopyPad path for non-32B-aligned transfers.
    // NOTE(review): members initialize in declaration order (total_elements,
    // maxElementNum, ThreadIdx, need_pad), not in list order; harmless here
    // since no initializer depends on another member, but compilers will warn.
    __aicore__ inline MatAdd(uint32_t total_elements, bool need_pad = 0):
    total_elements(total_elements),
    need_pad(need_pad),
    maxElementNum(1024), // Original author's note: if this is not an exact fit, padding might overflow the tensor, so keep it small for now.
    ThreadIdx(AscendC::GetBlockIdx())
    {};
    // Binds the three GM pointers and allocates the three queue buffers
    // (LOCTensorLen bytes each, depth 1).
    __aicore__ inline void Init(__gm__ ElementType * ptr_A, __gm__ ElementType * ptr_B, __gm__ ElementType * ptr_C)
    {
        Agm.SetGlobalBuffer((__gm__ ElementType *)ptr_A);
        Bgm.SetGlobalBuffer((__gm__ ElementType *)ptr_B);
        Cgm.SetGlobalBuffer((__gm__ ElementType *)ptr_C);
        
        pipe.InitBuffer(inQueueA, 1, LOCTensorLen);
        pipe.InitBuffer(inQueueB, 1, LOCTensorLen);
        pipe.InitBuffer(outQueueC, 1, LOCTensorLen);
    }
    // Drives the full range: ceil(total_elements / maxElementNum) chunks,
    // each pushed through the three pipeline stages.
    __aicore__ inline void Process()
    {
        uint32_t loopCount = (total_elements + maxElementNum - 1) / maxElementNum;
        for (uint32_t i = 0; i < loopCount; i++) {
            CopyIn(i); 
            Compute(i);
            CopyOut(i);
        }
    }


private:
    // Stage 1: copy chunk `progress` of A and B from GM into local tensors
    // and enqueue them for Compute. Uses DataCopyPad so the (possibly short,
    // unaligned) tail chunk is handled.
    __aicore__ inline void CopyIn(uint32_t progress)
    {
        if(need_pad)
        {
            uint32_t start_pos = progress * maxElementNum;
            // Tail chunk may be shorter than maxElementNum.
            // (current_elements is unsigned, so the <= 0 guard is effectively == 0.)
            uint32_t current_elements = (maxElementNum > total_elements - start_pos ? total_elements - start_pos : maxElementNum);
            if(current_elements <= 0) return;

            AscendC::LocalTensor<ElementType> aLocal = inQueueA.AllocTensor<ElementType>();
            AscendC::LocalTensor<ElementType> bLocal = inQueueB.AllocTensor<ElementType>();
            // ExtParams: 1 block of current_elements*sizeof(ElementType) bytes, no strides.
            // NOTE(review): PadExtParams args are presumably (isPad, leftPad, rightPad,
            // padValue); rightPadding = 2 with isPad = 0 looks inconsistent — verify
            // against the AscendC DataCopyPad documentation.
            AscendC::DataCopyPad(
                aLocal, 
                Agm[start_pos], 
                AscendC::DataCopyExtParams(
                    1, current_elements * sizeof(ElementType), 0, 0, 0
                ),
                AscendC::DataCopyPadExtParams<ElementType>(
                    0, 0, 2, 0
                )
            );
            AscendC::DataCopyPad(
                bLocal, 
                Bgm[start_pos],
                AscendC::DataCopyExtParams(
                    1, current_elements * sizeof(ElementType), 0, 0, 0
                ),
                AscendC::DataCopyPadExtParams<ElementType>(
                    0, 0, 2, 0
                )
            );
            inQueueA.EnQue<ElementType>(aLocal);
            inQueueB.EnQue<ElementType>(bLocal);
        }
    }
    // Stage 2: dequeue the two input tensors, run the vector Add over the
    // chunk's element count, and enqueue the result for CopyOut.
    __aicore__ inline void Compute(uint32_t progress)
    {
        uint32_t start_pos = progress * maxElementNum;
        uint32_t current_elements = (maxElementNum > total_elements - start_pos ? total_elements - start_pos : maxElementNum);
        if(current_elements <= 0) return;

        AscendC::LocalTensor<ElementType> aLocal = inQueueA.DeQue<ElementType>();
        AscendC::LocalTensor<ElementType> bLocal = inQueueB.DeQue<ElementType>();
        AscendC::LocalTensor<ElementType> cLocal = outQueueC.AllocTensor<ElementType>();
        AscendC::Add(cLocal, aLocal, bLocal, current_elements);
        outQueueC.EnQue<ElementType>(cLocal);
        // Release the inputs back to their queues; the queue/pipe framework
        // sequences this after the Add has consumed them.
        inQueueA.FreeTensor<ElementType>(aLocal);
        inQueueB.FreeTensor<ElementType>(bLocal);
    }
    // Stage 3: dequeue the result tensor and copy the chunk back to GM,
    // again via DataCopyPad for the unaligned tail.
    __aicore__ inline void CopyOut(uint32_t progress)
    {


        if(need_pad)
        {
            uint32_t start_pos = progress * maxElementNum;
            uint32_t current_elements = (maxElementNum > total_elements - start_pos ? total_elements - start_pos : maxElementNum);
            if(current_elements <= 0) return;

            AscendC::LocalTensor<ElementType> cLocal = outQueueC.DeQue<ElementType>();
            AscendC::DataCopyPad(
                Cgm[start_pos], 
                cLocal, 
                AscendC::DataCopyExtParams(
                    1, current_elements * sizeof(ElementType), 0, 0, 0
                )
            ); 
            outQueueC.FreeTensor<ElementType>(cLocal);
        }

    }



    uint32_t total_elements;  // element count assigned to this core
    uint32_t maxElementNum;   // chunk size per pipeline pass
    uint32_t ThreadIdx;       // this core's block index (set but currently unused)
    bool need_pad;            // true -> use the padded-copy path

    AscendC::TPipe pipe;
    AscendC::TQue<AscendC::TPosition::VECIN, 1> inQueueA, inQueueB;
    AscendC::TQue<AscendC::TPosition::VECOUT, 1> outQueueC;
    AscendC::GlobalTensor<ElementType> Agm;
    AscendC::GlobalTensor<ElementType> Bgm;
    AscendC::GlobalTensor<ElementType> Cgm;


};


// Device entry point: one MatAdd worker per AI core.
//
// The flat M*N element range is split as evenly as possible across the
// GetBlockNum() cores: the first (total % cores) cores take one extra
// element. Each core then runs the chunked add pipeline over its slice.
//
// ptr_show_v: debug scratch buffer; each core writes its split parameters
// into its own row of `show_num` uint32 slots for host-side inspection.
template<
    typename ElementType,
    uint32_t maxLenPerBlock
>
__global__ __aicore__ void kernel_do(
    __gm__ ElementType * ptr_A,
    __gm__ ElementType * ptr_B,
    __gm__ ElementType * ptr_C,
    uint32_t M,
    uint32_t N,
    __gm__ uint32_t * ptr_show_v
)
{
    uint32_t ThreadNum = AscendC::GetBlockNum();
    uint32_t ThreadIdx = AscendC::GetBlockIdx();

    bool need_pad = 1;
    if(need_pad)
    {
        // Ignore alignment; rely on padded DataCopy to handle the tail chunk.
        uint32_t total_elements = M * N;
        uint32_t elements_per_thread_base = total_elements / ThreadNum;
        // BUGFIX: the leftover of the even split is total_elements % ThreadNum.
        // The previous code used `total_elements - elements_per_thread_base`,
        // which is almost always larger than every ThreadIdx, so EVERY core
        // took base+1 elements and the trailing cores ran past the end of
        // the A/B/C buffers.
        uint32_t remainder = total_elements % ThreadNum;
        // Cores [0, remainder) process one extra element each.
        uint32_t elements_per_thread = (
            remainder > ThreadIdx ? 
            elements_per_thread_base + 1 : elements_per_thread_base
        );
        // Start offset = base chunks before this core plus one extra element
        // for each earlier oversized core, i.e. min(ThreadIdx, remainder).
        uint32_t begin_pos = elements_per_thread_base * ThreadIdx + (remainder >= ThreadIdx ? ThreadIdx : remainder);


        MatAdd<ElementType> op(elements_per_thread, need_pad);
        op.Init((__gm__ ElementType *)ptr_A + begin_pos, (__gm__ ElementType *)ptr_B + begin_pos, (__gm__ ElementType *)ptr_C + begin_pos);
        op.Process();

        // Publish this core's split parameters to its debug row.
        uint32_t row = ThreadIdx * show_num;
        ptr_show_v[row + 0] = total_elements;
        ptr_show_v[row + 1] = elements_per_thread_base;
        ptr_show_v[row + 2] = elements_per_thread;
        ptr_show_v[row + 3] = begin_pos;
    }



}


template<
    typename ElementType
>
void run(
    uint32_t M, uint32_t N
)
{
    aclrtStream stream;
    const char *aclConfigPath = "../acl.json";
    ACL_CHECK(aclInit(nullptr));
    ACL_CHECK(aclrtSetDevice(DEVICE));
    ACL_CHECK(aclrtCreateStream(&stream));

    uint64_t A_size = M * N * sizeof(ElementType);
    uint64_t B_size = N * N * sizeof(ElementType);
    uint64_t C_size = M * N * sizeof(ElementType);

    ElementType *h_C;
    ACL_CHECK(aclrtMallocHost((void **)(&h_C), C_size));

    // input init
    std::vector<ElementType> h_A(A_size / sizeof(ElementType), (ElementType)1.0);
    std::vector<ElementType> h_B(B_size / sizeof(ElementType), (ElementType)1.0);

    ReadFile("./input/A.bin", A_size, h_A.data(), A_size);
    ReadFile("./input/B.bin", B_size, h_B.data(), B_size);

    ElementType *d_A, *d_B, *d_C;
    ACL_CHECK(aclrtMalloc((void **)&d_A, A_size, ACL_MEM_MALLOC_HUGE_FIRST));
    ACL_CHECK(aclrtMalloc((void **)&d_B, B_size, ACL_MEM_MALLOC_HUGE_FIRST));
    ACL_CHECK(aclrtMalloc((void **)&d_C, C_size, ACL_MEM_MALLOC_HUGE_FIRST));

    ACL_CHECK(aclrtMemcpy(d_A, A_size, h_A.data(), A_size, ACL_MEMCPY_HOST_TO_DEVICE));
    ACL_CHECK(aclrtMemcpy(d_B, B_size, h_B.data(), B_size, ACL_MEMCPY_HOST_TO_DEVICE));

    uint32_t *d_show, *h_show;
    if(need_show)
    {
        ACL_CHECK(aclrtMalloc((void **)&d_show, ThreadNum * show_size, ACL_MEM_MALLOC_HUGE_FIRST));
        ACL_CHECK(aclrtMallocHost((void **)(&h_show), ThreadNum * show_size));
    }


    
    kernel_do<ElementType, (1 << 14)><<<ThreadNum, nullptr, stream>>>(
        d_A, 
        d_B, 
        d_C, 
        M,
        N,  
        d_show
    );


    ACL_CHECK(aclrtSynchronizeStream(stream));

    if(need_show)
    {
        ACL_CHECK(aclrtMemcpy(h_show, ThreadNum * show_size, d_show, ThreadNum * show_size, ACL_MEMCPY_DEVICE_TO_HOST));
        for(int i = 0; i < ThreadNum; i++)
        {
            for(int j = 0; j < 10; j++)
            {
                std::cout<< h_show[i * show_num + j] << " ";
            }
            std::cout<<"\n";
        }
        
        ACL_CHECK(aclrtFreeHost(h_show));
        ACL_CHECK(aclrtFree(d_show));
    }


    ACL_CHECK(aclrtMemcpy(h_C, C_size, d_C, C_size, ACL_MEMCPY_DEVICE_TO_HOST));

    __fp16 *reference_C;
    ACL_CHECK(aclrtMallocHost((void**)&reference_C, C_size));
    ReadFile("./output/golden.bin", C_size, reference_C, C_size);



    
    if(need_compare)
    {
        int errorCount = 0;
        int printCount = 0;
        float abs_tolerance = 1e-5f;  // 绝对误差容限
        for (int i = 0; i < M * N; i++) {
            float actual = (float)(*((__fp16 *)h_C + i));
            float expected = (float)(*((__fp16 *)reference_C + i));
            //float expected = (float)(*((__fp16 *)h_A.data() + i));
            float abs_diff = std::abs(actual - expected);
            
            //std::cout << actual <<" " << expected << " \n";
            if (abs_diff > abs_tolerance) {
                errorCount++;
                
                if (errorCount > M * N * 0.001 && printCount < 32) {
                    std::cout << "Element[" << i << "]: " << actual 
                            << " vs " << expected << " (diff: " << abs_diff << ")" << std::endl;
                    printCount++;
                }
            }
        }

        std::cout << "M: " << M << ", N: " << N << " " <<  "errorCount: " <<  errorCount << std::endl;
        if (errorCount < M * N * 0.0001) {
            std::cout << "[Compare success]" << std::endl;
        } else {
            std::cout << "[Compare failed]" << std::endl;
        }
    }
    


    ACL_CHECK(aclrtFreeHost(h_C));
    ACL_CHECK(aclrtFree(d_A));
    ACL_CHECK(aclrtFree(d_B));
    ACL_CHECK(aclrtFree(d_C));

    ACL_CHECK(aclrtDestroyStream(stream));
    ACL_CHECK(aclrtResetDevice(DEVICE));
    ACL_CHECK(aclFinalize());

}


/**
 * Entry point: parses optional matrix dimensions M and N from the command
 * line (defaults 1024 x 1024) and runs the fp16 element-wise add test.
 */
int main (int argc, char** argv) {
    int rows = (argc > 1) ? std::stoi(argv[1]) : 1024;
    int cols = (argc > 2) ? std::stoi(argv[2]) : 1024;

    run<__fp16>(rows, cols);
}