#include "../data_utils.h"
#include "acl/acl.h"
#include "../arch.h"
#include "kernel_operator.h"

#include <cmath>
#include <iomanip>
#include <iostream>
#include <string>
#include <type_traits>
#include <vector>

constexpr uint32_t show_num = 100;
constexpr uint32_t show_size = show_num * sizeof(uint32_t);
constexpr bool need_show = false;
constexpr bool need_compare = false;
constexpr uint32_t print_num = 10;
uint32_t SetThreadNum = 1;
constexpr uint32_t LOCTensorLen = 1 << 14;

static uint32_t const L1Size = 512 * 1024;
static uint32_t const L0ASize = 64 * 1024;
static uint32_t const L0BSize = 64 * 1024;
static uint32_t const L0CSize = 128 * 1024;



template<
    typename ElementType,
    uint32_t maxMPerBlock,
    uint32_t maxNPerBlock,
    uint32_t maxKPerBlock,
    //M是行数捏
    uint32_t l0cNum
>
class Matmul{
public:
    __aicore__ inline Matmul(
        AscendC::GlobalTensor<ElementType> gmTensorA,
        AscendC::GlobalTensor<ElementType> gmTensorB,
        AscendC::GlobalTensor<ElementType> gmTensorC,
        uint32_t singleBlockSize, 
        uint32_t MLoops, 
        uint32_t NLoops,
        uint32_t loopCount
    ):
    gmTensorA(gmTensorA),  gmTensorB(gmTensorB), gmTensorC(gmTensorC),
    singleBlockSize(singleBlockSize), MLoops(MLoops), NLoops(NLoops), loopCount(loopCount),
    ThreadIdx(AscendC::GetBlockIdx()), ThreadNum(AscendC::GetBlockNum())
    {
        
    };
    __aicore__ inline void Init()
    {
        AscendC::TPipe l0cPipe;
        l0cPipe.InitBuffer(l0cTQue, 1, L0CSize);
        AscendC::LocalTensor<uint8_t> L0C_begin = l0cTQue.AllocTensor<uint8_t>();
        l0cPipe.Destroy(); 

        for(int i = 0; i < l0cNum; i++)
        {
            L0CTensors[i] = L0C_begin[i * singleBlockSize * sizeof(ElementType)].template ReinterpretCast<ElementType>();
        }

        

    }
    __aicore__ inline void Process()
    {
        // fill_L0CTensors(32);
        // AscendC::DataCopy(gmTensorC, L0CTensors[0], L0CTensors[0].GetSize()); 确认了确实可以传出来。
        for(int idx = ThreadIdx; idx < loopCount; idx += ThreadNum)
        {
            uint32_t M_Idx = idx / NLoops; //第M_Idx行
            uint32_t N_Idx = idx % NLoops; //余数为列

            //计算，将结果写入C的localtensor。这步先跳过。先确保写回正常执行
            



            //将C的localtensor写回gmTensorC
            // AscendC::DataCopy(
            //     dstTensor, 
            //     srcTensor,
            //     AscendC::DataCopyCO12DstParams(
            //         nActual,
            //         mActual,
            //         dstStride,
            //         mRound,
            //         getQuantMode<ElementC, ComputeElementC>(),
            //         0,
            //         false,
            //         true
            //     )
            // );


        }
    }
private:
    AscendC::GlobalTensor<ElementType> gmTensorA;
    AscendC::GlobalTensor<ElementType> gmTensorB;
    AscendC::GlobalTensor<ElementType> gmTensorC;


    //用于接收结果的C的L0缓存VECOUT 
    // AscendC::TQue<AscendC::QuePosition::CO1,1>l0cTQue;
    AscendC::TQue<AscendC::QuePosition::VECOUT,1>l0cTQue; //先用这个测试L0->GM部分是否成功
    AscendC::LocalTensor<ElementType> L0CTensors[l0cNum];
    //用于测试L0CTensors是否被正确写入的东西。它可以用固定的一串东西填充L0CTensors
    __aicore__ inline void fill_L0CTensors(uint32_t fillLen) {
        //这个东西支持VECOUT。CO1是不支持的
        for(int i = 0; i < fillLen; i++) L0CTensors[0].SetValue(i, (ElementType)(i + 10));
    }



    uint32_t singleBlockSize;
    uint32_t MLoops;
    uint32_t NLoops;
    uint32_t loopCount;
    uint32_t ThreadIdx;
    uint32_t ThreadNum;

};


template<
    typename ElementType,
    uint32_t maxMPerBlock,
    uint32_t maxNPerBlock,
    uint32_t maxKPerBlock
>
__global__ __aicore__ void kernel_do(
    __gm__ ElementType * ptr_A,
    __gm__ ElementType * ptr_B,
    __gm__ ElementType * ptr_C,
    uint32_t M,
    uint32_t N,
    uint32_t K,
    uint32_t strideA,
    uint32_t strideB,
    uint32_t strideC,
    __gm__ uint32_t * ptr_show_v = nullptr
)
{
    uint32_t ThreadNum = AscendC::GetBlockNum();
    uint32_t ThreadIdx = AscendC::GetBlockIdx();

    AscendC::GlobalTensor<ElementType> gmTensorA;
    AscendC::GlobalTensor<ElementType> gmTensorB;
    AscendC::GlobalTensor<ElementType> gmTensorC;
    gmTensorA.SetGlobalBuffer((__gm__ ElementType *)ptr_A);
    gmTensorB.SetGlobalBuffer((__gm__ ElementType *)ptr_B);
    gmTensorC.SetGlobalBuffer((__gm__ ElementType *)ptr_C);

    const uint32_t singleBlockSize = maxMPerBlock * maxNPerBlock;
    uint32_t MLoops = (M + maxMPerBlock - 1) / maxMPerBlock;
    uint32_t NLoops = (N + maxNPerBlock - 1) / maxNPerBlock;
    uint32_t loopCount = MLoops * NLoops;




    Matmul<
        ElementType, maxMPerBlock, maxNPerBlock, maxKPerBlock, L0CSize / singleBlockSize / sizeof(ElementType)
    >op(gmTensorA, gmTensorB, gmTensorC, singleBlockSize, MLoops, NLoops, loopCount);
    op.Init();
    op.Process();


    // uint32_t row = ThreadIdx * show_num;
    // ptr_show_v[row + 0] = M;
    // ptr_show_v[row + 1] = N;
    // ptr_show_v[row + 2] = K;
    // ptr_show_v[row + 3] = strideA;
    // ptr_show_v[row + 4] = strideB;
    // ptr_show_v[row + 5] = strideC;


}



template<
    typename ElementType
>
void run(
    uint32_t M, uint32_t N, uint32_t K, 
    uint32_t strideA, uint32_t strideB, uint32_t strideC
){
    //A矩阵是M*K，所以A的stride是K，B是N*K，所以B的stride是K(这里为了方便，B是转置过的)。C是M*N，所以它的stride是N

    aclrtStream stream;
    const char *aclConfigPath = "../acl.json";
    ACL_CHECK(aclInit(nullptr));
    ACL_CHECK(aclrtSetDevice(DEVICE));
    ACL_CHECK(aclrtCreateStream(&stream));

    uint64_t A_size = M * strideA * sizeof(ElementType);
    uint64_t B_size = N * strideB * sizeof(ElementType);
    uint64_t C_size = M * strideC * sizeof(ElementType);


    ElementType *h_C;
    ACL_CHECK(aclrtMallocHost((void **)(&h_C), C_size));

    // input init
    std::vector<ElementType> h_A(A_size / sizeof(ElementType), (ElementType)1.0);
    std::vector<ElementType> h_B(B_size / sizeof(ElementType), (ElementType)1.0);

    ReadFile("./input/A.bin", A_size, h_A.data(), A_size);
    ReadFile("./input/B.bin", B_size, h_B.data(), B_size);

    ElementType *d_A, *d_B, *d_C;
    ACL_CHECK(aclrtMalloc((void **)&d_A, A_size, ACL_MEM_MALLOC_HUGE_FIRST));
    ACL_CHECK(aclrtMalloc((void **)&d_B, B_size, ACL_MEM_MALLOC_HUGE_FIRST));
    ACL_CHECK(aclrtMalloc((void **)&d_C, C_size, ACL_MEM_MALLOC_HUGE_FIRST));

    ACL_CHECK(aclrtMemcpy(d_A, A_size, h_A.data(), A_size, ACL_MEMCPY_HOST_TO_DEVICE));
    ACL_CHECK(aclrtMemcpy(d_B, B_size, h_B.data(), B_size, ACL_MEMCPY_HOST_TO_DEVICE));


    uint32_t *d_show, *h_show;
    if(need_show)
    {
        ACL_CHECK(aclrtMalloc((void **)&d_show, SetThreadNum * show_size, ACL_MEM_MALLOC_HUGE_FIRST));
        ACL_CHECK(aclrtMallocHost((void **)(&h_show), SetThreadNum * show_size));
    }

    kernel_do<ElementType, 256, 128, 256><<<SetThreadNum, nullptr, stream>>>(
        d_A, 
        d_B, 
        d_C, 
        M,
        N,
        K,
        strideA,
        strideB,
        strideC,    
        d_show
    );
    ACL_CHECK(aclrtSynchronizeStream(stream));


    ACL_CHECK(aclrtMemcpy(h_C, C_size, d_C, C_size, ACL_MEMCPY_DEVICE_TO_HOST));


    if(print_num != 0)
    {
        for(int i = 0; i < print_num; i++)
        {
            std::cout<< std::setw(2) << static_cast<float>(h_A[i]) <<" ";
        }
        std::cout<< "\n";
        for(int i = 0; i < print_num; i++)
        {
            std::cout<< std::setw(2) << static_cast<float>(h_B[i]) <<" ";
        }
        std::cout<< "\n";
        for(int i = 0; i < print_num; i++)
        {
            std::cout<< std::setw(2) << static_cast<float>(h_C[i]) <<" ";
        }
        std::cout<< "\n";

    }



    if(need_show)
    {
        ACL_CHECK(aclrtMemcpy(h_show, SetThreadNum * show_size, d_show, SetThreadNum * show_size, ACL_MEMCPY_DEVICE_TO_HOST));
        for(int i = 0; i < SetThreadNum; i++)
        {
            for(int j = 0; j < 10; j++)
            {
                std::cout<< h_show[i * show_num + j] << " ";
            }
            std::cout<<"\n";
        }
        ACL_CHECK(aclrtFreeHost(h_show));
        ACL_CHECK(aclrtFree(d_show));

            
    }




    if(need_compare)
    {
        ElementType *reference_C;
        ACL_CHECK(aclrtMallocHost((void **)(&reference_C), C_size));
        ReadFile("./output/golden.bin", C_size, reference_C, C_size);

        int errorCount = 0;
        int printCount = 0;
        for (int i = 0; i < M * strideC; i++) {
            float diff_res = std::abs((float)(*((ElementType *)h_C + i)) - (float)(*((ElementType *)reference_C + i)));
            if (diff_res > (float)K * 0.0001) {
                errorCount++;
            }
            if (errorCount > M * strideC * 0.0001 && diff_res > (float)K * 0.0001 && printCount < 32) {
                //std::cout << (float)(*((ElementType *)h_C + i)) << " " << (float)(*((ElementType *)reference_C + i)) << std::endl;
                std::cout << "Element[" << i << "]: " << (float)(*((ElementType *)h_C + i)) 
                            << " vs " << (float)(*((ElementType *)reference_C + i)) << " (diff: " << diff_res << ")" << std::endl;
                printCount++;
            }
        }
        std::cout << "M: " << M << ", N: " << N << ", K: " << K << " " <<  "errorCount: " <<  errorCount << std::endl;
        if (errorCount < M * N * 0.0001) {
            std::cout << "[Compare success]" << std::endl;
        } else {
            std::cout << "[Compare failed]" << std::endl;
        }

    }

    ACL_CHECK(aclrtFreeHost(h_C));
    ACL_CHECK(aclrtFree(d_A));
    ACL_CHECK(aclrtFree(d_B));
    ACL_CHECK(aclrtFree(d_C));

    ACL_CHECK(aclrtDestroyStream(stream));
    ACL_CHECK(aclrtResetDevice(DEVICE));
    ACL_CHECK(aclFinalize());



}

int main (int argc, char** argv) {
    int M = 1024;
    int N = 1024;
    int K = 1024;
    if (argc > 1) M = std::stoi(argv[1]);
    if (argc > 2) N = std::stoi(argv[2]);
    if (argc > 2) K = std::stoi(argv[3]);


    run<__fp16>(M, N, K, K, K, N);

    return 0;
}


