#include <array>
#include <cstdint>
#include <cstdio>
#include <iostream>
#include <sstream>
#include <string>

#include <acl/acl.h>

#include "tiling/platform/platform_ascendc.h"
#include "lightning_attention/block/block_mmad_multi_block.h"
#include "lightning_attention/device/device_matmul.h"
#include "include/matmul/policy/dispatch_policy.h"

// #include "lightning_attention/epilogue/block_epilogue_single_op_fp.h"
// #include "lightning_attention/epilogue/block_epilogue_single_op_bf.h"
// #include "lightning_attention/epilogue/block_epilogue_double_op.h"
// #include "lightning_attention/epilogue/block_epilogue_double_op_sv.h"

// #include "lightning_attention/epilogue/fusion/fusion_add.h"
// #include "lightning_attention/epilogue/fusion/fusion_mul.h"
// #include "lightning_attention/epilogue/fusion/fusion_muls.h"
#include "lightning_attention/epilogue/block_epilogue_multiply.h"
#include "lightning_attention/epilogue/block_epilogue_multiply_add.h"
#include "lightning_attention/epilogue/block_epilogue_multiply_scalar_add.h"

#include "../utils.h"

#include "lightning_attention/lightning_attention.h"

using namespace AscendC;
using namespace Atcos;
using namespace Atcos::Gemm;

// Type
using InputType = bfloat16_t;
using OutputType = float; // not type of O, it's the type of matrix multiplication output data type.

// MMAD
template<int32_t m, int32_t n, int32_t k>
using TileShape = AscendC::Shape<Int<m>, Int<n>, Int<k>>;

template<typename T, bool trans = false>
using MatmulTypeGm = AscendC::MatmulType<AscendC::TPosition::GM, CubeFormat::ND, T, trans>;

using IntraBmm1 = Block::BlockMmad<
    MatmulMultiBlock<>,
    TileShape<256, 256, 128>,
    TileShape<128, 128, 128>,
    MatmulTypeGm<InputType>,
    MatmulTypeGm<InputType, true>,
    MatmulTypeGm<OutputType>
>;

using InterBmm = Block::BlockMmad<
    MatmulMultiBlock<>,
    TileShape<256, 128, 128>,
    TileShape<128, 128, 128>,
    MatmulTypeGm<InputType>,
    MatmulTypeGm<InputType>,
    MatmulTypeGm<OutputType>
>;

using IntraBmm2 = Block::BlockMmad<
    MatmulMultiBlock<>,
    TileShape<256, 128, 256>,
    TileShape<128, 128, 128>,
    MatmulTypeGm<InputType>,
    MatmulTypeGm<InputType>,
    MatmulTypeGm<OutputType>
>;

using UpdateBmm = Block::BlockMmad<
    MatmulMultiBlock<>,
    TileShape<256, 128, 256>,
    TileShape<128, 128, 128>,
    MatmulTypeGm<InputType, true>,
    MatmulTypeGm<InputType>,
    MatmulTypeGm<OutputType>
>;

// Epilogue
// using IntraBmm1Epilogue = BlockEpilogueSingleOp<
//     OutputType, InputType,
//     FusionMul<OutputType, InputType>
// >;

// using IntraBmm2Epilogue = BlockEpilogueDoubleOp<
//     OutputType, InputType,
//     FusionMul<OutputType, OutputType>,
//     FusionAdd<OutputType, InputType>
// >;

// using UpdateBmmEpilogue = BlockEpilogueDoubleOpSV<
//     OutputType, OutputType,
//     FusionMuls<OutputType>,
//     FusionAdd<OutputType, OutputType>
// >;

using mulEpilogue = BlockEpilogueMultiply<InputType>;
using mulAddEpilogue = BlockEpilogueMultiplyAdd<InputType>;
using mulScalarAddEpilogue = BlockEpilogueMultiplyScalarAdd<float>;

// Kernel
using MatmulKernel = LightningAttention<
    LightningAttentionProblemShape,
    IntraBmm1, mulEpilogue,
    InterBmm,
    IntraBmm2, mulAddEpilogue,
    UpdateBmm, mulScalarAddEpilogue
>;

using Arguments = MatmulKernel::Arguments;
using DeviceMatmul = Device::DeviceMatmul<MatmulKernel, KERNEL_TYPE_MIX_AIC_1_2>;

void MatmulOp(
    uint8_t* q,
    uint8_t* k,
    uint8_t* v,
    uint8_t* s,
    uint8_t* w,
    uint8_t* kv,
    uint8_t* o,
    int32_t batchSize,
    int32_t numHeads, 
    int32_t seqLen,
    int32_t headDim,
    int32_t blockSize,
    void* stream = nullptr
) {
    // Init args 
    Arguments args = {
        { batchSize, numHeads, seqLen, headDim, blockSize },
        { q, k, v, s, w, kv, o }
    };

    DeviceMatmul mm;

    size_t workspaceSize = DeviceMatmul::GetWorkspaceSize(args);

    ACT_CHECK(mm.CanImplement(args));
    mm.InitParams(args, w);
    mm();
}

void TestAclInit(aclrtContext &context, aclrtStream &stream, int32_t &deviceId)
{
    CHECK_ACL(aclInit(nullptr));
    CHECK_ACL(aclrtSetDevice(deviceId));
    CHECK_ACL(aclrtCreateContext(&context, deviceId));
    CHECK_ACL(aclrtCreateStream(&stream));
}

void TestAclDeInit(aclrtContext &context, aclrtStream &stream, int32_t &deviceId)
{
    CHECK_ACL(aclrtDestroyStream(stream));
    CHECK_ACL(aclrtDestroyContext(context));
    CHECK_ACL(aclrtResetDevice(deviceId));
    CHECK_ACL(aclFinalize());
}


void TestMatmul(
    int32_t batchSize,
    int32_t numHeads, 
    int32_t seqLen,
    int32_t headDim,
    int32_t blockSize
) {
    aclrtContext context;
    aclrtStream stream = nullptr;
    int32_t deviceId = 0;
    TestAclInit(context, stream, deviceId);

    size_t qSize = batchSize * numHeads * seqLen * headDim * sizeof(uint16_t);
    size_t kSize = batchSize * numHeads * seqLen * headDim * sizeof(uint16_t);
    size_t vSize = batchSize * numHeads * seqLen * headDim * sizeof(uint16_t);
    size_t sSize = numHeads * 1 * 1 * sizeof(float);
    size_t kvSize = batchSize * numHeads * headDim * headDim * sizeof(float);
    size_t oSize = batchSize * numHeads * seqLen * headDim * sizeof(uint16_t);
    size_t wSize = batchSize * numHeads * (
        sizeof(float) * (blockSize * blockSize * 2+ blockSize * headDim * 4 + headDim * headDim * 1) + 
        sizeof(uint16_t) * (blockSize * blockSize + blockSize * headDim + headDim * headDim)
    );

    uint8_t *qHost = nullptr;
    uint8_t *qDevice = nullptr;
    CHECK_ACL(aclrtMallocHost((void **)(&qHost), qSize));
    CHECK_ACL(aclrtMalloc((void **)&qDevice, qSize, ACL_MEM_MALLOC_HUGE_FIRST));
    ReadFile("./input/q.bin", qSize, qHost, qSize);
    CHECK_ACL(aclrtMemcpy(qDevice, qSize, qHost, qSize, ACL_MEMCPY_HOST_TO_DEVICE));

    uint8_t *kHost = nullptr;
    uint8_t *kDevice = nullptr;
    CHECK_ACL(aclrtMallocHost((void **)(&kHost), kSize));
    CHECK_ACL(aclrtMalloc((void **)&kDevice, kSize, ACL_MEM_MALLOC_HUGE_FIRST));
    ReadFile("./input/k.bin", kSize, kHost, kSize);
    CHECK_ACL(aclrtMemcpy(kDevice, kSize, kHost, kSize, ACL_MEMCPY_HOST_TO_DEVICE));

    uint8_t *vHost = nullptr;
    uint8_t *vDevice = nullptr;
    CHECK_ACL(aclrtMallocHost((void **)(&vHost), vSize));
    CHECK_ACL(aclrtMalloc((void **)&vDevice, vSize, ACL_MEM_MALLOC_HUGE_FIRST));
    ReadFile("./input/v.bin", vSize, vHost, vSize);
    CHECK_ACL(aclrtMemcpy(vDevice, vSize, vHost, vSize, ACL_MEMCPY_HOST_TO_DEVICE));

    uint8_t *sHost = nullptr;
    uint8_t *sDevice = nullptr;
    CHECK_ACL(aclrtMallocHost((void **)(&sHost), sSize));
    CHECK_ACL(aclrtMalloc((void **)&sDevice, sSize, ACL_MEM_MALLOC_HUGE_FIRST));
    ReadFile("./input/s.bin", sSize, sHost, sSize);
    CHECK_ACL(aclrtMemcpy(sDevice, sSize, sHost, sSize, ACL_MEMCPY_HOST_TO_DEVICE));

    uint8_t *kvHost = nullptr;
    uint8_t *kvDevice = nullptr;
    CHECK_ACL(aclrtMallocHost((void **)(&kvHost), kvSize));
    CHECK_ACL(aclrtMalloc((void **)&kvDevice, kvSize, ACL_MEM_MALLOC_HUGE_FIRST));
    ReadFile("./input/kv.bin", kvSize, kvHost, kvSize);
    CHECK_ACL(aclrtMemcpy(kvDevice, kvSize, kvHost, kvSize, ACL_MEMCPY_HOST_TO_DEVICE));

    uint8_t *oHost = nullptr;
    uint8_t *oDevice = nullptr;
    CHECK_ACL(aclrtMallocHost((void **)(&oHost), oSize));
    CHECK_ACL(aclrtMalloc((void **)&oDevice, oSize, ACL_MEM_MALLOC_HUGE_FIRST));

    uint8_t *wHost = nullptr;
    uint8_t *wDevice = nullptr;
    CHECK_ACL(aclrtMallocHost((void **)(&wHost), wSize));
    CHECK_ACL(aclrtMalloc((void **)&wDevice, wSize, ACL_MEM_MALLOC_HUGE_FIRST));


    MatmulOp(
        qDevice,
        kDevice,
        vDevice,
        sDevice,
        wDevice,
        kvDevice,
        oDevice,
        batchSize,
        numHeads, 
        seqLen,
        headDim,
        blockSize,
        stream
    );
    CHECK_ACL(aclrtSynchronizeStream(stream));

    CHECK_ACL(aclrtMemcpy(oHost, oSize, oDevice, oSize, ACL_MEMCPY_DEVICE_TO_HOST));
    WriteFile("./output/output_o.bin", oHost, oSize);

    CHECK_ACL(aclrtMemcpy(kvHost, kvSize, kvDevice, kvSize, ACL_MEMCPY_DEVICE_TO_HOST));
    WriteFile("./output/output_kv.bin", kvHost, kvSize);

    CHECK_ACL(aclrtMemcpy(wHost, wSize, wDevice, wSize, ACL_MEMCPY_DEVICE_TO_HOST));
    WriteFile("./output/output_w.bin", wHost, wSize);

    CHECK_ACL(aclrtFree(qDevice));
    CHECK_ACL(aclrtFreeHost(qHost));
    CHECK_ACL(aclrtFree(kDevice));
    CHECK_ACL(aclrtFreeHost(kHost));
    CHECK_ACL(aclrtFree(vDevice));
    CHECK_ACL(aclrtFreeHost(vHost));
    CHECK_ACL(aclrtFree(sDevice));
    CHECK_ACL(aclrtFreeHost(sHost));
    CHECK_ACL(aclrtFree(kvDevice));
    CHECK_ACL(aclrtFreeHost(kvHost));
    CHECK_ACL(aclrtFree(oDevice));
    CHECK_ACL(aclrtFreeHost(oHost));
    CHECK_ACL(aclrtFree(wDevice));
    CHECK_ACL(aclrtFreeHost(wHost));

    TestAclDeInit(context, stream, deviceId);
}

std::array<int32_t, 5> ParseInput(int32_t argc, const char* argv[]) {
    if (argc != 6) {
        printf("Shape not specified, Try to use shape [1, 1, 256, 128]\n");
        return {1, 1, 256, 128, 256};
    }

    try {
        int32_t val0 = std::stoi(argv[1]);
        int32_t val1 = std::stoi(argv[2]);
        int32_t val2 = std::stoi(argv[3]);
        int32_t val3 = std::stoi(argv[4]);
        int32_t val4 = std::stoi(argv[5]);

        printf("Try to use shape [%d, %d, %d, %d, %d]\n", val0, val1, val2, val3, val4);

        return { val0, val1, val2, val3, val4 };
    } catch (std::exception e) {
        printf("Wrong Shape. Try to use shape[1, 1, 256, 128]\n");
        return {1, 1, 256, 128, 256};
    }
}

int32_t main(int32_t argc, const char* args[]) {
    for (int32_t i = 0;i < argc;i++) {
        printf("arg[%d] = %s\n", i, args[i]);
    }

    const auto hps = ParseInput(argc, args);
    TestMatmul(
        hps[0],
        hps[1],
        hps[2],
        hps[3],
        hps[4]
    );
    return 0;
}