#include <cstdlib>
#include <iostream>
#include <vector>

#include "acl/acl.h"
#include "acl/acl_base.h"
// #include "kernel_operator.h"
#include "tiling/platform/platform_ascendc.h"
#include "aclnnop/aclnn_matmul.h"

#define CHECK_ACL(x)                                                                        \
    do                                                                                      \
    {                                                                                       \
        aclError __ret = x;                                                                 \
        if (__ret != ACL_ERROR_NONE)                                                        \
        {                                                                                   \
            std::cerr << __FILE__ << ":" << __LINE__ << " aclError:" << __ret << std::endl; \
        }                                                                                   \
    } while (0);

#define CHECK_RET(cond, return_expr) \
    do                               \
    {                                \
        if (!(cond))                 \
        {                            \
            return_expr;             \
        }                            \
    } while (0)

#define LOG_PRINT(message, ...)         \
    do                                  \
    {                                   \
        printf(message, ##__VA_ARGS__); \
    } while (0)

int64_t GetShapeSize(const std::vector<int64_t> &shape)
{
    int64_t shapeSize = 1;
    for (auto i : shape)
    {
        shapeSize *= i;
    }
    return shapeSize;
}

int Init(int32_t deviceId, aclrtStream *stream)
{
    // 固定写法，AscendCL初始化
    auto ret = aclInit(nullptr);
    CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclInit failed. ERROR: %d\n", ret); return ret);
    ret = aclrtSetDevice(deviceId);
    CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtSetDevice failed. ERROR: %d\n", ret); return ret);
    ret = aclrtCreateStream(stream);
    CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtCreateStream failed. ERROR: %d\n", ret);
              return ret);
    return 0;
}

template <typename T>
int CreateAclTensor(const std::vector<T> &hostData, const std::vector<int64_t> &shape,
                    void **deviceAddr, aclDataType dataType, aclTensor **tensor)
{
    auto size = GetShapeSize(shape) * sizeof(T);
    // 调用aclrtMalloc申请device侧内存
    auto ret = aclrtMalloc(deviceAddr, size, ACL_MEM_MALLOC_HUGE_FIRST);
    CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtMalloc failed. ERROR: %d\n", ret); return ret);
    // 调用aclrtMemcpy将host侧数据拷贝到device侧内存上
    ret = aclrtMemcpy(*deviceAddr, size, hostData.data(), size, ACL_MEMCPY_HOST_TO_DEVICE);
    CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtMemcpy failed. ERROR: %d\n", ret); return ret);

    // 计算连续tensor的strides
    std::vector<int64_t> strides(shape.size(), 1);
    for (int64_t i = shape.size() - 2; i >= 0; i--)
    {
        strides[i] = shape[i + 1] * strides[i + 1];
    }

    // 调用aclCreateTensor接口创建aclTensor
    *tensor = aclCreateTensor(shape.data(), shape.size(), dataType, strides.data(), 0,
                              aclFormat::ACL_FORMAT_ND, shape.data(), shape.size(), *deviceAddr);
    return 0;
}

int main(int argc, char *argv[])
{
    if (argc != 4)
    {
        printf("please input m n k!\n");
        return -1;
    }

    // auto ascendcPlatform = platform_ascendc::PlatformAscendCManager::GetInstance("Ascend910B2");
    // size_t systemWorkspaceSize = static_cast<size_t>(ascendcPlatform->GetLibApiWorkSpaceSize());
    // uint64_t l2_bw, hbm_bw;
    // auto aicNum = ascendcPlatform->GetCoreNumAic();
    // auto aivNum = ascendcPlatform->GetCoreNumAiv();
    // std::cout << "aicNum is :" << aicNum << std::endl;
    // std::cout << "aivNum is :" << aivNum << std::endl;
    // ascendcPlatform->GetCoreMemBw(platform_ascendc::CoreMemType::L2, l2_bw);
    // ascendcPlatform->GetCoreMemBw(platform_ascendc::CoreMemType::HBM, hbm_bw);
    // auto socVersion = ascendcPlatform->GetSocVersion();
    // std::cout << "***************SoC Version: " << static_cast<int>(socVersion) << "*************\n"
    //           << std::endl;
    // uint64_t l0a, l0b, l0c, l1, l2, ub, hbm;
    // ascendcPlatform->GetCoreMemSize(platform_ascendc::CoreMemType::L0_A, l0a);
    // ascendcPlatform->GetCoreMemSize(platform_ascendc::CoreMemType::L0_B, l0b);
    // ascendcPlatform->GetCoreMemSize(platform_ascendc::CoreMemType::L0_C, l0c);
    // ascendcPlatform->GetCoreMemSize(platform_ascendc::CoreMemType::L1, l1);
    // ascendcPlatform->GetCoreMemSize(platform_ascendc::CoreMemType::L2, l2);
    // ascendcPlatform->GetCoreMemSize(platform_ascendc::CoreMemType::UB, ub);
    // ascendcPlatform->GetCoreMemSize(platform_ascendc::CoreMemType::HBM, hbm);
    // printf("***************sysworkspacesize is : %ld*************\n", systemWorkspaceSize);
    // printf("***************L2bw is : %ld Byte/cycle*************\n", l2_bw);
    // printf("***************HBMbw is : %ld Byte/cycle*************\n", hbm_bw);
    // printf("***************L0_A is : %ld Byte*************\n", l0a);
    // printf("***************L0_B is : %ld Byte*************\n", l0b);
    // printf("***************L0_C is : %ld Byte*************\n", l0c);
    // printf("***************L1 is : %ld Byte*************\n", l1);
    // printf("***************L2 is : %ld Byte*************\n", l2);
    // printf("***************UB is : %ld Byte*************\n", ub);
    // printf("***************HBM is : %ld Byte*************\n", hbm);

    int32_t M, N, K;
    M = atoi(argv[1]);
    N = atoi(argv[2]);
    K = atoi(argv[3]);
    printf("the mnk is %d %d %d\n", M, N, K);
    // 1. （固定写法）device/stream初始化，参考AscendCL对外接口列表
    // 根据自己的实际device填写deviceId
    int32_t deviceId = 5;
    aclrtStream stream;
    auto ret = Init(deviceId, &stream);
    CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("Init acl failed. ERROR: %d\n", ret); return ret);

    // 2. 构造输入与输出，需要根据API的接口自定义构造
    std::vector<int64_t> selfShape = {M, K};
    std::vector<int64_t> mat2Shape = {K, N};
    std::vector<int64_t> outShape = {M, N};
    void *selfDeviceAddr = nullptr;
    void *mat2DeviceAddr = nullptr;
    void *outDeviceAddr = nullptr;
    aclTensor *self = nullptr;
    aclTensor *mat2 = nullptr;
    aclTensor *out = nullptr;

    // 1. 先定义一个标准的 float 值
    float host_val = 1.0f;

    // 2. 使用 aclFloatToFloat16 将 float 值转换为 aclFloat16 的比特表示
    aclFloat16 fp16_val = aclFloatToFloat16(host_val);

    std::vector<aclFloat16> selfHostData(M * K, fp16_val);
    std::vector<aclFloat16> mat2HostData(K * N, fp16_val);
    std::vector<aclFloat16> outHostData(M * N, 0);
    std::cout << "this is selfhostdata " << aclFloat16ToFloat(selfHostData[1]) << std::endl;
    // 创建self aclTensor
    ret = CreateAclTensor(selfHostData, selfShape, &selfDeviceAddr, aclDataType::ACL_FLOAT16, &self);
    CHECK_RET(ret == ACL_SUCCESS, return ret);

    // ===================================================================
    // ============ START: 检查 Device 数据的调试代码 ============
    // ===================================================================
    std::cout << "\n[DEBUG] Verifying data on Device after Host-to-Device copy..." << std::endl;

    // 1. 准备一块新的 Host 内存，用于“回读”数据，初始为0
    std::vector<aclFloat16> readBackData(M * K, 6);

    // 2. 将数据从 Device 拷贝回这块新的 Host 内存
    //    源地址: selfDeviceAddr (Device上的地址)
    //    目标地址: readBackData.data() (Host上的新地址)
    //    方向: ACL_MEMCPY_DEVICE_TO_HOST
    ret = aclrtMemcpy(readBackData.data(), readBackData.size() * sizeof(aclFloat16),
                      selfDeviceAddr, M * K * sizeof(aclFloat16),
                      ACL_MEMCPY_DEVICE_TO_HOST);
    CHECK_RET(ret == ACL_SUCCESS,
              LOG_PRINT("DEBUG: aclrtMemcpy D2H failed. ERROR: %d\n", ret);
              return ret);

    // 3. 打印“回读”回来的数据
    std::cout << "[DEBUG] Value read back from Device: " << aclFloat16ToFloat(readBackData[3]) << std::endl;
    if (aclFloat16ToFloat(readBackData[0]) == host_val)
    {
        std::cout << "[DEBUG] SUCCESS: Data on device is correct." << std::endl;
    }
    else
    {
        std::cout << "[DEBUG] FAILED: Data on device is INCORRECT or copy failed." << std::endl;
    }
    std::cout << "===================================================================\n"
              << std::endl;
    // ============ END: 调试代码结束 ============

    // 创建mat2 aclTensor
    ret = CreateAclTensor(mat2HostData, mat2Shape, &mat2DeviceAddr, aclDataType::ACL_FLOAT16, &mat2);
    CHECK_RET(ret == ACL_SUCCESS, return ret);
    // 创建out aclTensor
    ret = CreateAclTensor(outHostData, outShape, &outDeviceAddr, aclDataType::ACL_FLOAT16, &out);
    CHECK_RET(ret == ACL_SUCCESS, return ret);

    // 3. 调用CANN算子库API，需要修改为具体的Api名称
    int8_t cubeMathType = 1;
    uint64_t workspaceSize = 0;
    aclOpExecutor *executor;

    // 根据第一段接口计算出的workspaceSize申请device内存
    void *workspaceAddr = nullptr;

    int num_repeat = 100000;
    int warmup = 100000;

    aclrtEvent start, stop;
    float temp_time = 0;
    float time = 0;
    CHECK_ACL(aclrtCreateEvent(&start));
    CHECK_ACL(aclrtCreateEvent(&stop));

    for (int i = 0; i < warmup; i++)
    {
        workspaceSize = 0;
        ret = aclnnMatmulGetWorkspaceSize(self, mat2, out, cubeMathType, &workspaceSize, &executor);
        CHECK_RET(ret == ACL_SUCCESS,
                  LOG_PRINT("aclnnMatmulGetWorkspaceSize failed. ERROR: %d\n", ret);
                  return ret);

        if (workspaceSize > 0)
        {
            ret = aclrtMalloc(&workspaceAddr, workspaceSize, ACL_MEM_MALLOC_HUGE_FIRST);
            CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("allocate workspace failed. ERROR: %d\n", ret);
                      return ret);
        }
        ret = aclnnMatmul(workspaceAddr, workspaceSize, executor, stream);
        CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclnnMatmul failed. ERROR: %d\n", ret);
                  return ret);

        CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtSynchronizeStream failed. ERROR: %d\n", ret);
                  return ret);
    }

    for (int i = 0; i < num_repeat; ++i)
    {
        workspaceSize = 0;
        ret = aclnnMatmulGetWorkspaceSize(self, mat2, out, cubeMathType, &workspaceSize, &executor);
        CHECK_RET(ret == ACL_SUCCESS,
                  LOG_PRINT("aclnnMatmulGetWorkspaceSize failed. ERROR: %d\n", ret);
                  return ret);

        if (workspaceSize > 0)
        {
            ret = aclrtMalloc(&workspaceAddr, workspaceSize, ACL_MEM_MALLOC_HUGE_FIRST);
            CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("allocate workspace failed. ERROR: %d\n", ret);
                      return ret);
        }
        CHECK_ACL(aclrtSynchronizeStream(stream));
        CHECK_ACL(aclrtRecordEvent(start, stream));
        ret = aclnnMatmul(workspaceAddr, workspaceSize, executor, stream);
        CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclnnMatmul failed. ERROR: %d\n", ret);
                  return ret);

        ret = aclrtSynchronizeStream(stream);
        CHECK_ACL(aclrtRecordEvent(stop, stream));
        // CHECK_ACL(aclrtSynchronizeStream(stream));
        CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtSynchronizeStream failed. ERROR: %d\n", ret);
                  return ret);
        CHECK_ACL(aclrtSynchronizeEvent(stop));
        CHECK_ACL(aclrtEventElapsedTime(&temp_time, start, stop));
        time += temp_time;

        if (workspaceSize > 0)
        {
            ret = aclrtFree(workspaceAddr);
            CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("free workspace failed. ERROR: %d\n", ret);
                      return ret);
        }
    }

    // 5. 获取输出的值，将device侧内存上的结果拷贝至host侧，需要根据具体API的接口定义修改
    auto size = GetShapeSize(outShape);
    std::vector<aclFloat16> resultData(size, 0);
    ret = aclrtMemcpy(resultData.data(), resultData.size() * sizeof(resultData[0]), outDeviceAddr,
                      size * sizeof(resultData[0]), ACL_MEMCPY_DEVICE_TO_HOST);
    CHECK_RET(ret == ACL_SUCCESS,
              LOG_PRINT("copy result from device to host failed. ERROR: %d\n", ret);
              return ret);

    for (int64_t i = 0; i < 32; i++)
    {
        LOG_PRINT("result[%ld] is: %f\n", i, aclFloat16ToFloat(resultData[i]));
    }
    LOG_PRINT("... ...\n");
    printf("The repeat time is : %d\n", num_repeat);
    printf("%f TFLOPS\n", (float)2 * M * N * K / (time / num_repeat * 1e-3) / 1e12);

    // 6. 释放aclTensor和aclScalar，需要根据具体API的接口定义修改
    aclDestroyTensor(self);
    aclDestroyTensor(mat2);
    aclDestroyTensor(out);

    // 7. 释放device资源，需要根据具体API的接口定义修改
    aclrtFree(selfDeviceAddr);
    aclrtFree(mat2DeviceAddr);
    aclrtFree(outDeviceAddr);
    // if (workspaceSize > 0)
    // {
    //     aclrtFree(workspaceAddr);
    // }
    aclrtDestroyStream(stream);
    aclrtResetDevice(deviceId);
    aclFinalize();
    return 0;
}
