#include <algorithm>
#include <chrono>
#include <cmath>
#include <iostream>
#include <numeric>
#include <random>
#include <vector>

#include "acl/acl.h"
#include "acl/acl_base.h"
#include "aclnnop/aclnn_matmul.h"
#include "tiling/platform/platform_ascendc.h"
// #include "kernel_operator.h"

// Evaluate an ACL call and log (but do not abort on) a non-zero status code.
// Fix: the do/while(0) wrapper must NOT carry its own trailing semicolon —
// otherwise `if (c) CHECK_ACL(x); else ...` expands to two statements and the
// `else` fails to compile. The caller supplies the terminating semicolon.
#define CHECK_ACL(x)                                                                        \
    do                                                                                      \
    {                                                                                       \
        aclError __ret = x;                                                                 \
        if (__ret != ACL_ERROR_NONE)                                                        \
        {                                                                                   \
            std::cerr << __FILE__ << ":" << __LINE__ << " aclError:" << __ret << std::endl; \
        }                                                                                   \
    } while (0)

// If `cond` is false, execute `return_expr` (typically a LOG_PRINT followed by
// a `return` statement). The do/while(0) wrapper makes the expansion a single
// statement, safe inside unbraced if/else branches.
#define CHECK_RET(cond, return_expr) \
    do                               \
    {                                \
        if (!(cond))                 \
        {                            \
            return_expr;             \
        }                            \
    } while (0)

// printf-style logging helper. `##__VA_ARGS__` (a GNU extension also accepted
// by MSVC and Clang) swallows the trailing comma when no varargs are passed.
#define LOG_PRINT(message, ...)         \
    do                                  \
    {                                   \
        printf(message, ##__VA_ARGS__); \
    } while (0)

// Returns the number of elements implied by a tensor shape, i.e. the product
// of all dimensions. An empty shape yields 1 (the multiplicative identity),
// matching the original loop's behavior for scalar/rank-0 shapes.
int64_t GetShapeSize(const std::vector<int64_t> &shape)
{
    return std::accumulate(shape.begin(), shape.end(), int64_t{1},
                           [](int64_t acc, int64_t dim) { return acc * dim; });
}

// 生成随机FP16数据
void GenerateRandomFP16Data(std::vector<aclFloat16> &data, float minVal = -1.0f, float maxVal = 1.0f,
                            bool useConstant = false, float constantValue = 1.0f)
{
    if (useConstant)
    {
        // 使用常量值填充
        aclFloat16 fp16_val = aclFloatToFloat16(constantValue);
        std::fill(data.begin(), data.end(), fp16_val);
        return;
    }

    // 使用随机数生成器
    static std::mt19937 gen(std::chrono::steady_clock::now().time_since_epoch().count());
    std::uniform_real_distribution<float> dis(minVal, maxVal);

    for (size_t i = 0; i < data.size(); ++i)
    {
        float randomVal = dis(gen);
        data[i] = aclFloatToFloat16(randomVal);
    }
}

// CPU上执行矩阵乘法作为参考（用于验证）
// Reference matrix multiply on the CPU: C = A * B, with A (M x K) and
// B (K x N) stored row-major in FP16, accumulating each C element in float.
void CPUMatmul(const std::vector<aclFloat16> &A, const std::vector<aclFloat16> &B,
               std::vector<float> &C, int64_t M, int64_t N, int64_t K)
{
    for (int64_t row = 0; row < M; ++row)
    {
        for (int64_t col = 0; col < N; ++col)
        {
            float acc = 0.0f;
            for (int64_t idx = 0; idx < K; ++idx)
            {
                acc += aclFloat16ToFloat(A[row * K + idx]) * aclFloat16ToFloat(B[idx * N + col]);
            }
            C[row * N + col] = acc;
        }
    }
}

// 验证矩阵乘法结果的正确性（支持随机数输入）
// Verifies the device matmul output against a CPU reference computed from the
// same FP16 inputs (see CPUMatmul). Prints details for the first 5 mismatches,
// overall error statistics, and a small sample of actual-vs-expected values.
// Returns true only when every element passes the combined absolute/relative
// error check.
// NOTE(review): `%ld` is used for int64_t throughout — correct on LP64
// platforms; PRId64 would be portable. Confirm the target toolchain.
bool VerifyMatmulResult(const std::vector<aclFloat16> &resultData,
                        const std::vector<aclFloat16> &selfData,
                        const std::vector<aclFloat16> &mat2Data,
                        int64_t M, int64_t N, int64_t K,
                        float tolerance = 1e-2) // FP16 has limited precision, so the tolerance is deliberately loose
{
    bool allCorrect = true;
    int64_t incorrectCount = 0;
    float maxError = 0.0f;
    float maxRelativeError = 0.0f;

    LOG_PRINT("\n========== Verification Results ==========\n");
    LOG_PRINT("Matrix dimensions: M=%ld, N=%ld, K=%ld\n", M, N, K);
    LOG_PRINT("Tolerance: %.6f\n", tolerance);
    LOG_PRINT("Total elements to verify: %ld\n", M * N);

    // Compute the reference result on the CPU (float accumulation).
    std::vector<float> expectedResult(M * N);
    CPUMatmul(selfData, mat2Data, expectedResult, M, N, K);

    // Check every element of the M x N result.
    for (int64_t i = 0; i < M * N; ++i)
    {
        float actualValue = aclFloat16ToFloat(resultData[i]);
        float expectedValue = expectedResult[i];
        float error = std::abs(actualValue - expectedValue);
        float relativeError = (expectedValue != 0) ? error / std::abs(expectedValue) : error;

        if (error > tolerance && relativeError > 0.01) // flag only when BOTH absolute and relative error exceed their thresholds
        {
            allCorrect = false;
            incorrectCount++;

            // Print details for the first few mismatches only.
            if (incorrectCount <= 5)
            {
                int64_t row = i / N;
                int64_t col = i % N;
                LOG_PRINT("ERROR at position [%ld,%ld]: expected %.6f, got %.6f, error = %.6f, relative error = %.2f%%\n",
                          row, col, expectedValue, actualValue, error, relativeError * 100);
            }
        }

        // Track the maximum absolute and relative errors across all elements.
        if (error > maxError)
        {
            maxError = error;
        }
        if (relativeError > maxRelativeError)
        {
            maxRelativeError = relativeError;
        }
    }

    // Print summary statistics.
    LOG_PRINT("\nStatistics:\n");
    LOG_PRINT("- Correct elements: %ld (%.2f%%)\n",
              M * N - incorrectCount,
              100.0 * (M * N - incorrectCount) / (M * N));
    LOG_PRINT("- Incorrect elements: %ld (%.2f%%)\n",
              incorrectCount,
              100.0 * incorrectCount / (M * N));
    LOG_PRINT("- Maximum absolute error: %.6f\n", maxError);
    LOG_PRINT("- Maximum relative error: %.2f%%\n", maxRelativeError * 100);

    // Show a small sample of values for a quick eyeball comparison.
    LOG_PRINT("\nSample values (actual vs expected):\n");
    int64_t sampleSize = std::min((int64_t)10, M * N);
    for (int64_t i = 0; i < sampleSize; ++i)
    {
        LOG_PRINT("  result[%ld]: %.6f vs %.6f (diff: %.6f)\n",
                  i, aclFloat16ToFloat(resultData[i]), expectedResult[i],
                  std::abs(aclFloat16ToFloat(resultData[i]) - expectedResult[i]));
    }

    // Final verdict.
    if (allCorrect)
    {
        LOG_PRINT("\n✓ VERIFICATION PASSED: All elements are correct within tolerance!\n");
    }
    else
    {
        LOG_PRINT("\n✗ VERIFICATION FAILED: Found %ld incorrect elements!\n", incorrectCount);
        if (incorrectCount > 5)
        {
            LOG_PRINT("  (Only showing first 5 errors)\n");
        }
    }
    LOG_PRINT("==========================================\n\n");

    return allCorrect;
}

// Boilerplate AscendCL setup: initialize the runtime, select `deviceId`, and
// create a stream on it. Returns 0 on success or the first failing ACL error
// code (each failure is also logged).
int Init(int32_t deviceId, aclrtStream *stream)
{
    // Fixed initialization sequence per the AscendCL public API reference.
    auto ret = aclInit(nullptr);
    CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclInit failed. ERROR: %d\n", ret); return ret);
    ret = aclrtSetDevice(deviceId);
    CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtSetDevice failed. ERROR: %d\n", ret); return ret);
    ret = aclrtCreateStream(stream);
    CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtCreateStream failed. ERROR: %d\n", ret);
              return ret);
    return 0;
}

template <typename T>
int CreateAclTensor(const std::vector<T> &hostData, const std::vector<int64_t> &shape,
                    void **deviceAddr, aclDataType dataType, aclTensor **tensor)
{
    auto size = GetShapeSize(shape) * sizeof(T);
    // 调用aclrtMalloc申请device侧内存
    auto ret = aclrtMalloc(deviceAddr, size, ACL_MEM_MALLOC_HUGE_FIRST);
    CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtMalloc failed. ERROR: %d\n", ret); return ret);
    // 调用aclrtMemcpy将host侧数据拷贝到device侧内存上
    ret = aclrtMemcpy(*deviceAddr, size, hostData.data(), size, ACL_MEMCPY_HOST_TO_DEVICE);
    CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtMemcpy failed. ERROR: %d\n", ret); return ret);

    // 计算连续tensor的strides
    std::vector<int64_t> strides(shape.size(), 1);
    for (int64_t i = shape.size() - 2; i >= 0; i--)
    {
        strides[i] = shape[i + 1] * strides[i + 1];
    }

    // 调用aclCreateTensor接口创建aclTensor
    *tensor = aclCreateTensor(shape.data(), shape.size(), dataType, strides.data(), 0,
                              aclFormat::ACL_FORMAT_ND, shape.data(), shape.size(), *deviceAddr);
    return 0;
}

// Entry point: parses matrix dimensions M, N, K (and an optional flag for
// random vs constant inputs) from argv, then runs aclnnMatmul `num_repeat`
// times on an Ascend device. Results of the first and last iterations are
// verified against a CPU reference; device-event timing is accumulated to
// report average latency and TFLOPS.
int main(int argc, char *argv[])
{
    if (argc < 4 || argc > 5)
    {
        printf("Usage: %s m n k [use_random]\n", argv[0]);
        printf("  m, n, k: matrix dimensions\n");
        printf("  use_random: 0 for constant values (default), 1 for random values\n");
        return -1;
    }

    int32_t M, N, K;
    M = atoi(argv[1]);
    N = atoi(argv[2]);
    K = atoi(argv[3]);
    bool useRandom = (argc == 5) ? (atoi(argv[4]) == 1) : false;

    printf("Matrix dimensions: M=%d, N=%d, K=%d\n", M, N, K);
    printf("Using %s values\n", useRandom ? "random" : "constant (1.0)");

    // 1. (Boilerplate) device/stream initialization; see the AscendCL public
    //    API reference.
    // NOTE(review): deviceId is hard-coded to 7 — adjust to a device that is
    //    actually present on the target machine.
    int32_t deviceId = 7;
    aclrtStream stream;
    auto ret = Init(deviceId, &stream);
    CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("Init acl failed. ERROR: %d\n", ret); return ret);

    // 2. Prepare shapes and host buffers once, outside the loop; every
    //    iteration reuses them.
    std::vector<int64_t> selfShape = {M, K};
    std::vector<int64_t> mat2Shape = {K, N};
    std::vector<int64_t> outShape = {M, N};

    // Host-side input/output data.
    std::vector<aclFloat16> selfHostData(M * K);
    std::vector<aclFloat16> mat2HostData(K * N);
    std::vector<aclFloat16> outHostData(M * N, 0);

    // 3. Timing setup: device events plus accumulators for elapsed time.
    int num_repeat = 100;
    aclrtEvent start, stop;
    float temp_time = 0;
    float time = 0;
    CHECK_ACL(aclrtCreateEvent(&start));
    CHECK_ACL(aclrtCreateEvent(&stop));

    // 4. Main loop: device memory and tensors are re-allocated and re-created
    //    on every iteration.
    for (int i = 0; i < num_repeat; ++i)
    {
        // Regenerate input data when needed: always on the first iteration,
        // afterwards only when random inputs were requested.
        if (i == 0 || useRandom)
        {
            GenerateRandomFP16Data(selfHostData, -1.0f, 1.0f, !useRandom, 1.0f);
            GenerateRandomFP16Data(mat2HostData, -1.0f, 1.0f, !useRandom, 1.0f);

            if (i == 0)
            {
                LOG_PRINT("\nFirst few elements of input matrices:\n");
                LOG_PRINT("Matrix A (self): ");
                for (int j = 0; j < std::min(5, (int)(M * K)); ++j)
                {
                    LOG_PRINT("%.3f ", aclFloat16ToFloat(selfHostData[j]));
                }
                LOG_PRINT("...\n");

                LOG_PRINT("Matrix B (mat2): ");
                for (int j = 0; j < std::min(5, (int)(K * N)); ++j)
                {
                    LOG_PRINT("%.3f ", aclFloat16ToFloat(mat2HostData[j]));
                }
                LOG_PRINT("...\n\n");
            }
        }

        // Per-iteration handles, re-declared fresh each pass.
        void *selfDeviceAddr = nullptr;
        void *mat2DeviceAddr = nullptr;
        void *outDeviceAddr = nullptr;
        aclTensor *self = nullptr;
        aclTensor *mat2 = nullptr;
        aclTensor *out = nullptr;

        // Create the self aclTensor (fresh device allocation each time).
        ret = CreateAclTensor(selfHostData, selfShape, &selfDeviceAddr, aclDataType::ACL_FLOAT16, &self);
        CHECK_RET(ret == ACL_SUCCESS, return ret);

        // Create the mat2 aclTensor.
        ret = CreateAclTensor(mat2HostData, mat2Shape, &mat2DeviceAddr, aclDataType::ACL_FLOAT16, &mat2);
        CHECK_RET(ret == ACL_SUCCESS, return ret);

        // Create the out aclTensor.
        ret = CreateAclTensor(outHostData, outShape, &outDeviceAddr, aclDataType::ACL_FLOAT16, &out);
        CHECK_RET(ret == ACL_SUCCESS, return ret);

        // Call the CANN operator-library API (two-phase: query workspace
        // size, then launch the kernel with that workspace).
        int8_t cubeMathType = 1;
        uint64_t workspaceSize = 0;
        aclOpExecutor *executor;
        void *workspaceAddr = nullptr;

        ret = aclnnMatmulGetWorkspaceSize(self, mat2, out, cubeMathType, &workspaceSize, &executor);
        CHECK_RET(ret == ACL_SUCCESS,
                  LOG_PRINT("aclnnMatmulGetWorkspaceSize failed. ERROR: %d\n", ret);
                  return ret);

        if (workspaceSize > 0)
        {
            ret = aclrtMalloc(&workspaceAddr, workspaceSize, ACL_MEM_MALLOC_HUGE_FIRST);
            CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("allocate workspace failed. ERROR: %d\n", ret);
                      return ret);
        }

        // Drain the stream, then record the start event.
        CHECK_ACL(aclrtSynchronizeStream(stream));
        CHECK_ACL(aclrtRecordEvent(start, stream));

        // Launch the matrix multiplication.
        ret = aclnnMatmul(workspaceAddr, workspaceSize, executor, stream);
        CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclnnMatmul failed. ERROR: %d\n", ret);
                  return ret);

        // Record the stop event and wait for completion.
        CHECK_ACL(aclrtRecordEvent(stop, stream));
        CHECK_ACL(aclrtSynchronizeStream(stream));
        // NOTE(review): `ret` here still holds the aclnnMatmul status —
        // CHECK_ACL does not assign to it — so this check re-tests a stale
        // value rather than the synchronize call above; confirm intent.
        CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtSynchronizeStream failed. ERROR: %d\n", ret);
                  return ret);
        CHECK_ACL(aclrtSynchronizeEvent(stop));
        CHECK_ACL(aclrtEventElapsedTime(&temp_time, start, stop));
        time += temp_time;

        // Verify the result on the first iteration.
        if (i == 0)
        {
            auto size = GetShapeSize(outShape);
            std::vector<aclFloat16> resultData(size, 0);
            ret = aclrtMemcpy(resultData.data(), resultData.size() * sizeof(resultData[0]), outDeviceAddr,
                              size * sizeof(resultData[0]), ACL_MEMCPY_DEVICE_TO_HOST);
            CHECK_RET(ret == ACL_SUCCESS,
                      LOG_PRINT("copy result from device to host failed. ERROR: %d\n", ret);
                      return ret);

            // Compare against the CPU reference.
            bool isCorrect = VerifyMatmulResult(resultData, selfHostData, mat2HostData, M, N, K);
            if (!isCorrect)
            {
                LOG_PRINT("WARNING: Computation result is incorrect!\n");
            }
        }

        // Also verify on the last iteration, to confirm results stay correct
        // after many allocate/compute/free cycles.
        if (i == num_repeat - 1)
        {
            auto size = GetShapeSize(outShape);
            std::vector<aclFloat16> resultData(size, 0);
            ret = aclrtMemcpy(resultData.data(), resultData.size() * sizeof(resultData[0]), outDeviceAddr,
                              size * sizeof(resultData[0]), ACL_MEMCPY_DEVICE_TO_HOST);
            CHECK_RET(ret == ACL_SUCCESS,
                      LOG_PRINT("copy result from device to host failed. ERROR: %d\n", ret);
                      return ret);

            LOG_PRINT("\nVerifying last iteration result...\n");
            bool isCorrect = VerifyMatmulResult(resultData, selfHostData, mat2HostData, M, N, K);
            if (!isCorrect)
            {
                LOG_PRINT("WARNING: Final computation result is incorrect!\n");
            }
        }

        // Release this iteration's resources.
        if (workspaceSize > 0)
        {
            ret = aclrtFree(workspaceAddr);
            CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("free workspace failed. ERROR: %d\n", ret);
                      return ret);
        }

        // Destroy the aclTensor handles.
        aclDestroyTensor(self);
        aclDestroyTensor(mat2);
        aclDestroyTensor(out);

        // Free the device memory.
        aclrtFree(selfDeviceAddr);
        aclrtFree(mat2DeviceAddr);
        aclrtFree(outDeviceAddr);

        // Optional: force a device-wide sync each iteration so that memory is
        // fully released before the next allocation round.
        aclrtSynchronizeDevice();
    }

    // 5. Report performance numbers.
    printf("\nAverage time per iteration: %.3f ms\n", time / num_repeat);
    printf("Performance: %.3f TFLOPS\n", (float)2 * M * N * K / (time / num_repeat * 1e-3) / 1e12);

    // 6. Tear down events, stream, and device; finalize the ACL runtime.
    CHECK_ACL(aclrtDestroyEvent(start));
    CHECK_ACL(aclrtDestroyEvent(stop));
    aclrtDestroyStream(stream);
    aclrtResetDevice(deviceId);
    aclFinalize();

    return 0;
}