#include <chrono>
#include <iostream>
#include <CL/sycl.hpp>
#define random_float() (rand() / double(RAND_MAX))
using namespace std;
using namespace sycl;

// 单次外循环缓存数据块的大小
#define cache_block_x 8
#define cache_block_y 4

// 使用gpu计算矩阵相乘的结果和耗时
// GPU tiled GEMM: C(M*N) = A(M*K) * B(K*N). Each work-item computes one
// cache_block_x x cache_block_y tile of C, staging slices of A and B in
// registers to cut global-memory traffic. Returns the kernel execution
// time in milliseconds (requires a queue created with enable_profiling).
double gpu_kernel(float *A, float *B, float *C, int M, int N, int K, int block, sycl::queue &q) {

  // One work-item per output tile; assumes M % cache_block_x == 0,
  // N % cache_block_y == 0, and both quotients divisible by `block`.
  auto rows = M / cache_block_x;
  auto cols = N / cache_block_y;

  // Work-group shape and the full ND-range over the tiled matrix.
  auto local_ndrange  = range<2>(block, block);
  auto global_ndrange = range<2>(rows, cols);

  double time_cost = 0;
  auto e = q.submit([&](sycl::handler &h) {
      h.parallel_for<class k_name_t>(sycl::nd_range<2>(global_ndrange, local_ndrange), [=](sycl::nd_item<2> index) {

        // Top-left corner of this work-item's tile of C.
        int row = cache_block_x * index.get_global_id(0);
        int col = cache_block_y * index.get_global_id(1);

        // Per-work-item accumulator tile and staging buffers.
        float sum[cache_block_x][cache_block_y] = {0};
        float subA[cache_block_x] = {0};
        float subB[cache_block_y] = {0};

        // BUG FIX: the reduction dimension is K (A is M*K, row stride K),
        // not N; the original only worked because the caller used M==N==K.
        for (int k = 0; k < K; k++) {
            // Load one column slice of A and one row slice of B once,
            // then reuse them across the whole tile.
            for(int m = 0; m < cache_block_x; m++)
                subA[m] = A[(row + m) * K + k];

            for(int p = 0; p < cache_block_y; p++)
                subB[p] = B[k * N + p + col];

            for (int m = 0; m < cache_block_x; m++)
                for (int p = 0; p < cache_block_y; p++)
                    sum[m][p] += subA[m] * subB[p];
        }

        // Write the finished tile back to C.
        for (int m = 0; m < cache_block_x; m++) {
            for (int p = 0; p < cache_block_y; p++) {
                C[(row + m) * N + col + p] = sum[m][p];
            }
        }

    });
  });
    e.wait();
    // Convert event profiling timestamps (nanoseconds) to milliseconds in
    // floating point; the original integer division truncated to whole ms.
    auto t_start = e.get_profiling_info<info::event_profiling::command_start>();
    auto t_end   = e.get_profiling_info<info::event_profiling::command_end>();
    time_cost += static_cast<double>(t_end - t_start) / 1.0e6;
    return(time_cost);
}

// 使用cpu串行计算矩阵相乘的结果和耗时
// Serial reference GEMM: C(M*N) = A(M*K) * B(K*N).
// Returns the elapsed wall-clock time in milliseconds.
double cpu_kernel(float *A, float *B, float *C, int M, int N, int K) {

    const auto t_begin = std::chrono::high_resolution_clock::now();

    // Classic triple loop: one dot product of length K per output element.
    for (int i = 0; i < M; ++i) {
        for (int j = 0; j < N; ++j) {
            float acc = 0;
            for (int k = 0; k < K; ++k)
                acc += A[i * K + k] * B[k * N + j];
            C[i * N + j] = acc;
        }
    }

    const auto t_end = std::chrono::high_resolution_clock::now();
    // Same float-millisecond measurement as before, returned as double.
    double time_cost = std::chrono::duration<float, std::milli>(t_end - t_begin).count();
    return time_cost;
}

// 判断cpu和gpu计算结果是否正确
// Compare the CPU and GPU result buffers element-wise; print every pair
// that differs by more than 1e-3 and return the number of mismatches.
int judge(float* C_cpu, float* C_gpu, int l) {
    int mismatches = 0;
    for (int i = 0; i < l; ++i) {
        const float delta = C_cpu[i] - C_gpu[i];
        if (fabs(delta) > 1e-3) {
            ++mismatches;
            printf("\n%lf, %lf", C_cpu[i], C_gpu[i]);
        }
    }
    return mismatches;
}

// Allocate and initialize the matrices, benchmark the GPU and CPU GEMM
// paths, verify the GPU result against the serial reference, and print
// the average timings.
void gemm(const int M, const int N, const int K, const int block, const int iterations, sycl::queue& q) {
    // Print the problem description.
    cout << "\n矩阵相乘: A(" << M << "*" << K << ")* B(" << K << "*" << N << ") = C(" << M << "*" << N << ")\n";
    cout << "分块间距: " << block << std::endl;
    cout << "缓存数据块大小: " << cache_block_x << "*" << cache_block_y<<std::endl;

    // USM allocations: inputs and GPU output are shared (host+device);
    // the CPU reference result only needs host memory.
    auto A = malloc_shared<float>(M * K, q);
    auto B = malloc_shared<float>(K * N, q);
    auto C_gpu = malloc_shared<float>(M * N, q);
    auto C_cpu = malloc_host<float>(M * N, q);

    // Fill inputs with uniform random values in [0, 1]; zero the outputs.
    for (int i = 0; i < M * K; i++)
        A[i] = random_float();
    for (int i = 0; i < K * N; i++)
        B[i] = random_float();
    for (int i = 0; i < M * N; i++) {
        C_gpu[i] = 0;
        C_cpu[i] = 0;
    }

    double time_cost_gpu = 0;
    double time_cost_cpu = 0;

    // GPU timing: discard warm-up runs (JIT compilation, first-touch page
    // migration) and average the remaining `iterations` runs.
    const int gpu_warmup = 10;
    for (int a = 0; a < iterations + gpu_warmup; a++) {
        // Keep the result in double; the old `float duration` lost precision.
        double duration = gpu_kernel(A, B, C_gpu, M, N, K, block, q);
        if (a >= gpu_warmup) time_cost_gpu += duration;
    }
    time_cost_gpu /= iterations;

    // CPU timing: the serial path is slow, so time only half as many runs
    // (at least one, so small `iterations` values cannot divide by zero).
    const int cpu_warmup = 2;
    const int cpu_runs = (iterations / 2 > 0) ? iterations / 2 : 1;
    for (int a = 0; a < cpu_runs + cpu_warmup; a++) {
        double duration = cpu_kernel(A, B, C_cpu, M, N, K);
        if (a >= cpu_warmup) time_cost_cpu += duration;
    }
    // BUG FIX: the original divided by (iterations * 2) although only
    // iterations / 2 runs were accumulated, under-reporting CPU time 4x.
    time_cost_cpu /= cpu_runs;

    // Verify the GPU result against the serial reference.
    int diff = judge(C_cpu, C_gpu, M * N);
    if (diff > 0) cout << "\n矩阵相乘的串行计算结果与并行计算结果有 " << diff << " 处差异\n";
    cout << "\n并行计算(GPU)耗时: " << time_cost_gpu << " ms\n";
    cout << "串行计算(CPU)耗时: " << time_cost_cpu << " ms\n";

    // Release all USM allocations on the queue's context.
    free(A, q);
    free(B, q);
    free(C_gpu, q);
    free(C_cpu, q);

}

// Entry point: build a profiling-enabled queue on the GPU and run the
// 1024^3 GEMM benchmark with an 8x8 work-group and 10 timed iterations.
int main() {

    // Profiling must be enabled at queue-construction time so that
    // gpu_kernel can read the event timestamps.
    auto plist = cl::sycl::property_list{ cl::sycl::property::queue::enable_profiling() };
    queue q(cl::sycl::gpu_selector{}, plist);

    // Problem size and work-group edge length.
    const int M = 1024;
    const int N = 1024;
    const int K = 1024;
    const int block = 8;

    gemm(M, N, K, block, 10, q);
    return 0;
}
