#include<iostream>
#include<cuda_runtime.h>
#include<vector>
#include "../MyTimer.h"
#include<string>
#include<math.h>
#include<sstream>


// Simple owning wrapper around a row-major float buffer that can live in
// plain host memory, pinned (cudaHostAlloc) host memory, or device memory.
//
// Layout convention: `n` rows of `m` consecutive floats, len = m * n.
//
// NOTE(review): the class has a user-declared destructor but relies on the
// implicit (shallow) copy constructor when passed by value to kernels.
// Do not add owning copy semantics here — kernel argument passing depends
// on the bitwise copy, and member layout must stay stable.
class Matrix
{
public:
    // Print up to the first 1000 elements, one row (m values) per line.
    void show()
    {
        for(int i = 0; i < len && i < 1000; i++)
        {
            if(i % m == 0)
                std::cout<<std::endl;
            std::cout << f[i] << " ";
        }
    }

    // Duplicate M into a fresh host buffer, choosing pinned vs plain memory.
    // Fix: if M's buffer currently lives on the device, copy it back with
    // cudaMemcpy — the original unconditional memcpy would have dereferenced
    // a device pointer on the host.
    Matrix(Matrix& M, bool is_CUDA_mem)
    {
        len = M.len;
        m = M.m;
        n = M.n;
        this->is_CUDA_mem = is_CUDA_mem;
        if(is_CUDA_mem)
        {
            cudaHostAlloc((void**)&f, sizeof(float) * len, cudaHostAllocMapped);
        }
        else
        {
            f = new float[len];
        }
        if(M.is_on_device)
            cudaMemcpy(f, M.f, sizeof(float) * len, cudaMemcpyDeviceToHost);
        else
            memcpy(f, M.f, sizeof(float) * len);
    }

    // Allocate an m x n matrix on the host; fill with small random integers
    // (rand() % 10) when need_init, otherwise zero-fill.
    Matrix(int m, int n, bool is_CUDA_mem, bool need_init){
        len = m * n;
        this->m = m;
        this->n = n;
        this->is_CUDA_mem = is_CUDA_mem;
        if(is_CUDA_mem)
            cudaHostAlloc((void**)&f, sizeof(float) * len, cudaHostAllocMapped);
        else 
            f = new float[len];
        for(int i = 0; i < len; i++)
        {
            f[i] = need_init ? static_cast<float>(rand() % 10) : 0.0f;
        }
    }

    // Element access; usable from both host and device code (the pointer
    // must match the side it is dereferenced on — no checking is done).
    __host__ __device__ float& operator[](int index) {
        return f[index];
    }

    __host__ __device__ const float& operator[](int index) const {
        return f[index];
    }

    // Release whichever allocation currently backs `f`.
    ~Matrix()
    {
        if(is_on_device)
            cudaFree(f);
        else
        {
            if(is_CUDA_mem)
                cudaFreeHost(f);
            else 
                delete []f;
        }
    }

    // Host -> Device (in-place): allocate on the device, copy the data over,
    // free the host buffer and adopt the device pointer.
    void moveToDevice() {
        if (is_on_device) return; // already on the device
        float* d_ptr;
        cudaMalloc((void**)&d_ptr, sizeof(float) * len);
        cudaMemcpy(d_ptr, f, sizeof(float) * len, cudaMemcpyHostToDevice);

        // release the host buffer
        if (is_CUDA_mem)
            cudaFreeHost(f);
        else
            delete[] f;

        f = d_ptr;
        is_on_device = true;
    }

    // Device -> Host (in-place): mirror of moveToDevice, restoring the
    // original host allocation kind recorded in is_CUDA_mem.
    void moveToHost() {
        if (!is_on_device) return; // already on the host
        float* h_ptr;
        if (is_CUDA_mem)
            cudaHostAlloc((void**)&h_ptr, sizeof(float) * len, cudaHostAllocMapped);
        else
            h_ptr = new float[len];

        cudaMemcpy(h_ptr, f, sizeof(float) * len, cudaMemcpyDeviceToHost);

        // release the device buffer
        cudaFree(f);

        f = h_ptr;
        is_on_device = false;
    }

    float* f;          // data pointer — copied bitwise into kernels; scalars below travel with it

    int m, n, len;     // m = row length, n = row count, len = m * n
    bool is_CUDA_mem;  // host buffer came from cudaHostAlloc (pinned) rather than new[]
    bool is_on_device = false; // true while `f` is a device pointer
};

// Emit one summary line per benchmarked variant: its name, transpose time,
// norm time, and the correctness flag collected for it. All four vectors are
// expected to be the same length (one entry per run).
void print_result(
    std::vector<std::string> &names, 
    std::vector<float>&T_times, std::vector<float>&norm_times, 
    std::vector<int>& answer_right
)
{
    for(std::size_t idx = 0; idx != names.size(); ++idx)
    {
        std::cout << names[idx]
                  << " T_time:" << T_times[idx]
                  << " norm_time:" << norm_times[idx]
                  << " result:" << answer_right[idx]
                  << std::endl;
    }
}

// Exact element-wise comparison of a raw float buffer against matrix B.
// Returns true only when all B.len entries compare equal; any mismatch
// (including NaN, which never compares equal) yields false.
bool is_same(float* A, Matrix& B)
{
    const int count = B.len;
    for(int idx = 0; idx != count; ++idx)
    {
        if(!(A[idx] == B[idx]))
            return false;
    }
    return true;
}

// CPU reference: transpose `in` into `out`, then compute the L2 norm of each
// row of `out` into `norm`. Appends the two timings to T_times / norm_times
// and registers the run in `names` / `answer_right`.
//
// Layout convention (matches Matrix(m, n, ...)): element (row i, col j) of a
// matrix lives at f[i * m + j], where m is the row length.
void common(
    Matrix& in, Matrix& out, float* norm, 
    std::vector<std::string> &names, 
    std::vector<float>&T_times, std::vector<float>&norm_times, 
    std::vector<int>& answer_right
)
{
    GPUTimer t;

    // --- transpose --------------------------------------------------------
    t.start();
    for(int i = 0; i < in.n; i++)
    {
        for(int j = 0; j < in.m; j++)
        {
            out[j * in.n + i] = in[i * in.m + j];
        }
    }
    T_times.push_back(t.stop());

    // --- per-row L2 norms of the transposed matrix ------------------------
    t.start();
    for(int i = 0; i < out.n; i++)
    {
        float acc = 0.0f; // local accumulator avoids repeated writes through norm[i]
        for(int j = 0; j < out.m; j++)
        {
            float v = out[i * out.m + j];
            acc += v * v;
        }
        // Fix: sqrtf keeps the computation in single precision; the original
        // pow(x, 0.5) round-tripped through double for every row.
        norm[i] = sqrtf(acc);
    }
    norm_times.push_back(t.stop());

    names.push_back("cpu的baseline");
    answer_right.push_back(11);
}



// Naive transpose kernel with optional unrolling along x.
// Expected launch: 2D blocks over an in.n (rows, y) x in.m (cols, x) matrix,
// with grid.x computed using a stride of blockDim.x * Unroll_times.
// is_row == true : coalesced reads from `in`, strided writes to `out`.
// is_row == false: roles swapped (coalesced writes, strided reads).
// NOTE(review): the is_row==false mapping writes out[in_index] — for
// non-square matrices this does not match the is_row==true result; verify
// against the CPU reference before relying on types 2/4.
__global__ void CUDA_T_1(Matrix in, float* out, int Unroll_times,bool is_row)
{
    // the most naive approach
    int x_index = blockDim.x * blockIdx.x * Unroll_times + threadIdx.x; // column in `in`
    int y_index = blockDim.y * blockIdx.y + threadIdx.y;                // row in `in`
    int in_index = y_index * in.m + x_index;   // row-major offset in `in`
    int out_index = x_index * in.n + y_index;  // transposed offset in `out`

    // Guard checks the LAST unrolled element; this also skips partial tails
    // when in.m is not a multiple of blockDim.x * Unroll_times.
    if(x_index + (Unroll_times - 1) * blockDim.x < in.m && y_index < in.n) 
    {
        if(is_row)
        {
            for(int i = 0 ; i < Unroll_times; i++)
            {
                out[out_index + i * blockDim.x * in.n] = in[in_index + i * blockDim.x]; // each thread also covers the work of the blocks that would sit to its right
            }
        }
        else    
        {
            for(int i = 0 ; i < Unroll_times; i++)
            {
                out[in_index + i * blockDim.x] = in[out_index + i * blockDim.x * in.n];
            }
        }
    }
    // Original author's open question: where is the bottleneck — conflicts
    // when reading `in`, or the column-strided writes to `out` (whose cost
    // seems unavoidable for a transpose of this shape)?
}


// Transpose with diagonal block reordering (intended to spread writes across
// memory partitions). NOT IMPLEMENTED yet — the original body only declared
// an unused local (`blk_y`), removed here.
// TODO: remap blockIdx through a diagonal ordering, then reuse the
// CUDA_T_1 indexing scheme.
__global__ void CUDA_T_Diagonal(Matrix in, float* out, int Unroll_times,bool is_row)
{
    // Silence unused-parameter warnings until the kernel is implemented.
    (void)in; (void)out; (void)Unroll_times; (void)is_row;
}



// Launch one GPU transpose variant (`type` selects kernel/config) on
// m_common, time it (H2D copy + kernel + D2H copy), validate the device
// result against correct_answer, and append name/timings/verdict to the
// result vectors.
//
// type 1/2: naive row-/column-major kernels (Unroll_times should be 1)
// type 3/4: Unroll_times-way x-unrolled variants
void cuda_run(
    Matrix& m_common, Matrix& correct_answer,
    std::vector<std::string> &names, 
    std::vector<float>&T_times, std::vector<float>&norm_times, 
    std::vector<int>& answer_right,
    int type, int Unroll_times
)
{
    GPUTimer t;
    float* out = nullptr;
    float* out_host = new float[m_common.len];
    int nBs = m_common.len * sizeof(float);
    cudaError_t err = cudaMalloc((float **)&out, nBs);
    if (err != cudaSuccess) {
        // Bail out before any vector push so all four result vectors
        // remain aligned (no entry for this failed run).
        std::cerr << "cudaMalloc failed: " << cudaGetErrorString(err) << std::endl;
        delete[] out_host;
        return;
    }
    Matrix in(m_common, 0); // private host copy, moved to the device below
    int answer_right_now = 0;

    // Pick the launch configuration for the selected variant.
    dim3 block, grid;
    switch(type)
    {
        case 1: case 2: // naive row/column kernels
            block.x = 32; // 32*32 = 1024 threads, the per-block maximum
            block.y = 32;
            grid.x = (m_common.m + block.x - 1) / (block.x);
            grid.y = (m_common.n + block.y - 1) / (block.y);
            break;
        case 3: case 4: // unrolled: each thread covers Unroll_times columns
            block.x = 32;
            block.y = 32;
            grid.x = (m_common.m + block.x * Unroll_times - 1) / (block.x * Unroll_times);
            grid.y = (m_common.n + block.y - 1) / (block.y);
            break;    
    }

    t.start();
    in.moveToDevice();

    std::stringstream ss;
    switch(type)
    {
        case 1:
            CUDA_T_1<<<grid, block>>>(in, out, Unroll_times, 1);
            names.push_back("基础方法的行主读取");
            break;
        case 2:
            CUDA_T_1<<<grid, block>>>(in, out, Unroll_times, 0);
            names.push_back("基础方法的列主读取");
            break;
        case 3:
            CUDA_T_1<<<grid, block>>>(in, out, Unroll_times, 1);
            ss << Unroll_times << "路展开的的行主读取";
            names.push_back(ss.str());
            break;
        case 4:
            CUDA_T_1<<<grid, block>>>(in, out, Unroll_times, 0);
            ss << Unroll_times << "路展开的的列主读取";
            names.push_back(ss.str());
            break;
    }
    err = cudaGetLastError(); // launch-config errors surface here
    if (err != cudaSuccess) {
        std::cerr << "Kernel launch failed: " << cudaGetErrorString(err) << std::endl;
    }

    cudaDeviceSynchronize();
    cudaMemcpy(out_host, out, nBs, cudaMemcpyDeviceToHost);

    T_times.push_back(t.stop());

    answer_right_now += is_same(out_host, correct_answer) ? 1 : 0;

    norm_times.push_back(0); // GPU variants do not time the norm step
    answer_right.push_back(answer_right_now);

    // Fix: the original leaked both buffers on every call.
    cudaFree(out);
    delete[] out_host;
}


// Benchmark driver: build a random m x n input, compute the CPU reference
// transpose (+ row norms), run the selected GPU variants, and print a
// summary table.
int main()
{
    int m = 1 << 10, n = 1 << 8; // 1024 x 256
    Matrix m_common(m, n, 0, 1);  // random input in plain host memory
    //Matrix m_cuda(m_common, 1);
    Matrix commom_out(n, m, 0, 0); // CPU reference transpose target
    float* common_norm = new float[m];

    std::vector<std::string> names;
    std::vector<float> T_times;
    std::vector<float> norm_times; 
    std::vector<int> answer_right;

    common(m_common, commom_out, common_norm, names, T_times, norm_times, answer_right);
    //commom_out.show();
    cuda_run(m_common, commom_out, names, T_times, norm_times, answer_right, 1, 1);
    // cuda_run(m_common, commom_out, names, T_times, norm_times, answer_right, 2, 1);
    // cuda_run(m_common, commom_out, names, T_times, norm_times, answer_right, 3, 4);
    // cuda_run(m_common, commom_out, names, T_times, norm_times, answer_right, 4, 4);

    print_result(names, T_times, norm_times, answer_right);

    delete[] common_norm; // fix: was leaked in the original
    return 0;
}

