#include <iostream>
#include <sstream>
#include <string>
#include <vector>
#include <cstring>
#include <math.h>
#include <cuda_runtime.h>
#include "../MyTimer.h"




// Print the most recent CUDA error, if any.
// NOTE: cudaGetLastError() also clears the sticky error state.
void get_error()
{
    const cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess) return;
    printf("Kernel launch failed: %s\n", cudaGetErrorString(status));
    // Additional handling (e.g. aborting the program) could be added here.
}
// Print one line per benchmark run: its name, the three per-stage timings,
// and the three correctness flags. The i-th entries of all three vectors
// describe the same run.
void print_result(
    std::vector<std::string>& names, 
    std::vector<std::vector<float>>& times_stages,
    std::vector<std::vector<bool>>& answer_rights
)
{
    const size_t count = names.size();
    for (size_t k = 0; k < count; k++)
    {
        std::cout << names[k];
        printf(
            " stage1:%fs, stage2:%fs, stage3:%fs ",
            times_stages[k][0], times_stages[k][1], times_stages[k][2]
        );
        std::cout << answer_rights[k][0] << answer_rights[k][1] << answer_rights[k][2] << std::endl;
    }
}


// A simple m x n float matrix whose data buffer may live either in pinned
// (page-locked) host memory or in device memory, as flagged by `on_device`.
//
// NOTE(review): the class has no destructor and default (shallow) copy
// semantics, and several call sites rely on that — e.g. toDevice() copies a
// temporary `host_wrapper` that shares the device pointer, and copy_Matrix()
// memcpy's device-side Matrix structs into host-side ones. Adding RAII here
// would cause double frees; memory must be released explicitly via
// freeHost() / freeDevice().
class Matrix {
public:
    int m, n, len, nBs;   // rows, columns, element count (m*n), byte count
    float* f;      // data pointer (may point to host or to device memory)
    bool on_device;       // true when `f` is a device pointer

    // Number of elements (m * n).
    __host__ __device__ int size() const { return len; }

    // Allocate pinned host memory for an m x n matrix. When `init` is true
    // the elements are filled with rand() % 10, otherwise zero-filled.
    // NOTE(review): the cudaHostAlloc return code is not checked — on failure
    // `f` is left invalid and the fill below would fault.
    Matrix(int m, int n, bool init = true) 
        : m(m), n(n), len(m * n), nBs(sizeof(float) * len), on_device(false) 
    {
        cudaHostAlloc((void**)&f, nBs, cudaHostAllocMapped);
        //f = new float[len];
        if (init) {
            for (int i = 0; i < len; i++) f[i] = rand() % 10;
        } else {
            memset(f, 0, nBs);
        }
    }

    // Wrap existing memory (no allocation, no copy, no ownership transfer).
    Matrix(int m, int n, float* ptr, bool on_dev) 
        : m(m), n(n), len(m * n), nBs(sizeof(float) * len), f(ptr), on_device(on_dev) {}

    // Copy to device: allocates a device data buffer (optionally copying this
    // matrix's data into it) plus a device-resident Matrix struct describing
    // it, and returns a pointer to the latter.
    // IMPORTANT: the returned pointer lives in DEVICE memory — it can be
    // passed to kernels but must not be dereferenced on the host.
    Matrix* toDevice(bool need_copy = 1) {
        if (on_device) return this;

        float* d_ptr;
        cudaMalloc(&d_ptr, nBs);
        if(need_copy) cudaMemcpy(d_ptr, f, nBs, cudaMemcpyHostToDevice);

        Matrix host_wrapper(m, n, d_ptr, true);  // temporary host-side object sharing d_ptr
        Matrix* d_matrix;
        cudaMalloc((void**)&d_matrix, sizeof(Matrix));
        cudaMemcpy(d_matrix, &host_wrapper, sizeof(Matrix), cudaMemcpyHostToDevice);

        return d_matrix;  // note: this is a pointer into device memory
    }
    // Copy back to host (superseded by the free function toHost() below).
    // Matrix* toHost() {
    //     if (!on_device) return this;


    //     float* h_ptr;
    //     cudaHostAlloc((void**)&h_ptr, nBs, cudaHostAllocMapped);
    //     cudaMemcpy(h_ptr, f, nBs, cudaMemcpyDeviceToHost);

    //     return new Matrix(m, n, h_ptr, false);
    // }
    

    // Flat element access; valid on host or device depending on where `f` points.
    __host__ __device__ float& operator[](int i) { return f[i]; }
    __host__ __device__ const float& operator[](int i) const { return f[i]; }

        // Release a pinned host buffer allocated by the (m, n, init) constructor.
        void freeHost() {
        if (!on_device && f) {
            cudaFreeHost(f);
            f = nullptr;
        }
    }

    // Release a device data buffer (does not free the device-side Matrix struct).
    void freeDevice() {
        if (on_device && f) {
            cudaFree(f);
            f = nullptr;
        }
    }


};


// Asynchronously copy one chunk (`snBs` bytes starting at element `offset`)
// between a host-side Matrix and a device-resident Matrix struct.
//   H2D == true : dst is a device-side Matrix*, src is host-side.
//   H2D == false: src is a device-side Matrix*, dst is host-side.
// NOTE(review): the metadata fetch below is a synchronous cudaMemcpy, so it
// serializes against other streams; callers needing full overlap should cache
// the device data pointer once instead of re-fetching it per chunk.
static void copy_Matrix(Matrix* dst, Matrix* src, int offset, int snBs, cudaStream_t stream, bool H2D = 1) {
    // Non-allocating wrapper to receive the device-side struct's fields.
    // BUGFIX: the previous Matrix(0, 0, false) invoked the allocating
    // constructor and leaked a pinned host allocation on every call.
    Matrix meta(0, 0, nullptr, false);
    if (H2D)
    {
        // Fetch the device-side Matrix's metadata (notably its data pointer f).
        cudaMemcpy(&meta, dst, sizeof(Matrix), cudaMemcpyDeviceToHost);

        // meta.f is device memory: copy the chunk host -> device on `stream`.
        cudaMemcpyAsync(meta.f + offset, src->f + offset, snBs,
                        cudaMemcpyHostToDevice, stream);
    }
    else
    {
        cudaMemcpy(&meta, src, sizeof(Matrix), cudaMemcpyDeviceToHost);

        cudaMemcpyAsync(dst->f + offset, meta.f + offset, snBs,
                        cudaMemcpyDeviceToHost, stream);
    }
}



// Copy a device-resident Matrix (as returned by Matrix::toDevice) back to a
// newly allocated host-side Matrix backed by pinned memory. When `need_copy`
// is false only the buffer is allocated; the data is left uninitialized.
// The caller owns the returned Matrix (freeHost() + delete) — the device
// object is NOT freed here.
Matrix* toHost(Matrix* d_matrix, bool need_copy = 1) {
    // Fetch the Matrix metadata (m, n, nBs and the device data pointer).
    // BUGFIX: use the non-allocating wrapper constructor — the previous
    // Matrix(0, 0, false) leaked a pinned host allocation on every call.
    Matrix meta(0, 0, nullptr, false);
    cudaMemcpy(&meta, d_matrix, sizeof(Matrix), cudaMemcpyDeviceToHost);

    // Copy the data into a fresh pinned host buffer.
    float* h_ptr;
    cudaHostAlloc((void**)&h_ptr, meta.nBs, cudaHostAllocMapped);
    if(need_copy) cudaMemcpy(h_ptr, meta.f, meta.nBs, cudaMemcpyDeviceToHost);

    // Wrap the buffer in a new host-side Matrix.
    return new Matrix(meta.m, meta.n, h_ptr, false);
}

// Element-wise equality of two host-side matrices of the same size.
// On the first mismatch, prints the index and both values, then returns false.
bool is_same(Matrix* A, Matrix* B)
{
    const int count = A->size();
    for (int idx = 0; idx < count; idx++)
    {
        if ((*A)[idx] == (*B)[idx]) continue;
        std::cout<<"at:"<<idx<<" "<< (*A)[idx] << " " << (*B)[idx]<<std::endl;
        return 0;
    }
    return 1;
}

// Element-wise equality of two float arrays of length `len`
// (exact comparison, no tolerance).
bool is_same(float* A, float* B, int len)
{
    int i = 0;
    while (i < len)
    {
        if (A[i] != B[i]) return false;
        ++i;
    }
    return true;
}


// CPU reference implementation of the three-stage pipeline:
//   stage 1: C = A + B          (element-wise)
//   stage 2: D = C * C          (element-wise square)
//   stage 3: results[j] = sum over i of D[i*m + j]  (per-column sum)
// Appends per-stage timings and correctness flags (trivially true here)
// to the output vectors under the name "串行" (serial).
void common(
    Matrix* A, Matrix* B, Matrix* C, Matrix* D, float* results, 
    std::vector<std::string>& names, 
    std::vector<std::vector<float>>& times_stages,
    std::vector<std::vector<bool>>& answer_rights
)
{
    int m = A->m, n = A->n;
    std::vector<float> times;
    std::vector<bool> answer_right;

    // stage 1: C = A + B
    GPUTimer t;
    t.start();
    for(int i = 0; i < A->size(); i++)
        (*C)[i] = (*A)[i] + (*B)[i];
    times.push_back(t.stop());
    answer_right.push_back(1);

    // stage 2: D = C^2
    t.start();
    for(int i = 0; i < A->size(); i++)
        (*D)[i] = (*C)[i] * (*C)[i];
    times.push_back(t.stop());
    answer_right.push_back(1);

    // stage 3: per-column sums.
    t.start();
    for(int j = 0; j < m; j++)
    {
        // BUGFIX: `results` comes from cudaHostAlloc, which does NOT
        // zero-fill; accumulating into uninitialized memory gave garbage.
        results[j] = 0.0f;
        for(int i = 0; i < n; i++)
        {
            results[j] += (*D)[i * m + j];
        }
    }
    times.push_back(t.stop());
    answer_right.push_back(1);

    times_stages.push_back(times);
    answer_rights.push_back(answer_right);
    names.push_back("串行");
}




// Stage 1 kernel: C[idx] = A[idx] + B[idx], one element per thread.
// Launch contract: 2D grid where x covers rows (A->m) and y covers columns
// (A->n); `offset` shifts the flat index so a stream can process a sub-range.
__global__ void GPU_common_stage1(Matrix* A, Matrix* B, Matrix* C, int offset = 0)
{
    const int row = blockDim.x * blockIdx.x + threadIdx.x;
    const int col = blockDim.y * blockIdx.y + threadIdx.y;

    if (row < A->m && col < A->n)
    {
        const int idx = col * A->m + row + offset;
        (*C)[idx] = (*A)[idx] + (*B)[idx];
    }
}
// Stage 2 kernel: D[idx] = C[idx]^2, one element per thread.
// Launch contract: same 2D layout as GPU_common_stage1; `offset` shifts the
// flat index so a stream can process a sub-range.
__global__ void GPU_common_stage2(Matrix* C, Matrix* D, int offset = 0)
{
    int x_idx = blockDim.x * blockIdx.x + threadIdx.x;
    int y_idx = blockDim.y * blockIdx.y + threadIdx.y;
    int index = y_idx * C->m + x_idx + offset;

    if(x_idx < C->m && y_idx < C->n)
    {
        // BUGFIX: the load was previously performed BEFORE the bounds check,
        // so out-of-range tail threads read C out of bounds.
        float C_val = (*C)[index];
        (*D)[index] = C_val * C_val;
    }
}

// Stage 3 kernel: per-column reduction. Block x computes
//   results[x + offset] = sum over rows i of D[i*m + (x + offset)].
// Launch contract: one block per column; dynamic shared memory of
// blockDim.x * sizeof(float) bytes; blockDim.x must be a power of two for
// the tree reduction below to cover every slot.
// NOTE(review): col_idx is not bounds-checked against m — callers must launch
// at most (m - offset) blocks.
__global__ void GPU_common_stage3(Matrix* D, float* results, int offset = 0)
{
    extern __shared__ float sdata[];
    int m = D->m, n = D->n;
    int col_idx = blockIdx.x + offset; // block x reduces column (x + offset)
    int tid = threadIdx.x;

    // Each thread strides down the column, accumulating a private partial sum.
    float sum = 0;
    for(int i = tid; i < n; i += blockDim.x)
    {
        sum += (*D)[i * m + col_idx];
    }

    // `sum` holds each thread's partial result; now reduce all threads'
    // partials into results[col_idx] via shared memory.
    sdata[tid] = sum;
    __syncthreads();

    // Tree reduction: halve the active range each step
    // (assumes blockDim.x is a power of two).
    for(int i = blockDim.x / 2 ; i >= 1; i >>= 1)
    {
        if(tid < i)
            sdata[tid] += sdata[tid + i];
        __syncthreads();
    }
    if(tid == 0)
        results[col_idx] = sdata[0];

}

// GPU pipeline without streams: each stage is a single full-size launch on the
// default stream, with a synchronous copy-back and a correctness check against
// the CPU reference. Appends timings/flags under the name "无流GPU".
void GPU1(
    Matrix* A, Matrix* B, Matrix* C_answer, Matrix* D_answer, float* results_answer, 
    std::vector<std::string>& names, 
    std::vector<std::vector<float>>& times_stages,
    std::vector<std::vector<bool>>& answer_rights
)
{
    int m = A->m, n = A->n;
    std::vector<float> times;
    std::vector<bool> answer_right;
    Matrix* C = new Matrix(m, n, 0);
    Matrix* D = new Matrix(m, n, 0);

    // Releases a device-side Matrix produced by Matrix::toDevice
    // (both the data buffer and the struct itself).
    auto free_device_matrix = [](Matrix* d_mat) {
        Matrix meta(0, 0, nullptr, false);
        cudaMemcpy(&meta, d_mat, sizeof(Matrix), cudaMemcpyDeviceToHost);
        cudaFree(meta.f);
        cudaFree(d_mat);
    };

    dim3 block(1<<5, 1<<5);
    dim3 grid((m + block.x - 1) / block.x, (n + block.y - 1) / block.y);

    // ---- stage 1: C = A + B ----
    GPUTimer t;
    t.start();
    Matrix* d_A = A->toDevice();
    Matrix* d_B = B->toDevice();
    Matrix* d_C = C->toDevice();
    GPU_common_stage1<<<grid, block>>>(d_A, d_B, d_C);
    get_error();                 // BUGFIX: stage1/stage2 launches were unchecked
    cudaDeviceSynchronize();
    Matrix* h_C = toHost(d_C);
    times.push_back(t.stop());
    answer_right.push_back(is_same(h_C, C_answer));
    h_C->freeHost();             // BUGFIX: pinned buffers leaked (Matrix has no destructor)
    delete h_C;
    C->freeHost();
    delete C;

    // ---- stage 2: D = C * C ----
    t.start();
    Matrix* d_D = D->toDevice();
    GPU_common_stage2<<<grid, block>>>(d_C, d_D);
    get_error();
    cudaDeviceSynchronize();
    Matrix* h_D = toHost(d_D);
    times.push_back(t.stop());
    answer_right.push_back(is_same(h_D, D_answer));
    h_D->freeHost();
    delete h_D;
    D->freeHost();
    delete D;

    // ---- stage 3: per-column sums ----
    float* results;
    cudaHostAlloc((void**)&results, sizeof(float) * m, cudaHostAllocMapped);
    float* d_results;
    cudaMalloc((void**)&d_results, sizeof(float) * m);
    t.start();
    // One block per column; 1024 threads and 1024 floats of shared memory.
    GPU_common_stage3<<<m, (1<<10), sizeof(float) * (1<<10)>>>(d_D, d_results);
    cudaDeviceSynchronize();
    get_error();
    cudaMemcpy(results, d_results, sizeof(float) * m, cudaMemcpyDeviceToHost);
    times.push_back(t.stop());
    answer_right.push_back(is_same(results, results_answer, m));
    cudaFreeHost(results);
    cudaFree(d_results);

    // BUGFIX: the device-side matrices were previously leaked.
    free_device_matrix(d_A);
    free_device_matrix(d_B);
    free_device_matrix(d_C);
    free_device_matrix(d_D);

    times_stages.push_back(times);
    answer_rights.push_back(answer_right);
    names.push_back("无流GPU");
}


// GPU pipeline with nStreams CUDA streams: each stream handles a contiguous
// chunk (H2D copy, kernel, D2H copy) so copies and compute can overlap; each
// stage is still synchronized and timed as a whole. Appends timings/flags
// under the name "有流GPU".
// NOTE(review): assumes len, m and (n / block.y) are divisible by nStreams —
// otherwise tail elements are silently skipped.
void GPU2(
    Matrix* A, Matrix* B, Matrix* C_answer, Matrix* D_answer, float* results_answer, 
    std::vector<std::string>& names, 
    std::vector<std::vector<float>>& times_stages,
    std::vector<std::vector<bool>>& answer_rights,
    int nStreams
)
{
    int m = A->m, n = A->n, len = A->size();
    std::vector<float> times;
    std::vector<bool> answer_right;
    Matrix* C = new Matrix(m, n, 0);
    Matrix* D = new Matrix(m, n, 0);

    // Releases a device-side Matrix produced by Matrix::toDevice
    // (both the data buffer and the struct itself).
    auto free_device_matrix = [](Matrix* d_mat) {
        Matrix meta(0, 0, nullptr, false);
        cudaMemcpy(&meta, d_mat, sizeof(Matrix), cudaMemcpyDeviceToHost);
        cudaFree(meta.f);
        cudaFree(d_mat);
    };

    dim3 block(1<<5, 1<<5);
    // Each stream's grid covers 1/nStreams of the columns.
    dim3 grid((m + block.x - 1) / block.x, ((n + block.y - 1) / block.y) / nStreams);

    // ---- stage 1: C = A + B, one chunk per stream ----
    GPUTimer t;
    t.start();
    Matrix* d_A = A->toDevice(0);   // allocate only; data is copied per-chunk below
    Matrix* d_B = B->toDevice(0);
    Matrix* d_C = C->toDevice(0);
    Matrix* h_C = new Matrix(m, n, 0);

    int streamSize = len / nStreams;
    int snBs = sizeof(float) * streamSize;
    cudaStream_t streams[nStreams];

    for(int i = 0; i < nStreams; i++)
    {
        cudaStreamCreate(&streams[i]);
    }
    for(int i = 0; i < nStreams; i++)
    {
        int offset = i * streamSize;
        copy_Matrix(d_A, A, offset, snBs, streams[i]); 
        copy_Matrix(d_B, B, offset, snBs, streams[i]);      
        GPU_common_stage1<<<grid, block, 0, streams[i]>>>(d_A, d_B, d_C, offset);  
        get_error(); 
        copy_Matrix(h_C, d_C, offset, snBs, streams[i], 0);    
    }
    for (int i = 0; i < nStreams; i++) {
        cudaStreamSynchronize(streams[i]);
    }

    times.push_back(t.stop());
    answer_right.push_back(is_same(h_C, C_answer));
    h_C->freeHost();   // BUGFIX: pinned buffers leaked (Matrix has no destructor)
    delete h_C;
    C->freeHost();
    delete C;

    // ---- stage 2: D = C * C, one chunk per stream ----
    t.start();
    Matrix* d_D = D->toDevice(0);
    for(int i = 0; i < nStreams; i++)
    {
        int offset = i * streamSize;
        GPU_common_stage2<<<grid, block, 0, streams[i]>>>(d_C, d_D, offset);  
        get_error(); 
        copy_Matrix(D, d_D, offset, snBs, streams[i], 0);    
    }
    for (int i = 0; i < nStreams; i++) {
        cudaStreamSynchronize(streams[i]);
    }

    times.push_back(t.stop());
    answer_right.push_back(is_same(D, D_answer));
    D->freeHost();
    delete D;

    // ---- stage 3: per-column sums, one column-range per stream ----
    int stage3_block_size = 1 << 10, stage3_streamSize = m / nStreams, stage3_snBs = stage3_streamSize * sizeof(float);
    float* h_results;
    cudaHostAlloc((void**)&h_results, sizeof(float) * m, cudaHostAllocMapped);
    float* d_results;
    cudaMalloc((void**)&d_results, sizeof(float) * m);
    t.start();
    for(int i = 0; i < nStreams; i++)
    {
        int offset = i * stage3_streamSize;
        GPU_common_stage3<<<stage3_streamSize, stage3_block_size, sizeof(float) * stage3_block_size, streams[i]>>>(d_D, d_results, offset);  
        get_error(); 
        cudaMemcpyAsync(h_results + offset, d_results + offset, stage3_snBs, cudaMemcpyDeviceToHost, streams[i]); 
    }
    for (int i = 0; i < nStreams; i++) {
        cudaStreamSynchronize(streams[i]);
    }

    times.push_back(t.stop());
    answer_right.push_back(is_same(h_results, results_answer, m));
    cudaFreeHost(h_results);
    cudaFree(d_results);

    // BUGFIX: streams and device-side matrices were previously leaked.
    for (int i = 0; i < nStreams; i++) {
        cudaStreamDestroy(streams[i]);
    }
    free_device_matrix(d_A);
    free_device_matrix(d_B);
    free_device_matrix(d_C);
    free_device_matrix(d_D);

    times_stages.push_back(times);
    answer_rights.push_back(answer_right);
    names.push_back("有流GPU");
}

// Fully-pipelined GPU variant: stages run on three separate stream pools and
// are chained per-chunk with events, so stage 2 (and stage 3) can start on a
// chunk as soon as the previous stage finishes that chunk. Only the total
// wall time is printed; per-stage timing/verification is not recorded here
// (the output vectors are accepted for interface parity with GPU1/GPU2).
// NOTE(review): assumes len, m and (n / block.y) are divisible by nStreams.
void GPU3(
    Matrix* A, Matrix* B, Matrix* C_answer, Matrix* D_answer, float* results_answer, 
    std::vector<std::string>& names, 
    std::vector<std::vector<float>>& times_stages,
    std::vector<std::vector<bool>>& answer_rights,
    int nStreams
)
{
    int m = A->m, n = A->n, len = A->size();
    Matrix* C = new Matrix(m, n, 0);
    Matrix* D = new Matrix(m, n, 0);

    // Releases a device-side Matrix produced by Matrix::toDevice.
    auto free_device_matrix = [](Matrix* d_mat) {
        Matrix meta(0, 0, nullptr, false);
        cudaMemcpy(&meta, d_mat, sizeof(Matrix), cudaMemcpyDeviceToHost);
        cudaFree(meta.f);
        cudaFree(d_mat);
    };

    dim3 block(1<<5, 1<<5);
    dim3 grid((m + block.x - 1) / block.x, ((n + block.y - 1) / block.y) / nStreams);

    GPUTimer t1;
    t1.start();
    Matrix* d_A = A->toDevice(0);
    Matrix* d_B = B->toDevice(0);
    Matrix* d_C = C->toDevice(0);
    Matrix* h_C = new Matrix(m, n, 0);

    int streamSize = len / nStreams;
    int snBs = sizeof(float) * streamSize;

    // ---- stage 1: C = A + B; record a per-chunk completion event ----
    cudaStream_t streams[nStreams];
    cudaEvent_t stage1_evts[nStreams];
    for(int i = 0; i < nStreams; i++)
    {
        cudaStreamCreate(&streams[i]);
        cudaEventCreate(&stage1_evts[i]);
    }
    for(int i = 0; i < nStreams; i++)
    {
        int offset = i * streamSize;
        copy_Matrix(d_A, A, offset, snBs, streams[i]); 
        copy_Matrix(d_B, B, offset, snBs, streams[i]);      
        GPU_common_stage1<<<grid, block, 0, streams[i]>>>(d_A, d_B, d_C, offset);  
        cudaEventRecord(stage1_evts[i], streams[i]);
        get_error(); 
        copy_Matrix(h_C, d_C, offset, snBs, streams[i], 0);    
    }

    // ---- stage 2: D = C * C; each chunk waits on its stage-1 event ----
    cudaStream_t streams2[nStreams];
    cudaEvent_t stage2_evts[nStreams];
    for(int i = 0; i < nStreams; i++)
    {
        cudaStreamCreate(&streams2[i]);
        cudaEventCreate(&stage2_evts[i]);
    }
    Matrix* d_D = D->toDevice(0);
    for(int i = 0; i < nStreams; i++)
    {
        int offset = i * streamSize;
        cudaStreamWaitEvent(streams2[i], stage1_evts[i], 0);
        GPU_common_stage2<<<grid, block, 0, streams2[i]>>>(d_C, d_D, offset);  
        cudaEventRecord(stage2_evts[i], streams2[i]);
        get_error(); 
        copy_Matrix(D, d_D, offset, snBs, streams2[i], 0);    
    }

    // ---- stage 3: per-column sums; each chunk waits on its stage-2 event ----
    cudaStream_t streams3[nStreams];
    for(int i = 0; i < nStreams; i++)
    {
        cudaStreamCreate(&streams3[i]);
    }
    int stage3_block_size = 1 << 10, stage3_streamSize = m / nStreams, stage3_snBs = stage3_streamSize * sizeof(float);
    float* h_results;
    cudaHostAlloc((void**)&h_results, sizeof(float) * m, cudaHostAllocMapped);
    float* d_results;
    cudaMalloc((void**)&d_results, sizeof(float) * m);
    for(int i = 0; i < nStreams; i++)
    {
        int offset = i * stage3_streamSize;
        cudaStreamWaitEvent(streams3[i], stage2_evts[i], 0);
        // BUGFIX: the kernel and the copy-back were previously launched on
        // streams[i], which bypassed the stage-2 dependency recorded on
        // streams3[i] and raced with stage 2's writes to d_D.
        GPU_common_stage3<<<stage3_streamSize, stage3_block_size, sizeof(float) * stage3_block_size, streams3[i]>>>(d_D, d_results, offset);  
        get_error(); 
        cudaMemcpyAsync(h_results + offset, d_results + offset, stage3_snBs, cudaMemcpyDeviceToHost, streams3[i]); 
    }

    // BUGFIX: drain all streams before stopping the timer and freeing buffers;
    // previously h_results/d_results were freed while async work could still
    // be in flight (use-after-free), and the timer could stop early.
    for (int i = 0; i < nStreams; i++) {
        cudaStreamSynchronize(streams[i]);
        cudaStreamSynchronize(streams2[i]);
        cudaStreamSynchronize(streams3[i]);
    }

    std::cout<<t1.stop()<<std::endl;

    // ---- cleanup (previously streams, events and device buffers leaked) ----
    for (int i = 0; i < nStreams; i++) {
        cudaStreamDestroy(streams[i]);
        cudaStreamDestroy(streams2[i]);
        cudaStreamDestroy(streams3[i]);
        cudaEventDestroy(stage1_evts[i]);
        cudaEventDestroy(stage2_evts[i]);
    }
    free_device_matrix(d_A);
    free_device_matrix(d_B);
    free_device_matrix(d_C);
    free_device_matrix(d_D);
    h_C->freeHost();
    delete h_C;
    C->freeHost();
    delete C;
    D->freeHost();
    delete D;
    cudaFreeHost(h_results);
    cudaFree(d_results);
}

// Benchmark driver: builds random inputs, computes the CPU reference
// (common), then runs the streamed GPU variants and prints the results table.
int main()
{   
    int m = 1 << 12, n = 1 << 10;
    Matrix* A = new Matrix(m, n, 1);
    Matrix* B = new Matrix(m, n, 1);
    Matrix* C = new Matrix(m, n, 0);
    Matrix* D = new Matrix(m, n, 0);
    float* results;
    cudaHostAlloc((void**)&results, sizeof(float) * m, cudaHostAllocMapped);
    // BUGFIX: cudaHostAlloc does not zero-fill, and common() accumulates
    // into results — start from a defined state.
    memset(results, 0, sizeof(float) * m);
    std::vector<std::string> names;
    std::vector<std::vector<float>> times_stages;
    std::vector<std::vector<bool>> answer_rights;

    common(A, B, C, D, results, names, times_stages, answer_rights);
    //GPU1(A, B, C, D, results, names, times_stages, answer_rights);
    GPU2(A, B, C, D, results, names, times_stages, answer_rights, 4);
    GPU3(A, B, C, D, results, names, times_stages, answer_rights, 4);
    print_result(names, times_stages, answer_rights);

    // BUGFIX: release pinned buffers (Matrix has no destructor).
    A->freeHost(); delete A;
    B->freeHost(); delete B;
    C->freeHost(); delete C;
    D->freeHost(); delete D;
    cudaFreeHost(results);
    return 0;
}



















