#include "gpu.h"
#include "PCFG.h"

#include <cuda_runtime.h>

#include <cstring>
#include <iostream>
#include <vector>

// CUDA核函数：计算每个PT的概率done
// Kernel: compute the final probability of each PT.
//
// Launch layout: 1-D grid of 1-D blocks, one thread per PT (bounds-checked).
// For PT `pt_idx`, its factors occupy the half-open range
// [offsets[pt_idx], offsets[pt_idx + 1]) in `multiplier` / `divisor`.
// result = preterm_prob[pt_idx] * PROD(multiplier[i] / divisor[i]).
//
// Factors whose divisor is not strictly positive are skipped: a zero divisor
// marks an empty/unfilled slot on the host side, and skipping avoids a
// division by zero. (The original also emitted a device printf for each zero
// divisor — removed here, as per-element printf serializes the warp and can
// flood stdout; host code can validate divisors instead.)
__global__ void calculateProbabilityKernel(float* preterm_prob, float* multiplier, float* divisor,
                                          int* offsets, float* results, int num_pts) {
    int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (pt_idx >= num_pts) {
        return;
    }

    // Range of this PT's (multiplier, divisor) pairs.
    int start = offsets[pt_idx];
    int end = offsets[pt_idx + 1];

    // Start from the preterminal probability and fold in each factor.
    float probability = preterm_prob[pt_idx];
    for (int i = start; i < end; i++) {
        if (divisor[i] > 0.0f) {
            probability *= multiplier[i] / divisor[i];
        }
        // divisor <= 0: skip this factor (empty slot / guard against div-by-zero).
    }

    results[pt_idx] = probability;
}

// Compute the probability of every PT in `new_pts` on the GPU and write the
// result back into each PT's `prob` field.
//
// Layout sent to the device (flattened, one slot per entry of curr_indices):
//   preterm_prob[i]          : starting probability of PT i
//   offsets[i] .. offsets[i+1]: slot range of PT i's factors
//   multiplier[s] / divisor[s]: frequency factor for each chosen segment value
//
// Fixes over the original:
//   * multiplier/divisor slots are addressed as offsets[i] + j (j = position
//     within curr_indices). The old code advanced a shared cursor `k` only for
//     segment types 1/2/3, so an unrecognized type left uninitialized memory
//     AND misaligned every subsequent PT's factors. Unmatched slots are now
//     zero-filled, which the kernel skips (divisor == 0).
//   * No leaks on the error path: host buffers are std::vector (RAII) and all
//     device buffers are freed before every return.
//   * Every CUDA API call is error-checked.
void GPU_CalProb(std::vector<PT> &new_pts, model &m){
    if (new_pts.empty()) {
        return;
    }

    const int num_pts = static_cast<int>(new_pts.size());

    // --- Flatten host-side inputs -------------------------------------------
    std::vector<float> preterm_prob(num_pts);
    std::vector<int> offsets(num_pts + 1);

    int total_count = 0;
    for (int i = 0; i < num_pts; ++i) {
        preterm_prob[i] = new_pts[i].prob;
        offsets[i] = total_count;
        total_count += static_cast<int>(new_pts[i].curr_indices.size());
    }
    offsets[num_pts] = total_count;

    // No factors at all: every PT keeps its preterminal probability unchanged.
    if (total_count == 0) {
        return;
    }

    // Zero-initialized so that slots for unrecognized segment types stay
    // divisor == 0 and are skipped by the kernel.
    std::vector<float> multiplier(total_count, 0.0f);
    std::vector<float> divisor(total_count, 0.0f);

    for (int i = 0; i < num_pts; ++i) {
        PT &pt = new_pts[i];
        int j = 0; // position within curr_indices; the j-th index selects a
                   // value for the j-th segment of pt.content
        for (int idx : pt.curr_indices) {
            const int slot = offsets[i] + j;
            const auto &seg = pt.content[j];
            if (seg.type == 1) {
                const auto &letter = m.letters[m.FindLetter(seg)];
                multiplier[slot] = letter.ordered_freqs[idx];
                divisor[slot] = letter.total_freq;
            } else if (seg.type == 2) {
                const auto &digit = m.digits[m.FindDigit(seg)];
                multiplier[slot] = digit.ordered_freqs[idx];
                divisor[slot] = digit.total_freq;
            } else if (seg.type == 3) {
                const auto &symbol = m.symbols[m.FindSymbol(seg)];
                multiplier[slot] = symbol.ordered_freqs[idx];
                divisor[slot] = symbol.total_freq;
            }
            ++j;
        }
    }

    // --- Device buffers ------------------------------------------------------
    float *d_preterm_prob = nullptr, *d_multiplier = nullptr;
    float *d_divisor = nullptr, *d_results = nullptr;
    int *d_offsets = nullptr;

    // Free whatever has been allocated so far (cudaFree(nullptr) is a no-op).
    auto free_device = [&]() {
        cudaFree(d_preterm_prob);
        cudaFree(d_multiplier);
        cudaFree(d_divisor);
        cudaFree(d_offsets);
        cudaFree(d_results);
    };
    // Report a CUDA failure; returns true on success.
    auto check = [](cudaError_t err, const char *what) {
        if (err != cudaSuccess) {
            std::cerr << "CUDA error (" << what << "): "
                      << cudaGetErrorString(err) << std::endl;
            return false;
        }
        return true;
    };

    const size_t pt_bytes = num_pts * sizeof(float);
    const size_t factor_bytes = total_count * sizeof(float);
    const size_t offset_bytes = (num_pts + 1) * sizeof(int);

    if (!check(cudaMalloc((void**)&d_preterm_prob, pt_bytes), "cudaMalloc preterm_prob") ||
        !check(cudaMalloc((void**)&d_multiplier, factor_bytes), "cudaMalloc multiplier") ||
        !check(cudaMalloc((void**)&d_divisor, factor_bytes), "cudaMalloc divisor") ||
        !check(cudaMalloc((void**)&d_offsets, offset_bytes), "cudaMalloc offsets") ||
        !check(cudaMalloc((void**)&d_results, pt_bytes), "cudaMalloc results")) {
        free_device();
        return;
    }

    if (!check(cudaMemcpy(d_preterm_prob, preterm_prob.data(), pt_bytes, cudaMemcpyHostToDevice), "H2D preterm_prob") ||
        !check(cudaMemcpy(d_multiplier, multiplier.data(), factor_bytes, cudaMemcpyHostToDevice), "H2D multiplier") ||
        !check(cudaMemcpy(d_divisor, divisor.data(), factor_bytes, cudaMemcpyHostToDevice), "H2D divisor") ||
        !check(cudaMemcpy(d_offsets, offsets.data(), offset_bytes, cudaMemcpyHostToDevice), "H2D offsets")) {
        free_device();
        return;
    }

    // --- Launch: one thread per PT ------------------------------------------
    const int blockSize = 256;
    const int numBlocks = (num_pts + blockSize - 1) / blockSize; // ceil-div
    calculateProbabilityKernel<<<numBlocks, blockSize>>>(d_preterm_prob, d_multiplier, d_divisor,
                                                         d_offsets, d_results, num_pts);
    if (!check(cudaGetLastError(), "kernel launch")) {
        free_device();
        return;
    }

    // Blocking D2H copy also synchronizes with the kernel.
    std::vector<float> results(num_pts);
    if (!check(cudaMemcpy(results.data(), d_results, pt_bytes, cudaMemcpyDeviceToHost), "D2H results")) {
        free_device();
        return;
    }

    // Write the computed probabilities back into the PT objects.
    for (int i = 0; i < num_pts; ++i) {
        new_pts[i].prob = results[i];
    }

    free_device();
}