#ifndef LOSSFUNCTIONGPU_H
#define LOSSFUNCTIONGPU_H

#include <cuda_runtime.h>
#include <device_launch_parameters.h>

#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

#include "loss_function.h"

#define THREADS_PER_BLOCK 256
#define BLOCKS_PER_SM 8  // Assumes each SM can optimally run 8 blocks


// Element-wise gradient of binary cross-entropy w.r.t. the prediction:
//   d(BCE)/d(yHat) = -y/yHat + (1-y)/(1-yHat)
// Expected launch: 1D grid, one thread per element; threads past N exit.
// outputs, expected, gradients are device pointers of length N.
__global__ void computeBCEGradient(const float *outputs, const float *expected, float *gradients, int N) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // NOTE: 1e-9f is below float machine epsilon (~1.19e-7), so
    // 1.0f - 1e-9f rounds back to exactly 1.0f and the upper clamp would be
    // a no-op, allowing division by zero in (1-y)/(1-yHat). 1e-7f is the
    // smallest round value for which 1.0f - epsilon < 1.0f in float.
    const float epsilon = 1e-7f;
    if (idx < N) {
        float y = expected[idx];
        // Clamp yHat into (epsilon, 1 - epsilon) so both divisions are safe.
        float yHat = fminf(fmaxf(outputs[idx], epsilon), 1.0f - epsilon);
        gradients[idx] = -y / yHat + (1.0f - y) / (1.0f - yHat);
    }
}

// Element-wise binary cross-entropy:
//   lossComponents[i] = -y*log(yHat) - (1-y)*log(1-yHat)
// The mean reduction is performed by the caller on the host.
// Expected launch: 1D grid, one thread per element; threads past N exit.
// outputs, expected, lossComponents are device pointers of length N.
__global__ void computeBCELoss(const float *outputs, const float *expected, float *lossComponents, int N) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // NOTE: 1e-9f is below float machine epsilon (~1.19e-7), so
    // 1.0f - 1e-9f rounds back to exactly 1.0f and the upper clamp would be
    // a no-op, allowing log(0) = -inf below. 1e-7f keeps the clamp effective.
    const float epsilon = 1e-7f;
    if (idx < N) {
        float y = expected[idx];
        // Clamp yHat into (epsilon, 1 - epsilon) so both logs are finite.
        float yHat = fminf(fmaxf(outputs[idx], epsilon), 1.0f - epsilon);
        // logf: single-precision overload; the plain log() here was the
        // double version, forcing a silent float->double->float round trip.
        lossComponents[idx] = -y * logf(yHat) - (1.0f - y) * logf(1.0f - yHat);
    }
}

// GPU implementation of binary cross-entropy loss and its gradient.
// The 2D host vectors are flattened into contiguous buffers, copied to the
// device once per call, processed by the element-wise kernels above, and the
// results are reduced / reshaped back on the host.
// outputs and expectedOutputs must have identical shapes (not validated here).
class BinaryCrossEntropyLossCUDA : public ILossFunction {
public:
    // Mean binary cross-entropy over all elements of outputs vs expectedOutputs.
    // Returns 0 for empty input. Throws std::runtime_error on CUDA API failure.
    float loss(const std::vector<std::vector<float>>& outputs, const std::vector<std::vector<float>>& expectedOutputs) const override {
        int totalElements = 0;
        for (const auto &vec : outputs) totalElements += static_cast<int>(vec.size());
        // Guard: avoids a 0/0 mean and a 0-block kernel launch (which would
        // fail with cudaErrorInvalidConfiguration).
        if (totalElements == 0) return 0.0f;

        // Flatten to contiguous host buffers so each array is one H2D copy.
        std::vector<float> linearOutputs, linearExpected;
        linearOutputs.reserve(totalElements);
        linearExpected.reserve(totalElements);
        for (size_t i = 0; i < outputs.size(); ++i) {
            linearOutputs.insert(linearOutputs.end(), outputs[i].begin(), outputs[i].end());
            linearExpected.insert(linearExpected.end(), expectedOutputs[i].begin(), expectedOutputs[i].end());
        }

        float *d_outputs = nullptr, *d_expected = nullptr, *d_lossComponents = nullptr;
        checkCuda(cudaMalloc(&d_outputs, totalElements * sizeof(float)), "cudaMalloc d_outputs");
        checkCuda(cudaMalloc(&d_expected, totalElements * sizeof(float)), "cudaMalloc d_expected");
        checkCuda(cudaMalloc(&d_lossComponents, totalElements * sizeof(float)), "cudaMalloc d_lossComponents");

        checkCuda(cudaMemcpy(d_outputs, linearOutputs.data(), totalElements * sizeof(float), cudaMemcpyHostToDevice), "copy outputs H2D");
        checkCuda(cudaMemcpy(d_expected, linearExpected.data(), totalElements * sizeof(float), cudaMemcpyHostToDevice), "copy expected H2D");

        int blocks = (totalElements + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;  // ceil-div
        computeBCELoss<<<blocks, THREADS_PER_BLOCK>>>(d_outputs, d_expected, d_lossComponents, totalElements);
        checkCuda(cudaGetLastError(), "computeBCELoss launch");  // launch-config errors

        // The blocking D2H memcpy also synchronizes with the kernel above,
        // and surfaces any asynchronous execution error.
        std::vector<float> lossComponents(totalElements);
        checkCuda(cudaMemcpy(lossComponents.data(), d_lossComponents, totalElements * sizeof(float), cudaMemcpyDeviceToHost), "copy loss D2H");

        cudaFree(d_outputs);
        cudaFree(d_expected);
        cudaFree(d_lossComponents);

        float totalLoss = 0.0f;
        for (float component : lossComponents) {
            totalLoss += component;
        }
        return totalLoss / totalElements;
    }

    // Element-wise gradient dL/dyHat, returned in the same 2D shape as outputs.
    // Throws std::runtime_error on CUDA API failure.
    std::vector<std::vector<float>> gradient(const std::vector<std::vector<float>>& outputs,
                                              const std::vector<std::vector<float>>& expectedOutputs) const override {
        int totalElements = 0;
        for (const auto &vec : outputs) totalElements += static_cast<int>(vec.size());

        // Result mirrors the input shape; rows start empty and are filled below.
        std::vector<std::vector<float>> gradients(outputs.size());
        // Guard: nothing to compute, and a 0-block launch would be invalid.
        if (totalElements == 0) return gradients;

        // Flatten to contiguous host buffers so each array is one H2D copy.
        std::vector<float> linearOutputs, linearExpected;
        linearOutputs.reserve(totalElements);
        linearExpected.reserve(totalElements);
        for (size_t i = 0; i < outputs.size(); ++i) {
            linearOutputs.insert(linearOutputs.end(), outputs[i].begin(), outputs[i].end());
            linearExpected.insert(linearExpected.end(), expectedOutputs[i].begin(), expectedOutputs[i].end());
        }

        float *d_outputs = nullptr, *d_expected = nullptr, *d_gradients = nullptr;
        checkCuda(cudaMalloc(&d_outputs, totalElements * sizeof(float)), "cudaMalloc d_outputs");
        checkCuda(cudaMalloc(&d_expected, totalElements * sizeof(float)), "cudaMalloc d_expected");
        checkCuda(cudaMalloc(&d_gradients, totalElements * sizeof(float)), "cudaMalloc d_gradients");

        checkCuda(cudaMemcpy(d_outputs, linearOutputs.data(), totalElements * sizeof(float), cudaMemcpyHostToDevice), "copy outputs H2D");
        checkCuda(cudaMemcpy(d_expected, linearExpected.data(), totalElements * sizeof(float), cudaMemcpyHostToDevice), "copy expected H2D");

        int blocks = (totalElements + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;  // ceil-div
        computeBCEGradient<<<blocks, THREADS_PER_BLOCK>>>(d_outputs, d_expected, d_gradients, totalElements);
        checkCuda(cudaGetLastError(), "computeBCEGradient launch");  // launch-config errors

        // Blocking D2H memcpy synchronizes with the kernel above.
        std::vector<float> linearGradients(totalElements);
        checkCuda(cudaMemcpy(linearGradients.data(), d_gradients, totalElements * sizeof(float), cudaMemcpyDeviceToHost), "copy gradients D2H");

        cudaFree(d_outputs);
        cudaFree(d_expected);
        cudaFree(d_gradients);

        // Unflatten back into the original 2D layout.
        int k = 0;
        for (size_t i = 0; i < outputs.size(); ++i) {
            gradients[i].resize(outputs[i].size());
            for (size_t j = 0; j < outputs[i].size(); ++j) {
                gradients[i][j] = linearGradients[k++];
            }
        }

        return gradients;
    }

private:
    // Throws std::runtime_error if a CUDA API call failed; `what` names the
    // failing operation. Device buffers allocated before a failure are not
    // freed here — on a GPU error the process is expected to terminate.
    static void checkCuda(cudaError_t err, const char *what) {
        if (err != cudaSuccess) {
            throw std::runtime_error(std::string(what) + ": " + cudaGetErrorString(err));
        }
    }
};





#endif //LOSSFUNCTIONGPU_H