#include <cudnn.h>
#include <cuda_runtime.h> // cudaMalloc / cudaFree / cudaError_t

#include <iostream>  // for error output
#include <stdexcept> // for throwing exceptions
#include <string>    // std::string in error messages
#include <vector>

class ConvolutionLayer {
private:
    cudnnHandle_t handle;
    cudnnTensorDescriptor_t inputDesc;
    cudnnTensorDescriptor_t outputDesc;
    cudnnFilterDescriptor_t filterDesc;
    cudnnConvolutionDescriptor_t convDesc;
    void* workspace;
    size_t workspaceSize;
    int inChannels, outChannels, kernelSize, stride, padding;
    cudnnConvolutionFwdAlgo_t algo;

public:
    ConvolutionLayer(int inChannels, int outChannels, int kernelSize, int stride, int padding) :
        inChannels(inChannels), outChannels(outChannels), kernelSize(kernelSize), stride(stride), padding(padding), workspace(nullptr), workspaceSize(0) {

        // 1. 创建 cuDNN handle
        cudnnStatus_t status = cudnnCreate(&handle);
        if (status != CUDNN_STATUS_SUCCESS) {
            throw std::runtime_error("cuDNN handle creation failed: " + std::to_string(status));
        }

        // 2. 创建描述符
        cudnnCreateTensorDescriptor(&inputDesc);
        cudnnCreateTensorDescriptor(&outputDesc);
        cudnnCreateFilterDescriptor(&filterDesc);
        cudnnCreateConvolutionDescriptor(&convDesc);

        // 选择最佳卷积算法
        status = cudnnGetConvolutionForwardAlgorithm(handle, inputDesc, filterDesc, convDesc, outputDesc, CUDNN_CONVOLUTION_FWD_PREFERENCE_FASTEST, 0, &algo);
        if (status != CUDNN_STATUS_SUCCESS) {
            throw std::runtime_error("cuDNN get forward algorithm failed: " + std::to_string(status));
        }
    }

    ~ConvolutionLayer() {
        // 释放资源
        if (workspace) {
            cudaFree(workspace);
        }
        cudnnDestroyConvolutionDescriptor(convDesc);
        cudnnDestroyFilterDescriptor(filterDesc);
        cudnnDestroyTensorDescriptor(outputDesc);
        cudnnDestroyTensorDescriptor(inputDesc);
        cudnnDestroy(handle);
    }

    void forward(const float* input, float* output, int batchSize, int height, int width, const float* weights) {
        // 3. 设置张量和卷积描述符
        cudnnSetTensor4dDescriptor(inputDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batchSize, inChannels, height, width);

        int outputHeight = (height + 2 * padding - kernelSize) / stride + 1;
        int outputWidth = (width + 2 * padding - kernelSize) / stride + 1;
        cudnnSetTensor4dDescriptor(outputDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batchSize, outChannels, outputHeight, outputWidth);

        cudnnSetFilter4dDescriptor(filterDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, outChannels, inChannels, kernelSize, kernelSize);

        cudnnSetConvolution2dDescriptor(convDesc, padding, padding, stride, stride, 1, 1, CUDNN_CONVOLUTION, CUDNN_DATA_FLOAT);

        // 4. 获取 workspace 大小并分配内存
        cudnnStatus_t status = cudnnGetConvolutionForwardWorkspaceSize(handle, inputDesc, filterDesc, convDesc, outputDesc, algo, &workspaceSize);
        if (status != CUDNN_STATUS_SUCCESS) {
            throw std::runtime_error("cuDNN get workspace size failed: " + std::to_string(status));
        }

        if (workspaceSize > 0) {
          cudaMalloc(&workspace, workspaceSize);
           if (!workspace){
                throw std::runtime_error("cudaMalloc failed");
           }
        }
        
        // 5. 执行卷积
        float alpha = 1.0f;
        float beta = 0.0f;
        status = cudnnConvolutionForward(handle, &alpha, inputDesc, input, filterDesc, weights, convDesc, algo, workspace, workspaceSize, &beta, outputDesc, output);
        if (status != CUDNN_STATUS_SUCCESS) {
            throw std::runtime_error("cuDNN convolution forward failed: " + std::to_string(status));
        }
    }
};