#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <cudnn.h>

#define CHANNELS 3

/*
 * Aborts the process if a cuDNN call did not return CUDNN_STATUS_SUCCESS.
 * The human-readable reason from cudnnGetErrorString() is printed first.
 * Diagnostics go to stderr (not stdout) so they are not lost when the
 * benchmark's timing output is redirected or piped.
 */
void checkCudnnError(cudnnStatus_t status) {
    if (status != CUDNN_STATUS_SUCCESS) {
        fprintf(stderr, "cuDNN error: %s\n", cudnnGetErrorString(status));
        exit(1);
    }
}

int main(int argc, char **argv) {
    if (argc != 4) {
        printf("Usage: %s <input_size> <kernel_size> <stride>\n", argv[0]);
        return 1;
    }

    int input_size = atoi(argv[1]);
    int kernel_size = atoi(argv[2]);
    int stride = atoi(argv[3]);
    int padding = (kernel_size - 1) / 2;
    int output_size = (input_size + 2 * padding - kernel_size) / stride + 1;

    // Create cuDNN handle
    cudnnHandle_t cudnn;
    checkCudnnError(cudnnCreate(&cudnn));

    // Create tensor descriptors
    cudnnTensorDescriptor_t input_desc, output_desc;
    cudnnFilterDescriptor_t kernel_desc;
    cudnnConvolutionDescriptor_t conv_desc;

    checkCudnnError(cudnnCreateTensorDescriptor(&input_desc));
    checkCudnnError(cudnnCreateTensorDescriptor(&output_desc));
    checkCudnnError(cudnnCreateFilterDescriptor(&kernel_desc));
    checkCudnnError(cudnnCreateConvolutionDescriptor(&conv_desc));

    // Set tensor descriptors
    int n = 1; // batch size
    int c = CHANNELS;
    int h = input_size;
    int w = input_size;
    checkCudnnError(cudnnSetTensor4dDescriptor(input_desc, CUDNN_TENSOR_NCHW, 
                                              CUDNN_DATA_FLOAT, n, c, h, w));

    h = output_size;
    w = output_size;
    checkCudnnError(cudnnSetTensor4dDescriptor(output_desc, CUDNN_TENSOR_NCHW, 
                                              CUDNN_DATA_FLOAT, n, c, h, w));

    // Set kernel descriptor
    checkCudnnError(cudnnSetFilter4dDescriptor(kernel_desc, CUDNN_DATA_FLOAT, 
                                             CUDNN_TENSOR_NCHW, c, c, 
                                             kernel_size, kernel_size));

    // Set convolution descriptor
    checkCudnnError(cudnnSetConvolution2dDescriptor(conv_desc, padding, padding, 
                                                  stride, stride, 1, 1, 
                                                  CUDNN_CROSS_CORRELATION, 
                                                  CUDNN_DATA_FLOAT));

    // Allocate and initialize host memory
    size_t input_bytes = n * c * input_size * input_size * sizeof(float);
    size_t kernel_bytes = c * c * kernel_size * kernel_size * sizeof(float);
    size_t output_bytes = n * c * output_size * output_size * sizeof(float);

    float *h_input = (float *)malloc(input_bytes);
    float *h_kernel = (float *)malloc(kernel_bytes);
    float *h_output = (float *)malloc(output_bytes);

    // Initialize with random values
    for (int i = 0; i < n * c * input_size * input_size; i++)
        h_input[i] = (float)rand() / RAND_MAX;
    for (int i = 0; i < c * c * kernel_size * kernel_size; i++)
        h_kernel[i] = (float)rand() / RAND_MAX;

    // Allocate device memory
    float *d_input, *d_kernel, *d_output;
    cudaMalloc(&d_input, input_bytes);
    cudaMalloc(&d_kernel, kernel_bytes);
    cudaMalloc(&d_output, output_bytes);

    // Copy data to device
    cudaMemcpy(d_input, h_input, input_bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_kernel, h_kernel, kernel_bytes, cudaMemcpyHostToDevice);

    // Find convolution algorithm
    cudnnConvolutionFwdAlgo_t algo;
    int returnedAlgoCount;
    checkCudnnError(cudnnGetConvolutionForwardAlgorithm(cudnn, input_desc, 
                                                      kernel_desc, conv_desc, 
                                                      output_desc, 
                                                      CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 
                                                      0, &algo));

    // Get workspace size
    size_t workspace_size = 0;
    checkCudnnError(cudnnGetConvolutionForwardWorkspaceSize(cudnn, input_desc, 
                                                          kernel_desc, conv_desc, 
                                                          output_desc, algo, 
                                                          &workspace_size));

    // Allocate workspace
    void *d_workspace = NULL;
    if (workspace_size > 0) {
        cudaMalloc(&d_workspace, workspace_size);
    }

    // Perform convolution
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);

    float alpha = 1.0f, beta = 0.0f;
    checkCudnnError(cudnnConvolutionForward(cudnn, &alpha, input_desc, d_input, 
                                          kernel_desc, d_kernel, conv_desc, 
                                          algo, d_workspace, workspace_size, 
                                          &beta, output_desc, d_output));

    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);

    printf("cuDNN Convolution Time: %.2f ms\n", milliseconds);

    // Copy result back to host
    cudaMemcpy(h_output, d_output, output_bytes, cudaMemcpyDeviceToHost);

    // Cleanup
    free(h_input);
    free(h_kernel);
    free(h_output);
    cudaFree(d_input);
    cudaFree(d_kernel);
    cudaFree(d_output);
    if (d_workspace) cudaFree(d_workspace);

    cudnnDestroyTensorDescriptor(input_desc);
    cudnnDestroyTensorDescriptor(output_desc);
    cudnnDestroyFilterDescriptor(kernel_desc);
    cudnnDestroyConvolutionDescriptor(conv_desc);
    cudnnDestroy(cudnn);

    return 0;
}