#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

#define CHANNELS 3

// Unfolds a CHANNELS-channel square image into column ("im2col") form so the
// convolution can be computed as one GEMM.
//
// Layouts (as set up by main / consumed by matrixMultiply):
//   img: HWC, element (y, x, c) at img[(y * img_size + x) * CHANNELS + c]
//   col: row-major [col_height x col_width], where
//        col_height = output_size^2            (one row per output pixel)
//        col_width  = kernel_size^2 * CHANNELS (one column per patch element)
//
// Launch: 1D grid, one thread per col element; excess tail threads exit via
// the bounds guard. Out-of-image patch elements (zero padding) are set to 0.
__global__ void im2col_kernel(float *col, float *img, int img_size, 
                             int kernel_size, int stride, int padding, 
                             int col_height, int col_width) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= col_height * col_width) return;

    // BUG FIX: the row index is idx / col_width. The original divided by
    // (col_width / (kernel_size * kernel_size * CHANNELS)), which is 1 given
    // how main sizes col_width, so c_out equaled idx and indexed far past the
    // number of output pixels, producing a garbage/zeroed col matrix.
    int c_out = idx / col_width;   // which output pixel (row of col)
    int c_in  = idx % col_width;   // which element within the patch (column)

    // Decompose the patch element as (ky, kx, channel), matching HWC input.
    int ky = c_in / (kernel_size * CHANNELS);
    int kx = (c_in % (kernel_size * CHANNELS)) / CHANNELS;
    int kc = c_in % CHANNELS;

    // Output pixel coordinates; output_size matches the host-side formula.
    int output_size = (img_size + 2 * padding - kernel_size) / stride + 1;
    int y_out = c_out / output_size;
    int x_out = c_out % output_size;

    // Source pixel in the (conceptually) zero-padded image.
    int y_img = y_out * stride + ky - padding;
    int x_img = x_out * stride + kx - padding;

    if (y_img >= 0 && y_img < img_size && x_img >= 0 && x_img < img_size) {
        col[idx] = img[(y_img * img_size + x_img) * CHANNELS + kc];
    } else {
        col[idx] = 0.0f;  // padding region
    }
}

// Naive row-major GEMM reference: C (m x n) = A (m x k) * B (k x n).
// Kept deliberately simple for clarity; a real implementation should call a
// tuned GEMM (e.g. cublasSgemm) instead.
void matrixMultiply(float *A, float *B, float *C, int m, int n, int k) {
    for (int row = 0; row < m; ++row) {
        const float *a_row = A + row * k;  // row of A, reused across columns
        float *c_row = C + row * n;
        for (int col = 0; col < n; ++col) {
            float acc = 0.0f;
            for (int p = 0; p < k; ++p) {
                acc += a_row[p] * B[p * n + col];
            }
            c_row[col] = acc;
        }
    }
}

// Abort with file/line context on any CUDA runtime failure.
#define CUDA_CHECK(call)                                                     \
    do {                                                                     \
        cudaError_t err_ = (call);                                           \
        if (err_ != cudaSuccess) {                                           \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,    \
                    cudaGetErrorString(err_));                               \
            exit(1);                                                         \
        }                                                                    \
    } while (0)

// Driver: GPU im2col followed by a host-side reference GEMM, timed with
// CUDA events. Usage: <input_size> <kernel_size> <stride>.
int main(int argc, char **argv) {
    if (argc != 4) {
        printf("Usage: %s <input_size> <kernel_size> <stride>\n", argv[0]);
        return 1;
    }

    int input_size = atoi(argv[1]);
    int kernel_size = atoi(argv[2]);
    int stride = atoi(argv[3]);
    // Validate before any arithmetic: stride == 0 would divide by zero below,
    // and non-positive sizes would produce nonsense allocations.
    if (input_size <= 0 || kernel_size <= 0 || stride <= 0) {
        fprintf(stderr, "input_size, kernel_size and stride must be positive\n");
        return 1;
    }
    int padding = (kernel_size - 1) / 2;  // "same"-style padding
    int output_size = (input_size + 2 * padding - kernel_size) / stride + 1;
    if (output_size <= 0) {
        fprintf(stderr, "kernel_size too large for input_size\n");
        return 1;
    }

    // col is [output_size^2 x kernel_size^2*CHANNELS]; the weight matrix is
    // [kernel_size^2*CHANNELS x CHANNELS] (CHANNELS output filters).
    // Widen to size_t BEFORE multiplying to avoid int overflow on big inputs.
    size_t img_bytes = (size_t)input_size * input_size * CHANNELS * sizeof(float);
    size_t kernel_bytes = (size_t)kernel_size * kernel_size * CHANNELS * CHANNELS * sizeof(float);
    size_t col_height = (size_t)output_size * output_size;
    size_t col_width = (size_t)kernel_size * kernel_size * CHANNELS;
    size_t col_bytes = col_height * col_width * sizeof(float);
    size_t output_bytes = (size_t)output_size * output_size * CHANNELS * sizeof(float);

    float *h_img = (float *)malloc(img_bytes);
    float *h_kernel = (float *)malloc(kernel_bytes);
    float *h_col = (float *)malloc(col_bytes);
    float *h_output = (float *)malloc(output_bytes);
    if (!h_img || !h_kernel || !h_col || !h_output) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }

    // Initialize with random values in [0, 1].
    for (int i = 0; i < input_size * input_size * CHANNELS; i++)
        h_img[i] = (float)rand() / RAND_MAX;
    for (int i = 0; i < kernel_size * kernel_size * CHANNELS * CHANNELS; i++)
        h_kernel[i] = (float)rand() / RAND_MAX;

    // Allocate device memory (every call checked: cudaMalloc fails silently
    // otherwise and the launch below would fault on a bad pointer).
    float *d_img, *d_kernel, *d_col, *d_output;
    CUDA_CHECK(cudaMalloc(&d_img, img_bytes));
    CUDA_CHECK(cudaMalloc(&d_kernel, kernel_bytes));
    CUDA_CHECK(cudaMalloc(&d_col, col_bytes));
    CUDA_CHECK(cudaMalloc(&d_output, output_bytes));

    CUDA_CHECK(cudaMemcpy(d_img, h_img, img_bytes, cudaMemcpyHostToDevice));
    // BUG FIX: the original did cudaMemcpy(h_kernel, d_kernel, ...,
    // cudaMemcpyHostToDevice) — host destination, uninitialized device source,
    // wrong direction flag. The weights go host -> device:
    CUDA_CHECK(cudaMemcpy(d_kernel, h_kernel, kernel_bytes, cudaMemcpyHostToDevice));

    cudaEvent_t start, stop;
    CUDA_CHECK(cudaEventCreate(&start));
    CUDA_CHECK(cudaEventCreate(&stop));
    CUDA_CHECK(cudaEventRecord(start));

    int threads = 256;
    int blocks = (int)((col_height * col_width + threads - 1) / threads);
    im2col_kernel<<<blocks, threads>>>(d_col, d_img, input_size, kernel_size,
                                     stride, padding, (int)col_height, (int)col_width);
    CUDA_CHECK(cudaGetLastError());  // catch bad launch configuration

    // GEMM on the host for simplicity; the blocking D2H copy also synchronizes
    // with the kernel. (Production: run cublasSgemm on d_col/d_kernel instead.)
    CUDA_CHECK(cudaMemcpy(h_col, d_col, col_bytes, cudaMemcpyDeviceToHost));
    matrixMultiply(h_col, h_kernel, h_output, (int)col_height, CHANNELS, (int)col_width);

    CUDA_CHECK(cudaEventRecord(stop));
    CUDA_CHECK(cudaEventSynchronize(stop));

    float milliseconds = 0;
    CUDA_CHECK(cudaEventElapsedTime(&milliseconds, start, stop));

    printf("im2col + GEMM Time: %.2f ms\n", milliseconds);

    // Cleanup (events were leaked in the original).
    CUDA_CHECK(cudaEventDestroy(start));
    CUDA_CHECK(cudaEventDestroy(stop));
    free(h_img);
    free(h_kernel);
    free(h_col);
    free(h_output);
    CUDA_CHECK(cudaFree(d_img));
    CUDA_CHECK(cudaFree(d_kernel));
    CUDA_CHECK(cudaFree(d_col));
    CUDA_CHECK(cudaFree(d_output));

    return 0;
}