#include <iostream>
#include <vector>
#include <string>
#include <fstream>
#include <chrono>
#include <iomanip>
#include <numeric>
#include <algorithm>
#include <cstring>

// ===================================================================================
// Helper for CUDA Error Handling - DO NOT MODIFY BEGIN
// ===================================================================================
// Abort the process with a diagnostic when a CUDA runtime call fails.
// The macro stringifies the call site so the message shows the exact
// expression that failed, plus file:line and the driver's error string.
#define checkCudaErrors(val) check((val), #val, __FILE__, __LINE__)
void check(cudaError_t err, const char* const func, const char* const file, const int line) {
    if (err != cudaSuccess) {
        std::cerr << "CUDA error at " << file << ":" << line << std::endl;
        std::cerr << cudaGetErrorString(err) << " " << func << std::endl;
        exit(1);
    }
}
// ===================================================================================
// Helper for CUDA Error Handling - DO NOT MODIFY END
// ===================================================================================

// ===================================================================================
// Data and Parameter Loading Functions - DO NOT MODIFY BEGIN
// ===================================================================================
// Load an MNIST/FashionMNIST IDX image file into one float vector per image,
// with each pixel normalized to [-1, 1] via (x/255 - 0.5) / 0.5.
// Returns an empty vector if the file cannot be opened.
std::vector<std::vector<float>> read_mnist_images(const std::string& path) {
    std::ifstream file(path, std::ios::binary);
    if (!file) { std::cerr << "Cannot open file: " << path << std::endl; return {}; }
    int magic_number = 0, num_images = 0, num_rows = 0, num_cols = 0;
    // IDX headers are big-endian; byte-swap each 32-bit field to host order.
    file.read((char*)&magic_number, 4); magic_number = __builtin_bswap32(magic_number);
    file.read((char*)&num_images, 4); num_images = __builtin_bswap32(num_images);
    file.read((char*)&num_rows, 4); num_rows = __builtin_bswap32(num_rows);
    file.read((char*)&num_cols, 4); num_cols = __builtin_bswap32(num_cols);
    std::vector<std::vector<float>> images(num_images, std::vector<float>(num_rows * num_cols));
    std::vector<unsigned char> buffer(num_rows * num_cols);
    for (int i = 0; i < num_images; ++i) {
        file.read((char*)buffer.data(), buffer.size());
        for (size_t j = 0; j < buffer.size(); ++j) {
            images[i][j] = (static_cast<float>(buffer[j]) / 255.0f - 0.5f) / 0.5f; // Normalization
        }
    }
    return images;
}

// Load an MNIST/FashionMNIST IDX label file into a vector of class indices.
// Returns an empty vector if the file cannot be opened.
std::vector<int> read_mnist_labels(const std::string& path) {
    std::ifstream file(path, std::ios::binary);
    if (!file) { std::cerr << "Cannot open file: " << path << std::endl; return {}; }
    int magic_number = 0, num_items = 0;
    // IDX headers are big-endian; byte-swap each 32-bit field to host order.
    file.read((char*)&magic_number, 4); magic_number = __builtin_bswap32(magic_number);
    file.read((char*)&num_items, 4); num_items = __builtin_bswap32(num_items);
    std::vector<int> labels(num_items);
    std::vector<unsigned char> buffer(num_items);
    file.read((char*)buffer.data(), num_items);
    for(int i = 0; i < num_items; ++i) { labels[i] = static_cast<int>(buffer[i]); }
    return labels;
}

// Read whitespace-separated float values from a text parameter file
// (one flattened weight/bias tensor per file).
// Returns an empty vector if the file cannot be opened.
std::vector<float> read_param(const std::string& path) {
    std::ifstream file(path);
    if (!file) { std::cerr << "Cannot open parameter file: " << path << std::endl; return {}; }
    std::vector<float> params; float param;
    while (file >> param) { params.push_back(param); }
    return params;
}

// ===================================================================================
// Data and Parameter Loading Functions - DO NOT MODIFY END
// ===================================================================================


// ========================== CUDA kernels & utility functions (editable region) ==========================
#include <cuda_runtime.h>


// Batched 2D convolution, valid padding, stride 1.
// Grid: blockIdx.x = output channel, blockIdx.y = batch index.
// Block: threadIdx.(x, y) = output pixel (x, y); requires blockDim >= (outW, outH).
__global__ void conv2d_kernel_batch(
    const float* __restrict__ input,   // [B, in_channels, H, W]
    const float* __restrict__ weight,  // [out_channels, in_channels, K, K]
    const float* __restrict__ bias,    // [out_channels]
    float* __restrict__ output,        // [B, out_channels, H-K+1, W-K+1]
    int B, int in_channels, int out_channels, int H, int W, int K)
{
    int b  = blockIdx.y;
    int oc = blockIdx.x;
    int y  = threadIdx.y;
    int x  = threadIdx.x;
    int outH = H - K + 1;
    int outW = W - K + 1;
    if (b >= B || oc >= out_channels || y >= outH || x >= outW) return;

    const int in_stride  = in_channels * H * W;
    const int out_stride = out_channels * outH * outW;

    float sum = bias[oc];
    int out_idx = b * out_stride + oc * outH * outW + y * outW + x;

    for (int ic = 0; ic < in_channels; ++ic) {
        int in_base = b * in_stride + ic * H * W + y * W + x;
        int w_base  = oc * (in_channels * K * K) + ic * (K * K);
        // BUG FIX: the taps were hard-coded to 5 (`ky < 5`, `kx < 5`) while K
        // is a runtime parameter; any K != 5 read out-of-bounds input and the
        // wrong weights. Loop to K and keep an unroll hint for the common K=5.
        #pragma unroll 5
        for (int ky = 0; ky < K; ++ky) {
            int ib = in_base + ky * W;
            int wb = w_base  + ky * K;
            #pragma unroll 5
            for (int kx = 0; kx < K; ++kx) {
                sum = fmaf(input[ib + kx], weight[wb + kx], sum);
            }
        }
    }
    output[out_idx] = sum;
}

// Batched 2x2 max pooling, stride 2.
// Grid: blockIdx.x = channel, blockIdx.y = batch index.
// Block: threadIdx.(x, y) = output pixel; requires blockDim >= (W/2, H/2).
__global__ void maxpool2d_kernel_batch(
    const float* __restrict__ input,   // [B, channels, H, W]
    float* __restrict__ output,        // [B, channels, H/2, W/2]
    int B, int channels, int H, int W)
{
    const int ch    = blockIdx.x;
    const int batch = blockIdx.y;
    const int oy    = threadIdx.y;
    const int ox    = threadIdx.x;

    const int outH = H / 2;
    const int outW = W / 2;
    if (batch >= B || ch >= channels || oy >= outH || ox >= outW) return;

    // Top-left corner of this output element's 2x2 input window.
    const float* win = input
        + batch * channels * H * W
        + ch * H * W
        + (2 * oy) * W
        + 2 * ox;

    float best = win[0];
    best = fmaxf(best, win[1]);
    best = fmaxf(best, win[W]);
    best = fmaxf(best, win[W + 1]);

    const int out_idx = batch * channels * outH * outW
                      + ch * outH * outW
                      + oy * outW + ox;
    output[out_idx] = best;
}

// Batched fully connected layer: output[b][o] = bias[o] + dot(input[b], weight[o]).
// Grid: blockIdx.x = batch index, blockIdx.y * blockDim.x + threadIdx.x = output feature.
__global__ void linear_kernel_batch(
    const float* __restrict__ input,   // [B, in_features]
    const float* __restrict__ weight,  // [out_features, in_features]
    const float* __restrict__ bias,    // [out_features]
    float* __restrict__ output,        // [B, out_features]
    int B, int in_features, int out_features)
{
    int b = blockIdx.x;                                // current sample
    int o = blockIdx.y * blockDim.x + threadIdx.x;     // current output feature
    if (b >= B || o >= out_features) return;

    const float* in_ptr = input  + b * in_features;
    const float* w_ptr  = weight + o * in_features;

    float sum = __ldg(&bias[o]);

    // ROBUSTNESS FIX: float4 loads require 16-byte-aligned row pointers.
    // With cudaMalloc'd (256-byte-aligned) bases that holds only when
    // in_features is a multiple of 4; otherwise the previous unconditional
    // reinterpret_cast produced a misaligned-address fault for b > 0 / o > 0.
    // Guard the vectorized path and fall back to the scalar loop when unsafe.
    const bool aligned16 =
        ((reinterpret_cast<size_t>(in_ptr) | reinterpret_cast<size_t>(w_ptr)) & 15) == 0;

    int base = 0;
    if (aligned16) {
        // Vectorized accumulation, 4 floats per load.
        int n4 = in_features >> 2;
        const float4* in4 = reinterpret_cast<const float4*>(in_ptr);
        const float4* w4  = reinterpret_cast<const float4*>(w_ptr);
        for (int i = 0; i < n4; ++i) {
            float4 a  = in4[i];
            float4 b4 = w4[i];
            sum = fmaf(a.x, b4.x, sum);
            sum = fmaf(a.y, b4.y, sum);
            sum = fmaf(a.z, b4.z, sum);
            sum = fmaf(a.w, b4.w, sum);
        }
        base = n4 << 2;
    }

    // Scalar tail (also the full path when the float4 cast would be unsafe).
    for (int i = base; i < in_features; ++i) {
        sum = fmaf(__ldg(&in_ptr[i]), __ldg(&w_ptr[i]), sum);
    }

    output[b * out_features + o] = sum;
}

// Batched argmax over the score dimension: one block per sample, a single
// thread scans all `size` scores serially (size == 10 here, so this is cheap).
// Ties resolve to the lowest index, matching a first-strict-maximum scan.
__global__ void argmax_batch_kernel(const float* input, int B, int size, int* output) {
    int sample = blockIdx.x;
    if (sample >= B) return;

    const float* scores = input + sample * size;
    int   best_idx = 0;
    float best_val = scores[0];
    for (int i = 1; i < size; ++i) {
        float v = scores[i];
        if (v > best_val) {
            best_val = v;
            best_idx = i;
        }
    }
    output[sample] = best_idx;
}
// Element-wise accumulate: dst[i] += src[i] for i in [0, size).
__global__ void add_kernel(const float* src, float* dst, int size) {
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= size) return;
    dst[idx] += src[idx];
}
// Element-wise in-place division by an integer count (used for the T-step average).
__global__ void div_kernel(float* data, int T, int size) {
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= size) return;
    data[idx] = data[idx] / T;
}

// Stateful IF (integrate-and-fire) neuron with hard reset:
//   v += x;  spike = (v >= threshold);  if spike then v = 0.
// `v` persists across time steps; `spikes` receives the 0/1 firing output.
// `x` and `spikes` may alias: each thread reads x[idx] before writing spikes[idx].
__global__ void ifnode_state_kernel_1d(
    const float* x,    // current step's linear/conv output for this layer
    float* v,          // per-neuron membrane potential (accumulated over time)
    float* spikes,     // this step's firing output (0.0f or 1.0f)
    float threshold,
    int size)
{
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= size) return;

    float membrane = v[idx] + x[idx];
    float fired = 0.0f;
    if (membrane >= threshold) {
        fired = 1.0f;
        membrane = 0.0f;   // hard reset: clear the potential after a spike
    }
    v[idx] = membrane;
    spikes[idx] = fired;
}
// ========================== SNN inference main pipeline (editable region) ==========================

// Batched SNN (LeNet-style spiking CNN) inference over all images.
// For each batch, membrane potentials are reset and the network is simulated
// for T time steps: conv/pool layers feed IF neurons whose potentials persist
// across steps; FC3 outputs are accumulated over time, averaged, and argmax'd
// into per-sample class predictions.
// All d_* arguments are device pointers to already-uploaded parameters.
std::vector<int> scnn_inference(
    const std::vector<std::vector<float>>& images,
    float* d_conv1_w, float* d_conv1_b, float* d_conv2_w, float* d_conv2_b,
    float* d_fc1_w,   float* d_fc1_b,   float* d_fc2_w,   float* d_fc2_b,
    float* d_fc3_w,   float* d_fc3_b
)
{
    std::vector<int> predictions;
    const int num_images = static_cast<int>(images.size());
    predictions.reserve(num_images);
    const int T = 8;           // simulation time steps per image
    const int batch_size = 64; // tune to available device memory

    // Network architecture (must match the loaded parameter shapes).
    const int in_channels = 1, img_h = 28, img_w = 28;
    const int conv1_out = 6, conv1_k = 5;
    const int conv2_out = 16, conv2_k = 5;

    // Spatial geometry: 5x5 valid convolution followed by 2x2 max pooling.
    const int conv1_out_h = img_h - conv1_k + 1; // 24
    const int conv1_out_w = img_w - conv1_k + 1; // 24
    const int pool1_h = conv1_out_h / 2;         // 12
    const int pool1_w = conv1_out_w / 2;         // 12
    const int conv2_out_h = pool1_h - conv2_k + 1; // 8
    const int conv2_out_w = pool1_w - conv2_k + 1; // 8
    const int pool2_h = conv2_out_h / 2;         // 4
    const int pool2_w = conv2_out_w / 2;         // 4

    const int fc1_in = conv2_out * pool2_h * pool2_w; // 16*4*4 = 256
    const int fc1_out = 120, fc2_out = 84, fc3_out = 10;

    // Device activation buffers (all carry a batch dimension).
    float *d_image = nullptr, *d_conv1_out = nullptr, *d_if1_out = nullptr, *d_pool1_out = nullptr;
    float *d_conv2_out = nullptr, *d_pool2_out = nullptr;
    float *d_fc1_out = nullptr, *d_fc2_out = nullptr, *d_fc3_out = nullptr;
    int *d_pred = nullptr;

    const int THREADS = 256;

    // Membrane-potential state for the four IF layers (persists across t).
    float *d_v_conv1 = nullptr, *d_v_conv2 = nullptr, *d_v_fc1 = nullptr, *d_v_fc2 = nullptr;

    checkCudaErrors(cudaMalloc(&d_image,      batch_size*in_channels*img_h*img_w*sizeof(float)));
    checkCudaErrors(cudaMalloc(&d_conv1_out,  batch_size*conv1_out*conv1_out_h*conv1_out_w*sizeof(float)));
    checkCudaErrors(cudaMalloc(&d_if1_out,    batch_size*conv1_out*conv1_out_h*conv1_out_w*sizeof(float)));
    checkCudaErrors(cudaMalloc(&d_pool1_out,  batch_size*conv1_out*pool1_h*pool1_w*sizeof(float)));
    checkCudaErrors(cudaMalloc(&d_conv2_out,  batch_size*conv2_out*conv2_out_h*conv2_out_w*sizeof(float)));
    checkCudaErrors(cudaMalloc(&d_pool2_out,  batch_size*conv2_out*pool2_h*pool2_w*sizeof(float)));
    checkCudaErrors(cudaMalloc(&d_fc1_out,    batch_size*fc1_out*sizeof(float)));
    checkCudaErrors(cudaMalloc(&d_fc2_out,    batch_size*fc2_out*sizeof(float)));
    checkCudaErrors(cudaMalloc(&d_fc3_out,    batch_size*fc3_out*sizeof(float)));
    checkCudaErrors(cudaMalloc(&d_pred,       batch_size*sizeof(int)));

    checkCudaErrors(cudaMalloc(&d_v_conv1, batch_size*conv1_out*conv1_out_h*conv1_out_w*sizeof(float)));
    checkCudaErrors(cudaMalloc(&d_v_conv2, batch_size*conv2_out*conv2_out_h*conv2_out_w*sizeof(float)));
    checkCudaErrors(cudaMalloc(&d_v_fc1,   batch_size*fc1_out*sizeof(float)));
    checkCudaErrors(cudaMalloc(&d_v_fc2,   batch_size*fc2_out*sizeof(float)));

    // FC3 accumulator, summed over the T time steps of each batch.
    float* d_accum = nullptr;
    checkCudaErrors(cudaMalloc(&d_accum, batch_size*fc3_out*sizeof(float)));

    // Host staging buffer, allocated once and reused for every batch
    // (was previously re-allocated inside the batch loop).
    std::vector<float> h_batch(static_cast<size_t>(batch_size)*in_channels*img_h*img_w);

    for (int b0 = 0; b0 < num_images; b0 += batch_size) {
        int curB = std::min(batch_size, num_images - b0);

        // Pack the current batch into the staging buffer and upload it.
        for (int b = 0; b < curB; ++b) {
            std::memcpy(&h_batch[b*in_channels*img_h*img_w],
                        images[b0 + b].data(),
                        in_channels*img_h*img_w*sizeof(float));
        }
        checkCudaErrors(cudaMemcpy(d_image, h_batch.data(), curB*in_channels*img_h*img_w*sizeof(float), cudaMemcpyHostToDevice));

        // Reset membrane potentials and the FC3 accumulator for this batch.
        checkCudaErrors(cudaMemset(d_v_conv1, 0, curB*conv1_out*conv1_out_h*conv1_out_w*sizeof(float)));
        checkCudaErrors(cudaMemset(d_v_conv2, 0, curB*conv2_out*conv2_out_h*conv2_out_w*sizeof(float)));
        checkCudaErrors(cudaMemset(d_v_fc1,   0, curB*fc1_out*sizeof(float)));
        checkCudaErrors(cudaMemset(d_v_fc2,   0, curB*fc2_out*sizeof(float)));
        checkCudaErrors(cudaMemset(d_accum,   0, curB*fc3_out*sizeof(float)));

        // Conv1 depends only on the static input image, so it runs once per
        // batch outside the time loop; IF1 re-integrates its output each step.
        // Block dims are (width, height) to match threadIdx.x = x, threadIdx.y = y.
        conv2d_kernel_batch<<<dim3(conv1_out, curB), dim3(conv1_out_w, conv1_out_h)>>>(
            d_image, d_conv1_w, d_conv1_b, d_conv1_out,
            curB, in_channels, conv1_out, img_h, img_w, conv1_k);
        checkCudaErrors(cudaGetLastError());

        for (int t = 0; t < T; ++t) {
            // IF1 (batched)
            {
                int size1 = curB * conv1_out * conv1_out_h * conv1_out_w;
                int blocks = (size1 + THREADS - 1) / THREADS;
                ifnode_state_kernel_1d<<<blocks, THREADS>>>(d_conv1_out, d_v_conv1, d_if1_out, 1.0f, size1);
            }
            // Pool1 (batched)
            maxpool2d_kernel_batch<<<dim3(conv1_out, curB), dim3(pool1_w, pool1_h)>>>(
                d_if1_out, d_pool1_out, curB, conv1_out, conv1_out_h, conv1_out_w);

            // Conv2 (batched)
            conv2d_kernel_batch<<<dim3(conv2_out, curB), dim3(conv2_out_w, conv2_out_h)>>>(
                d_pool1_out, d_conv2_w, d_conv2_b, d_conv2_out,
                curB, conv1_out, conv2_out, pool1_h, pool1_w, conv2_k);

            // IF2 (batched, in place: per-thread read-before-write makes the
            // x == spikes aliasing safe inside ifnode_state_kernel_1d)
            {
                int size2 = curB * conv2_out * conv2_out_h * conv2_out_w;
                int blocks = (size2 + THREADS - 1) / THREADS;
                ifnode_state_kernel_1d<<<blocks, THREADS>>>(d_conv2_out, d_v_conv2, d_conv2_out, 1.0f, size2);
            }
            // Pool2 (batched)
            maxpool2d_kernel_batch<<<dim3(conv2_out, curB), dim3(pool2_w, pool2_h)>>>(
                d_conv2_out, d_pool2_out, curB, conv2_out, conv2_out_h, conv2_out_w);

            // FC1 -> IF3 (batched, in place on d_fc1_out)
            {
                dim3 grid_fc1(curB, (fc1_out + THREADS - 1) / THREADS);
                linear_kernel_batch<<<grid_fc1, THREADS>>>(d_pool2_out, d_fc1_w, d_fc1_b, d_fc1_out, curB, fc1_in, fc1_out);
                int size_fc1 = curB * fc1_out;
                int blocks = (size_fc1 + THREADS - 1) / THREADS;
                ifnode_state_kernel_1d<<<blocks, THREADS>>>(d_fc1_out, d_v_fc1, d_fc1_out, 1.0f, size_fc1);
            }
            // FC2 -> IF4 (batched, in place on d_fc2_out)
            {
                dim3 grid_fc2(curB, (fc2_out + THREADS - 1) / THREADS);
                linear_kernel_batch<<<grid_fc2, THREADS>>>(d_fc1_out, d_fc2_w, d_fc2_b, d_fc2_out, curB, fc1_out, fc2_out);
                int size_fc2 = curB * fc2_out;
                int blocks = (size_fc2 + THREADS - 1) / THREADS;
                ifnode_state_kernel_1d<<<blocks, THREADS>>>(d_fc2_out, d_v_fc2, d_fc2_out, 1.0f, size_fc2);
            }
            // FC3, accumulated over time steps
            {
                dim3 grid_fc3(curB, (fc3_out + THREADS - 1) / THREADS);
                linear_kernel_batch<<<grid_fc3, THREADS>>>(d_fc2_out, d_fc3_w, d_fc3_b, d_fc3_out, curB, fc2_out, fc3_out);
                int size_fc3 = curB * fc3_out;
                add_kernel<<<(size_fc3 + THREADS - 1) / THREADS, THREADS>>>(d_fc3_out, d_accum, size_fc3);
            }
            // Launch-configuration errors would otherwise go unnoticed until
            // the next synchronizing call; surface them per time step.
            checkCudaErrors(cudaGetLastError());
        }

        // Average over T steps, then per-sample argmax.
        {
            int size_fc3 = curB * fc3_out;
            div_kernel<<<(size_fc3 + THREADS - 1) / THREADS, THREADS>>>(d_accum, T, size_fc3);
            argmax_batch_kernel<<<curB, 1>>>(d_accum, curB, fc3_out, d_pred);
            checkCudaErrors(cudaGetLastError());
        }

        // Copy this batch's predictions back (blocking cudaMemcpy synchronizes).
        std::vector<int> h_pred(curB);
        checkCudaErrors(cudaMemcpy(h_pred.data(), d_pred, curB*sizeof(int), cudaMemcpyDeviceToHost));
        predictions.insert(predictions.end(), h_pred.begin(), h_pred.end());
    }

    // Release all device memory.
    checkCudaErrors(cudaFree(d_image));
    checkCudaErrors(cudaFree(d_conv1_out));
    checkCudaErrors(cudaFree(d_if1_out));
    checkCudaErrors(cudaFree(d_pool1_out));
    checkCudaErrors(cudaFree(d_conv2_out));
    checkCudaErrors(cudaFree(d_pool2_out));
    checkCudaErrors(cudaFree(d_fc1_out));
    checkCudaErrors(cudaFree(d_fc2_out));
    checkCudaErrors(cudaFree(d_fc3_out));
    checkCudaErrors(cudaFree(d_pred));
    checkCudaErrors(cudaFree(d_accum));
    checkCudaErrors(cudaFree(d_v_conv1));
    checkCudaErrors(cudaFree(d_v_conv2));
    checkCudaErrors(cudaFree(d_v_fc1));
    checkCudaErrors(cudaFree(d_v_fc2));

    return predictions;
}

// ===================================================================================
// Main Function -  DO NOT MODIFY BEGIN
// ===================================================================================
int main(int argc, char* argv[]) {
    if (argc < 2) {
        std::cerr << "Usage: " << argv[0] << " <path_to_model_and_data_dir>" << std::endl;
        return 1;
    }
	std::string dir = argv[1];
	
    // Load test data
    auto images = read_mnist_images(dir + "/../../.." + "/data/FashionMNIST/raw/t10k-images-idx3-ubyte");
    auto labels = read_mnist_labels(dir + "/../../.." + "/data/FashionMNIST/raw/t10k-labels-idx1-ubyte");
    if (images.empty() || labels.empty()) return 1;

    // Load model parameters to host memory
    auto conv1_w = read_param(dir + "/conv1.weight.txt");
    auto conv1_b = read_param(dir + "/conv1.bias.txt");
    auto conv2_w = read_param(dir + "/conv2.weight.txt");
    auto conv2_b = read_param(dir + "/conv2.bias.txt");
    auto fc1_w = read_param(dir + "/fc1.weight.txt");
    auto fc1_b = read_param(dir + "/fc1.bias.txt");
    auto fc2_w = read_param(dir + "/fc2.weight.txt");
    auto fc2_b = read_param(dir + "/fc2.bias.txt");
    auto fc3_w = read_param(dir + "/fc3.weight.txt");
    auto fc3_b = read_param(dir + "/fc3.bias.txt");
    
    // --- 1. Allocate all necessary GPU memory ---
    // Device pointers for parameters
    float *d_conv1_w, *d_conv1_b, *d_conv2_w, *d_conv2_b;
    float *d_fc1_w, *d_fc1_b, *d_fc2_w, *d_fc2_b, *d_fc3_w, *d_fc3_b;

    // Allocate parameters
    checkCudaErrors(cudaMalloc(&d_conv1_w, conv1_w.size() * sizeof(float)));
    checkCudaErrors(cudaMalloc(&d_conv1_b, conv1_b.size() * sizeof(float)));
    checkCudaErrors(cudaMalloc(&d_conv2_w, conv2_w.size() * sizeof(float)));
    checkCudaErrors(cudaMalloc(&d_conv2_b, conv2_b.size() * sizeof(float)));
    checkCudaErrors(cudaMalloc(&d_fc1_w,   fc1_w.size() * sizeof(float)));
    checkCudaErrors(cudaMalloc(&d_fc1_b,   fc1_b.size() * sizeof(float)));
    checkCudaErrors(cudaMalloc(&d_fc2_w,   fc2_w.size() * sizeof(float)));
    checkCudaErrors(cudaMalloc(&d_fc2_b,   fc2_b.size() * sizeof(float)));
    checkCudaErrors(cudaMalloc(&d_fc3_w,   fc3_w.size() * sizeof(float)));
    checkCudaErrors(cudaMalloc(&d_fc3_b,   fc3_b.size() * sizeof(float)));

    // --- 2. Copy constant parameters from host to device ---
    checkCudaErrors(cudaMemcpy(d_conv1_w, conv1_w.data(), conv1_w.size() * sizeof(float), cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(d_conv1_b, conv1_b.data(), conv1_b.size() * sizeof(float), cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(d_conv2_w, conv2_w.data(), conv2_w.size() * sizeof(float), cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(d_conv2_b, conv2_b.data(), conv2_b.size() * sizeof(float), cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(d_fc1_w, fc1_w.data(), fc1_w.size() * sizeof(float), cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(d_fc1_b, fc1_b.data(), fc1_b.size() * sizeof(float), cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(d_fc2_w, fc2_w.data(), fc2_w.size() * sizeof(float), cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(d_fc2_b, fc2_b.data(), fc2_b.size() * sizeof(float), cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(d_fc3_w, fc3_w.data(), fc3_w.size() * sizeof(float), cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(d_fc3_b, fc3_b.data(), fc3_b.size() * sizeof(float), cudaMemcpyHostToDevice));

    // Start timer
    auto start = std::chrono::high_resolution_clock::now();
    
// ===================================================================================
// Main Function -  DO NOT MODIFY END
// ===================================================================================

    // --- 3. Perform inference ---
    // Pass device pointers to the inference function.
    // NOTE: the timer above is already running, so everything inside
    // scnn_inference (uploads, kernels, downloads) counts toward the
    // reported time.
    std::vector<int> predictions = scnn_inference(images,
        d_conv1_w, d_conv1_b, d_conv2_w, d_conv2_b,
        d_fc1_w, d_fc1_b, d_fc2_w, d_fc2_b, d_fc3_w, d_fc3_b
        // YOU CAN ADD MORE PARAMETERS HERE!!!
        );
    
// ===================================================================================
// Main Function -  DO NOT MODIFY BEGIN
// ===================================================================================

    // Synchronize to ensure all GPU work is done before stopping the timer
    checkCudaErrors(cudaDeviceSynchronize());
    
    // Stop timer
    auto end = std::chrono::high_resolution_clock::now();
    std::chrono::duration<double> diff = end - start;
    
    // --- 4. Free all allocated GPU memory ---
    checkCudaErrors(cudaFree(d_conv1_w));
    checkCudaErrors(cudaFree(d_conv1_b));
    checkCudaErrors(cudaFree(d_conv2_w));
    checkCudaErrors(cudaFree(d_conv2_b));
    checkCudaErrors(cudaFree(d_fc1_w));
    checkCudaErrors(cudaFree(d_fc1_b));
    checkCudaErrors(cudaFree(d_fc2_w));
    checkCudaErrors(cudaFree(d_fc2_b));
    checkCudaErrors(cudaFree(d_fc3_w));
    checkCudaErrors(cudaFree(d_fc3_b));
    
    // Calculate accuracy
    int correct_predictions = 0;
    for (size_t i = 0; i < labels.size(); ++i) {
        if (predictions[i] == labels[i]) {
            correct_predictions++;
        }
    }
    double accuracy = static_cast<double>(correct_predictions) / labels.size();
    
    // Output result in the required format
    std::cout << std::fixed << std::setprecision(4) << diff.count() << ":" << accuracy << std::endl;
    
    return 0;
}
// ===================================================================================
// Main Function -  DO NOT MODIFY END
// ===================================================================================