// Device kernel: 3x3 "valid" convolution (stride 1, no padding) over a
// 1 x IC x IH x IW input with OC x IC x 3 x 3 weights, nominally producing a
// 1 x OC x (IH-2) x (IW-2) output. Work is partitioned across threads by
// output channel. Written for the Enflame "tops" programming model: DTE
// contexts stream tiles between global (L3) memory and per-thread private
// (L1) buffers, and vfloat intrinsics do 32-wide vector arithmetic.
//
// NOTE(review): several points look wrong and should be confirmed against
// the tops API and a CPU reference:
//   - dte_out / event_out / private_out are set up, but l1_buffer_out is
//     never copied back to global_out (dev_out) — as written, the kernel
//     appears to produce no observable output.
//   - the IC loop (index j) overwrites l1_buffer_out on every iteration
//     instead of accumulating across input channels, so at best only the
//     last channel's contribution would survive.
//   - each weight tap is loaded with a full-vector vload from a single
//     scalar weight address, so every lane sees a *different* weight value
//     (and w9 reads past the current channel's 9 weights); a scalar
//     broadcast/splat of each tap looks intended — verify.
//   - the inner vector loop advances by 32 with no tail handling, so it
//     assumes (IW - 2) is a multiple of the vector width; otherwise the
//     last iteration reads/writes out of range.
__attribute__((global, cooperative)) void kernel_conv2d(int IC, int IH, int IW, int OC,
                                                        float *__restrict dev_inp, float *__restrict dev_weight, float *__restrict dev_out)
{
    // __shared__ float convkernel_l2_buffer[5000];

    // Total number of threads in the launch
    int thread_num = GetThreadNum();
    // Global index of the current thread
    int thread_idx = GetThreadIdx();

    // Thread's local (within-block) linear index
    int local_idx = threadIdx.z * (blockDim.x * blockDim.y) + threadIdx.y * blockDim.x + threadIdx.x;

    // Declare and initialize the DTE contexts (async data-transfer engines)
    tops_dte_ctx_t dte_convkernel_inp; // DTE for streaming in convolution weights
    tops_dte_ctx_t dte_inp;
    tops_dte_ctx_t dte_out; // DTE for input/output data movement

    dte_convkernel_inp.init();
    dte_inp.init();
    dte_out.init();

    // Completion events for the async transfers
    tops::event event_convkernel_inp;
    tops::event event_inp;
    tops::event event_out;

    // Shapes of the L3 (global) buffers
    int32_t global_weight_shape[4] = {OC, IC, 3, 3};    // weight tensor shape
    int32_t global_inp_shape[4] = {1, IC, IH, IW};      // input tensor shape
    int32_t global_out_shape[4] = {1, OC, (IH - 2), (IW - 2)}; // output tensor shape (3x3 valid conv)

    // Shapes of the per-thread L1 tiles (one output channel / one input channel at a time)
    int32_t private_weight_shape[4] = {1, IC, 3, 3};    // weights for a single output channel
    int32_t private_inp_shape[4] = {1, 1, IH, IW};      // one input channel plane
    int32_t private_out_shape[4] = {1, 1, (IH - 2), (IW - 2)}; // one output channel plane

    // Bind the L3 (global) memory views
    tops::mdspan global_weight(tops::Global, dev_weight, global_weight_shape);
    tops::mdspan global_inp(tops::Global, dev_inp, global_inp_shape);
    tops::mdspan global_out(tops::Global, dev_out, global_out_shape);

    // Allocate the L1 tiles
    // NOTE(review): these are runtime-sized (VLA-style) stack arrays — L1
    // capacity limits on IC/IH/IW are implicit; confirm the sizes fit.
    __valigned__ float convkernel_l1_buffer[1 * IC * 3 * 3];
    __valigned__ float l1_buffer_inp[1 * 1 * IH * IW];
    __valigned__ float l1_buffer_out[1 * 1 * (IH - 2) * (IW - 2)];

    tops::mdspan private_weight(tops::Private, convkernel_l1_buffer, private_weight_shape);
    tops::mdspan private_inp(tops::Private, l1_buffer_inp, private_inp_shape);
    tops::mdspan private_out(tops::Private, l1_buffer_out, private_out_shape);

    // Distribute the OC output channels over the available threads
    int oc_loops = OC;
    int nr_working_threads = (oc_loops <= thread_num) ? oc_loops : thread_num;

    // Retire threads that have no output channel assigned
    if (thread_idx >= nr_working_threads)
    {
        return;
    }

    // Number of output channels this thread handles: base share plus one
    // extra if this thread's index falls within the remainder
    int base_oc_loops = oc_loops / nr_working_threads;
    int thread_oc_loops = base_oc_loops + ((base_oc_loops * nr_working_threads + thread_idx) < oc_loops ? 1 : 0);

    for(auto i = 0; i < thread_oc_loops; i++)
    {
        // Global index of the output channel handled this iteration
        // (round-robin assignment across working threads)
        int global_oc_idx = i * nr_working_threads + thread_idx;

        // Stream this output channel's IC x 3 x 3 weight slice into L1
        event_convkernel_inp = tops::slice_async(dte_convkernel_inp, private_weight, global_weight, {global_oc_idx, 0, 0, 0});
        tops::wait(event_convkernel_inp);

        for(auto j = 0; j < IC; j++)
        {
            int global_ic_idx = j;
            
            // Stream one IH x IW input channel plane into L1
            event_inp = tops::slice_async(dte_inp, private_inp, global_inp, {0, global_ic_idx, 0, 0});
            tops::wait(event_inp);

            // NOTE(review): the stores below overwrite l1_buffer_out each
            // j iteration — no accumulation across input channels.
            for (auto k = 0; k < IH - 2; k++)
            {
                int global_ih_idx = k;

                // Vectorized sweep along the output row; assumes
                // (IW - 2) % 32 == 0 (no tail iteration)
                for (auto g = 0; g < IW - 2; g += 32)
                {
                    int global_iw_idx = g;
                    int local_iw_index = global_ih_idx * IW + global_iw_idx;

                    // Load the 3x3 input neighborhood for 32 adjacent
                    // output positions: three taps from each of rows
                    // k, k+1, k+2
                    const auto &v1 = tops::vload<vfloat>(l1_buffer_inp + local_iw_index);
                    const auto &v2 = tops::vload<vfloat>(l1_buffer_inp + local_iw_index + 1);
                    const auto &v3 = tops::vload<vfloat>(l1_buffer_inp + local_iw_index + 2);

                    const auto &v4 = tops::vload<vfloat>(l1_buffer_inp + local_iw_index + IW);
                    const auto &v5 = tops::vload<vfloat>(l1_buffer_inp + local_iw_index + IW + 1);
                    const auto &v6 = tops::vload<vfloat>(l1_buffer_inp + local_iw_index + IW + 2);

                    const auto &v7 = tops::vload<vfloat>(l1_buffer_inp + local_iw_index + 2 * IW);
                    const auto &v8 = tops::vload<vfloat>(l1_buffer_inp + local_iw_index + 2 * IW + 1);
                    const auto &v9 = tops::vload<vfloat>(l1_buffer_inp + local_iw_index + 2 * IW + 2);

                    // NOTE(review): each vload below reads a full vector
                    // starting at one scalar weight, so lanes get different
                    // weights — a scalar broadcast per tap looks intended.
                    const auto &w1 = tops::vload<vfloat>(convkernel_l1_buffer + 9 * global_ic_idx + 0);
                    const auto &w2 = tops::vload<vfloat>(convkernel_l1_buffer + 9 * global_ic_idx + 1);
                    const auto &w3 = tops::vload<vfloat>(convkernel_l1_buffer + 9 * global_ic_idx + 2);
                    const auto &w4 = tops::vload<vfloat>(convkernel_l1_buffer + 9 * global_ic_idx + 3);
                    const auto &w5 = tops::vload<vfloat>(convkernel_l1_buffer + 9 * global_ic_idx + 4);
                    const auto &w6 = tops::vload<vfloat>(convkernel_l1_buffer + 9 * global_ic_idx + 5);
                    const auto &w7 = tops::vload<vfloat>(convkernel_l1_buffer + 9 * global_ic_idx + 6);
                    const auto &w8 = tops::vload<vfloat>(convkernel_l1_buffer + 9 * global_ic_idx + 7);
                    const auto &w9 = tops::vload<vfloat>(convkernel_l1_buffer + 9 * global_ic_idx + 8);

                    // Elementwise tap products
                    const auto &vw1 = tops::vmul<vfloat>(v1, w1);
                    const auto &vw2 = tops::vmul<vfloat>(v2, w2);
                    const auto &vw3 = tops::vmul<vfloat>(v3, w3);
                    const auto &vw4 = tops::vmul<vfloat>(v4, w4);
                    const auto &vw5 = tops::vmul<vfloat>(v5, w5);
                    const auto &vw6 = tops::vmul<vfloat>(v6, w6);
                    const auto &vw7 = tops::vmul<vfloat>(v7, w7);
                    const auto &vw8 = tops::vmul<vfloat>(v8, w8);
                    const auto &vw9 = tops::vmul<vfloat>(v9, w9);

                    // Pairwise reduction tree summing the 9 tap products
                    const auto &vsum1 = tops::vadd<vfloat>(vw1, vw2);
                    const auto &vsum2 = tops::vadd<vfloat>(vw3, vw4);
                    const auto &vsum3 = tops::vadd<vfloat>(vw5, vw6);
                    const auto &vsum4 = tops::vadd<vfloat>(vw7, vw8);
                    
                    const auto &vsum5 = tops::vadd<vfloat>(vsum1, vsum2);
                    const auto &vsum6 = tops::vadd<vfloat>(vsum3, vsum4);

                    const auto &vsum7 = tops::vadd<vfloat>(vsum5, vsum6);

                    const auto &vsum = tops::vadd<vfloat>(vsum7, vw9);

                    // Store 32 output values for this row segment
                    tops::vstore(vsum, l1_buffer_out + global_ih_idx * (IW - 2) + global_iw_idx);
                }
            }
        }

    }

    // NOTE(review): l1_buffer_out is never copied back to global_out here —
    // dte_out/event_out are unused, so dev_out is never written.

    // Tear down the DTE contexts
    dte_convkernel_inp.destroy();
    dte_inp.destroy();
    dte_out.destroy();
}

void GCU_Conv2D(int IC, int IH, int IW, int OC, float *__restrict dev_inp, float *__restrict dev_weight, float *__restrict dev_out)
{

    static const size_t blocks = 2;
    static const size_t threads = 12;
    // 调用kernel
    kernel_conv2d<<<blocks, threads>>>(IC, IH, IW, OC, dev_inp, dev_weight, dev_out);
}
