// 3x3 "valid" convolution (stride 1, no padding) on a GCU device.
//   out[oc][h][w] = sum over ic of conv3x3(inp[0][ic], weight[oc][ic]),
//   with output spatial size (IH-2) x (IW-2)  — see the inner compute loop.
// Output channels are distributed round-robin across the working threads
// (thread t handles oc = t, t + nr_working_threads, ...).
// Data is staged L3 (global) -> L2 (shared, weights only) -> L1 (private)
// via DTE (data transfer engine) contexts before the scalar compute loop.
// NOTE(review): proprietary tops DTE API — semantics of config_slice /
// connect / trigger / set_*_offset are inferred from usage; confirm against
// the TopsCC documentation before modifying the pipeline ordering.
__attribute__((global, cooperative)) void kernel_conv2d(int IC, int IH, int IW, int OC,
                                                        float *__restrict dev_inp, float *__restrict dev_weight, float *__restrict dev_out)
{
    // L2 staging buffer for per-thread weight slices.
    // NOTE(review): fixed capacity of 5000 floats, but it is indexed as
    // local_idx * (IC*3*3) below — overflows if (threads per block)*IC*9 > 5000
    // (e.g. 12 threads => IC > 46). Consider sizing/validating against IC.
    __shared__ float convkernel_l2_buffer[5000];

    // Total number of threads in the launch.
    int thread_num = GetThreadNum();
    // Global index of the current thread.
    int thread_idx = GetThreadIdx();

    // Flat thread index within the block (used to carve up the L2 buffer).
    int local_idx = threadIdx.z * (blockDim.x * blockDim.y) + threadIdx.y * blockDim.x + threadIdx.x;

    // Declare and initialize the DTE contexts.
    tops_dte_ctx_t dte_convkernel_inp; // weights: L2 -> L1 (private)
    tops_dte_ctx_t dte_inp;            // input feature map: L3 -> L1
    tops_dte_ctx_t dte_out;            // output: L1 -> L3

    __private_dte__ tops_dte_ctx_t dte_l2_convkernel; // weights: L3 -> L2 (shared)

    // Completion events for the asynchronous DTE transfers.
    tops::event event_convkernel_inp;
    tops::event event_inp;
    tops::event event_out;

    tops::event event_l2_convkernel;

    dte_convkernel_inp.init();
    dte_inp.init();
    dte_out.init();

    dte_l2_convkernel.init();
    // L3 (global memory) tensor shapes.
    int32_t global_weight_shape[4] = {OC, IC, 3, 3};    // weights: [OC][IC][3][3]
    int32_t global_inp_shape[4] = {1, IC, IH, IW};      // input:   [1][IC][IH][IW]
    int32_t global_out_shape[3] = {OC, IH - 2, IW - 2}; // output:  [OC][IH-2][IW-2] (valid conv)

    // L2 (shared memory) shapes: one OC-slice of weights per thread.
    int32_t shared_weight_shape[4] = {1, IC, 3, 3};

    // L1 (private memory) shapes.
    int32_t private_weight_shape[4] = {1, IC, 3, 3};    // all IC planes of one filter
    int32_t private_inp_shape[4] = {1, 1, IH, IW};      // one input channel at a time
    int32_t private_out_shape[3] = {1, IH - 2, IW - 2}; // one output channel

    // Bind L3 (global) memory views.
    tops::mdspan global_weight(tops::Global, dev_weight, global_weight_shape);
    tops::mdspan global_inp(tops::Global, dev_inp, global_inp_shape);
    tops::mdspan global_out(tops::Global, dev_out, global_out_shape);

    // Bind the L2 (shared) view: each thread owns a disjoint IC*3*3 region.
    tops::mdspan shared_weight(tops::Shared, convkernel_l2_buffer + local_idx * (IC * 3 * 3), shared_weight_shape);

    // Allocate L1 (private) buffers.
    // NOTE(review): variable-length arrays sized by runtime IC/IH/IW — relies
    // on the TopsCC compiler supporting VLAs and on these fitting in L1.
    __valigned__ float convkernel_l1_buffer[1 * IC * 3 * 3];
    __valigned__ float inp_l1_buffer[1 * 1 * IH * IW];
    __valigned__ float out_l1_buffer[1 * (IH - 2) * (IW - 2)];

    // Configure all DTE transfers up front; offsets are updated per-iteration.
    dte_convkernel_inp.config_slice(tops::mdspan(tops::Private, convkernel_l1_buffer, private_weight_shape), shared_weight, {0, 0, 0, 0});
    dte_inp.config_slice(tops::mdspan(tops::Private, inp_l1_buffer, private_inp_shape), global_inp, {0, 0, 0, 0});
    dte_out.config_deslice(global_out, tops::mdspan(tops::Private, out_l1_buffer, private_out_shape), {thread_idx, 0, 0});

    dte_l2_convkernel.config_slice(shared_weight, global_weight, {thread_idx, 0, 0, 0});

    // Distribute the OC loop over the available threads.
    auto oc_loops = OC;
    auto nr_working_threads = oc_loops <= thread_num ? oc_loops : thread_num;

    // Retire surplus threads (more threads launched than output channels).
    if (thread_idx >= nr_working_threads)
    {
        return;
    }

    // Number of output channels this thread processes: base share plus one
    // extra for the first (oc_loops mod nr_working_threads) threads.
    auto base_oc_loops = oc_loops / nr_working_threads;
    auto thread_oc_loops = base_oc_loops + ((base_oc_loops * nr_working_threads + thread_idx) < oc_loops ? 1 : 0);

    // Chain the DTEs: presumably the L3->L2 weight transfer feeds the
    // L2->L1 transfer directly — TODO confirm connect() semantics.
    dte_l2_convkernel.connect(dte_convkernel_inp);

    // Convolution compute loop: one output channel per outer iteration.
    for (auto i = 0; i < thread_oc_loops; i++)
    {

        // Zero the L1 output accumulator for this output channel.
        // NOTE(review): this loop variable shadows the outer `i`; legal, but
        // easy to misread — consider renaming in a future change.
        for (auto i = 0; i < (IH - 2) * (IW - 2); i++)
        {
            out_l1_buffer[i] = 0;
        }
        // Global output-channel index handled this iteration (round-robin).
        auto global_oc_index = i * nr_working_threads + thread_idx;

        // Point the L3->L2 weight transfer at this oc's filter.
        dte_l2_convkernel.set_src_offset(0, global_oc_index);

        // Kick off the chained weight transfers (L3->L2, then L2->L1).
        event_l2_convkernel = dte_l2_convkernel.trigger();
        event_convkernel_inp = dte_convkernel_inp.trigger();

        // Wait for the filter weights to land in L1 before computing.
        tops::wait(event_convkernel_inp);

        // Accumulate over input channels, one channel staged in L1 at a time.
        for (auto j = 0; j < IC; j++)
        {

            auto global_ic_index = j;

            // Select input channel j on dimension 1 of the global input.
            dte_inp.set_src_offset(1, global_ic_index);

            event_inp = dte_inp.trigger();

            tops::wait(event_inp);

            // Scalar 3x3 valid convolution over the staged channel.
            for (auto k = 0; k < IH - 2; k++)
            {

                auto global_ih_index = k;

                for (auto g = 0; g < IW - 2; g++)
                {

                    auto global_iw_index = g;

                    // 3x3 input window anchored at (k, g).
                    auto x1 = inp_l1_buffer[global_ih_index * IW + global_iw_index];
                    auto x2 = inp_l1_buffer[global_ih_index * IW + global_iw_index + 1];
                    auto x3 = inp_l1_buffer[global_ih_index * IW + global_iw_index + 2];
                    auto x4 = inp_l1_buffer[(global_ih_index + 1) * IW + global_iw_index];
                    auto x5 = inp_l1_buffer[(global_ih_index + 1) * IW + global_iw_index + 1];
                    auto x6 = inp_l1_buffer[(global_ih_index + 1) * IW + global_iw_index + 2];
                    auto x7 = inp_l1_buffer[(global_ih_index + 2) * IW + global_iw_index];
                    auto x8 = inp_l1_buffer[(global_ih_index + 2) * IW + global_iw_index + 1];
                    auto x9 = inp_l1_buffer[(global_ih_index + 2) * IW + global_iw_index + 2];

                    // 3x3 filter taps for input channel j (9 floats per channel).
                    auto w1 = convkernel_l1_buffer[global_ic_index * 9 + 0];
                    auto w2 = convkernel_l1_buffer[global_ic_index * 9 + 1];
                    auto w3 = convkernel_l1_buffer[global_ic_index * 9 + 2];
                    auto w4 = convkernel_l1_buffer[global_ic_index * 9 + 3];
                    auto w5 = convkernel_l1_buffer[global_ic_index * 9 + 4];
                    auto w6 = convkernel_l1_buffer[global_ic_index * 9 + 5];
                    auto w7 = convkernel_l1_buffer[global_ic_index * 9 + 6];
                    auto w8 = convkernel_l1_buffer[global_ic_index * 9 + 7];
                    auto w9 = convkernel_l1_buffer[global_ic_index * 9 + 8];

                    out_l1_buffer[global_ih_index * (IW - 2) + global_iw_index] += w1 * x1 + w2 * x2 + w3 * x3 + w4 * x4 + w5 * x5 + w6 * x6 + w7 * x7 + w8 * x8 + w9 * x9;
                }
            }
        }

        // Write the finished output channel back to L3 at slot global_oc_index.
        dte_out.set_dst_offset(0, global_oc_index);
        event_out = dte_out.trigger();
        tops::wait(event_out);
    }

    // Tear down the DTE contexts.
    dte_convkernel_inp.destroy(); // weight input
    dte_inp.destroy();            // data input
    dte_out.destroy();            // data output
    dte_l2_convkernel.destroy();  // weight L2 stage
}

// Host-side launcher for kernel_conv2d: 3x3 valid convolution on the GCU.
// Launch shape is fixed at 2 blocks x 12 threads; the kernel itself retires
// any threads beyond the number of output channels (OC).
//   dev_inp    : device input,  [1][IC][IH][IW]
//   dev_weight : device filters, [OC][IC][3][3]
//   dev_out    : device output, [OC][IH-2][IW-2]
void GCU_Conv2D(int IC, int IH, int IW, int OC, float *__restrict dev_inp, float *__restrict dev_weight, float *__restrict dev_out)
{
    constexpr size_t kBlocks = 2;
    constexpr size_t kThreads = 12;

    // Launch the device kernel.
    kernel_conv2d<<<kBlocks, kThreads>>>(IC, IH, IW, OC, dev_inp, dev_weight, dev_out);
}
