import hidet
from hidet.ir.type import tensor_type
from hidet.lang import tensor, grid, as_tensor_pointer
from hidet.lang.layout import row_major, column_major
from hidet.lang.cpu import (
    avx_f32x8_store,
    avx_f32x8_fmadd,
    avx_f32x8_load,
    avx_f32x8_broadcast,
)
from hidet.lang.cpu import (
    avx_f32x8_store_aligned,
    avx_f32x8_load_aligned,
    avx_f32x8_setzero,
)
from hidet.lang.cpu import avx_f32x8_unpacklo, avx_f32x8_unpackhi
from hidet.lang.cpu import avx_f32x8_shuffle, avx_f32x8_cast_f32x4
from hidet.lang.cpu import avx_f32x8_insert_f32x4, avx_f32x8_permute2f32x4
from hidet.lang.cpu import cpu_atomic_load_n, cpu_atomic_add_fetch, cpu_atomic_fetch_xor

from typing import List, Union
from hidet.ir.dtypes import float32, int32
from hidet.ir.expr import cast
from hidet.ir.module import IRModule
from hidet.ir.compute import TensorNode
from hidet.ir.stmt import DeclareScope
from hidet.ir.task import Task
from hidet.ir.compute import compute, reduce
from hidet.graph.ops.utils import input_like, broadcast_shape, can_mutually_broadcast
from hidet.ir.library import tune
from hidet.graph.operator import Operator, Tensor
from hidet.graph.ops.utils import broadcast_indices
from hidet.lang import attrs

from hidet.utils.benchmark import benchmark_func

hidet.option.cache_dir("./outs/cache")

# Micro-kernel register tile: each micro-kernel call computes an MR x NR
# (6 x 16) block of C; 16 columns = two AVX f32x8 vectors per row.
MR, NR = 6, 16
# BLIS-style cache blocking parameters: MC rows of A, NC columns of B,
# and KC of the shared (k) dimension are processed per block.
MC = 2016
NC = 384
KC = 560
# Thread decomposition (loop5, loop3, macro, loop1) ways; their product is
# the total thread count used by the kernel.
ways = (1, 4, 2, 1)
# Fixed (square) problem size for this script.
m_size = 2048
n_size = 2048
k_size = 2048

tune.check(
    MC % MR == NC % NR == 0, "Tile size must divide the corresponding block size"
)

with hidet.script_module() as module:
    # Get the number of threads...
    loop5_nways, loop3_nways, macro_nways, loop1_nways = ways
    nthreads = loop5_nways * loop3_nways * macro_nways * loop1_nways

    # Sense-reversal barrier state guarding the packing of A.
    # NOTE(review): the arrays are sized nthreads but are indexed by the
    # packed-A buffer id (loop5 x loop3 combinations) — nthreads is an
    # over-allocation that is safe as long as it is >= the buffer count.
    packa_thrcomm_barrier_sense = module.define_global_var(
        name="pack_a_barrier_sense", var_type=int32[nthreads]
    )
    packa_thrcomm_threads_arrived = module.define_global_var(
        name="pack_a_threads_arrived", var_type=int32[nthreads]
    )

    # Sense-reversal barrier state guarding the packing of B,
    # indexed by the 5th-loop work id.
    packb_thrcomm_barrier_sense = module.define_global_var(
        name="pack_b_barrier_sense", var_type=int32[nthreads]
    )
    packb_thrcomm_barrier_threads_arrived = module.define_global_var(
        name="pack_b_threads_arrived", var_type=int32[nthreads]
    )

    @hidet.script
    def init_thr(sense: ~int32, arrived: ~int32, size: int32):
        # Reset the first `size` entries of a barrier's sense/arrived arrays
        # to zero so the sense-reversal barriers start in a known state.
        attrs.func_kind = "cpu_internal"
        for i in range(size):
            sense[i] = 0
            arrived[i] = 0

    # Helpers
    # Packed A: MC/MR micro-panels stacked vertically, each stored
    # column-major (MR x KC) so the micro kernel reads A contiguously.
    packed_a_type = tensor_type(
        "float32", layout=row_major(MC // MR, 1) * column_major(MR, KC)
    )
    # Packed B: NC/NR micro-panels side by side, each stored row-major
    # (KC x NR) so the micro kernel reads B contiguously.
    packed_b_type = tensor_type(
        "float32", layout=row_major(1, NC // NR) * row_major(KC, NR)
    )

    # Get the number of threads remaining at each level
    loop4_nthreads = nthreads // loop5_nways
    loop3_nthreads = loop4_nthreads
    macro_nthreads = loop3_nthreads // loop3_nways
    loop1_nthreads = macro_nthreads // macro_nways

    # All threads below loop3 cooperate on packing B; all threads below the
    # macro level cooperate on packing A.
    packb_nthreads = loop3_nthreads
    packa_nthreads = macro_nthreads

    # One packed-A buffer per (loop5 way, loop3 way) combination.
    packed_a_buffers_needed = loop3_nways * loop5_nways

    @hidet.script
    def thread_range_sub(n_way: int32, work_id: int32, n: int32, bf: int32, start: ~int32, end: ~int32):  # type: ignore
        # Partition the range [0, n) into n_way contiguous sub-ranges whose
        # sizes are multiples of the blocking factor `bf`, writing this
        # thread's sub-range to start[0]/end[0] (BLIS-style slab partition).
        attrs.func_kind = "cpu_internal"
        if n_way == 1:
            # Single way: the whole range belongs to this thread.
            start[0] = 0
            end[0] = n
            return
        all_start = 0
        all_end = n
        size = all_end - all_start

        # Number of whole blocking factors and the leftover remainder.
        n_bf_whole = size // bf
        n_bf_left = size % bf

        # Both groups start with the same floor share of whole blocks.
        n_bf_lo = n_bf_whole // n_way
        n_bf_hi = n_bf_whole // n_way

        # n_th_lo threads get one extra block each.
        n_th_lo = n_bf_whole % n_way
        # If some partitions must have more block_factors than others,
        # assign the slightly larger partitions to lower index threads
        if n_th_lo != 0:
            n_bf_lo += 1
        # Compute the actual widths (in units of rows/columns) of individual threads in the low and high groups
        size_lo = n_bf_lo * bf
        size_hi = n_bf_hi * bf

        # Pre-compute the starting indices of the low and high groups
        lo_start = all_start
        hi_start = all_start + n_th_lo * size_lo

        # Compute the start and end of individual threads' ranges
        if work_id < n_th_lo:
            start[0] = lo_start + work_id * size_lo
            end[0] = lo_start + (work_id + 1) * size_lo
        else:
            start[0] = hi_start + (work_id - n_th_lo) * size_hi
            end[0] = hi_start + (work_id - n_th_lo + 1) * size_hi

            # Add the remainder to the last thread's end
            # (the last thread is always in the high group since
            # n_th_lo < n_way, so this branch is reachable for it).
            if work_id == n_way - 1:
                end[0] += n_bf_left
                end[0] = min(end[0], all_end)

    @hidet.script
    def thread_range_jrir(
        work_id: int32, n_way: int32, n: int32, bf: int32, start: ~int32, end: ~int32, inc: ~int32  # type: ignore
    ):
        # Round-robin assignment of n iterations to n_way threads: thread
        # `work_id` handles iterations work_id, work_id + n_way, ... < n.
        # (`bf` is unused here; kept for signature parity with the slab variant.)
        attrs.func_kind = "cpu_internal"
        start[0] = work_id
        end[0] = n
        inc[0] = n_way

    @hidet.script
    def determine_blocksize_f_sub(i: int32, dim: int32, b_alg: int32) -> int32:  # type: ignore
        # Return the block size for the iteration starting at `i` in a
        # dimension of size `dim`: the full algorithmic block b_alg, clipped
        # to whatever is left of the dimension.
        attrs.func_kind = "cpu_internal"
        dim_left_now = dim - i
        b_now = -1
        if dim_left_now <= b_alg:
            b_now = dim_left_now
        else:
            b_now = b_alg
        assert b_now >= 0
        return b_now

    @hidet.script
    def not_edge(i: int32, n_iter: int32, n_left: int32) -> bool:  # type: ignore
        # True when iteration i is a full tile: either it is not the last
        # iteration, or there is no partial remainder at the edge.
        attrs.func_kind = "cpu_internal"
        return i != n_iter - 1 or n_left == 0

    @hidet.script
    def packa_index(work_id_loop5: int32, work_id_loop3: int32) -> int32:  # type: ignore
        # Flat index of the packed-A buffer (and its barrier slot) owned by
        # this (loop5 way, loop3 way) pair; ranges over
        # loop5_nways * loop3_nways values.
        attrs.func_kind = "cpu_internal"
        return work_id_loop5 * loop3_nways + work_id_loop3

    # Thread barrier
    @hidet.script
    def thrcomm_barrier(barrier_sense: ~int32, barrier_threads_arrived: ~int32, num_threads: int32):  # type: ignore
        # Sense-reversal barrier: each arriving thread increments the arrival
        # counter; the last one resets it and flips the shared sense flag,
        # releasing the others, which spin until they observe the flip.
        # The integer arguments to the atomics are GCC __atomic memory-order
        # constants (see the inline comments).
        attrs.func_kind = "cpu_internal"
        if num_threads == 1:
            # Nothing to synchronize with.
            return
        orig_sense = cpu_atomic_load_n(barrier_sense, 0)  # _ATOMIC_RELAXED

        # Register the current thread's arrival by incrementing the counter
        my_threads_arrived = cpu_atomic_add_fetch(
            barrier_threads_arrived, 1, 4
        )  # _ATOMIC_ACQ_REL

        if my_threads_arrived == num_threads:
            # Last thread in: reset the counter and flip the sense flag.
            barrier_threads_arrived[0] = 0
            cpu_atomic_fetch_xor(barrier_sense, 1, 3)  # _ATOMIC_RELEASE
        else:
            # Spin until the sense flips away from the value read on entry.
            while cpu_atomic_load_n(barrier_sense, 2) == orig_sense:  # _ATOMIC_ACQUIRE
                pass

    @hidet.script
    def micro_kernel(
        a: packed_a_type,
        b: packed_b_type,  # type: ignore
        c_ptr: ~float32,
        pb: int32,
        msize: int32,
        nsize: int32,
        is_first: bool,
    ):
        # 6x16 AVX micro kernel: C[0:6, 0:16] (+)= A[0:6, 0:pb] @ B[0:pb, 0:16],
        # where A is a packed column-major MR-panel and B a packed row-major
        # NR-panel. When is_first, the accumulators start from zero
        # (overwriting C); otherwise the current C values are accumulated into.
        attrs.func_kind = "cpu_internal"
        c = as_tensor_pointer(c_ptr, dtype=float32, shape=[msize, nsize])
        # Load the current 6x16 block of C into 12 ymm accumulators
        # (two 8-float vectors per row).
        # NOTE(review): these loads happen even when is_first subsequently
        # zeroes the accumulators — presumably relying on C being readable;
        # confirm the buffer is always allocated/initialized by the caller.
        c0 = avx_f32x8_load(~c[0, 0])
        c08 = avx_f32x8_load(~c[0, 8])
        c1 = avx_f32x8_load(~c[1, 0])
        c18 = avx_f32x8_load(~c[1, 8])
        c2 = avx_f32x8_load(~c[2, 0])
        c28 = avx_f32x8_load(~c[2, 8])
        c3 = avx_f32x8_load(~c[3, 0])
        c38 = avx_f32x8_load(~c[3, 8])
        c4 = avx_f32x8_load(~c[4, 0])
        c48 = avx_f32x8_load(~c[4, 8])
        c5 = avx_f32x8_load(~c[5, 0])
        c58 = avx_f32x8_load(~c[5, 8])

        if is_first:
            # First k-block: start accumulation from zero.
            c0 = avx_f32x8_setzero()
            c08 = avx_f32x8_setzero()
            c1 = avx_f32x8_setzero()
            c18 = avx_f32x8_setzero()
            c2 = avx_f32x8_setzero()
            c28 = avx_f32x8_setzero()
            c3 = avx_f32x8_setzero()
            c38 = avx_f32x8_setzero()
            c4 = avx_f32x8_setzero()
            c48 = avx_f32x8_setzero()
            c5 = avx_f32x8_setzero()
            c58 = avx_f32x8_setzero()
        a_ptr = cast(a, ~float32)
        b_ptr = cast(b, ~float32)

        # Rank-1 update per k step: broadcast each of the 6 A values and
        # FMA it against the 16-wide row of B.
        for _ in range(pb):
            bb0to7 = avx_f32x8_load_aligned(b_ptr)
            bb8to15 = avx_f32x8_load_aligned(b_ptr + 8)

            aa1 = avx_f32x8_broadcast(a_ptr)
            c0 = avx_f32x8_fmadd(aa1, bb0to7, c0)
            c08 = avx_f32x8_fmadd(aa1, bb8to15, c08)

            aa2 = avx_f32x8_broadcast(a_ptr + 1)
            c1 = avx_f32x8_fmadd(aa2, bb0to7, c1)
            c18 = avx_f32x8_fmadd(aa2, bb8to15, c18)

            aa3 = avx_f32x8_broadcast(a_ptr + 2)
            c2 = avx_f32x8_fmadd(aa3, bb0to7, c2)
            c28 = avx_f32x8_fmadd(aa3, bb8to15, c28)

            aa4 = avx_f32x8_broadcast(a_ptr + 3)
            c3 = avx_f32x8_fmadd(aa4, bb0to7, c3)
            c38 = avx_f32x8_fmadd(aa4, bb8to15, c38)

            aa5 = avx_f32x8_broadcast(a_ptr + 4)
            c4 = avx_f32x8_fmadd(aa5, bb0to7, c4)
            c48 = avx_f32x8_fmadd(aa5, bb8to15, c48)

            aa6 = avx_f32x8_broadcast(a_ptr + 5)
            c5 = avx_f32x8_fmadd(aa6, bb0to7, c5)
            c58 = avx_f32x8_fmadd(aa6, bb8to15, c58)

            # Advance to the next column of packed A / row of packed B.
            a_ptr = a_ptr + 6
            b_ptr = b_ptr + 16

        # Store the results
        avx_f32x8_store(c_ptr, c0)
        avx_f32x8_store(c_ptr + 8, c08)

        avx_f32x8_store(c_ptr + nsize, c1)
        avx_f32x8_store(c_ptr + (nsize + 8), c18)

        avx_f32x8_store(c_ptr + 2 * nsize, c2)
        avx_f32x8_store(c_ptr + (2 * nsize + 8), c28)

        avx_f32x8_store(c_ptr + 3 * nsize, c3)
        avx_f32x8_store(c_ptr + (3 * nsize + 8), c38)

        avx_f32x8_store(c_ptr + 4 * nsize, c4)
        avx_f32x8_store(c_ptr + (4 * nsize + 8), c48)

        avx_f32x8_store(c_ptr + 5 * nsize, c5)
        avx_f32x8_store(c_ptr + (5 * nsize + 8), c58)

    #### Some setup code ####
    # Dimensions of one packed-B block, clipped to the problem size and
    # rounded up to whole NR-wide panels.
    packed_b_height = min(KC, k_size)
    packed_b_width = min(NC, (n_size + NR - 1) // NR * NR)

    # One packed-B buffer per 5th-loop way.
    packed_b_total_width = packed_b_width * loop5_nways
    packed_b_total_size = packed_b_total_width * packed_b_height
    packed_b_individual_size = packed_b_width * packed_b_height

    # Packed-A height rounded up to whole MR-tall panels; one buffer per
    # (loop5, loop3) way combination.
    packed_a_individual_height = min(MC, (m_size + MR - 1) // MR * MR)
    packed_a_total_height = packed_a_individual_height * packed_a_buffers_needed

    # Width rounded up to a multiple of 8 for the 8-column packing unroll.
    packed_a_width = min(KC, (k_size + 8 - 1) // 8 * 8)

    packed_a_total_size = packed_a_total_height * packed_a_width
    packed_a_individual_size = packed_a_width * packed_a_individual_height

    # Global scratch buffers holding all packed blocks.
    packb_buf_ptr = module.define_global_var(
        name="packb_buf_ptr", var_type=float32[packed_b_total_size]
    )
    packa_buf_ptr = module.define_global_var(
        name="packa_buf_ptr", var_type=float32[packed_a_total_size]
    )

    packb_buf = cast(packb_buf_ptr, ~float32)
    packa_buf = cast(packa_buf_ptr, ~float32)

    ##### Start of the loops around micro kernel #####

    @hidet.script
    def gemm_pack_a(
        loop3_partition_a: ~float32,
        loop3_partition_a_width: int32,
        loop3_partition_a_height: int32,
        packed_a_buf: ~float32,
        work_id_packa: int32,
    ):
        # Pack an A sub-block (height x width, strided by k_size in memory)
        # into column-major MR-tall micro-panels in packed_a_buf. MR-row
        # panels are distributed round-robin over the packa thread group.
        attrs.func_kind = "cpu_internal"
        packed_a_tensor = as_tensor_pointer(
            packed_a_buf,
            float32,
            layout=row_major(packed_a_individual_height // MR, 1)
            * column_major(MR, packed_a_width),
        )

        npanels_full_a = loop3_partition_a_height // MR
        panel_a_remainder = loop3_partition_a_height % MR

        npanels_a = npanels_full_a + (1 if panel_a_remainder > 0 else 0)

        for ii_panel in range(npanels_a):
            # Round-robin panel ownership across the packing threads.
            if ii_panel % packa_nthreads != work_id_packa % packa_nthreads:
                continue

            a_curr_panel_row_start = ii_panel * MR
            a_curr_panel_height = min(
                MR, loop3_partition_a_height - a_curr_panel_row_start
            )

            if a_curr_panel_height == MR:  # unroll the packing by 8
                k_iters = loop3_partition_a_width // 8
                k_remainder = loop3_partition_a_width % 8
                col = 0
                for k_iter in range(k_iters):
                    col = k_iter * 8
                    a_curr_panel_col = loop3_partition_a + (
                        a_curr_panel_row_start * k_size + col
                    )

                    # Load a 6x8 tile of A (6 rows, 8 k-columns).
                    v0 = avx_f32x8_load(a_curr_panel_col)
                    v1 = avx_f32x8_load(a_curr_panel_col + k_size)
                    v2 = avx_f32x8_load(a_curr_panel_col + (2 * k_size))
                    v3 = avx_f32x8_load(a_curr_panel_col + (3 * k_size))
                    v4 = avx_f32x8_load(a_curr_panel_col + (4 * k_size))
                    v5 = avx_f32x8_load(a_curr_panel_col + (5 * k_size))

                    # In-register 6x8 -> 8x6 transpose via unpack/shuffle/
                    # permute so the panel can be stored column-major.
                    unpack0 = avx_f32x8_unpacklo(v0, v1)
                    unpack1 = avx_f32x8_unpackhi(v0, v1)
                    unpack2 = avx_f32x8_unpacklo(v2, v3)
                    unpack3 = avx_f32x8_unpackhi(v2, v3)
                    unpack4 = avx_f32x8_unpacklo(v4, v5)
                    unpack5 = avx_f32x8_unpackhi(v4, v5)

                    shf0 = avx_f32x8_shuffle(unpack0, unpack2, 0x44)
                    shf1 = avx_f32x8_shuffle(unpack4, unpack0, 0xE4)
                    shf2 = avx_f32x8_shuffle(unpack2, unpack4, 0xEE)
                    shf3 = avx_f32x8_shuffle(unpack5, unpack1, 0xE4)
                    shf4 = avx_f32x8_shuffle(unpack3, unpack5, 0xEE)
                    shf5 = avx_f32x8_shuffle(unpack1, unpack3, 0x44)

                    low_shf1 = avx_f32x8_cast_f32x4(shf1)
                    res0 = avx_f32x8_insert_f32x4(shf0, low_shf1, 0x1)
                    res1 = avx_f32x8_permute2f32x4(shf0, shf1, 0x31)

                    low_shf5 = avx_f32x8_cast_f32x4(shf5)
                    res2 = avx_f32x8_insert_f32x4(shf2, low_shf5, 0x1)
                    res3 = avx_f32x8_permute2f32x4(shf2, shf5, 0x31)

                    low_shf4 = avx_f32x8_cast_f32x4(shf4)
                    res4 = avx_f32x8_insert_f32x4(shf3, low_shf4, 0x1)
                    res5 = avx_f32x8_permute2f32x4(shf3, shf4, 0x31)

                    # The (row, col) index pairs below address the column-major
                    # panel layout; each aligned 8-float store lands at the
                    # flattened position of the next 8 packed elements.
                    avx_f32x8_store_aligned(
                        ~packed_a_tensor[a_curr_panel_row_start, col], res0
                    )
                    avx_f32x8_store_aligned(
                        ~packed_a_tensor[a_curr_panel_row_start + 2, col + 1], res2
                    )
                    avx_f32x8_store_aligned(
                        ~packed_a_tensor[a_curr_panel_row_start + 4, col + 2], res4
                    )
                    avx_f32x8_store_aligned(
                        ~packed_a_tensor[a_curr_panel_row_start, col + 4], res1
                    )
                    avx_f32x8_store_aligned(
                        ~packed_a_tensor[a_curr_panel_row_start + 2, col + 5], res3
                    )
                    avx_f32x8_store_aligned(
                        ~packed_a_tensor[a_curr_panel_row_start + 4, col + 6], res5
                    )
                # Scalar tail for the k-columns not covered by the 8-wide unroll.
                remaining_start_col = k_iters * 8
                for remain_off in range(k_remainder):
                    curr_remain_col = remaining_start_col + remain_off
                    for micropanel_row in range(MR):
                        packed_a_tensor[
                            a_curr_panel_row_start + micropanel_row, curr_remain_col
                        ] = loop3_partition_a[
                            (micropanel_row + a_curr_panel_row_start) * k_size
                            + curr_remain_col
                        ]
            else:
                # Partial (short) last panel: copy the valid rows and
                # zero-fill the rest up to MR so the micro kernel can always
                # read a full MR-tall panel.
                remain_start_row = npanels_full_a * MR
                for remain_col in range(loop3_partition_a_width):
                    for remain_row in range(panel_a_remainder):
                        packed_a_tensor[remain_start_row + remain_row, remain_col] = (
                            loop3_partition_a[
                                (remain_row + remain_start_row) * k_size + remain_col
                            ]
                        )
                    remain_row = panel_a_remainder
                    while remain_row < MR:
                        packed_a_tensor[remain_start_row + remain_row, remain_col] = 0.0
                        remain_row += 1

    @hidet.script
    def gemm_pack_b(
        loop4_partition_b: ~float32,
        loop4_partition_b_width: int32,
        loop4_partition_b_height: int32,
        packed_b_buf: ~float32,
        work_id_packb: int32,
    ):
        # Pack a B sub-block (height x width, strided by n_size in memory)
        # into row-major NR-wide micro-panels in packed_b_buf. NR-column
        # panels are distributed round-robin over the packb thread group.
        attrs.func_kind = "cpu_internal"
        npanels_full_b = loop4_partition_b_width // NR
        npanels_b_remainder = loop4_partition_b_width % NR

        npanels_b = npanels_full_b + (1 if npanels_b_remainder != 0 else 0)
        packedb_panel_stride = packed_b_height * NR

        # Loop for the packing of B
        for i_panel in range(npanels_b):
            # Round-robin panel ownership across the packing threads.
            if i_panel % packb_nthreads != work_id_packb % packb_nthreads:
                continue
            packed_b_buff_curr = packed_b_buf + (i_panel * packedb_panel_stride)
            curr_panel_start = i_panel * NR
            curr_panel_width = min(NR, loop4_partition_b_width - curr_panel_start)

            if curr_panel_width == NR:
                # Full-width panel: copy 8 rows of 16 floats per unrolled
                # iteration (two aligned ymm stores per row).
                k_iters = loop4_partition_b_height // 8
                k_remainder = loop4_partition_b_height % 8

                row = 0
                for k_iter in range(k_iters):
                    row = k_iter * 8
                    b_panel = loop4_partition_b + (row * n_size + curr_panel_start)
                    b00 = avx_f32x8_load(b_panel)
                    b08 = avx_f32x8_load(b_panel + 8)

                    avx_f32x8_store_aligned(packed_b_buff_curr, b00)
                    avx_f32x8_store_aligned(packed_b_buff_curr + 8, b08)
                    packed_b_buff_curr += 16

                    b10 = avx_f32x8_load(b_panel + n_size)
                    b18 = avx_f32x8_load(b_panel + (n_size + 8))

                    avx_f32x8_store_aligned(packed_b_buff_curr, b10)
                    avx_f32x8_store_aligned(packed_b_buff_curr + 8, b18)
                    packed_b_buff_curr += 16

                    b20 = avx_f32x8_load(b_panel + (2 * n_size))
                    b28 = avx_f32x8_load(b_panel + (2 * n_size + 8))

                    avx_f32x8_store_aligned(packed_b_buff_curr, b20)
                    avx_f32x8_store_aligned(packed_b_buff_curr + 8, b28)
                    packed_b_buff_curr += 16

                    b30 = avx_f32x8_load(b_panel + (3 * n_size))
                    b38 = avx_f32x8_load(b_panel + (3 * n_size + 8))

                    avx_f32x8_store_aligned(packed_b_buff_curr, b30)
                    avx_f32x8_store_aligned(packed_b_buff_curr + 8, b38)
                    packed_b_buff_curr += 16

                    b40 = avx_f32x8_load(b_panel + (4 * n_size))
                    b48 = avx_f32x8_load(b_panel + (4 * n_size + 8))

                    avx_f32x8_store_aligned(packed_b_buff_curr, b40)
                    avx_f32x8_store_aligned(packed_b_buff_curr + 8, b48)
                    packed_b_buff_curr += 16

                    b50 = avx_f32x8_load(b_panel + (5 * n_size))
                    b58 = avx_f32x8_load(b_panel + (5 * n_size + 8))

                    avx_f32x8_store_aligned(packed_b_buff_curr, b50)
                    avx_f32x8_store_aligned(packed_b_buff_curr + 8, b58)
                    packed_b_buff_curr += 16

                    b60 = avx_f32x8_load(b_panel + (6 * n_size))
                    b68 = avx_f32x8_load(b_panel + (6 * n_size + 8))

                    avx_f32x8_store_aligned(packed_b_buff_curr, b60)
                    avx_f32x8_store_aligned(packed_b_buff_curr + 8, b68)
                    packed_b_buff_curr += 16

                    b70 = avx_f32x8_load(b_panel + (7 * n_size))
                    b78 = avx_f32x8_load(b_panel + (7 * n_size + 8))

                    avx_f32x8_store_aligned(packed_b_buff_curr, b70)
                    avx_f32x8_store_aligned(packed_b_buff_curr + 8, b78)

                    packed_b_buff_curr += 16

                # Row tail: copy the remaining (< 8) rows one at a time.
                row = k_iters * 8
                for _ in range(k_remainder):
                    b_panel = loop4_partition_b + (row * n_size + curr_panel_start)
                    b00 = avx_f32x8_load(b_panel)
                    b08 = avx_f32x8_load(b_panel + 8)
                    avx_f32x8_store_aligned(packed_b_buff_curr, b00)
                    avx_f32x8_store_aligned(packed_b_buff_curr + 8, b08)
                    packed_b_buff_curr += 16
                    row += 1

            else:
                # Partial (narrow) last panel: scalar copy of the valid
                # columns, zero-filling up to NR per row.
                packed_b_remaining_buf = packed_b_buf + (
                    npanels_full_b * packedb_panel_stride
                )
                if npanels_b_remainder > 0:
                    remain_col_start = npanels_full_b * NR
                    for remain_row in range(loop4_partition_b_height):
                        packed_b_remaining_buf_curr = packed_b_remaining_buf + (
                            remain_row * NR
                        )
                        for remain_col in range(npanels_b_remainder):
                            packed_b_remaining_buf_curr[0] = loop4_partition_b[
                                (remain_row * n_size) + (remain_col_start + remain_col)
                            ]
                            packed_b_remaining_buf_curr += 1
                        zero_fill_col = npanels_b_remainder
                        while zero_fill_col < NR:
                            packed_b_remaining_buf_curr[0] = 0.0
                            packed_b_remaining_buf_curr += 1
                            zero_fill_col += 1

    @hidet.script
    def gemm_macro(
        packed_a: ~float32,
        packed_b: ~float32,
        c: float32[m_size, n_size],
        c_row_off: int32,
        c_col_off: int32,
        macro_m: int32,
        macro_n: int32,
        macro_k: int32,
        ps_packed_a: int32,
        ps_packed_b: int32,
        comm_id_macro: int32,
        work_id_macro: int32,
        is_first: bool,
    ):
        # Macro kernel: multiplies the packed macro_m x macro_k block of A by
        # the packed macro_k x macro_n block of B, updating the block of C at
        # (c_row_off, c_col_off). NR-wide column slabs (jr) and MR-tall row
        # slabs (ir) are distributed round-robin over the macro-level and
        # 1st-loop thread groups respectively.
        attrs.func_kind = "cpu_internal"
        comm_id_1st_loop = comm_id_macro % loop1_nthreads
        work_id_1st_loop = comm_id_1st_loop // (loop1_nthreads // loop1_nways)

        # Tile counts; an extra partial iteration is added when the block size
        # is not a multiple of the tile size.
        n_iter = macro_n // NR
        n_remainder = macro_n % NR
        m_iter = macro_m // MR
        m_remainder = macro_m % MR

        if n_remainder > 0:
            n_iter += 1
        if m_remainder > 0:
            m_iter += 1

        jr_start = -1
        jr_end = -1
        ir_start = -1
        ir_end = -1
        jr_inc = -1
        ir_inc = -1

        # Round-robin assignment of jr/ir tile iterations to threads.
        thread_range_jrir(
            work_id_macro, macro_nways, n_iter, 1, ~jr_start, ~jr_end, ~jr_inc
        )

        thread_range_jrir(
            work_id_1st_loop, loop1_nways, m_iter, 1, ~ir_start, ~ir_end, ~ir_inc
        )

        # Panel strides inside the packed buffers, and the matching tile
        # strides inside C (row-major with n_size columns).
        rstep_a = ps_packed_a
        cstep_b = ps_packed_b

        cstep_c = NR
        rstep_c = n_size * MR

        macro_c_cast = as_tensor_pointer(
            ~c[c_row_off, c_col_off], dtype=float32, shape=(m_size, n_size)
        )
        # Scratch MR x NR tile for edge tiles, so the micro kernel never
        # writes past the boundary of C.
        temp_c = tensor(
            scope=DeclareScope.Default,
            dtype=float32,
            layout=row_major(MR, NR),
            is_static=False,
        )
        j = jr_start
        while j < jr_end:
            b1 = packed_b + j * cstep_b
            c1 = macro_c_cast + j * cstep_c
            n_cur = NR if not_edge(j, n_iter, n_remainder) else n_remainder

            i = ir_start
            while i < ir_end:
                a1 = packed_a + i * rstep_a
                c11 = c1 + i * rstep_c
                c11 = as_tensor_pointer(c11, dtype=float32, shape=(m_size, n_size))
                m_cur = MR if not_edge(i, m_iter, m_remainder) else m_remainder

                if m_cur == MR and n_cur == NR:
                    # Full tile: write directly into C.
                    micro_kernel(a1, b1, c11, macro_k, m_size, n_size, is_first)
                else:
                    # Edge tile: compute into the scratch tile, then copy only
                    # the valid m_cur x n_cur region into C.
                    # Fix: the zeroing loop now uses fresh names (ti, tj) —
                    # the original used `i, j`, which shadowed the live ir/jr
                    # counters advanced by `i += ir_inc` / `j += jr_inc`.
                    for ti, tj in grid(MR, NR):
                        temp_c[ti, tj] = 0.0
                    micro_kernel(
                        a1, b1, cast(temp_c, ~float32), macro_k, MR, NR, is_first
                    )
                    if not is_first:
                        for mm, nn in grid(m_cur, n_cur):
                            c11[mm, nn] += temp_c[mm, nn]
                    else:
                        for mm, nn in grid(m_cur, n_cur):
                            c11[mm, nn] = temp_c[mm, nn]

                i += ir_inc
            j += jr_inc

    @hidet.script
    def gemm_3rd_loop(
        a: float32[m_size, k_size],
        packed_b: ~float32,
        c: float32[m_size, n_size],
        loop3_partition_a_start_col: int32,
        loop3_partition_b_start_col: int32,
        loop3_partition_a_width: int32,
        loop3_partition_b_width: int32,
        comm_id_3rd_loop: int32,
        work_id_3rd_loop: int32,
        is_first: bool,
        work_id_5th_loop: int32,
    ):
        # 3rd loop (over m): walk this thread group's row range of A in
        # MC-tall blocks, pack each block of A cooperatively, then run the
        # macro kernel against the already-packed B.
        attrs.func_kind = "cpu_internal"
        comm_id_macro = comm_id_3rd_loop % macro_nthreads
        work_id_macro = comm_id_macro // (macro_nthreads // macro_nways)
        work_id_packa = comm_id_macro

        # Slab-partition the m dimension (multiples of MR) over loop3 ways.
        m_start_loop3 = 0
        m_end_loop3 = 0
        thread_range_sub(
            loop3_nways, work_id_3rd_loop, m_size, MR, ~m_start_loop3, ~m_end_loop3
        )

        ii = m_start_loop3
        while ii < m_end_loop3:
            b_alg_loop3 = determine_blocksize_f_sub(ii, m_size, MC)
            b_alg_loop3 = min(b_alg_loop3, m_end_loop3 - ii)
            loop3_partition_a_start_row = ii
            loop3_partition_a_height = b_alg_loop3

            loop3_partition_a = cast(a, ~float32) + (
                loop3_partition_a_start_row * k_size + loop3_partition_a_start_col
            )

            # Each (loop5 way, loop3 way) pair owns its own packed-A buffer
            # and barrier slot.
            packed_a_idx = packa_index(work_id_5th_loop, work_id_3rd_loop)
            packed_a_buf = packa_buf + (packed_a_idx * packed_a_individual_size)

            # Barrier before packing: ensure every thread in the group is
            # done using the previous contents of this packed-A buffer.
            thrcomm_barrier(
                ~packa_thrcomm_barrier_sense[packed_a_idx],
                ~packa_thrcomm_threads_arrived[packed_a_idx],
                packa_nthreads,
            )

            gemm_pack_a(
                loop3_partition_a,
                loop3_partition_a_width,
                loop3_partition_a_height,
                packed_a_buf,
                work_id_packa,
            )

            # This marks the end of the packing of A,
            # so a barrier is needed
            thrcomm_barrier(
                ~packa_thrcomm_barrier_sense[packed_a_idx],
                ~packa_thrcomm_threads_arrived[packed_a_idx],
                packa_nthreads,
            )

            gemm_macro(
                packed_a_buf,
                packed_b,
                c,
                loop3_partition_a_start_row,
                loop3_partition_b_start_col,
                loop3_partition_a_height,
                loop3_partition_b_width,
                loop3_partition_a_width,
                MR * packed_a_width,  # stride between packed-A micro-panels
                packed_b_height * NR,  # stride between packed-B micro-panels
                comm_id_macro,
                work_id_macro,
                is_first,
            )
            ii += b_alg_loop3

    @hidet.script
    def gemm_4th_loop(
        a: float32[m_size, k_size],
        b: float32[k_size, n_size],
        c: float32[m_size, n_size],  # fixed: C is m_size x n_size (was annotated k_size x n_size)
        loop5_partition_b_width: int32,
        loop5_partition_b_start_col: int32,
        comm_id_4th_loop: int32,
        work_id_5th_loop: int32,
    ):
        # 4th loop (over k): walk the shared dimension in KC-tall blocks;
        # for each block, cooperatively pack the corresponding slice of B,
        # then descend into the 3rd loop. Barriers around the packing keep
        # the shared packed-B buffer consistent across the thread group.
        attrs.func_kind = "cpu_internal"
        i_loop4 = 0

        comm_id_3rd_loop = comm_id_4th_loop % loop3_nthreads
        work_id_3rd_loop = comm_id_3rd_loop // (loop3_nthreads // loop3_nways)
        work_id_packb = comm_id_3rd_loop

        while i_loop4 < k_size:
            b_alg_loop4 = determine_blocksize_f_sub(i_loop4, k_size, KC)
            b_alg_loop4 = min(b_alg_loop4, k_size - i_loop4)

            loop4_partition_b_height = b_alg_loop4
            loop4_partition_b_width = loop5_partition_b_width
            loop4_partition_b_start_row = i_loop4
            loop4_partition_b_start_col = loop5_partition_b_start_col

            loop4_partition_a_start_col = i_loop4
            # The first k-block overwrites C; later blocks accumulate.
            is_first = i_loop4 == 0

            # Each 5th-loop way owns its own packed-B buffer.
            packed_b_buf = packb_buf + (packed_b_individual_size * work_id_5th_loop)

            loop4_partition_b = cast(b, ~float32) + (
                loop4_partition_b_start_row * n_size + loop4_partition_b_start_col
            )

            # Barrier before packing: previous users of the buffer are done.
            thrcomm_barrier(
                ~packb_thrcomm_barrier_sense[work_id_5th_loop],
                ~packb_thrcomm_barrier_threads_arrived[work_id_5th_loop],
                packb_nthreads,
            )

            gemm_pack_b(
                loop4_partition_b,
                loop4_partition_b_width,
                loop4_partition_b_height,
                packed_b_buf,
                work_id_packb,
            )

            # Barrier after packing: the buffer is fully written before use.
            thrcomm_barrier(
                ~packb_thrcomm_barrier_sense[work_id_5th_loop],
                ~packb_thrcomm_barrier_threads_arrived[work_id_5th_loop],
                packb_nthreads,
            )

            gemm_3rd_loop(
                a,
                packed_b_buf,
                c,
                loop4_partition_a_start_col,
                loop4_partition_b_start_col,
                loop4_partition_b_height,
                loop4_partition_b_width,
                comm_id_3rd_loop,
                work_id_3rd_loop,
                is_first,
                work_id_5th_loop,
            )

            # Barrier before moving on: nobody may start repacking the
            # buffer while another thread is still computing with it.
            thrcomm_barrier(
                ~packb_thrcomm_barrier_sense[work_id_5th_loop],
                ~packb_thrcomm_barrier_threads_arrived[work_id_5th_loop],
                packb_nthreads,
            )

            i_loop4 += b_alg_loop4

    @hidet.script
    def gemm_5th_loop(
        a: float32[m_size, k_size],
        b: float32[k_size, n_size],
        c: float32[m_size, n_size],
        work_id_5th_loop: int32,
        comm_id_5th_loop: int32,
    ):
        # 5th (outermost) loop: slab-partition the n dimension over the
        # loop5 ways, then walk this thread's column range in NC-wide blocks,
        # delegating each block to the 4th loop.
        attrs.func_kind = "cpu_internal"
        comm_id_4th_loop = comm_id_5th_loop % loop4_nthreads

        loop5_my_start = -1
        loop5_my_end = -1
        # Partition [0, n_size) into loop5_nways slabs in multiples of NR.
        thread_range_sub(
            loop5_nways, work_id_5th_loop, n_size, NR, ~loop5_my_start, ~loop5_my_end
        )

        loop5_iter = loop5_my_start
        while loop5_iter < loop5_my_end:
            b_alg_loop5 = determine_blocksize_f_sub(loop5_iter, loop5_my_end, NC)
            b_alg_loop5 = min(b_alg_loop5, loop5_my_end - loop5_iter)

            loop5_partition_b_width = b_alg_loop5
            loop5_partition_b_start_col = loop5_iter
            gemm_4th_loop(
                a,
                b,
                c,
                loop5_partition_b_width,
                loop5_partition_b_start_col,
                comm_id_4th_loop,
                work_id_5th_loop,
            )
            loop5_iter += b_alg_loop5

    ################### Start of the main kernel ###################
    @hidet.script
    def matmul_kernel_x86_v3(
        a: float32[m_size, k_size], # type: ignore
        b: float32[k_size, n_size], # type: ignore
        c: float32[m_size, n_size], # type: ignore
    ):
        # Entry point: C = A @ B. Resets the barrier state, then spawns
        # nthreads workers, each entering the 5th loop with its comm/work ids.
        attrs.func_kind = "cpu_kernel"

        # The packed-A barriers are indexed by packa_index(work_id_loop5,
        # work_id_loop3), which ranges over loop5_nways * loop3_nways
        # (= packed_a_buffers_needed) slots, so all of them must be reset.
        # (Fix: the original passed loop3_nways, which under-initializes
        # whenever loop5_nways > 1; equal under the current ways tuple.)
        init_thr(
            packa_thrcomm_barrier_sense,
            packa_thrcomm_threads_arrived,
            packed_a_buffers_needed,
        )
        # The packed-B barriers are indexed by work_id_5th_loop only,
        # so loop5_nways slots suffice.
        init_thr(
            packb_thrcomm_barrier_sense,
            packb_thrcomm_barrier_threads_arrived,
            loop5_nways,
        )

        # e.g. "p8": run the grid loop in parallel over nthreads threads.
        parallel_attr = "p" + str(nthreads)
        # The outermost loop spawning threads
        for tidx in grid(nthreads, attrs=parallel_attr):
            tid_5th_loop = tidx
            work_id_5th_loop = tid_5th_loop // (nthreads // loop5_nways)
            comm_id_5th_loop = tid_5th_loop

            gemm_5th_loop(a, b, c, work_id_5th_loop, comm_id_5th_loop)


# Compile the script module into a callable kernel.
mod = module.build()


a = hidet.randn([m_size, k_size])
b = hidet.randn([k_size, n_size])
c = hidet.empty([m_size, n_size])


import numpy as np

# Run the kernel once and check it against hidet's reference matmul.
mod(a, b, c)
c2 = a @ b
np.testing.assert_allclose(c.numpy(), c2.numpy(), atol=1e-4, rtol=1e-4)
# Median latency over 50 repetitions, in milliseconds.
hidet_latency = benchmark_func(lambda: mod(a, b, c), repeat=50)
print(hidet_latency)
