#! python3
import torch as pt
import numpy as np
import torch.nn.functional as F
import math
import os,copy
# from command_generator import utils
import random

# base tiling parameters shared by the bf16 matmul conversions
MM_TILE_X3 = 32  # NOTE(review): unused in this file — possibly consumed elsewhere
MM_TILE_Y3 = 4   # NOTE(review): unused in this file
MM_TILE_X = 8    # rows per weight tile
MM_TILE_Y = 32   # columns per tile
MM_SUBBLOCK_NUM = 4  # sub-blocks per 8-row tile
MM_SUBBLOCK_RP = 2   # packed rows per sub-block


# blocklinear weight layout: [x, y] => [x/8, y/32, 4, 32, 2]
# (each 8x32 tile is split into 4 sub-blocks of 2 packed rows)
def convertMMWeight(tensor):
    rows, cols = tensor.shape[0], tensor.shape[1]

    # tile counts; column tiles are allocated in groups of four
    row_tiles = -(-rows // MM_TILE_X)               # ceil(rows / 8)
    col_tiles = 4 * -(-cols // (MM_TILE_Y * 4))

    # zero-pad up to whole tiles
    padded = F.pad(input=tensor,
                   pad=(0, col_tiles * MM_TILE_Y - cols,
                        0, row_tiles * MM_TILE_X - rows),
                   mode="constant",
                   value=0)

    # carve into (row-tile, 8, col-tile, 32) and bring the tile axes together
    tiled = padded.reshape((row_tiles, MM_TILE_X, col_tiles, MM_TILE_Y))
    tiled = tiled.transpose(1, 2)

    # regroup each tile's 8 rows into 4 sub-blocks of 2 packed rows,
    # then move the packed pair innermost
    grouped = tiled.reshape(row_tiles, col_tiles, MM_SUBBLOCK_NUM,
                            MM_SUBBLOCK_RP, MM_TILE_Y)
    return grouped.transpose(3, 4)


MM_TILE_FP32_X = 4  # rows per fp32 weight tile


def convertMMWeightFP32(tensor):
    """Block-linear fp32 weight layout: [x, y] -> [x/4, y/32, 4, 32].

    Rows are zero-padded to a multiple of 4 and columns to a multiple of
    128 (column tiles are allocated four at a time).
    """
    rows, cols = tensor.shape[0], tensor.shape[1]

    row_tiles = -(-rows // MM_TILE_FP32_X)          # ceil(rows / 4)
    col_tiles = 4 * -(-cols // (MM_TILE_Y * 4))

    padded = F.pad(input=tensor,
                   pad=(0, col_tiles * MM_TILE_Y - cols,
                        0, row_tiles * MM_TILE_FP32_X - rows),
                   mode="constant",
                   value=0)

    # carve into (row-tile, 4, col-tile, 32) then swap the tile axes together
    tiled = padded.reshape((row_tiles, MM_TILE_FP32_X, col_tiles, MM_TILE_Y))
    return tiled.transpose(1, 2)


MM_TILE_S8_Y = 64           # columns per s8 weight tile
MM_TILE_S8_Y_SUBBLOCK = 32  # columns per sub-tile
MM_TILE_S8_Y_SUBNUM = 2     # sub-tiles per 64-column tile


def convertMMWeightS8(tensor):
    """Block-linear s8/u8 weight layout: [x, y] -> [x/8, y/64, 4, 32, 2, 2].

    8x64 tiles; the 8 rows split into 4 sub-blocks of 2 packed rows, and the
    64 columns split into 2 sub-tiles of 32.
    """
    rows, cols = tensor.shape[0], tensor.shape[1]

    row_tiles = -(-rows // MM_TILE_X)
    col_tiles = 4 * -(-cols // (MM_TILE_S8_Y * 4))

    padded = F.pad(input=tensor,
                   pad=(0, col_tiles * MM_TILE_S8_Y - cols,
                        0, row_tiles * MM_TILE_X - rows),
                   mode="constant",
                   value=0)

    tiled = padded.reshape((row_tiles, MM_TILE_X, col_tiles, MM_TILE_S8_Y))
    tiled = tiled.transpose(1, 2)

    # 8 rows -> (4 sub-blocks, 2 packed rows); 64 cols -> (32, 2) sub-tiles
    grouped = tiled.reshape(row_tiles, col_tiles, MM_SUBBLOCK_NUM,
                            MM_SUBBLOCK_RP, MM_TILE_S8_Y_SUBBLOCK,
                            MM_TILE_S8_Y_SUBNUM)
    return grouped.transpose(3, 4)


MM_TILE_S4_Y = 128          # columns per s4 weight tile
MM_TILE_S4_Y_SUBBLOCK = 32  # columns per sub-tile
MM_TILE_S4_Y_SUBNUM = 4     # sub-tiles per 128-column tile


def convertMMWeightS4(tensor):
    """Block-linear s4 weight layout: [x, y] -> [x/8, y/128, 4, 32, 2, 4].

    8x128 tiles; the 8 rows split into 4 sub-blocks of 2 packed rows, and the
    128 columns split into 4 sub-tiles of 32. Values stay in byte storage
    here; nibble packing is done by the caller.
    """
    rows, cols = tensor.shape[0], tensor.shape[1]

    row_tiles = -(-rows // MM_TILE_X)
    col_tiles = 4 * -(-cols // (MM_TILE_S4_Y * 4))

    padded = F.pad(input=tensor,
                   pad=(0, col_tiles * MM_TILE_S4_Y - cols,
                        0, row_tiles * MM_TILE_X - rows),
                   mode="constant",
                   value=0)

    tiled = padded.reshape((row_tiles, MM_TILE_X, col_tiles, MM_TILE_S4_Y))
    tiled = tiled.transpose(1, 2)

    # 8 rows -> (4 sub-blocks, 2 packed rows); 128 cols -> (32, 4) sub-tiles
    grouped = tiled.reshape(row_tiles, col_tiles, MM_SUBBLOCK_NUM,
                            MM_SUBBLOCK_RP, MM_TILE_S4_Y_SUBBLOCK,
                            MM_TILE_S4_Y_SUBNUM)
    return grouped.transpose(3, 4)


SWIZZLE_CH_NUM = 512      # channels per swizzle unit
SWIZZLE_CH_TILE = 32      # channels per tile
SWIZZLE_CH_SUBTILE = 2    # channels per packed sub-tile
SWIZZLE_SUBTILE_NUM = 16  # sub-tiles per tile (32 / 2)


def do_swizzle_MMActivationBF16(tensor):
    """Interleave bf16 activation channels within each 512-channel unit.

    Channels are zero-padded to a multiple of 512. Inside each unit the
    2-channel sub-tiles are reordered: sub-tile 0 of every 32-channel tile
    first, then sub-tile 1 of every tile, and so on.

    Raises:
        TypeError: if the input is not a torch tensor.
        ValueError: if the input is not 2-D.
    """
    if not pt.is_tensor(tensor):
        raise TypeError("only support pytorch tensor")
    if len(tensor.shape) != 2:
        raise ValueError("only support 2D matrix tensor")

    rows, cols = tensor.shape[0], tensor.shape[1]

    # step 1: pad the channel dimension up to whole 512-channel units
    units = -(-cols // SWIZZLE_CH_NUM)
    padded = F.pad(input=tensor, pad=(0, units * SWIZZLE_CH_NUM - cols),
                   mode="constant", value=0)

    # step 2: view as (row, unit, tile, sub-tile index, 2 packed channels)
    tiles_per_unit = SWIZZLE_CH_NUM // SWIZZLE_CH_TILE
    view = padded.reshape(rows, units, tiles_per_unit,
                          SWIZZLE_SUBTILE_NUM, SWIZZLE_CH_SUBTILE)

    # step 3: walk the sub-tile index over all tiles before advancing
    swizzled = view.permute(0, 1, 3, 2, 4)

    # step 4: flatten back to 2-D
    return swizzled.reshape(rows, -1)


# y-major blocklinear activation: [x, y] => [y/32, x/8, 4, 32, 2]
# (column-tile major; each 8-row tile split into 4 sub-blocks of 2 packed rows)
def convertMMActivation(tensor, enable_swizzle=False):
    if enable_swizzle:
        tensor = do_swizzle_MMActivationBF16(tensor)

    rows, cols = tensor.shape[0], tensor.shape[1]

    # row tiles are allocated in groups of four; column tiles are plain 32-wide
    row_tiles = 4 * -(-rows // (MM_TILE_X * 4))
    col_tiles = -(-cols // MM_TILE_Y)

    padded = F.pad(input=tensor,
                   pad=(0, col_tiles * MM_TILE_Y - cols,
                        0, row_tiles * MM_TILE_X - rows),
                   mode="constant",
                   value=0)

    tiled = padded.reshape((row_tiles, MM_TILE_X, col_tiles, MM_TILE_Y))
    # column-tile major, then row-tile, then the 8x32 tile body
    out = tiled.permute(2, 0, 1, 3)
    out = out.reshape(col_tiles, row_tiles, MM_SUBBLOCK_NUM,
                      MM_SUBBLOCK_RP, MM_TILE_Y)
    return out.transpose(3, 4)


def convertMMActivationFP32(tensor):
    """y-major block-linear fp32 activation: [x, y] -> [y/32, x/4, 4, 32]."""
    rows, cols = tensor.shape[0], tensor.shape[1]

    # row tiles are allocated in groups of four
    row_tiles = 4 * -(-rows // (MM_TILE_FP32_X * 4))
    col_tiles = -(-cols // MM_TILE_Y)

    padded = F.pad(input=tensor,
                   pad=(0, col_tiles * MM_TILE_Y - cols,
                        0, row_tiles * MM_TILE_FP32_X - rows),
                   mode="constant",
                   value=0)

    tiled = padded.reshape((row_tiles, MM_TILE_FP32_X, col_tiles, MM_TILE_Y))
    # column-tile major, then row-tile, then the 4x32 tile body
    return tiled.permute(2, 0, 1, 3)


MM_TILE_S8_X = 16        # rows per s8 activation tile
MM_TILE_S8_X_SUBNUM = 4  # packed rows per sub-block (16 rows / 4 sub-blocks)


def convertMMActivationS8(tensor):
    """y-major block-linear s8/u8 activation: [x, y] -> [y/32, x/16, 4, 32, 4]."""
    rows, cols = tensor.shape[0], tensor.shape[1]

    row_tiles = 4 * -(-rows // (MM_TILE_S8_X * 4))
    col_tiles = -(-cols // MM_TILE_Y)

    padded = F.pad(input=tensor,
                   pad=(0, col_tiles * MM_TILE_Y - cols,
                        0, row_tiles * MM_TILE_S8_X - rows),
                   mode="constant",
                   value=0)

    tiled = padded.reshape((row_tiles, MM_TILE_S8_X, col_tiles, MM_TILE_Y))
    # column-tile major, then regroup the 16 rows into 4 sub-blocks of 4
    out = tiled.permute(2, 0, 1, 3)
    out = out.reshape(col_tiles, row_tiles, MM_SUBBLOCK_NUM,
                      MM_TILE_S8_X_SUBNUM, MM_TILE_Y)
    return out.transpose(3, 4)


MAXINT = 2**32


def construct_t_with_shape_pattern(shape, value=None, pattern="random", dtype="fp32"):
    """Build a flat list of shape[0] * shape[1] scalar elements.

    Args:
        shape: 2-tuple (rows, cols); only the element count is used.
        value: if given (and pattern is not "random"), fill with this value.
        pattern: "random" for random data, "ones" for all 1s, anything else
            for a 0..N-1 ramp (when value is None).
        dtype: "fp32"/"bf16" -> floats in [0, 100); "s16"/"s8"/"s4" -> signed
            ints; "u8" -> unsigned ints (random pattern only).

    Raises:
        NotImplementedError: for an unknown dtype with pattern "random".
    """
    total = shape[0] * shape[1]
    # BUG FIX: the original used `pattern is not "random"` — identity
    # comparison against a string literal is implementation-dependent and a
    # SyntaxWarning on modern CPython. Compare by equality.
    if pattern != "random":
        if value is not None:
            return [value] * total
        if pattern == "ones":
            return [1] * total
        return list(range(total))  # any other fixed pattern: 0..total-1 ramp
    t = []
    for _ in range(total):
        if dtype == "fp32" or dtype == "bf16":
            ele = random.random() * 100
        elif dtype == "s16" or dtype == "s8" or dtype == "s4":
            # NOTE(review): MAXINT = 2**32 far exceeds the s16/s8/s4 value
            # range; preserved as-is — downstream presumably truncates.
            # TODO confirm.
            ele = random.randint(-MAXINT, MAXINT)
        elif dtype == "u8":
            ele = random.randint(0, MAXINT)
        else:
            raise NotImplementedError
        t.append(ele)
    return t

def construct_pattern_tensor(h, w, value=None, pattern="random", dtype="fp32",
                             sub_position=None, sub_shape=None):
    """Build an h x w torch tensor filled according to `pattern`.

    When sub_position/sub_shape are given, only that sub-rectangle (top-left
    corner at sub_position = (row, col), size sub_shape = (rows, cols)) is
    filled with generated data; everything else is zero.

    Returns:
        A torch tensor of shape (h, w) whose storage type matches `dtype`
        (FloatTensor, ShortTensor, CharTensor or ByteTensor).

    Raises:
        ValueError: for an unsupported dtype.
    """
    # BUG FIX: compare against None with `is`, not `==`.
    if sub_position is None:
        t = construct_t_with_shape_pattern((h, w), value, pattern, dtype)
    else:
        # place a generated sub-block inside an otherwise all-zero tensor
        t = [0 for _ in range(h * w)]
        sub_t = construct_t_with_shape_pattern(sub_shape, value, pattern, dtype)
        base = sub_position[0] * w + sub_position[1]
        idx = 0
        for h_id in range(sub_shape[0]):
            for w_id in range(sub_shape[1]):
                t[base + h_id * w + w_id] = sub_t[idx]
                idx += 1

    if dtype == "fp32" or dtype == "bf16":
        return pt.FloatTensor(t).reshape(h, w)
    if dtype == "s16":
        return pt.ShortTensor(t).reshape(h, w)
    if dtype == "s8" or dtype == "s4":
        return pt.CharTensor(t).reshape(h, w)
    if dtype == "u8":
        return pt.ByteTensor(t).reshape(h, w)
    # BUG FIX: an unknown dtype previously fell through and crashed with
    # UnboundLocalError on `tensor`; fail with a clear message instead.
    raise ValueError(f"unsupported dtype: {dtype}")


# truncate the low 16 mantissa bits: fp32 -> bf16
def fp32Tobf16(np_tensor):
    """Convert a float32 ndarray to bf16 stored as int16 (same shape).

    The low 16 bits of each float32 are simply dropped (truncation, no
    rounding). Assumes a little-endian host, so the high half-word is the
    second int16 of each pair — TODO confirm on other platforms.

    Raises:
        TypeError: if the input is not a float32 numpy ndarray.
    """
    # BUG FIX: the original did `raise("...")`, which raises
    # "TypeError: exceptions must derive from BaseException" instead of the
    # intended message. Raise a real exception type.
    if not isinstance(np_tensor, np.ndarray):
        raise TypeError("only support numpy tensor")
    if np_tensor.dtype != 'float32':
        raise TypeError("only support float32")

    # reinterpret the raw bytes as pairs of int16 half-words
    raw = np.frombuffer(np_tensor.tobytes(), dtype="int16")
    halves = raw.reshape(raw.shape[0] // 2, 2)

    # keep only the high half-word (sign / exponent / top mantissa bits)
    return halves[:, 1].reshape(np_tensor.shape)


def bf16Tofp32(np_tensor):
    """Convert bf16 values stored as int16 back to float32 (same shape).

    Each bf16 half-word becomes the high 16 bits of a float32 whose low
    16 bits are zero — the inverse of fp32Tobf16, up to the truncated bits.
    Assumes a little-endian host — TODO confirm on other platforms.

    Raises:
        TypeError: if the input is not an int16 numpy ndarray.
    """
    # BUG FIX: the original did `raise("...")`, which raises the generic
    # "exceptions must derive from BaseException" TypeError. Raise a real
    # exception type with the intended message.
    if not isinstance(np_tensor, np.ndarray):
        raise TypeError("only support numpy tensor")
    if np_tensor.dtype != 'int16':
        raise TypeError("only support int16")

    total = np_tensor.size
    # interleave a zero low half-word before each bf16 high half-word
    pairs = np.zeros((2, total), dtype='int16')
    pairs[1] = np_tensor.reshape(1, total)
    buf = pairs.transpose().tobytes()

    return np.frombuffer(buf, dtype="float32").reshape(np_tensor.shape)


def br_fp32Tobf16Tofp32(tensor):
    """Round-trip a torch fp32 tensor through bf16 truncation.

    Returns a numpy float32 array of the same shape whose low mantissa bits
    have been zeroed; the input tensor is left untouched.
    """
    work = tensor.clone().detach()
    original_shape = work.shape
    if len(work.shape) == 1:
        work = work.reshape((1, work.shape[0]))
    # fp32 -> bf16 (int16 view) -> fp32, then restore the caller's shape
    round_tripped = bf16Tofp32(fp32Tobf16(work.numpy()))
    round_tripped = round_tripped.reshape(original_shape)
    return copy.deepcopy(round_tripped)


def br_dump_MMWeight(file_info, tensor, dtype="fp32"):
    """Convert a weight tensor to the device block-linear layout and dump it.

    The converted tensor is written to "<file_info>-<dtype>.bin".

    Args:
        file_info: output path prefix.
        tensor: 1-D or 2-D torch tensor (1-D is treated as a single row).
        dtype: one of "fp32", "bf16", "s8", "u8", "s4".

    Returns:
        Number of elements written (after padding / s4 packing).

    Raises:
        ValueError: for an unsupported dtype.
    """
    tensor_cp = tensor.clone().detach()
    if len(tensor_cp.shape) == 1:
        tensor_cp = tensor_cp.reshape((1, tensor_cp.shape[0]))

    if dtype == "fp32":
        tensor_ct = convertMMWeightFP32(tensor_cp)
    elif dtype == "bf16":
        if tensor_cp.dtype == pt.int32:
            tensor_cp = tensor_cp.float()
        # truncate fp32 -> bf16, then lay out block-linear
        tensor_n = fp32Tobf16(tensor_cp.numpy())
        tensor_n_cp = copy.deepcopy(tensor_n)
        # BUG FIX: reshape to the (possibly row-promoted) 2-D shape rather
        # than the original shape — 1-D inputs used to crash inside
        # convertMMWeight, which indexes shape[1].
        tensor_ct = convertMMWeight(
            pt.from_numpy(tensor_n_cp).reshape(tensor_cp.shape))
    elif dtype == "s8" or dtype == "u8":
        tensor_ct = convertMMWeightS8(tensor_cp)
    elif dtype == "s4":
        tensor_ct_s8 = convertMMWeightS4(tensor_cp)
        # pack two byte-held nibbles per output byte: element 1 goes in the
        # high nibble, element 0 in the low nibble (vectorized form of the
        # old per-element loop; identical results)
        tensor_ct_s8 = tensor_ct_s8.reshape(-1, 2).byte()
        tensor_ct = ((tensor_ct_s8[:, 1] & 0xF) << 4) | (tensor_ct_s8[:, 0] & 0xF)
    else:
        # BUG FIX: an unknown dtype previously fell through and crashed with
        # UnboundLocalError on `tensor_ct`; fail with a clear message instead.
        raise ValueError(f"unsupported dtype: {dtype}")

    # dump converted data as raw bytes
    file_name = file_info + '-' + dtype + '.bin'
    tensor_ct.numpy().tofile(file_name)
    tensor_size = np.prod(tensor_ct.shape)
    return tensor_size


def br_dump_MMActivation(file_info, tensor, dtype="fp32", enable_swizzle=False):
    """Convert an activation tensor to the device y-major layout and dump it.

    The converted tensor is written to "<file_info>-<dtype>.bin".

    Args:
        file_info: output path prefix.
        tensor: 1-D or 2-D torch tensor (1-D is treated as a single row).
        dtype: one of "fp32", "bf16", "s16", "s8", "u8".
        enable_swizzle: bf16 only — pre-swizzle channels in 512-ch units.

    Returns:
        Number of elements written (after padding).

    Raises:
        ValueError: for an unsupported dtype.
    """
    tensor_cp = tensor.clone().detach()
    if len(tensor_cp.shape) == 1:
        tensor_cp = tensor_cp.reshape((1, tensor_cp.shape[0]))

    if dtype == "fp32":
        tensor_ct = convertMMActivationFP32(tensor_cp)
    elif dtype == "bf16":
        if tensor_cp.dtype == pt.int32:
            tensor_cp = tensor_cp.float()
        # truncate fp32 -> bf16, then lay out block-linear
        tensor_n = fp32Tobf16(tensor_cp.numpy())
        tensor_n_cp = copy.deepcopy(tensor_n)
        # BUG FIX: reshape to the (possibly row-promoted) 2-D shape rather
        # than the original shape — 1-D inputs used to crash inside
        # convertMMActivation, which indexes shape[1].
        tensor_ct = convertMMActivation(
            pt.from_numpy(tensor_n_cp).reshape(tensor_cp.shape),
            enable_swizzle=enable_swizzle)
    elif dtype == "s16":
        assert (tensor_cp.dtype == pt.int16), \
            "make sure dtype is int16 which is the alias for s16"
        tensor_ct = convertMMActivation(tensor_cp)
    elif dtype == "s8" or dtype == "u8":
        tensor_ct = convertMMActivationS8(tensor_cp)
    else:
        # BUG FIX: an unknown dtype previously fell through and crashed with
        # UnboundLocalError on `tensor_ct`; fail with a clear message instead.
        raise ValueError(f"unsupported dtype: {dtype}")

    # dump converted data as raw bytes
    file_name = file_info + '-' + dtype + '.bin'
    tensor_ct.numpy().tofile(file_name)
    tensor_size = np.prod(tensor_ct.shape)
    return tensor_size
