import numpy as np
import torch

def Custom_MM(x: torch.Tensor, weight: torch.Tensor, weightScale: torch.Tensor, m: int):
    """
    Quantized GMM (general matrix multiply) with per-group or per-channel
    dequantization.

    Args:
        x (torch.Tensor): Input tensor of shape (m, k), integer-valued.
        weight (torch.Tensor): Weight tensor of shape (k, n), integer-valued.
        weightScale (torch.Tensor): Per-channel dequantization scales,
         - per-group case:   shape (k_group_num, n). Note: k_group_num == 1
           degenerates to the per-channel case.
         - per-channel case: shape (n,).
        m (int): Number of tokens (rows of x).

    Returns:
        torch.Tensor: (m, n) float16 — matmul result dequantized per-group or
        per-channel.
    """
    k, n = weight.shape
    # Accumulator; per-group path adds into it, per-channel path replaces it.
    MMOut = torch.zeros((m, n), dtype=torch.float16)
    # per-group case: split K into K_group chunks, matmul each chunk in int32,
    # dequantize with that chunk's scales, and accumulate in fp16.
    if len(weightScale.shape) == 2 and weightScale.shape[0] != 1:
        K_group = weightScale.shape[0]
        per_group_ele = k // K_group
        x_grouped = x.view(-1, K_group, per_group_ele).transpose(0, 1)
        weight_grouped = weight.view(K_group, per_group_ele, n)
        c_temp = torch.bmm(x_grouped.to(torch.int32), weight_grouped.to(torch.int32)).float()
        for k_idx in range(K_group):
            MMOut += (c_temp[k_idx] * weightScale[k_idx].view(1, -1)).to(torch.float16)
    # per-channel case: one scale per output column.
    elif len(weightScale.shape) == 1 or (len(weightScale.shape) == 2 and weightScale.shape[0] == 1):
        c_temp = torch.matmul(x.to(torch.int32), weight.to(torch.int32)).to(torch.float32)
        # Bug fix: cast the *product* to fp16. The old code cast only the scale
        # (`c_temp * scale.to(float16)`), which promotes back to float32 and
        # made this branch return a different dtype than the per-group branch.
        MMOut = (c_temp * weightScale.view(1, -1)).to(torch.float16)
    return MMOut

def x_INT8_to_x_INT4(x: torch.Tensor):
    M, K = x.shape
    x_High_4bit = torch.floor(x.to(torch.float16) // 16).to(torch.int8)
    x_Low_4bit = (torch.bitwise_and(x.view(torch.int16), 0x0f0f).view(torch.int8) - 8)
    x_Int4 = torch.empty((2 * M, K), dtype=torch.int8)
    x_Int4[::2,:] =  x_High_4bit
    x_Int4[1::2,:] = x_Low_4bit
    return x_Int4

def process_groups(x: torch.Tensor, weight: torch.Tensor, weightScale: torch.Tensor, perTokenScale: torch.Tensor, weightAssistanceMatrix: torch.Tensor, groupList: torch.Tensor):
    """
    Golden reference: process the input group by group — grouped INT4 matmul,
    dequantization, SwiGLU activation, then per-token INT8 quantization.

    Parameters:
        x (torch.Tensor): Input tensor of shape (M, K), INT8.
        weight (torch.Tensor): Weights of shape (E, K, N); dtype int8 but the
            values lie in the INT4 range, i.e. they represent INT4 data.
        weightScale (torch.Tensor): Per-channel dequant scales,
         - per-group case:   shape (E, k_group_num, N).
         - per-channel case: shape (E, N).
        perTokenScale (torch.Tensor): Per-token scales, shape (M,).
        weightAssistanceMatrix (torch.Tensor): Shape (E, N); compensates the -8
            offset applied to the low nibbles in x_INT8_to_x_INT4.
        groupList (torch.Tensor): Cumulative (cumsum-mode) token counts per group.

    Returns:
        quantOutput (torch.Tensor): Quantized output, shape (M, N // 2), int8.
        quantScaleOutput (torch.Tensor): Dequant scales, shape (M,), float32.
    """
    M, N = x.shape[0], weight.shape[2]  # token count and output width
    quantOutput = torch.zeros(M, N // 2).to(torch.int8)  # quantized output buffer (SwiGLU halves the columns)
    quantScaleOutput = torch.zeros(M).to(torch.float32)  # per-token output scale buffer
    # Preprocess: split every INT8 row into two INT4 rows (high/low nibbles).
    x_INT4 = x_INT8_to_x_INT4(x)
    start_idx = 0  # row offset into x_INT4 (counts INT4 rows: 2 per token)
    preV = 0  # previous group's cumulative token count
    groupList = groupList.tolist()
    # Walk groupList; each group i is served by expert weight[i] / weightScale[i].
    for i, v in enumerate(groupList):
        currV = v
        tempV = (currV - preV) * 2  # rows in this group; "* 2" because 1 INT8 row -> 2 INT4 rows
        preV = currV  # advance the cumulative token count
        if (tempV > 0):
            # Dequantized matmul over this group's INT4 rows.
            MMOut = Custom_MM(x_INT4[start_idx : start_idx + tempV],
                              weight[i],
                              weightScale[i],
                              tempV)
            # Recombine nibble rows (high * 16 + low) and add the assistance
            # term that cancels the -8 low-nibble offset.
            MM_Num_Concat = ((MMOut[::2] * 16 + MMOut[1::2]) + weightAssistanceMatrix[i].view(1,-1))
            # Apply per-token scales; indices are halved to map INT4 rows back to tokens.
            PerToken_Quant = MM_Num_Concat * perTokenScale[start_idx // 2 : (start_idx + tempV) // 2].view(-1, 1)
            # Split the columns in half to apply the SwiGLU activation.
            SwiGLU, gate = PerToken_Quant.chunk(2, dim=-1)
            temp = SwiGLU * torch.sigmoid(SwiGLU)  # SiLU part of SwiGLU
            temp = temp * gate  # elementwise gating

            # Per-token symmetric INT8 quantization of the activation output.
            max_value = torch.max(torch.abs(temp), dim=-1).values  # per-row max |value| sets the scale
            quantScaleOutput_temp = 127 / max_value  # quantization scale (NOTE: inf if a row is all zeros)
            quantOutput[start_idx // 2 : (start_idx + tempV) // 2] = torch.round(
                temp * quantScaleOutput_temp.reshape(tempV // 2, 1)).to(torch.int8)  # quantize to int8
            quantScaleOutput[start_idx // 2 : (start_idx + tempV) // 2] = 1 / quantScaleOutput_temp  # inverse scale for later dequantization
        start_idx += tempV  # advance to the next group's rows

    return quantOutput, quantScaleOutput

def generate_non_decreasing_sequence(length, upper_limit, groupListType):
    """
    Generate a random non-decreasing 1-D tensor whose last value is strictly
    below the given upper limit.

    Args:
        length (int): Number of elements in the sequence.
        upper_limit (int): Exclusive upper bound for the last (largest) value.
        groupListType (int): 0 -> return the cumulative (cumsum-mode) sequence;
            any other value -> return the per-group increments (count mode).

    Returns:
        torch.Tensor: 1-D int64 tensor of `length` elements — the non-decreasing
        cumulative sequence (type 0) or its increments (otherwise); the two views
        are consistent: cumsum(increments) == sequence.
    """
    # Random per-step increments in [0, 127]; cumsum yields a non-decreasing sequence.
    random_increments = torch.randint(0, 128, (length,))
    sequence = torch.cumsum(random_increments, dim=0)

    # Rescale when the sequence overshoots the limit.
    if sequence[-1] >= upper_limit:
        scale_factor = upper_limit / sequence[-1]  # shrink factor
        sequence = (sequence * scale_factor).to(torch.int64)  # scale and truncate to int
        # Bug fix: float truncation can still land exactly on upper_limit;
        # clamp to keep the documented strict upper bound (stays non-decreasing).
        sequence = torch.clamp(sequence, max=upper_limit - 1)
        # Bug fix: rebuild the increments from the scaled sequence so count mode
        # stays consistent with cumsum mode (independently truncated increments
        # no longer summed to `sequence`).
        random_increments = torch.diff(sequence, prepend=torch.zeros(1, dtype=sequence.dtype))
    if groupListType == 0:
        return sequence
    else :
        return random_increments

def gen_input_data(E, M, K, N, KNum_per_group, groupListType, dequantModle):
    """
    Generate random inputs for the grouped-matmul + SwiGLU quantization reference.

    Args:
        E (int): Number of experts/groups.
        M (int): Number of tokens.
        K (int): Input feature dimension.
        N (int): Output feature dimension.
        KNum_per_group (int): K rows sharing one scale in per-group mode.
        groupListType (int): 0 -> cumsum-mode groupList; 1 -> count-mode groupList.
        dequantModle (int): 0 -> per-channel && per-group mode; 1 -> pure
            per-channel mode.

    Returns:
        tuple: (x, weight, weightScale, xScale, weightAssistanceMatrix, groupList).

    Raises:
        ValueError: If dequantModle is neither 0 nor 1.
    """
    # Bug fix: torch.randint's upper bound is exclusive — use 128 / 8 so the
    # full INT8 range [-128, 127] and INT4 range [-8, 7] are actually covered.
    x = torch.randint(-128, 128, (M, K), dtype=torch.int8)
    weight = torch.randint(-8, 8, (E, K, N), dtype=torch.int8)
    if dequantModle == 0:
        assert K % KNum_per_group == 0, "per-channel&&per-group模式下， K必须为KNum_per_group的整数倍"
        weightScale = 0.1 * torch.randn(E, K // KNum_per_group,  N)
    elif dequantModle == 1:
        weightScale = 0.1 * torch.randn(E, N)
    else:
        # Previously fell through and crashed later with NameError; fail fast instead.
        raise ValueError(f"unsupported dequantModle: {dequantModle}")
    xScale = 0.1 * torch.randn(M)
    # Expand per-group scales to one scale per K row (no-op in per-channel mode).
    repeat_times = KNum_per_group if dequantModle == 0 else 1
    expanded_scale = weightScale.view(E, -1, N).repeat_interleave(repeat_times, dim=1)
    # Compensation term for the -8 low-nibble offset used by x_INT8_to_x_INT4.
    weightAssistanceMatrix = (8 * weight * expanded_scale).sum(dim=1)
    groupList = generate_non_decreasing_sequence(E, M, groupListType)
    return x, weight, weightScale, xScale, weightAssistanceMatrix, groupList

if __name__ == '__main__':
    # Problem sizes for the reference run.
    E = 16
    M = 512
    K = 7168
    N = 4096
    KNum_per_group = 256
    groupListType = 0 # 0: groupList is in cumsum mode; 1: groupList is in count mode
    dequantModle = 0  # 0: per-channel && per-group mode; 1: pure per-channel mode
    # Generate all reference inputs.
    x, weight, weightScale, xScale, weightAssistanceMatrix, groupList = gen_input_data(E, M, K, N, KNum_per_group, groupListType, dequantModle)

    print("x          :", "\t",x.shape, "\t\t", x.dtype)
    print("weight     :", "\t",weight.shape, "\t\t", weight.dtype)
    print("weightScale:", "\t",weightScale.shape, "\t\t", weightScale.dtype)
    print("xScale     :", "\t",xScale.shape, "\t\t", xScale.dtype)
    print("groupList  :", "\t",groupList.shape, "\t\t", groupList.dtype)
    print("weightAssistanceMatrix:", "\t", weightAssistanceMatrix.shape, "\t\t", weightAssistanceMatrix.dtype)
    print("groupList value:", groupList)
    # Feed the golden function, which expects a cumsum-mode groupList.
    if groupListType == 1:
        # Bug fix: torch.cumsum requires an explicit dim argument;
        # torch.cumsum(groupList) raised a TypeError in count mode.
        groupList = torch.cumsum(groupList, dim=0)
    quantOutput, quantScaleOutput = process_groups(x, weight, weightScale, xScale, weightAssistanceMatrix, groupList)