#!/usr/bin/env python3
# coding: utf-8
# Copyright (c) 2025 Huawei Technologies Co., Ltd.
# This file is a part of the CANN Open Software.
# Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
# Please refer to the License for details. You may not use this file except in compliance with the License.
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
# See LICENSE in the root of the software repository for the full text of the License.
# ======================================================================================================================
import torch
import torch_npu
import ctypes
import logging
import numpy as np
import random
from atk.configs.dataset_config import InputDataset
from atk.configs.results_config import TaskResult
from atk.tasks.api_execute import register
from atk.tasks.api_execute.base_api import BaseApi
from atk.tasks.dataset.base_dataset import OpsDataset
from atk.tasks.api_execute.aclnn_base_api import AclnnBaseApi
from atk.tasks.backends.lib_interface.acl_wrapper import TensorPtr
from atk.tasks.backends.lib_interface.acl_wrapper import AclFormat,AclTensorList

def generate_non_decreasing_sequence(length, upper_limit, groupListType: int, seed: int):
    """
    Generate a random non-decreasing 1-D int64 tensor whose last value is
    capped by *upper_limit* (used as a groupList for grouped matmul).

    Args:
        length (int): length of the sequence — weight.shape[0] (number of experts).
        upper_limit (int): cap for the final value — x.shape[0] (number of tokens).
        groupListType (int): 0 returns the cumulative sums (cumsum semantics),
            any other value returns the raw increments (count semantics).
        seed (int): torch RNG seed, so the sequence is reproducible.

    Returns:
        torch.Tensor: the generated 1-D tensor.
    """
    # Fix the RNG so golden and device runs see the same groupList.
    torch.manual_seed(seed)
    # Random per-group increments in [0, 512), accumulated into a
    # non-decreasing sequence.
    increments = torch.randint(0, 512, (length,))
    cumulative = torch.cumsum(increments, dim=0)

    # If the total overshoots the cap, scale everything down proportionally.
    # NOTE(review): after truncation the scaled increments no longer sum
    # exactly to the scaled cumsum — confirm count-mode consumers tolerate this.
    if cumulative[-1] >= upper_limit:
        shrink = upper_limit / cumulative[-1]
        if groupListType == 0:
            return (cumulative * shrink).to(torch.int64)
        return (increments * shrink).to(torch.int64)
    if groupListType == 0:
        return cumulative
    return increments.to(torch.int64)

def MM(x: torch.Tensor, weight: torch.Tensor, perChannelScale: torch.Tensor, perTokenScale: torch.Tensor,
                     m: int, outDtype: torch.dtype, KNum_per_group: int, dequantMode: int):
    """
    Golden (CPU) reference for one quantized matmul group, computed in fp32
    with deliberate fp16 intermediate rounding to mimic device precision.

    Args:
        x (torch.Tensor): input tensor of shape (m, k).
        weight (torch.Tensor): weight tensor of shape (k, n).
        perChannelScale (torch.Tensor): per-channel scale factors.
            - dequantMode == 0: shape (k // KNum_per_group, n)
            - dequantMode == 1: shape (1, n)
        perTokenScale (torch.Tensor): per-token scale factors, shape (m,).
        m (int): number of tokens (rows of x).
        outDtype (torch.dtype): dtype of the returned tensor.
        KNum_per_group (int): k-elements sharing one channel-scale group.
        dequantMode (int): 0 = per-group dequant, 1 = per-channel dequant.

    Returns:
        torch.Tensor: output tensor of shape (m, n), dtype outDtype.

    Raises:
        ValueError: if dequantMode is not 0 or 1.
    """
    N = weight.shape[1]
    if dequantMode == 0:
        groups = perChannelScale.shape[0]
        ele_per_group = KNum_per_group  # i.e. k // groups
        # 1. Split the k dimension into groups: x -> [groups, m, ele_per_group]
        x_reshaped = x.reshape(m, groups, ele_per_group).transpose(0, 1).type(torch.float32)
        # 2. weight -> [groups, ele_per_group, n]
        weight_reshaped = weight.reshape(groups, ele_per_group, N).type(torch.float32)
        # 3. Batched matmul:
        #    [groups, m, ele_per_group] @ [groups, ele_per_group, n] -> [groups, m, n]
        batch_matmul = torch.bmm(x_reshaped, weight_reshaped)
        # 4. Apply per-group channel scales, rounding through fp16 like the device.
        scaled_results = (batch_matmul * perChannelScale.reshape(groups, 1, N)).type(torch.float16)
        # 5. Accumulate over the group dimension sequentially in fp16 — the
        #    explicit loop preserves the device's accumulation/rounding order.
        c_temp1 = torch.zeros(m, N).type(torch.float16)
        for k_idx in range(scaled_results.shape[0]):
            c_temp1 += scaled_results[k_idx].type(torch.float16)
        c_temp1 = perTokenScale.reshape(m, 1) * c_temp1.to(torch.float32)
        return c_temp1.type(outDtype)
    elif dequantMode == 1:
        # fp32 matmul, then per-channel scale (rounded through fp16) and
        # per-token scale.
        c_temp1 = torch.matmul(x.type(torch.float32), weight.type(torch.float32))
        c_temp2 = (torch.mul(c_temp1, perChannelScale)).type(torch.float16).to(torch.float32)
        c_temp3 = torch.mul(c_temp2, perTokenScale.reshape(m, 1))
        return c_temp3.type(outDtype)
    # Previously fell through and returned None; fail loudly instead.
    raise ValueError(f"dequantMode must be 0 or 1, got {dequantMode}")

def MM_fp64(x: torch.Tensor, weight: torch.Tensor, perChannelScale: torch.Tensor, perTokenScale: torch.Tensor,
                     m: int, outDtype: torch.dtype, KNum_per_group: int, dequantMode: int):
    """
    High-precision (fp64) variant of MM, used as the benchmark golden path.
    Keeps the same fp16/fp32 intermediate rounding points as MM but carries
    the matmul and final token-scaling in float64.

    Args:
        x (torch.Tensor): input tensor of shape (m, k).
        weight (torch.Tensor): weight tensor of shape (k, n).
        perChannelScale (torch.Tensor): per-channel scale factors.
            - dequantMode == 0: shape (k // KNum_per_group, n)
            - dequantMode == 1: shape (1, n)
        perTokenScale (torch.Tensor): per-token scale factors, shape (m,).
        m (int): number of tokens (rows of x).
        outDtype (torch.dtype): dtype of the returned tensor.
        KNum_per_group (int): k-elements sharing one channel-scale group.
        dequantMode (int): 0 = per-group dequant, 1 = per-channel dequant.

    Returns:
        torch.Tensor: output tensor of shape (m, n), dtype outDtype.

    Raises:
        ValueError: if dequantMode is not 0 or 1.
    """
    N = weight.shape[1]
    if dequantMode == 0:
        groups = perChannelScale.shape[0]
        ele_per_group = KNum_per_group  # i.e. k // groups
        # 1. Split the k dimension into groups: x -> [groups, m, ele_per_group]
        x_reshaped = x.reshape(m, groups, ele_per_group).transpose(0, 1).type(torch.float64)
        # 2. weight -> [groups, ele_per_group, n]
        weight_reshaped = weight.reshape(groups, ele_per_group, N).type(torch.float64)
        # 3. Batched matmul:
        #    [groups, m, ele_per_group] @ [groups, ele_per_group, n] -> [groups, m, n]
        batch_matmul = torch.bmm(x_reshaped, weight_reshaped)
        # 4. Apply per-group channel scales, rounding to fp32.
        scaled_results = (batch_matmul * perChannelScale.reshape(groups, 1, N)).type(torch.float32)
        # 5. Accumulate over the group dimension sequentially in fp32 — the
        #    explicit loop preserves the accumulation/rounding order.
        c_temp1 = torch.zeros(m, N).type(torch.float32)
        for k_idx in range(scaled_results.shape[0]):
            c_temp1 += scaled_results[k_idx].type(torch.float32)
        c_temp1 = perTokenScale.reshape(m, 1) * c_temp1.to(torch.float64)
        return c_temp1.type(outDtype)
    elif dequantMode == 1:
        # fp64 matmul (the original comment said fp32, but float64 is used),
        # then per-channel scale rounded through fp16, and per-token scale.
        c_temp1 = torch.matmul(x.type(torch.float64), weight.type(torch.float64))
        c_temp2 = (torch.mul(c_temp1, perChannelScale)).type(torch.float16).to(torch.float64)
        c_temp3 = torch.mul(c_temp2, perTokenScale.reshape(m, 1))
        return c_temp3.type(outDtype)
    # Previously fell through and returned None; fail loudly instead.
    raise ValueError(f"dequantMode must be 0 or 1, got {dequantMode}")


def gmm_a4w4_golden(x: torch.Tensor, weight: torch.Tensor, perChannelScale: torch.Tensor, perTokenScale: torch.Tensor, groupList: torch.Tensor,
                    outDtype: torch.dtype, groupListType: int):
    """
    Golden reference for grouped matmul (a4w4): slices the token dimension
    according to groupList and computes each group with its expert's weight
    and scales via MM (fp32 scales) or MM_fp64 (fp64 scales).

    Args:
        x (torch.Tensor): input tensor, shape (M, K).
        weight (torch.Tensor): expert weights, shape (E, K, N).
        perChannelScale (torch.Tensor): per-channel scale factors.
            - 3-D, shape (E, K // KNum_per_group, N): per-group dequant (mode 0).
            - 2-D, shape (E, N): per-channel dequant (mode 1).
        perTokenScale (torch.Tensor): per-token scale factors, shape (M,).
        groupList (torch.Tensor): token counts per expert.
            - groupListType == 0: cumulative-sum (cumsum) semantics.
            - groupListType == 1: per-group count semantics.
        outDtype (torch.dtype): dtype of the output tensor.
        groupListType (int): 0 (cumsum) or 1 (count).

    Returns:
        torch.Tensor: output tensor of shape (M, N); rows beyond the total
        token count remain zero.

    Raises:
        ValueError: if groupListType is not 0 or 1.
    """
    # Guard first: an unknown groupListType previously left tempV unbound
    # and crashed with NameError inside the loop.
    if groupListType not in (0, 1):
        raise ValueError(f"groupListType must be 0 or 1, got {groupListType}")
    M, K, N = x.shape[0], x.shape[1], weight.shape[2]
    Output = torch.zeros(M, N).type(outDtype)

    start_idx = 0  # first token row of the current group
    preV = 0       # previous cumulative value (cumsum mode only)
    groupList = groupList.tolist()
    # A 3-D perChannelScale means per-group dequant (mode 0); 2-D means
    # per-channel dequant (mode 1).
    dequantMode = 0 if len(perChannelScale.shape) == 3 else 1
    KNum_per_group = K // (perChannelScale.shape[1] if len(perChannelScale.shape) == 3 else 1)
    for i, v in enumerate(groupList):
        if groupListType == 0:
            tempV = v - preV  # tokens in this group (difference of cumsums)
            preV = v
        else:
            tempV = v  # tokens in this group (direct count)
        if tempV > 0:
            # fp64 scales select the high-precision benchmark path.
            mm_fn = MM if perChannelScale.dtype == torch.float32 else MM_fp64
            Output[start_idx:start_idx + tempV] = mm_fn(
                x[start_idx:start_idx + tempV],
                weight[i],
                perChannelScale[i],
                perTokenScale[start_idx:start_idx + tempV],
                tempV, outDtype, KNum_per_group, dequantMode)
        start_idx += tempV  # advance to the next group's first row
    return Output


@register("function_aclnn_grouped_matmul_V4_a4w4")
class AclnnGroupedMatmulV4A4W4(BaseApi):
    """Golden (CPU) executor for the grouped_matmul_V4 a4w4 case."""

    def init_by_input_data(self, input_data: InputDataset):
        """
        Input-dependent, untimed initialization.

        Clones the raw x/weight/scale tensors for the golden path, generates
        a seeded random groupList, and — on the pyaclnn device path — packs
        x/weight into int4 and the scales into the interleaved uint64 layout
        the kernel expects.
        :param input_data:
        :return:
        """
        self.weight = input_data.kwargs['weight'][0].clone()
        self.x = input_data.kwargs['x'][0].clone()
        x = input_data.kwargs['x'][0]
        weight = input_data.kwargs['weight'][0]
        weight_format = input_data.kwargs['weightFormat']
        groupListType = input_data.kwargs['groupListType']
        input_data.kwargs.pop('weightFormat')
        seed = input_data.kwargs['seed']
        # length = number of experts (weight.shape[0]); cap = number of tokens.
        input_data.kwargs['groupListOptional'] = generate_non_decreasing_sequence(
            weight.shape[0], x.shape[0], groupListType, seed)
        input_data.kwargs.pop('seed')
        # Round-trip through bf16 so the golden sees the same scale precision
        # as the device.
        self.scaleOptional = input_data.kwargs['scaleOptional'][0].to(torch.bfloat16).to(torch.float32).clone()
        scaleOptional = input_data.kwargs['scaleOptional'][0].to(torch.bfloat16).to(torch.float32).clone()
        if self.device == "pyaclnn":
            E, K, N = self.weight.shape
            x_quant = torch_npu.npu_quantize(x.to(torch.float32), torch.tensor([1.], device='npu'), None, torch.quint4x2, -1, False)
            input_data.kwargs['x'][0] = x_quant
            if weight_format == 0:  # ND
                weight_quant = torch_npu.npu_quantize(weight.to(torch.float32), torch.tensor([1.], device='npu'), None, torch.quint4x2, -1, False)
            if weight_format == 1:  # NZ: permute into fractal layout before quantizing
                weight_quant = weight.reshape(E, K // 16, 16, N // 64, 64).permute(0, 3, 1, 2, 4).contiguous()
                weight_quant = weight_quant.permute(0, 2, 3, 1, 4).reshape(E, K, N)
                weight_quant = torch_npu.npu_quantize(weight_quant.to(torch.float32), torch.tensor([1.], device='npu'), None, torch.quint4x2, -1, False)
            input_data.kwargs['weight'][0] = weight_quant
            scaleOptional = scaleOptional.view(E, -1, N)
            KGroup = scaleOptional.shape[1]
            scale_np = scaleOptional.cpu().numpy()
            # Reinterpret the fp32 scale bits as uint32, then widen each value
            # to 64 bits by interleaving zeros and viewing pairs as int64 —
            # the bit layout the kernel consumes.
            scaleUint32 = scale_np.astype(np.float32)
            scaleUint32.dtype = np.uint32
            scaleUint64 = np.zeros((E, KGroup, N * 2), dtype=np.uint32)
            scaleUint64[..., ::2] = scaleUint32
            scaleUint64.dtype = np.int64
            scale = torch.from_numpy(scaleUint64).npu()
            input_data.kwargs['scaleOptional'][0] = scale
            input_data.kwargs['groupListOptional'] = input_data.kwargs['groupListOptional'].to('npu')
            input_data.kwargs['perTokenScaleOptional'][0] = input_data.kwargs['perTokenScaleOptional'][0].to('npu')

    def _golden(self, input_data: InputDataset, scale_dtype: torch.dtype):
        """Run the CPU golden computation; scale_dtype selects the fp32 (MM)
        or fp64 (MM_fp64) reference path inside gmm_a4w4_golden."""
        x_in = self.x.cpu()
        weight_in = self.weight.cpu()
        scale_in = self.scaleOptional.cpu().to(torch.bfloat16).to(scale_dtype)
        perTokenScale_in = input_data.kwargs['perTokenScaleOptional'][0].cpu()
        groupList_in = input_data.kwargs['groupListOptional'].cpu()
        groupListType = input_data.kwargs['groupListType']
        # NOTE(review): the output dtype is drawn at random without seeding,
        # so the golden dtype is nondeterministic — confirm the comparison
        # framework tolerates this.
        out_dtype = random.choice([torch.float16, torch.bfloat16])
        t1 = gmm_a4w4_golden(x_in, weight_in, scale_in, perTokenScale_in,
                             groupList_in, out_dtype, groupListType)
        # Total valid tokens: last entry in cumsum mode, sum in count mode.
        # (Fix: the previous torch.cumsum(...) call was missing the required
        # dim argument and raised TypeError; the cumsum's last entry is just
        # the sum.)
        if groupListType == 0:
            total = int(groupList_in[-1])
        else:
            total = int(groupList_in.sum())
        # Zero out rows beyond the valid token range before comparison.
        t2 = torch.zeros_like(t1)
        t2[:total, :] = t1[:total, :]
        return t2

    def cpu(self, input_data: InputDataset, with_output: bool = True):
        """Standard golden path: fp32 scales."""
        return self._golden(input_data, torch.float32)

    def cpu_benchmark(self, input_data: InputDataset, with_output: bool = True):
        """High-precision benchmark path: fp64 scales."""
        return self._golden(input_data, torch.float64)

    def __call__(self, input_data: InputDataset, with_output: bool = True):
        """Dispatch on the scale dtype: fp64 selects the benchmark path."""
        scale = input_data.kwargs["scaleOptional"][0]
        if scale.dtype == torch.float64:
            return self.cpu_benchmark(input_data, with_output)
        else:
            return self.cpu(input_data, with_output)

@register("function_pyaclnn_grouped_matmul_V4_a4w4")
class PyaclnnGroupedMatmulV4A8W4(AclnnBaseApi):
    """Device-side (pyaclnn) executor for the grouped_matmul_V4 a4w4 case.

    NOTE(review): the class name says A8W4 while the registration key says
    a4w4 — confirm which is intended.
    """

    def __init__(self, task_result: TaskResult, backend):
        super().__init__(task_result, backend)
        self.input_args = None
        self.groupList = None      # groupList tensor, kept for after_call
        self.groupListType = 0     # 0 = cumsum semantics, 1 = count semantics

    def init_by_input_data(self, input_data: InputDataset):
        """Build the aclnn argument list, inserting null placeholders for the
        optional inputs this case does not use."""
        self.task_result.output_info_list = [self.task_result.output_info_list]
        input_args, output_packages = super().init_by_input_data(input_data)
        # Positions 0/1 hold x and weight (filled by the base class).
        input_args.insert(2, ctypes.c_void_p(0))   # bias
        input_args.insert(4, ctypes.c_void_p(0))   # offset
        input_args.insert(5, ctypes.c_void_p(0))   # antiquantScale
        input_args.insert(6, ctypes.c_void_p(0))   # antiquantOffset
        # Index 8 already carries groupList; remember it for after_call.
        self.groupList = input_data.kwargs["groupListOptional"]
        self.groupListType = input_data.kwargs["groupListType"]
        input_args.insert(9, ctypes.c_void_p(0))   # activationInputOptional
        input_args.insert(10, ctypes.c_void_p(0))  # activationQuantScaleOptional
        input_args.insert(11, ctypes.c_void_p(0))  # activationQuantOffsetOptional
        input_args.insert(17, ctypes.c_void_p(0))
        input_args.insert(18, ctypes.c_void_p(0))
        return input_args, output_packages

    def after_call(self, output_packages):
        """Zero out rows beyond the valid token range of the device output."""
        groupList = self.groupList
        # Total valid tokens: last entry in cumsum mode, total sum in count
        # mode. (Fix: the previous torch.cumsum(groupList) call was missing
        # the required dim argument and raised TypeError.)
        if self.groupListType == 0:
            total = groupList[-1]
        else:
            total = torch.cumsum(groupList, dim=0)[-1]
        t1 = self.backend.acl_tensorlist_to_torch(output_packages[0])[0]
        t2 = torch.zeros_like(t1)
        t2[:total, :] = t1[:total, :]
        return [t2]