# Copyright (c) Huawei Technologies Co., Ltd. 2023. All rights reserved.
# executor_function_ascend_grouped_matmul_add.py
import numpy as np
import torch

from atk.configs.dataset_config import InputDataset
from atk.configs.results_config import TaskResult
from atk.tasks.api_execute import register
from atk.tasks.api_execute.base_api import BaseApi
from atk.tasks.api_execute.aclnn_base_api import AclnnBaseApi
from atk.tasks.dataset.base_dataset import OpsDataset


def postprocess(c_in, a, antiquant_scale, antiquant_offset):
    """Dequantize the stacked int32 MSD matmul result back to float32.

    The MSD path computes the product in two int32 halves stacked along
    axis 0 of ``c_in``: rows ``[:M]`` hold the high part and rows ``[M:]``
    the low part (scaled by 254).  This recombines them, undoes the
    per-row activation quantization (max-abs / 127), and applies the
    per-channel antiquant scale and offset.

    Args:
        c_in: int32 array of shape (2*M, N) — stacked partial products.
        a: float activations of shape (M, K); only its per-row max-abs and
           row sums are used here.
        antiquant_scale: per-channel scale, broadcastable to (M, N).
        antiquant_offset: per-channel offset of shape (1, N).

    Returns:
        float32 array of shape (M, N) with the dequantized result.
    """
    assert c_in.dtype == np.int32, c_in.dtype
    M = a.shape[0]
    c_f32 = c_in.astype(np.float32)
    # Per-row symmetric quantization scale used on the activation side.
    a_max = np.max(np.abs(a), axis=1, keepdims=True)
    a_sum = np.sum(a, axis=1, keepdims=True)
    # Offset contribution: sum_k(a[m, k]) * offset[n], shape (M, N).
    c_tmp = np.matmul(a_sum, antiquant_offset)
    # Recombine high/low halves, then undo the activation quantization.
    c = (c_f32[:M] + c_f32[M:] / 254) * (a_max / 127)
    c = (c + c_tmp) * antiquant_scale
    return c


@register("ascend_function_grouped_matmul")
class MethodTorchGroupedMatmulApi(BaseApi):
    """CPU golden model for Ascend grouped matmul with A16W8 antiquant (MSD).

    ``x`` is split along the M axis according to ``groupList`` (cumulative
    end offsets, see ``init_by_input_data``); each slice is multiplied by
    its per-expert weight through the MSD int-decomposition pair
    (``preprocess``/``postprocess``) and the per-expert results are
    concatenated into a single float32 tensor.
    """

    def __init__(self, task_result: TaskResult):
        super(MethodTorchGroupedMatmulApi, self).__init__(task_result)

    def __call__(self, input_data: InputDataset, with_output: bool = True):
        """Return the concatenated (sum(M_i), N) float32 golden result.

        BUGFIX: the original body referenced undefined names (``X_input``,
        ``W_input``, ``n``) and never filled the bias slice list ``Bs``
        although it was indexed when a bias was supplied; it now uses the
        tensors actually read from ``input_data``.
        """
        x = input_data.kwargs["x"][0]
        weight = input_data.kwargs["weight"][0]
        antiquantScale = input_data.kwargs["antiquantScaleOptional"][0]
        groupList = input_data.kwargs["groupList"]

        antiquantOffset = input_data.kwargs["antiquantOffsetOptional"][0]
        bias = torch.tensor([])
        hasBias = bool(input_data.kwargs["biasOptional"])
        if hasBias:
            bias = input_data.kwargs["biasOptional"][0]
        E = groupList.shape[0]

        param_group_list = groupList
        # Pseudo-quantization (A16W8): one antiquant scale/offset per expert.
        antiS = np.split(antiquantScale, E, axis=0)
        antiO = np.split(antiquantOffset, E, axis=0)
        # Per-group quantization: params are 3-D — drop the leading expert axis.
        if antiquantScale.ndim > 2:
            antiS = [s[0] for s in antiS]
            antiO = [o[0] for o in antiO]

        # groupType == 0: grouping along the M axis. groupList holds
        # cumulative end offsets, so group i is x[offset : groupList[i]].
        Xs = []
        Ws = []
        Bs = []
        offset = 0
        for i in range(len(param_group_list)):
            Xs.append(x[offset: param_group_list[i], :])
            Ws.append(weight[i])
            if hasBias:
                # assumes bias is (E, N) — TODO confirm against dataset config
                Bs.append(bias[i])
            offset = param_group_list[i]

        # Output column count N comes from the antiquant scale's last axis.
        out_n = antiS[0].shape[-1]
        res = torch.empty(0, out_n, dtype=torch.float32)
        for i in range(len(Xs)):
            x_i = Xs[i].astype(np.float32)
            w_i = Ws[i].astype(np.int32)
            scale_i = antiS[i].astype(np.float32)
            offset_i = antiO[i].astype(np.float32).reshape((1, -1))
            dim_M = x_i.shape[0]
            dim_N = scale_i.shape[-1]
            scale_bc = np.broadcast_to(scale_i, [dim_M, dim_N])
            # MSD decomposition: quantize activations into two stacked int
            # parts.  NOTE(review): ``preprocess`` is not defined or imported
            # in this file — confirm the harness provides it.
            a_i = preprocess(x_i)
            c_in = np.matmul(a_i.astype(np.int32), w_i)
            d_i = postprocess(c_in, x_i, scale_bc, offset_i)
            if hasBias:
                d_i = d_i + Bs[i].astype(np.float32)
            res = torch.cat([res, torch.from_numpy(d_i)], dim=0)
        return res

    def init_by_input_data(self, input_data: InputDataset, with_output: bool = False):
        """Rewrite ``groupList`` as int64 cumulative row offsets.

        Produces ``E`` evenly spaced end offsets from ``x.shape[0] // E`` up
        to ``x.shape[0]``; the tensor lives on the NPU for the 'pyaclnn'
        device and on the CPU otherwise.
        """
        x = input_data.kwargs["x"][0]
        groupList = input_data.kwargs["groupList"]
        steps = int(groupList.size(0))
        offsets = torch.linspace(
            x.shape[0] // groupList.shape[0], x.shape[0], steps=steps
        ).round().to(torch.int64)
        if self.device == 'pyaclnn':
            input_data.kwargs['groupList'] = offsets.npu()
        else:
            input_data.kwargs['groupList'] = offsets.cpu()


@register("aclnn_grouped_matmul")
class AclnnFunctionApi(AclnnBaseApi):
    """Drive the grouped-matmul aclnn kernel through the generic base runner.

    The base class builds the positional C argument list from the dataset;
    this subclass pads it with null pointers for the optional tensor lists
    that this test case does not supply, so the final list matches the
    aclnnGroupedMatmulV4GetWorkspaceSize parameter order.
    """

    def __call__(self):
        # NOTE(review): the base result is discarded here — confirm that
        # AclnnBaseApi.__call__ returns None or stores its result on self.
        super().__call__()

    def init_by_input_data(self, input_data):
        # Each insert shifts later arguments right, so every index below is
        # relative to the list state AFTER the preceding inserts.  Positions
        # follow the aclnnGroupedMatmulV4 workspace-size prototype.
        import ctypes
        input_args, output_pac = super().init_by_input_data(input_data)
        # input_args.insert(2, ctypes.c_void_p(0)) # bias
        # input_args.insert(3, ctypes.c_void_p(0)) # scale
        input_args.insert(4, ctypes.c_void_p(0)) # offset
        input_args.insert(5, ctypes.c_void_p(0)) # antiquantscale
        input_args.insert(6, ctypes.c_void_p(0)) # antiquantoffset
        # input_args.insert(7, ctypes.c_void_p(0)) # antiquantOffset
        # 8 : groupList
        input_args.insert(9, ctypes.c_void_p(0)) # activationInputOptional
        input_args.insert(10, ctypes.c_void_p(0)) # activationQuantScaleOptional
        input_args.insert(11, ctypes.c_void_p(0)) # activationQuantOffsetOptional
        input_args[12] = ctypes.c_long(2) # split item = 2
        # input_args.insert(15, ctypes.c_long(0)) # actType
        # nullptr output: activationFeatureOutOptional / dynQuantScaleOutOptional
        input_args.insert(17, ctypes.c_void_p(0))
        input_args.insert(18, ctypes.c_void_p(0))

        return input_args, output_pac

    def after_call(self, output_packages):
        # Pass-through: no post-processing beyond what the base class does.
        output = super().after_call(output_packages)
        return output