#!/usr/bin/env python3
# coding: utf-8
# Copyright (c) 2025 Huawei Technologies Co., Ltd.
# This file is a part of the CANN Open Software.
# Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
# Please refer to the License for details. You may not use this file except in compliance with the License.
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
# See LICENSE in the root of the software repository for the full text of the License.
# ======================================================================================================================
import torch
import torch_npu
import ctypes
import logging
import numpy as np
import random
from atk.configs.dataset_config import InputDataset
from atk.configs.results_config import TaskResult
from atk.tasks.api_execute import register
from atk.tasks.api_execute.base_api import BaseApi
from atk.tasks.dataset.base_dataset import OpsDataset
from atk.tasks.api_execute.aclnn_base_api import AclnnBaseApi
from atk.tasks.backends.lib_interface.acl_wrapper import TensorPtr
from atk.tasks.backends.lib_interface.acl_wrapper import AclFormat,AclTensorList

def gmm_a8w4_golden(x_in, weight_in, bias_in, scale_in, groupList_in, perTokenScale_in):
    """Golden (CPU/numpy) reference for grouped matmul with int8 activations
    and int4 weights (A8W4).

    :param x_in: activations of shape (m, k); each element packs two 4-bit
        values (high/low nibble) that are split apart below -- assumed int8,
        TODO confirm packing layout against the kernel spec
    :param weight_in: weight buffer, reshaped to (groupNum, k, n) after an
        int8 cast
    :param bias_in: per-group bias, indexed as bias_in[i] with n elements
    :param scale_in: packed scales of shape (groupNum, quantGroupNum, n);
        the low 32 bits of each element carry a float32 bit pattern
    :param groupList_in: 1-D array of per-group row counts (groupNum entries)
    :param perTokenScale_in: per-token dequantization scales
    :return: torch tensor of shape (rows, n), see non_quant_golden
    """
    weightNz = weight_in.astype(np.int8)
    groupNum = groupList_in.shape[0]
    m = x_in.shape[0]
    k = x_in.shape[1]
    n = scale_in.shape[2]

    weight = weightNz.reshape(groupNum, k, n)
    # Split each packed byte into two rows per token: the high nibble
    # (floor-division by 16) and the low nibble (& 0x0F, recentred from
    # [0, 15] to [-8, 7]) -> resulting shape (m * 2, k).
    xC12 = np.concatenate([x_in.reshape(m, 1, k) // 16, (x_in.reshape(m, 1, k) & 0x0F) - 8], axis=1).reshape(m * 2, k)
    scaleUint32 = scale_in.astype(np.uint32)
    # In-place dtype assignment reinterprets the uint32 bits as float32
    # without copying (a bit cast, not a value conversion).
    scaleUint32.dtype = np.float32
    out = non_quant_golden(xC12, weight, scaleUint32, perTokenScale_in, groupList_in, bias_in)
    return out

def non_quant_golden(x, weight, scale, perTokenScale, groupList, bias):
    """Grouped, per-quant-group dequantized matmul reference.

    Emulates the device accumulation order: partial products of each quant
    group are accumulated in ``atomic`` precision (fp16 normally, fp64 for
    the high-precision benchmark path), then the high/low nibble rows are
    recombined, biased and scaled per token.

    :param x: activations of shape (2 * total_rows, k); pairs of rows hold
        the high/low nibble contributions of the same token
    :param weight: integer weights of shape (groupNum, k, n)
    :param scale: per-quant-group scales of shape (groupNum, quantGroupNum, n)
    :param perTokenScale: per-token scales; dtype selects the accumulator
    :param groupList: per-group row counts; cumsum gives the split points
    :param bias: per-group bias, bias[i] broadcastable to (1, n)
    :return: torch tensor of shape (total_rows, n) cast to fp16 or bf16
    """
    groupNum, k, n = weight.shape
    quantGroupNum = scale.shape[1]
    index = np.cumsum(groupList)
    # x carries two rows per logical token, hence the doubled split points.
    xSplit = np.split(x, index * 2, axis=0)
    perTokenScaleSplit = np.split(perTokenScale, index, axis=0)
    weightGroup = weight.reshape(groupNum, quantGroupNum, k // quantGroupNum, n).astype(np.int32)
    mmOuts = []
    # fp64 per-token scales mark the high-precision benchmark run; otherwise
    # accumulate in fp16 to reproduce on-chip atomic-add rounding.
    if perTokenScale.dtype == np.float64:
        atomic = np.float64
    else:  
        atomic = np.float16
    for i in range(groupNum):
        xi = xSplit[i].reshape(-1, quantGroupNum, k // quantGroupNum).astype(np.int32)
        mmi = np.zeros([xi.shape[0], n], dtype=atomic)
        for j in range(quantGroupNum):
            mm = np.matmul(xi[:, j, :], weightGroup[i, j, ...])
            mm = mm.astype(scale.dtype) * scale[i, j].reshape(1, -1)
            # Round to `atomic` precision at every step on purpose -- this
            # models the hardware accumulation; do not fold into one sum.
            mmi = (mmi.astype(atomic) + mm.astype(atomic)).astype(atomic)

        # Recombine nibble pairs: high-nibble row * 16 + low-nibble row.
        mmi = mmi.reshape(-1, 2, n).astype(perTokenScale.dtype)
        mmi = mmi[:, 0, :] * 16 + mmi[:, 1, :] + bias[i].reshape(1, n)
        mmi = mmi * perTokenScaleSplit[i]
        mmOuts.append(mmi)
    golden = np.concatenate(mmOuts, axis=0)
    golden_tensor = torch.from_numpy(golden)
    # NOTE(review): the output dtype is picked at random, so this golden
    # reference is nondeterministic between runs -- confirm this is the
    # intended way to cover both fp16 and bf16 outputs.
    out_dtype = random.choice([torch.float16, torch.bfloat16])
    return golden_tensor.to(out_dtype)

@register("function_aclnn_grouped_matmul_V4_a8w4")
class AclnnGroupedMatmulV4A8W4(BaseApi):
    """CPU golden implementation wrapper for aclnnGroupedMatmulV4 in the
    A8W4 (int8 activation, int4 weight) quantized configuration.

    Keeps pristine copies of the weight/scale tensors, re-encodes them for
    the device path, and produces golden results via gmm_a8w4_golden.
    """

    @staticmethod
    def _pack_scale_to_int64(scale_tensor):
        """Pack a float scale tensor of shape (E, k, N) into an int64 numpy
        array of the same shape whose elements carry the fp16-rounded fp32
        bit pattern of the scale in their low 32 bits.

        :param scale_tensor: torch tensor of shape (E, k, N)
        :return: np.int64 array of shape (E, k, N)
        """
        E, k, N = scale_tensor.shape[0], scale_tensor.shape[1], scale_tensor.shape[2]
        scale_np = scale_tensor.cpu().numpy()
        # Round through fp16 first, widen back to fp32, then reinterpret the
        # bits as uint32 (in-place dtype assignment is a bit cast, no copy).
        scaleUint32 = scale_np.astype(np.float16).astype(np.float32)
        scaleUint32.dtype = np.uint32
        # Interleave with zero high words so each uint32 pair reads back as
        # one int64 whose low word holds the fp32 bit pattern.
        scaleUint64 = np.zeros((E, k, N * 2), dtype=np.uint32)
        scaleUint64[..., ::2] = scaleUint32
        scaleUint64.dtype = np.int64
        return scaleUint64

    def init_by_input_data(self, input_data: InputDataset):
        """
        Initialization that depends on the current input data, and that must
        not be counted towards the timed execution, is performed here.

        :param input_data: dataset holding the op kwargs
        :return: None
        """
        # Keep pristine host copies: the device path below overwrites the
        # kwargs in-place with device-specific encodings.
        self.weight = input_data.kwargs['weight'][0].clone()
        weight = input_data.kwargs['weight'][0]
        self.scaleOptional = input_data.kwargs['scaleOptional'][0].clone()
        scaleOptional = input_data.kwargs['scaleOptional'][0]
        if self.device == "pyaclnn":
            # Re-encode the int8-stored weight as packed int4 for the device.
            weight_quant = torch_npu.npu_quantize(weight.to(torch.float32), torch.tensor([1.], device='npu'), None, torch.quint4x2, -1, False)
            input_data.kwargs['weight'][0] = weight_quant

            scale = torch.from_numpy(self._pack_scale_to_int64(scaleOptional)).npu()
            input_data.kwargs['scaleOptional'][0] = scale
            input_data.kwargs['biasOptional'][0] = input_data.kwargs['biasOptional'][0].to('npu')
            input_data.kwargs['groupListOptional'] = input_data.kwargs['groupListOptional'].to('npu')
            input_data.kwargs['perTokenScaleOptional'][0] = input_data.kwargs['perTokenScaleOptional'][0].to('npu')

    def _golden(self, input_data: InputDataset, high_precision: bool):
        """Shared golden path for cpu() and cpu_benchmark().

        :param input_data: dataset holding the op kwargs
        :param high_precision: when True, promote bias / scale / per-token
            scale to float64 so non_quant_golden accumulates in float64
        :return: (M, N) torch tensor; rows past the golden output (group
            list may not sum to M) are zero-padded
        """
        x = input_data.kwargs['x'][0]
        M = x.shape[0]
        bias = input_data.kwargs['biasOptional'][0]
        scale = self.scaleOptional
        perTokenScale = input_data.kwargs['perTokenScaleOptional'][0]
        if high_precision:
            bias = bias.to(torch.float64)
            scale = scale.to(torch.float64)
            perTokenScale = perTokenScale.to(torch.float64)

        N = scale.shape[2]
        scale_in = self._pack_scale_to_int64(scale)
        groupListOptional = input_data.kwargs['groupListOptional']

        t1 = gmm_a8w4_golden(x_in=x.cpu().numpy(),
                             weight_in=self.weight.cpu().numpy(),
                             bias_in=bias.cpu().numpy(),
                             scale_in=scale_in,
                             groupList_in=groupListOptional.cpu().numpy(),
                             perTokenScale_in=perTokenScale.cpu().numpy())

        # Zero-pad: the golden result may cover fewer than M rows.
        t2 = torch.zeros(M, N, dtype=t1.dtype, device=t1.device)
        t2[:t1.shape[0]] = t1
        return t2

    def cpu(self, input_data: InputDataset, with_output: bool = False):
        """Golden path for the regular (fp16/bf16) comparison run."""
        return self._golden(input_data, high_precision=False)

    def cpu_benchmark(self, input_data: InputDataset, with_output: bool = False):
        """High-precision (float64) golden path for benchmark comparison."""
        return self._golden(input_data, high_precision=True)

    def __call__(self, input_data: InputDataset, with_output: bool = False):
        # A float64 scale marks the high-precision benchmark request.
        scale = input_data.kwargs["scaleOptional"][0]
        if scale.dtype == torch.float64:
            return self.cpu_benchmark(input_data, with_output)
        else:
            return self.cpu(input_data, with_output)

    def get_cpp_func_signature_type(self):
        """Return the C signature of aclnnGroupedMatmulV4GetWorkspaceSize."""
        return "aclnnStatus aclnnGroupedMatmulV4GetWorkspaceSize(const aclTensorList *x, const aclTensorList *weight, \
        const aclTensorList *biasOptional, const aclTensorList *scaleOptional, const aclTensorList *offsetOptional, \
        const aclTensorList *antiquantScaleOptional, const aclTensorList *antiquantOffsetOptional, const aclTensorList *perTokenScaleOptional, \
        const aclTensor *groupListOptional, const aclTensorList *activationInputOptional, const aclTensorList *activationQuantScaleOptional,  \
        const aclTensorList *activationQuantOffsetOptional, int64_t splitItem, int64_t groupType, int64_t groupListType, int64_t actType, \
        aclTensorList *out, aclTensorList *activationFeatureOutOptional, aclTensorList *dynQuantScaleOutOptional, uint64_t *workspaceSize, \
        aclOpExecutor **executor)"

@register("function_pyaclnn_grouped_matmul_V4_a8w4")
class PyaclnnGroupedMatmulV4A8W4(AclnnBaseApi):
    """Pyaclnn executor for aclnnGroupedMatmulV4 in the A8W4 configuration.

    Prepares the ctypes argument list (NULL-ing the optional tensor lists
    this op does not use) and post-processes the device output.
    """

    def __init__(self, task_result: TaskResult, backend):
        super().__init__(task_result, backend)
        # Second, untouched copy of the prepared argument list; consumed by
        # after_call to read back the group list tensor.
        self.input_args = None

    def init_by_input_data(self, input_data: InputDataset):
        # The base class expects a list of output-info entries.
        self.task_result.output_info_list = [self.task_result.output_info_list]
        input_args, output_packages = super().init_by_input_data(input_data)
        # NOTE(review): the base init is invoked a second time, presumably to
        # keep a pristine arg-list copy for after_call -- confirm it has no
        # double side effects.
        self.input_args, _ = super().init_by_input_data(input_data)

        # Optional tensor-list slots unused by the A8W4 path are passed as
        # NULL aclTensorList pointers.
        for slot in (4, 5, 6, 9, 10, 11):
            input_args[slot] = ctypes.POINTER(AclTensorList)()
        # Two trailing optional output lists of the V4 signature, also NULL.
        input_args += [ctypes.POINTER(AclTensorList)(), ctypes.POINTER(AclTensorList)()]

        return input_args, output_packages

    def after_call(self, output_packages):
        # Arg slot 8 holds groupListOptional; its length gives the row count
        # that is considered valid device output.
        group_list = self.acl_tensor_to_torch(self.input_args[8])
        valid_rows = group_list.shape[0]
        device_out = self.backend.acl_tensorlist_to_torch(output_packages[0])[0]
        # Zero everything past the valid rows before comparison.
        trimmed = torch.zeros_like(device_out)
        trimmed[:valid_rows, :] = device_out[:valid_rows, :]
        return [trimmed]