# Copyright 2024 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""ptq wrapper cells for mindformers."""

from mindspore import Parameter, Tensor, dtype
from mindspore.common.initializer import initializer
from mindspore import ops as msops
from mindformers.modules.layers import Linear
from mindformers.parallel_core.inference.tensor_parallel.layers import (
    ColumnParallelLinear as McoreColumnParallelLinear, RowParallelLinear as McoreRowParallelLinear)
from mindformers.parallel_core.inference.tensor_parallel.layers import QKVParallelLinear

from mindspore_gs.ptq.ptq_config import PTQMode, QuantGranularity
from mindspore_gs.ptq.context import InnerPTQConfig
from mindspore_gs.ptq.basic_quant_func import quant_tensor
from mindspore_gs.common import logger
from mindspore_gs.ptq.ptq.hal import QuantParam, AllQuantMatmul, ParallelType, KernelType, OutlierSuppressionPlusSmoothMatmul
from mindspore_gs.ptq.ptq.algorithms.quantizer import Quantizer
from mindspore_gs.ptq.ptq.wrapper_cell import Checker
from .parallel_minmax import get_min_max_op
from .linear_weight_quant_wrappers import WeightQuantLinearCell
from .linear_wrapper import LinearInferCell
from .mcore_linear_wrapper import McoreLinearInferCell
from mindspore_gs.ptq.ptq.hal import MyV3Matmul
import numpy as np

def fake_quant_tensor_4bit(weight_np: np.ndarray):
    """Symmetric per-tensor fake quantization of *weight_np* into the int4 range.

    The scale maps the largest absolute value onto +7, so the produced codes
    live in ``[-7, 7]``; the zero point is always 0 (symmetric scheme).

    Args:
        weight_np: weight array of any shape/float dtype.

    Returns:
        tuple: ``(scale, zero_point, q_int4)`` where ``q_int4`` is an ``int8``
        array holding the 4-bit codes, and ``zero_point`` is a float64 scalar 0.
    """
    as_fp32 = weight_np.astype(np.float32)
    abs_peak = np.max(np.abs(as_fp32))
    # Guard an all-zero tensor against a zero scale (division by zero below).
    if abs_peak < 1e-6:
        abs_peak = 1e-6

    level_max = 7.0
    scale = abs_peak / level_max

    codes = np.clip(np.round(as_fp32 / scale), -level_max, level_max)

    return scale, np.array(0.0, dtype=np.float64), codes.astype(np.int8)

def pack_int4_to_qint4x2(q_int4_nd: np.ndarray) -> np.ndarray:
    """Pack pairs of int4 codes along the last axis into single uint8 bytes.

    Adjacent values are combined two-per-byte: the even-index element becomes
    the low nibble and the odd-index element the high nibble, so the output's
    last dimension is half the (zero-padded) input's.

    Args:
        q_int4_nd: array of int4 codes stored in a wider integer dtype.

    Returns:
        np.ndarray: ``uint8`` array with the last dimension halved.
    """
    # An odd last dimension is zero-padded so every value has a partner.
    if q_int4_nd.shape[-1] % 2:
        padding = [(0, 0)] * (q_int4_nd.ndim - 1) + [(0, 1)]
        q_int4_nd = np.pad(q_int4_nd, padding, mode="constant", constant_values=0)

    half = q_int4_nd.shape[-1] // 2
    paired = q_int4_nd.reshape(q_int4_nd.shape[:-1] + (half, 2))

    # Keep only the low 4 bits of each code (two's complement for negatives).
    low_nibble = (paired[..., 0] & 0x0F).astype(np.uint8)
    high_nibble = (paired[..., 1] & 0x0F).astype(np.uint8)

    return (low_nibble | (high_nibble << 4)).astype(np.uint8)


class AllQuantLinearCell(WeightQuantLinearCell):
    """Calibration cell for W8A8 quantization (int8 weight, per-tensor int8 activation).

    Extends ``WeightQuantLinearCell`` with activation scale/zero-point
    calibration and builds the matching inference cell in :meth:`deploy`.
    """

    @staticmethod
    def reg_self():
        """Register this cell for every supported linear layer type."""
        class A8W8Checker(Checker):
            """Accepts configs requesting int8 weight + per-tensor int8 activation."""
            def check(self, config: InnerPTQConfig):
                return config.weight_quant_dtype == dtype.int8 and config.act_quant_dtype == dtype.int8 and \
                       config.act_quant_granularity is QuantGranularity.PER_TENSOR

        Quantizer.reg_layer_map(Linear, AllQuantLinearCell, A8W8Checker())
        Quantizer.reg_layer_map(McoreColumnParallelLinear, AllQuantLinearCell, A8W8Checker())
        Quantizer.reg_layer_map(McoreRowParallelLinear, AllQuantLinearCell, A8W8Checker())
        Quantizer.reg_layer_map(QKVParallelLinear, AllQuantLinearCell, A8W8Checker())
        # Research-model layers are optional extras; skip registration when the
        # `research` package is not on the path.
        try:
            from research.deepseek3.moe import (ColumnParallelGroupLinear, RowParallelGroupLinear,
                                                ColumnParallelLinearWorldRegion, RowParallelLinearWorldRegion)
            from research.deepseek3.infer.layers import ColumnParallelLinear as DSColumnParallelLinear
            from research.deepseek3.infer.layers import RowParallelLinear as DSRowParallelLinear
            from research.llama3_1.infer.layers import ColumnParallelLinear as LlamaColumnParallelLinear
            from research.llama3_1.infer.layers import RowParallelLinear as LlamaRowParallelLinear
            from research.telechat2.infer.layers import ColumnParallelLinear as TC2ColumnParallelLinear
            from research.telechat2.infer.layers import RowParallelLinear as TC2RowParallelLinear
            Quantizer.reg_layer_map(TC2ColumnParallelLinear, AllQuantLinearCell, A8W8Checker())
            Quantizer.reg_layer_map(TC2RowParallelLinear, AllQuantLinearCell, A8W8Checker())
            Quantizer.reg_layer_map(LlamaColumnParallelLinear, AllQuantLinearCell, A8W8Checker())
            Quantizer.reg_layer_map(LlamaRowParallelLinear, AllQuantLinearCell, A8W8Checker())
            Quantizer.reg_layer_map(DSColumnParallelLinear, AllQuantLinearCell, A8W8Checker())
            Quantizer.reg_layer_map(DSRowParallelLinear, AllQuantLinearCell, A8W8Checker())
            Quantizer.reg_layer_map(ColumnParallelGroupLinear, AllQuantLinearCell, A8W8Checker())
            Quantizer.reg_layer_map(RowParallelGroupLinear, AllQuantLinearCell, A8W8Checker())
            Quantizer.reg_layer_map(ColumnParallelLinearWorldRegion, AllQuantLinearCell, A8W8Checker())
            Quantizer.reg_layer_map(RowParallelLinearWorldRegion, AllQuantLinearCell, A8W8Checker())
        except ImportError:
            pass

    def __init__(self, linear_name, linear, context, cfg: InnerPTQConfig, **kwargs):
        """Initialize activation-quantization state on top of the weight-quant base."""
        super().__init__(linear_name, linear, context, cfg, **kwargs)

        # Row-parallel layers need min/max reduction across the tensor-parallel group.
        is_rowparallel = self.parallel_type == ParallelType.ROW_PARALLEL
        self.x_quant_max, self.x_quant_min = get_min_max_op(cfg.tp_size, is_rowparallel)

        # Activation scale/zero-point, filled in by quant() after calibration.
        self.x_scale = Parameter(initializer('ones', (1,), dtype=dtype.float64))
        self.x_zp = Parameter(initializer('zeros', (1,), dtype=dtype.float64))

    def _quant_info(self):
        """Return the quant-info tag, extended with the activation part (e.g. '-A8-...')."""
        res = super()._quant_info()
        if self.cfg.act_quant_dtype == dtype.int8:
            return f'{res}-A8-{str(self.cfg.act_quant_granularity)}'
        raise RuntimeError(f"Unexpected act_quant_dtype: {self.cfg.act_quant_dtype}.")

    def quant(self):
        """Quantize weight (via base class) and calibrate activation scale/zp."""
        # quant weight
        super().quant()
        # quant activation: derive per-tensor scale/zp from the collected samples
        x_scale, x_zp, _ = quant_tensor(self.cat_samples, self.x_quant_min, self.x_quant_max,
                                        self.cfg.act_narrow_range, self.cfg.act_symmetric,
                                        self.cfg.act_quant_granularity == QuantGranularity.PER_GROUP,
                                        self.cfg.group_size,
                                        self.cfg.act_quant_dtype, -1, False)
        self.x_scale.set_data(Tensor(x_scale, dtype=dtype.float64))
        self.x_zp.set_data(Tensor(x_zp, dtype=dtype.float64))
        self.cfg.dumper.dump_data(self.layer_name, "|activation_params|input0_activation_inputs", self.cat_samples)
        self.cfg.dumper.dump_data(self.layer_name, "|activation_params|output0_activation_scale", self.x_scale)
        self.cfg.dumper.dump_data(self.layer_name, "|activation_params|output1_activation_zp", self.x_zp)

    def deploy(self):
        """Build the inference cell from the calibrated parameters.

        Dual-weight mcore layers (exposing ``weight1``/``weight2``) get both
        weights fake-quantized to int4, packed into qint4x2 and wrapped by
        ``AllQuantMcoreLinearInferCell``; every other layer takes the regular
        int8 ``AllQuantLinearInferCell`` path.
        """
        # Only touch weight1/weight2 after confirming they exist: the previous
        # implementation read them unconditionally and raised AttributeError on
        # single-weight layers before ever reaching its hasattr() guard.
        if self.is_mcorelinear and hasattr(self.layer, "weight1") and hasattr(self.layer, "weight2"):
            s1, zp1, q_w1 = fake_quant_tensor_4bit(self.layer.weight1.asnumpy())
            s2, zp2, q_w2 = fake_quant_tensor_4bit(self.layer.weight2.asnumpy())

            # Pack the int4 codes two-per-byte before moving them onto device.
            # NOTE(review): q_weight_1/q_weight_2 are assumed to be created by the
            # base class with matching packed shapes — confirm against WeightQuantLinearCell.
            self.q_weight_1.set_data(Tensor(pack_int4_to_qint4x2(q_w1), dtype=dtype.qint4x2))
            self.q_weight_2.set_data(Tensor(pack_int4_to_qint4x2(q_w2), dtype=dtype.qint4x2))

            # Keep scale/zp around so QuantParam can consume them below.
            self.w1_scale = Tensor(np.array([s1], dtype=np.float32), dtype=dtype.float32)
            self.w1_zp = Tensor(np.array([zp1], dtype=np.float32), dtype=dtype.bfloat16)
            self.w2_scale = Tensor(np.array([s2], dtype=np.float32), dtype=dtype.float32)
            self.w2_zp = Tensor(np.array([zp2], dtype=np.float32), dtype=dtype.bfloat16)

            return AllQuantMcoreLinearInferCell(self._layer_name, self.layer, self.cfg,
                                                self.q_weight_1, self.q_weight_2,
                                                QuantParam(self.x_scale, self.x_zp),
                                                QuantParam(self.w1_scale, self.w1_zp),
                                                QuantParam(self.w2_scale, self.w2_zp),
                                                self.parallel_type)

        return AllQuantLinearInferCell(self._layer_name, self.layer, self.cfg,
                                       self.q_weight, QuantParam(self.x_scale, self.x_zp),
                                       QuantParam(self.w_scale, self.w_zp), self.compute_type,
                                       self.parallel_type)


class AllQuantLinearInferCell(LinearInferCell):
    """Inference cell that replaces a calibrated linear layer with a fused
    quantized matmul: activation is quantized to int8, multiplied by the int8
    weight, and dequantized by the created ``AllQuantMatmul`` kernel."""

    def __init__(self, layer_name, linear: Linear, cfg: InnerPTQConfig, q_weight, x_qparam: QuantParam,
                 w_qparam: QuantParam, compute_type, parallel_type: ParallelType):
        """Wrap *linear* with the quantized kernel.

        Args:
            layer_name: fully qualified layer name; used for logging and for
                matching against ``cfg.aclnn_quant_list``.
            linear: the calibrated linear layer being replaced.
            cfg: PTQ configuration (mode, tp_size, aclnn op list).
            q_weight: quantized int8 weight installed as the layer's weight.
            x_qparam: activation quantization scale/zero-point.
            w_qparam: weight quantization scale/zero-point.
            compute_type: compute dtype forwarded to ``AllQuantMatmul.create``.
            parallel_type: column/row parallel kind of the layer.
        """
        super().__init__(linear, parallel_type)
        self.cfg = cfg
        is_deploy = cfg.mode == PTQMode.DEPLOY
        # Layers named in cfg.aclnn_quant_list use the ACLNN kernel, others INTERNAL.
        use_aclnn_quant = any(opname in layer_name for opname in cfg.aclnn_quant_list)
        bias_osp = None
        if isinstance(self.layer.matmul, OutlierSuppressionPlusSmoothMatmul):
            # Fold the OutlierSuppression+ shift (beta_osp) into a bias term:
            # bias = (-beta) @ W_smoothed, with W smoothed by smooth_scale.
            origin_weight = msops.mul(self._layer.weight, linear.matmul.smooth_scale)
            bias_osp = msops.matmul(
                msops.expand_dims(-linear.matmul.beta_osp, 0),
                (
                    # NOTE(review): the transpose_b branch uses the smoothed
                    # origin_weight while the else branch uses the raw
                    # self._layer.weight — looks asymmetric; confirm this is
                    # intentional for the non-transposed layout.
                    origin_weight.astype("float32").transpose()
                    if self._layer.transpose_b
                    else self._layer.weight.astype("float32")
                ),
            )
            bias_osp = bias_osp.squeeze()
        quant, qmm = AllQuantMatmul.create(layer_name, linear, parallel_type, q_weight, x_qparam, w_qparam, is_deploy,
                                           cfg.tp_size, compute_type,
                                           KernelType.ACLNN if use_aclnn_quant else KernelType.INTERNAL, bias_osp)
        if not is_deploy:
            logger.debug(f"AllQuantLinearInferCell: x_qparam of Layer({parallel_type}:{layer_name}) is {x_qparam}")
            logger.debug(f"AllQuantLinearInferCell: w_qparam of Layer({parallel_type}:{layer_name}) is {w_qparam}")
            logger.debug(f"AllQuantLinearInferCell: q_weight of Layer({parallel_type}:{layer_name}) is "
                         f"{{{q_weight.shape}, {q_weight.dtype}}}")
        # Install the activation quantizer and swap the layer's matmul/weight
        # for their quantized counterparts.
        self._set_act_quant(quant)
        self.layer.matmul = qmm
        self.layer.weight = q_weight


# class AllQuantMcoreLinearInferCell(McoreLinearInferCell):
#     """AllQuantLinearInferCell"""

#     def __init__(self, layer_name, linear: Linear, cfg: InnerPTQConfig, q_weight, x_qparam: QuantParam,
#                  w_qparam: QuantParam, compute_type, parallel_type: ParallelType):
#         super().__init__(linear, parallel_type)
#         self.cfg = cfg
#         is_deploy = cfg.mode == PTQMode.DEPLOY
        # use_aclnn_quant = any(opname in layer_name for opname in cfg.aclnn_quant_list)
        # bias_osp = None
        # if isinstance(self.layer.matmul, OutlierSuppressionPlusSmoothMatmul):
        #     origin_weight = msops.mul(self._layer.weight, linear.matmul.smooth_scale)
        #     bias_osp = msops.matmul(
        #         msops.expand_dims(-linear.matmul.beta_osp, 0),
        #         (
        #             origin_weight.astype("float32").transpose()
        #             if self._layer.transpose_b
        #             else self._layer.weight.astype("float32")
        #         ),
        #     )
        #     bias_osp = bias_osp.squeeze()
        # quant, qmm = AllQuantMatmul.create(layer_name, linear, parallel_type, q_weight, x_qparam, w_qparam, is_deploy,
        #                                    cfg.tp_size, compute_type,
        #                                    KernelType.ACLNN if use_aclnn_quant else KernelType.INTERNAL, bias_osp)

        
        # if not is_deploy:
        #     logger.debug(f"AllQuantLinearInferCell: x_qparam of Layer({parallel_type}:{layer_name}) is {x_qparam}")
        #     logger.debug(f"AllQuantLinearInferCell: w_qparam of Layer({parallel_type}:{layer_name}) is {w_qparam}")
        #     logger.debug(f"AllQuantLinearInferCell: q_weight of Layer({parallel_type}:{layer_name}) is "
        #                  f"{{{q_weight.shape}, {q_weight.dtype}}}")
        # self._set_act_quant(quant)
        # self.layer.matmul = qmm
        # self.layer.weight = q_weight

class AllQuantMcoreLinearInferCell(McoreLinearInferCell):
    """Inference cell for dual-weight mcore linear layers.

    Each of the layer's two packed int4 weights gets its own ``MyV3Matmul``
    built from that weight's quantization scale/zero-point.
    """

    def __init__(self, layer_name, linear: Linear, cfg: InnerPTQConfig,
                 q_weight1, q_weight2,
                 x_qparam: QuantParam,
                 w1_qparam: QuantParam,
                 w2_qparam: QuantParam,
                 parallel_type: ParallelType):
        """Wrap *linear* with one quantized matmul per weight.

        Args:
            layer_name: fully qualified layer name (unused here, kept for
                interface symmetry with ``AllQuantLinearInferCell``).
            linear: the calibrated dual-weight layer being replaced.
            cfg: PTQ configuration, stored for later use.
            q_weight1: first packed (qint4x2) weight.
            q_weight2: second packed (qint4x2) weight.
            x_qparam: activation quant parameters. NOTE(review): currently not
                consumed — activation quantization appears to be left to the
                matmul kernels; confirm before relying on it.
            w1_qparam: scale/zero-point for the first weight.
            w2_qparam: scale/zero-point for the second weight.
            parallel_type: column/row parallel kind of the layer.
        """
        super().__init__(linear, parallel_type)
        self.cfg = cfg

        w1_scale_param = w1_qparam.scale
        w1_zp_param = w1_qparam.zero_point
        w2_scale_param = w2_qparam.scale
        w2_zp_param = w2_qparam.zero_point

        # Matmul backed by the first packed weight and its dequant parameters.
        self.layer.matmul1 = MyV3Matmul(
            w_like=(q_weight1, w1_scale_param, w1_zp_param),
            transpose_a=False,
            transpose_b=False,
        )

        # Matmul backed by the second packed weight and its dequant parameters.
        self.layer.matmul2 = MyV3Matmul(
            w_like=(q_weight2, w2_scale_param, w2_zp_param),
            transpose_a=False,
            transpose_b=False,
        )

        # Keep quant parameters and packed weights reachable from the cell
        # (e.g. for checkpoint export or debugging).
        self.w1_scale = w1_scale_param
        self.w1_zp = w1_zp_param
        self.w2_scale = w2_scale_param
        self.w2_zp = w2_zp_param
        self.q_weight1 = q_weight1
        self.q_weight2 = q_weight2
