from mindspore import dtype, Tensor, ops, nn
from mindspore.nn import SequentialCell
from mindspore import dtype as mstype
from mindformers.modules.layers import Linear
from mindspore_gs.ptq.ptq_config import QuantGranularity
from mindspore_gs.ptq.context import InnerPTQConfig
from mindspore_gs.ptq.ptq.algorithms.quantizer import Quantizer
from mindspore_gs.ptq.ptq.wrapper_cell import Checker, WrapperCell
from mindspore_gs.ptq.ptq.wrappers.mindformers.linear_all_quant_wrappers import (
    AllQuantLinearCell, AllQuantLinearInferCell
)
import pdb
class AllQuantLowRankCell(WrapperCell):
    """A8W8 quantization wrapper for a ``SequentialCell`` of two linear stages.

    Wraps the first two sub-cells of *seq* with :class:`AllQuantLinearCell`
    (int8 weight / int8 per-tensor activation quantization) and forwards the
    quantize / hook / process / deploy lifecycle calls to both. If fewer than
    two sub-cells are found, the wrapper degrades to a transparent
    pass-through around the original ``SequentialCell``.
    """

    @staticmethod
    def reg_self():
        """Register this wrapper for ``SequentialCell`` layers under an A8W8 checker.

        The checker only matches configs requesting int8 weights, int8
        activations, and per-tensor activation granularity.
        """
        class A8W8Checker(Checker):
            def check(self, cfg: InnerPTQConfig):
                return (
                    cfg.weight_quant_dtype == dtype.int8 and
                    cfg.act_quant_dtype    == dtype.int8 and
                    cfg.act_quant_granularity is QuantGranularity.PER_TENSOR
                )
        Quantizer.reg_layer_map(SequentialCell, AllQuantLowRankCell, A8W8Checker())

    def __init__(self, layer_name: str, seq: SequentialCell,
                 cfg: InnerPTQConfig, network_helper, **kwargs):
        """Wrap the first two sub-cells of *seq* for all-quant processing.

        Args:
            layer_name: Name of the wrapped layer; sub-wrappers are named
                ``{layer_name}_0`` and ``{layer_name}_1``.
            seq: The original ``SequentialCell`` being quantized.
            cfg: Inner PTQ configuration passed through to the sub-wrappers.
            network_helper: Project helper object, forwarded untouched.
        """
        super().__init__(layer_name, seq, cfg, network_helper, **kwargs)
        self._orig = seq

        # Prefer the explicit cell_list attribute; fall back to walking
        # all named sub-cells when it is absent.
        cells = getattr(seq, "cell_list", None)
        if cells is None:
            cells = [c for _, c in seq.cells_and_names()]

        if len(cells) >= 2:
            self._skip = False
            self.quant0 = AllQuantLinearCell(f"{layer_name}_0", cells[0],
                                              cfg, network_helper, **kwargs)
            self.quant1 = AllQuantLinearCell(f"{layer_name}_1", cells[1],
                                              cfg, network_helper, **kwargs)
            # Keep the parallel/compute types so they can be reused at
            # deploy time.
            self.parallel_type = getattr(self.quant0, "parallel_type", None)
            self.compute_type  = getattr(self.quant0, "compute_type",  None)
        else:
            # Not enough sub-cells to quantize: pass through unchanged.
            self._skip = True

    def construct(self, x):
        """Run the two quantization-wrapped stages, or the original cell."""
        if self._skip:
            return self._orig(x)
        x = self.quant0(x)
        return self.quant1(x)

    def quant(self):
        """Quantize both wrapped linear stages (no-op in pass-through mode)."""
        if not self._skip:
            self.quant0.quant()
            self.quant1.quant()

    def deploy(self):
        """Return the inference cell built from both deployed stages.

        In pass-through mode the original ``SequentialCell`` is returned.
        """
        if self._skip:
            return self._orig
        inf0 = self.quant0.deploy()
        inf1 = self.quant1.deploy()
        return AllQuantLowRankInferCell(self.layer_name, inf0, inf1)

    def add_hook(self):
        """Install calibration hooks on both stages (no-op in pass-through mode)."""
        if getattr(self, "_skip", False):
            return
        self.quant0.add_hook()
        self.quant1.add_hook()

    def remove_hook(self):
        """Remove calibration hooks from both stages (no-op in pass-through mode)."""
        if getattr(self, "_skip", False):
            return
        self.quant0.remove_hook()
        self.quant1.remove_hook()

    def process(self):
        """Run post-calibration processing on both stages (no-op in pass-through mode)."""
        if getattr(self, "_skip", False):
            return
        self.quant0.process()
        self.quant1.process()

class AllQuantLowRankInferCell(WrapperCell):
    """Inference-time cell chaining two deployed quantized linear stages.

    Runs the first stage as an opaque sub-cell, then manually replays the
    second stage as quantize -> matmul -> bias-add -> dequantize using
    operators pulled out of the second stage's inner layer.

    NOTE(review): the type hints say q0/q1 are already
    ``AllQuantLinearInferCell`` instances, yet ``__init__`` calls
    ``.deploy()`` on them again — confirm deploy() is idempotent (or
    actually expected) on already-deployed cells.
    """
    def __init__(self,
                 layer_name: str,
                 q0: AllQuantLinearInferCell,
                 q1: AllQuantLinearInferCell):
        # Call the parent constructor without the original seq/cfg/helper;
        # only inf0/inf1 below are used by this cell.
        super().__init__(layer_name, None, None, None)

        # First stage: kept as a single deployed quantized cell.
        self.inf0 = q0.deploy()

        # Second stage: deployed, then its internal operators are unpacked.
        inf1 = q1.deploy()
        self.inf1 = inf1

        # Operators and bias selected based on inspecting dir(inf1.layer).
        self.act_quant_op = inf1.layer.activation   # float→int8
        self.matmul_op    = inf1.layer.matmul       # int8×int8→int8
        self.bias_add     = inf1.layer.bias_add     # int8 bias add
        # NOTE(review): on a stock nn.Cell, `to_float` is a method that sets
        # the compute dtype and returns the cell — not a dequant operator.
        # Confirm inf1.layer really exposes a callable dequantize named
        # `to_float`; otherwise construct() below will misbehave.
        self.dequant_op   = inf1.layer.to_float     # int8→float

    def construct(self, x):
        # -- First quantized MatMul stage --
        x_mid = self.inf0(x)  

        # -- Second quantized MatMul stage --
        x_q  = self.act_quant_op(x_mid)            # 1) quantize activation
        y_q  = self.matmul_op(x_q)                 # 2) MatMul
        y_q  = self.bias_add(y_q, self.inf1.layer.bias)  # 3) add bias
        y    = self.dequant_op(y_q)                # 4) dequantize
        return y