import functools
import re

import torch
import torch.nn as nn
from ..utils.model_utils import TransformerStructurePairVisitor
from ..utils.flat_linear import FlatQuantizedLinear, FlatNormWrapper, FakeQuantizedLinearConfig, FakeQuantizedLinear
from ..utils.model_utils import get_module_by_name, set_module_by_name
from ..utils.model_utils import StructurePair, AttnNormLinearPair, AttnLinearLinearPair, MLPNormLinearPair, MLPLinearLinearPair, ModelStructureBridge
from ..utils.trans_utils import GeneralMatrixTrans
from ..utils.function_utils import get_decompose_dim, get_init_scale
from ..processors.quantizer_manager import QuantizerMapper

# Module-level stats dict. NOTE(review): nothing in this file reads or writes
# this global (the hook helpers take `act_stats` as a parameter and the
# visitors keep `self.act_stats`) — presumably kept for external importers;
# confirm before removing.
act_stats = {}

def stat_input_hook(m, x, y, name, act_stats):
    """Forward-hook callback recording input-activation stats for module `name`.

    PyTorch passes the forward input as a tuple; the first element is the
    actual activation tensor, which is forwarded to ``stat_tensor``.
    """
    tensor = x[0] if isinstance(x, tuple) else x
    stat_tensor(act_stats, name, tensor)


def stat_tensor(act_stats, name, x):
    """Fold the per-channel absolute max of `x` into act_stats[name]['input_max'].

    `x` is flattened to shape (-1, C) so the max is taken over every position
    for each of the C input channels; the running stat is kept detached on CPU.
    """
    channel_max = x.view(-1, x.shape[-1]).abs().max(dim=0)[0].detach().cpu().clone()
    entry = act_stats[name]
    if 'input_max' in entry:
        entry['input_max'] = torch.maximum(entry['input_max'], channel_max)
    else:
        entry['input_max'] = channel_max

class FakeQuantizerVisitor(TransformerStructurePairVisitor):
    """Visitor that wraps target linear layers in fake-quantized modules and
    collects per-channel input-activation statistics via forward hooks.
    """

    def __init__(self, model, config: FakeQuantizedLinearConfig):
        self.model = model
        self.config = config
        # linear module name -> quantized wrapper instance
        self.quantizer_dict = {}
        # Wrapper class instantiated in _visit_linear_pair; subclasses override.
        self.linear_quantizer = FakeQuantizedLinear
        # linear module name -> {'input_max': per-input-channel abs-max tensor}
        self.act_stats = {}
        # linear module name -> forward-hook handle
        self.hooks = {}

    def register_forward_hook(self, mod, name):
        """Attach a forward hook on `mod` that records the absolute max of
        each input channel into self.act_stats[name]['input_max'].
        """
        self.act_stats[name] = {}
        # Seed with a tiny positive value so downstream max/scale computations
        # never see zeros before any forward pass has run.
        self.act_stats[name]['input_max'] = torch.full([mod.weight.shape[1]], 1e-5, dtype=mod.weight.dtype)
        self.hooks[name] = mod.register_forward_hook(
            functools.partial(stat_input_hook, name=name, act_stats=self.act_stats))

    def remove_forward_hook(self, prefix: str=""):
        """Remove forward hooks whose name starts with `prefix` (all hooks when
        `prefix` is empty) and drop their handles.

        Fix: previously the removed handles were left in ``self.hooks``, so
        later calls re-removed stale hooks; they are now pruned from the dict.
        """
        matched = [name for name in self.hooks if not prefix or name.startswith(prefix)]
        for name in matched:
            self.hooks.pop(name).remove()

    def set_quant_config(self, config: FakeQuantizedLinearConfig):
        """Replace the quantization config used for subsequently visited pairs."""
        self.config = config

    def _visit_linear_pair(self, pair: StructurePair):
        """Wrap every target linear of `pair` in self.linear_quantizer, start
        collecting its input statistics, and register it in quantizer_dict.
        """
        linear_names = pair.target_modules
        for linear_name in linear_names:
            linear_module = get_module_by_name(self.model, linear_name)
            flat_linear = self.linear_quantizer(self.config, linear_module)
            self.register_forward_hook(flat_linear, linear_name)
            set_module_by_name(self.model, linear_name, flat_linear)
            self.quantizer_dict[linear_name] = flat_linear

    def visit_attn_norm_linear_pair(self, pair: AttnNormLinearPair):
        """Visit an attention norm->linear structure pair."""
        self._visit_linear_pair(pair)

    def visit_attn_linear_linear_pair(self, pair: AttnLinearLinearPair):
        """Visit an attention linear->linear structure pair."""
        self._visit_linear_pair(pair)

    def visit_mlp_norm_linear_pair(self, pair: MLPNormLinearPair):
        """Visit an MLP norm->linear structure pair."""
        self._visit_linear_pair(pair)

    def visit_mlp_linear_linear_pair(self, pair: MLPLinearLinearPair):
        """Visit an MLP linear->linear structure pair."""
        self._visit_linear_pair(pair)

    def to_org_mode(self, prefix: str=""):
        """Switch matching quantizers back to original (unquantized) mode."""
        for name, quantizer in self.quantizer_dict.items():
            if prefix and not name.startswith(prefix):
                continue
            quantizer.to_org_mode()

    def to_calib_mode(self, prefix: str=""):
        """Switch matching quantizers to calibration mode.

        The statistics hooks are removed first; calibration does not need the
        running input-max observers.
        """
        self.remove_forward_hook(prefix)
        for name, quantizer in self.quantizer_dict.items():
            if prefix and not name.startswith(prefix):
                continue
            quantizer.to_calib_mode()

    def fake_quant_weight(self, prefix: str=""):
        """Apply fake quantization to the weights of matching quantizers."""
        for name, quantizer in self.quantizer_dict.items():
            if prefix and not name.startswith(prefix):
                continue
            quantizer.fake_quant_weight()

    def to_eval_mode(self, prefix: str="", quant_weight=True):
        """Switch matching quantizers to evaluation mode.

        Hooks are removed; when `quant_weight` is True the weights are
        fake-quantized in place afterwards.
        """
        self.remove_forward_hook(prefix)
        for name, quantizer in self.quantizer_dict.items():
            if prefix and not name.startswith(prefix):
                continue
            quantizer.to_eval_mode()
        if quant_weight:
            self.fake_quant_weight(prefix=prefix)

class FlatQuantQuantizerMapVisitor(FakeQuantizerVisitor):
    """FlatQuant quantizer visitor.

    On top of the fake quantization done by the base class, it installs
    learnable decomposed matrix transforms (optionally with a diagonal scale)
    on norm->linear and linear->linear structure pairs.
    """

    def __init__(self, model, config, add_diag=True, diag_alpha=0.5, diag_relu=False):
        super(FlatQuantQuantizerMapVisitor, self).__init__(model, config)
        # structure pair -> GeneralMatrixTrans installed for that pair
        self.decompose_trans_dict = {}
        # norm module name -> FlatNormWrapper instance
        self.norm_dict = {}
        self.add_diag = add_diag
        self.linear_quantizer = FlatQuantizedLinear
        self.diag_alpha = diag_alpha
        self.diag_relu = diag_relu

    def _visit_norm_linear_pair(self, pair: StructurePair):
        """Install a decomposed transform between a norm module and its
        downstream linears, wrapping the norm in a FlatNormWrapper.
        """
        norm_name = pair.source_modules
        norm_module = get_module_by_name(self.model, norm_name)

        # Factor the hidden dimension into two factors for the decomposed transform.
        ln_dim_left, ln_dim_right = get_decompose_dim(norm_module.weight.shape[0])

        ln_trans = GeneralMatrixTrans(ln_dim_left, ln_dim_right, add_diag=self.add_diag, diag_relu=self.diag_relu)

        self.decompose_trans_dict[pair] = ln_trans
        flat_norm = FlatNormWrapper(norm_module, ln_trans)
        set_module_by_name(self.model, norm_name, flat_norm)
        self.norm_dict[norm_name] = flat_norm
        # Each downstream linear folds the same transform into its weights.
        for linear_name in pair.target_modules:
            linear_module = get_module_by_name(self.model, linear_name)
            linear_module.set_trans(weight_in_trans=ln_trans)

    def visit_attn_norm_linear_pair(self, pair: AttnNormLinearPair):
        """Visit an attention norm->linear pair: fake-quantize the linears,
        then install the norm transform."""
        super(FlatQuantQuantizerMapVisitor, self).visit_attn_norm_linear_pair(pair)
        self._visit_norm_linear_pair(pair)

    def visit_attn_linear_linear_pair(self, pair: AttnLinearLinearPair):
        """Visit an attention linear->linear pair: install a per-head
        (num_heads x head_dim) transform without a diagonal scale."""
        super(FlatQuantQuantizerMapVisitor, self).visit_attn_linear_linear_pair(pair)
        config = pair.config
        pre_linear_name = pair.source_modules
        pre_linear_module = get_module_by_name(self.model, pre_linear_name)
        head_dim = config.hidden_size // config.num_attention_heads
        trans = GeneralMatrixTrans(config.num_attention_heads, head_dim, add_diag=False, diag_relu=self.diag_relu)
        # The producing linear absorbs the right factor on its output side;
        # consumers apply the left factor to activations.
        pre_linear_module.set_trans(weight_out_trans=trans.right_trans)
        self.decompose_trans_dict[pair] = trans

        for linear_name in pair.target_modules:
            linear_module = get_module_by_name(self.model, linear_name)
            linear_module.set_trans(weight_in_trans=trans, act_in_trans=trans.left_trans)

    def visit_mlp_norm_linear_pair(self, pair: MLPNormLinearPair):
        """Visit an MLP norm->linear pair."""
        super(FlatQuantQuantizerMapVisitor, self).visit_mlp_norm_linear_pair(pair)
        self._visit_norm_linear_pair(pair)

    def visit_mlp_linear_linear_pair(self, pair: MLPLinearLinearPair):
        """Visit an MLP linear->linear pair: install a decomposed transform
        between the producing linear and its consumers."""
        super(FlatQuantQuantizerMapVisitor, self).visit_mlp_linear_linear_pair(pair)
        pre_linear_name = pair.source_modules
        pre_linear_module = get_module_by_name(self.model, pre_linear_name)
        pre_dim_left, pre_dim_right = get_decompose_dim(pre_linear_module.linear.weight.shape[0])
        linear_trans = GeneralMatrixTrans(pre_dim_left, pre_dim_right, add_diag=self.add_diag, diag_relu=self.diag_relu)
        self.decompose_trans_dict[pair] = linear_trans

        for linear_name in pair.target_modules:
            linear_module = get_module_by_name(self.model, linear_name)
            linear_module.set_trans(weight_in_trans=linear_trans, act_in_trans=linear_trans)

    def _init_diag_scale(self, prefix: str="", diag_alpha: float=0.5):
        """Initialize each diagonal scale from weight and activation abs-max
        statistics via get_init_scale.

        Fix: the activation stats are now aggregated with torch.maximum over
        ALL target linears of a pair; previously only the last linear's
        'input_max' was used because the variable was overwritten in the loop.
        """
        for pair, trans in self.decompose_trans_dict.items():
            if trans.diag_trans is None:
                continue
            pre_linear_name = pair.source_modules
            post_linear_names = pair.target_modules
            if prefix and not pre_linear_name.startswith(prefix):
                continue
            weights = []
            input_max = None
            for linear_name in post_linear_names:
                linear_module = get_module_by_name(self.model, linear_name)
                weights.append(linear_module.weight)
                stat = self.act_stats[linear_name].get('input_max', None)
                if stat is not None:
                    input_max = stat if input_max is None else torch.maximum(input_max, stat)
            if input_max is None:
                # No statistics collected: fall back to a tiny positive value.
                input_max = torch.full([linear_module.weight.shape[1]], 1e-5, dtype=linear_module.weight.dtype)
            weights_max = torch.cat(weights, dim=0).abs().max(dim=0)[0]
            weights_max = weights_max.to(trans.diag_trans.diag_scale)
            input_max = input_max.to(trans.diag_trans.diag_scale)
            trans.diag_trans.diag_scale.data = get_init_scale(weights_max, input_max, diag_alpha)

    def to_org_mode(self, prefix: str=""):
        """Switch matching quantizers and norm wrappers back to original mode."""
        super(FlatQuantQuantizerMapVisitor, self).to_org_mode(prefix)
        for name, quantizer in self.norm_dict.items():
            if prefix and not name.startswith(prefix):
                continue
            quantizer.to_org_mode()

    def to_calib_mode(self, prefix: str=""):
        """Switch matching quantizers and norm wrappers to calibration mode,
        then initialize the diagonal scales from the collected statistics."""
        super(FlatQuantQuantizerMapVisitor, self).to_calib_mode(prefix)
        for name, quantizer in self.norm_dict.items():
            if prefix and not name.startswith(prefix):
                continue
            quantizer.to_calib_mode()
        self._init_diag_scale(prefix, diag_alpha=self.diag_alpha)

    def to_eval_mode(self, prefix: str="", quant_weight=True):
        """Switch to evaluation mode: fold each diagonal scale into the
        preceding module's weight, freeze the transforms, then optionally
        fake-quantize the weights.
        """
        # Weight quantization is deferred until the diag scales are folded in.
        super(FlatQuantQuantizerMapVisitor, self).to_eval_mode(prefix, quant_weight=False)
        for name, quantizer in self.norm_dict.items():
            if prefix and not name.startswith(prefix):
                continue
            quantizer.to_eval_mode()
        for pair, trans in self.decompose_trans_dict.items():
            if prefix and not str(pair).startswith(prefix):
                continue
            if trans.diag_trans is not None:
                pre_linear_name = pair.source_modules
                pre_linear_module = get_module_by_name(self.model, pre_linear_name)
                weight = pre_linear_module.weight.data
                ori_dtype = weight.dtype
                # Fold the diagonal scale in float64 for numerical safety:
                # 2-D weights (linear) scale per output row, 1-D (norm) elementwise.
                if weight.dim() == 2:
                    weight = weight.to(torch.float64) * trans.diag_trans.diag_scale.data.to(torch.float64).unsqueeze(1)
                elif weight.dim() == 1:
                    weight = weight.to(torch.float64) * trans.diag_trans.diag_scale.data.to(torch.float64)
                else:
                    raise ValueError(f"weight dim is not supported: {weight.dim()}")
                pre_linear_module.weight.data = weight.to(ori_dtype)
            trans.to_eval_mode()
        if quant_weight:
            self.fake_quant_weight(prefix=prefix)

def get_n_set_parameters_byname(model, required_names):
    """Collect every parameter whose qualified name contains any entry of
    `required_names`, mark each as trainable, and return them.

    A parameter matching several entries is appended once per match, mirroring
    the nested-loop search order (outer loop over required names).
    """
    selected = []
    for needle in required_names:
        selected.extend(p for n, p in model.named_parameters() if needle in n)
    for p in selected:
        p.requires_grad = True
    return selected


def get_trainable_parameters(model, base_lr=3e-5):
    """Gather the FlatQuant trainable parameter groups.

    Returns a dict of parameter lists keyed by kind and a list of optimizer
    param-groups; clip factors train at 10x the base learning rate.
    """
    lr_multipliers = (
        ("linear_u", 1),
        ("linear_v", 1),
        ("linear_diag", 1),
        ("diag_scale", 1),
        ("clip_factor", 10),
    )
    params = {}
    trainable_params = []
    for key, mult in lr_multipliers:
        params[key] = get_n_set_parameters_byname(model, [key])
        trainable_params.append({"params": params[key], "lr": base_lr * mult})
    return params, trainable_params


# def quantize_model(model_bridge, config, diag_alpha: float=0.5):
#     """量化模型"""
#     pairs_dict = model_bridge.get_structure_pairs()
#     quantizer = FlatQuantQuantizerMapVisitor(model_bridge.model, config, diag_alpha=diag_alpha)
#     total_layers = len(pairs_dict[AttnNormLinearPair.__name__])
#     for i in range(total_layers):
#         pairs_dict[AttnNormLinearPair.__name__][i].accept(quantizer)
#         pairs_dict[AttnLinearLinearPair.__name__][i].accept(quantizer)
#         pairs_dict[MLPNormLinearPair.__name__][i].accept(quantizer)
#         # pairs_dict[MLPLinearLinearPair.__name__][i].accept(quantizer)
#     return quantizer

# def quantize_model(model_bridge: ModelStructureBridge, config: FakeQuantizedLinearConfig, diag_alpha: float=0.5) -> QuantizerMapper:
#     pairs_dict = model_bridge.get_structure_pairs()
    

#     mapper = QuantizerMapper()
    
#     flat_quant_visitor = FlatQuantQuantizerMapVisitor(model_bridge.model, config, diag_alpha=diag_alpha)
    
#     # 使用正则表达式注册需要量化的模块
#     # 匹配所有layers中的qkv_proj, o_proj, gate_up_proj模块

#     attention_qkv = re.compile(r'.*layers\.\d+\..*?(qkv_proj).*')
#     attention_o = re.compile(r'.*layers\.\d+\..*?(o_proj).*')
#     mlp_gate_up = re.compile(r'.*layers\.\d+\..*?(gate_up_proj).*')

#     mapper.register_pattern(attention_qkv, flat_quant_visitor)
#     mapper.register_pattern(attention_o, flat_quant_visitor)
#     mapper.register_pattern(mlp_gate_up, flat_quant_visitor)

#     mapper.apply_quantizer(pairs_dict)
    
#     return mapper

def quantize_model(model_bridge: ModelStructureBridge,
                   config: FakeQuantizedLinearConfig,
                   diag_alpha: float=0.5,
                   diag_relu: bool=False) -> QuantizerMapper:
    """Build a QuantizerMapper applying FlatQuant quantization to the
    qkv_proj and gate_up_proj linears of every transformer layer.

    NOTE: o_proj and down_proj are deliberately left unquantized here
    (their registrations were previously present but disabled).

    Args:
        model_bridge: bridge exposing the model and its structure pairs.
        config: fake-quantization configuration for the wrapped linears.
        diag_alpha: exponent used when initializing diagonal scales.
        diag_relu: whether the diagonal transform applies a ReLU.

    Returns:
        The mapper with all patterns registered and the quantizer applied.
    """
    pairs_dict = model_bridge.get_structure_pairs()

    mapper = QuantizerMapper()
    flat_quant_visitor = FlatQuantQuantizerMapVisitor(model_bridge.model,
                                                      config,
                                                      diag_alpha=diag_alpha,
                                                      diag_relu=diag_relu)
    total_layers = len(pairs_dict[AttnNormLinearPair.__name__])

    for i in range(total_layers):
        mapper.register_pattern(f"model.layers.{i}.self_attn.qkv_proj", flat_quant_visitor)
        mapper.register_pattern(f"model.layers.{i}.mlp.gate_up_proj", flat_quant_visitor)
    mapper.apply_quantizer(pairs_dict)

    return mapper


def mix_quantize_model(model_bridge: ModelStructureBridge, config: FakeQuantizedLinearConfig, diag_alpha: float=0.5) -> QuantizerMapper:
    """Register a mixed quantization scheme per transformer layer:
    FlatQuant for qkv_proj and gate_up_proj, plain W8A8 fake quantization
    for o_proj (and a second registration on gate_up_proj).
    """
    pairs_dict = model_bridge.get_structure_pairs()

    mapper = QuantizerMapper()
    flat_visitor = FlatQuantQuantizerMapVisitor(model_bridge.model, config, diag_alpha=diag_alpha)
    int8_config = FakeQuantizedLinearConfig(w_bits=8, a_bits=8, w_asym=False, a_asym=True)
    int8_visitor = FakeQuantizerVisitor(model_bridge.model, int8_config)
    n_layers = len(pairs_dict[AttnNormLinearPair.__name__])

    for layer_idx in range(n_layers):
        layer_prefix = f"model.layers.{layer_idx}"
        mapper.register_pattern(f"{layer_prefix}.self_attn.qkv_proj", flat_visitor)
        mapper.register_pattern(f"{layer_prefix}.self_attn.o_proj", int8_visitor)
        mapper.register_pattern(f"{layer_prefix}.mlp.gate_up_proj", flat_visitor)
        # NOTE(review): gate_up_proj is registered with BOTH visitors while
        # down_proj is never registered — confirm this double registration is
        # intended and not a typo for mlp.down_proj.
        mapper.register_pattern(f"{layer_prefix}.mlp.gate_up_proj", int8_visitor)
    mapper.apply_quantizer(pairs_dict)

    return mapper