from typing import List, Dict, Any

import torch
from torch.nn import Module
from tqdm import tqdm

class StructurePair:
    """Manages a (source -> targets) module pairing inside a transformer block.

    Subclasses must define a class attribute ``_name`` (the pair's relative
    module path, e.g. ``"self_attn.qkv_proj"``); it is combined with
    ``prefix_name`` to build the pair's qualified name.
    """

    def __init__(self, sources: str, targets: List[str], prefix_name: str):
        """
        Args:
            sources: name of the source module (e.g. a norm or linear layer).
            targets: names of the target modules fed by the source.
            prefix_name: layer prefix the pair belongs to (e.g. "layers.0").

        Raises:
            ValueError: if ``sources`` is not a str or ``targets`` is not a list.
        """
        if not isinstance(sources, str):
            raise ValueError(f"sources must be a string, got {type(sources)}")
        # BUG FIX: isinstance() against typing.List is deprecated and fragile;
        # check the builtin list type instead.
        if not isinstance(targets, list):
            raise ValueError(f"targets must be a list of strings, got {type(targets)}")
        self.source_modules = sources
        self.target_modules = targets
        # Relies on the subclass-provided ``_name`` class attribute.
        self.name = prefix_name + '.' + self._name

    def accept(self, visitor) -> Any:
        """Visitor-pattern dispatch; subclasses route to a specific visit_* method."""
        return visitor.visit(self)

    def __str__(self) -> str:
        return self.name

    def __repr__(self) -> str:
        return self.name

class ModelStructureBridge:
    """模型结构桥接器基类"""
    def __init__(self, model: Module, config: dict=None):
        self.model = model
        if config:
            self.config = config
        else:
            self.config = getattr(model, 'config', None)
        self._structure_pair_registry = {} 
        self._layers_name = None

    def register_structure_pair(self, pair: type):
        """注册结构对类型"""
        if not isinstance(pair, StructurePair):
            raise TypeError(f"pair_class must be a instance of StructurePair, got {pair}")
        if pair.__class__.__name__ not in self._structure_pair_registry:
            self._structure_pair_registry[pair.__class__.__name__] = [pair]
        else:
            self._structure_pair_registry[pair.__class__.__name__].append(pair)
        
    def get_structure_pairs(self) -> List[StructurePair]:
        """获取所有结构对"""
        return self._structure_pair_registry
        # return [pair for pairs in self._structure_pair_registry.values() for pair in pairs]

    def get_layers(self) -> str:
        """获取所有transformers层"""
        return self._layers_name

    def analyze_structure(self):
        """分析模型结构，由子类实现"""
        raise NotImplementedError

    def run_calibration(self, input_data):
        """运行校准前向传播"""
        for data in tqdm(input_data):
            if isinstance(data, tuple) or isinstance(data, list):
                with torch.no_grad():
                    self.model(*data)
            elif isinstance(data, dict):
                with torch.no_grad():
                    self.model(**data)


class AttnNormLinearPair(StructurePair):
    """Pair of an attention input norm and the attention qkv projection.

    BUG FIX: the docstring was previously placed after ``_name``, making it
    a stray string literal instead of the class ``__doc__``.
    """

    # Relative module path used to build the pair's qualified name.
    _name = "self_attn.qkv_proj"

    def __init__(self, config, attn_norm_name, linear_name, prefix_name: str):
        super().__init__(attn_norm_name, linear_name, prefix_name)
        self.config = config

    def accept(self, visitor) -> Any:
        """Dispatch to the visitor's attn-norm/linear handler."""
        return visitor.visit_attn_norm_linear_pair(self)


class AttnLinearLinearPair(StructurePair):
    """Pair of an attention projection and the attention output projection.

    BUG FIX: the docstring was previously placed after ``_name``, making it
    a stray string literal instead of the class ``__doc__``.
    """

    # Relative module path used to build the pair's qualified name.
    _name = "self_attn.o_proj"

    def __init__(self, config, pre_linear_name, post_linear_name, prefix_name: str):
        super().__init__(pre_linear_name, post_linear_name, prefix_name)
        self.config = config

    def accept(self, visitor) -> Any:
        """Dispatch to the visitor's attn linear/linear handler."""
        return visitor.visit_attn_linear_linear_pair(self)


class MLPNormLinearPair(StructurePair):
    """Pair of an MLP input norm and the gate/up projection linear layers.

    BUG FIXES: the docstring was previously a stray string literal placed
    after ``_name`` (so it never became ``__doc__``), and its text was a
    copy-paste of the attention pair's description.
    """

    # Relative module path used to build the pair's qualified name.
    _name = "mlp.gate_up_proj"

    def __init__(self, config, mlp_norm_name, linear_name, prefix_name: str):
        super().__init__(mlp_norm_name, linear_name, prefix_name)
        self.config = config

    def accept(self, visitor) -> Any:
        """Dispatch to the visitor's MLP-norm/linear handler."""
        return visitor.visit_mlp_norm_linear_pair(self)


class MLPLinearLinearPair(StructurePair):
    """Pair of an MLP projection and the MLP down projection.

    BUG FIX: the docstring was previously placed after ``_name``, making it
    a stray string literal instead of the class ``__doc__``.
    """

    # Relative module path used to build the pair's qualified name.
    _name = "mlp.down_proj"

    def __init__(self, config, pre_linear_name, post_linear_name, prefix_name: str):
        super().__init__(pre_linear_name, post_linear_name, prefix_name)
        self.config = config

    def accept(self, visitor) -> Any:
        """Dispatch to the visitor's MLP linear/linear handler."""
        return visitor.visit_mlp_linear_linear_pair(self)


def get_module_by_name(model: Module, submodule_key: str) -> Module:
    """Walk a dot-separated attribute path and return the submodule it names.

    Args:
        model: root module to start from.
        submodule_key: path such as "layers.0.mlp.down_proj".

    Returns:
        The module found at the end of the path.
    """
    node = model
    for attr in submodule_key.split('.'):
        node = getattr(node, attr)
    return node


def set_module_by_name(model: Module, submodule_key: str, module: Module, clone_hooks: bool = True):
    """Replace the submodule at a dot-separated path with a new module.

    Args:
        model: the model to modify.
        submodule_key: dot-separated path of the module to replace.
        module: the replacement module instance.
        clone_hooks: if True (default), re-register the old module's
            forward/backward hooks on the new module.
    """
    tokens = submodule_key.split('.')
    parent = model
    # Walk to the parent of the target module.
    for part in tokens[:-1]:
        parent = getattr(parent, part)

    if clone_hooks:
        old_module = getattr(parent, tokens[-1])

        # Copy forward pre-hooks and forward hooks.
        if hasattr(old_module, '_forward_pre_hooks'):
            for hook_fn in old_module._forward_pre_hooks.values():
                module.register_forward_pre_hook(hook_fn)

        if hasattr(old_module, '_forward_hooks'):
            for hook_fn in old_module._forward_hooks.values():
                module.register_forward_hook(hook_fn)

        # BUG FIX: nn.Module has no ``register_backward_pre_hook``; the
        # correct API is ``register_full_backward_pre_hook`` (torch >= 2.0).
        if hasattr(old_module, '_backward_pre_hooks'):
            for hook_fn in old_module._backward_pre_hooks.values():
                module.register_full_backward_pre_hook(hook_fn)

        # NOTE(review): _backward_hooks may hold hooks registered via either
        # the legacy register_backward_hook or register_full_backward_hook;
        # re-registering through the legacy API matches the original intent,
        # but full backward hooks would lose their "full" semantics — verify
        # against how callers register backward hooks.
        if hasattr(old_module, '_backward_hooks'):
            for hook_fn in old_module._backward_hooks.values():
                module.register_backward_hook(hook_fn)

    # Install the replacement on its parent.
    setattr(parent, tokens[-1], module)


class TransformerStructurePairVisitor:
    """Abstract visitor base class for transformer structure pairs.

    Subclasses override the visit_* methods relevant to them; the default
    implementations do nothing and return None.
    """
    def visit(self, pair: StructurePair) -> Any:
        """Visit a generic structure pair."""
        pass

    def visit_attn_norm_linear_pair(self, pair: AttnNormLinearPair) -> Any:
        """Visit an attention norm + linear-layer pair."""
        pass

    def visit_attn_linear_linear_pair(self, pair: AttnLinearLinearPair) -> Any:
        """Visit an attention linear + linear-layer pair."""
        pass

    def visit_mlp_norm_linear_pair(self, pair: MLPNormLinearPair) -> Any:
        """Visit an MLP norm + linear-layer pair."""
        pass

    def visit_mlp_linear_linear_pair(self, pair: MLPLinearLinearPair) -> Any:
        """Visit an MLP linear + linear-layer pair."""
        pass


class ModuleWrapper(Module):
    """Transparently wraps a module, optionally transforming its inputs
    and its output.

    BUG FIXES (the original class was non-functional):
    - ``super().__init__()`` was never called, breaking nn.Module internals;
    - state was stored under the keys "__wrapped"/"__pre_process"/
      "__post_process" but read back as ``self._wrapped`` / ``self.pre_trans``
      / ``self.post_trans``, causing infinite recursion in ``__getattr__``;
    - ``__setattr__`` guarded a key set inconsistent with ``__init__``.
    """

    def __init__(self, _wrapped, pre_process=None, post_process=None):
        """
        Args:
            _wrapped: the module to wrap.
            pre_process: optional callable receiving the call's
                (*args, **kwargs) and returning the new positional args
                (as a tuple) for the wrapped module. The original kwargs
                are still forwarded to the wrapped module.
            post_process: optional callable applied to the wrapped module's
                output.
        """
        # Write through __dict__ so our own __setattr__ does not forward
        # these to the (not yet set) wrapped module.
        self.__dict__["_wrapped"] = _wrapped
        self.__dict__["_pre_process"] = pre_process
        self.__dict__["_post_process"] = post_process
        super().__init__()

    def __getattr__(self, name):
        # Only called when normal lookup fails: delegate to the wrapped
        # module. Read _wrapped via __dict__ to avoid recursion.
        wrapped = self.__dict__.get("_wrapped")
        if wrapped is None:
            raise AttributeError(name)
        return getattr(wrapped, name)

    def __setattr__(self, name, value):
        if name in ("_wrapped", "_pre_process", "_post_process"):
            self.__dict__[name] = value
        else:
            # All other attribute writes go to the wrapped module.
            setattr(self._wrapped, name, value)

    def __call__(self, *args, **kwargs):
        # NOTE(review): overriding __call__ bypasses nn.Module's hook
        # machinery for the wrapper itself; hooks registered on the wrapped
        # module still fire when it is called below.
        if self._pre_process is not None:
            args = self._pre_process(*args, **kwargs)
        output = self._wrapped(*args, **kwargs)
        if self._post_process is not None:
            output = self._post_process(output)
        return output


class RunnerStopExecution(Exception):
    """Raised by forward hooks to abort a model's forward pass early."""


class LayerRuner:
    """Drives a model's transformer layers one at a time (e.g. for
    layer-wise calibration).

    NOTE(review): the name looks like a typo for "LayerRunner", but it is
    kept to avoid breaking existing callers.
    """

    def __init__(self, model_bridge: "ModelStructureBridge"):
        self.model_bridge = model_bridge
        # Resolve the layers container via the bridge's reported dot-path.
        layers_name = model_bridge.get_layers()
        self.layers = get_module_by_name(model_bridge.model, layers_name)
        self.num_layers = len(self.layers)

    def prepare_first_layer_input(self, data_list: List[Any]):
        """Capture the positional inputs and common kwargs reaching layer 0.

        A pre-forward hook on the first layer records its inputs and then
        raises RunnerStopExecution so the rest of the model never runs.

        Args:
            data_list: calibration samples; each item is unpacked as
                positional args (tuple/list) or keyword args (dict).

        Returns:
            dict with 'data' (list of captured input tuples) plus the last
            observed 'attention_mask', 'position_ids' and
            'position_embeddings' kwargs (None if never passed).
        """
        first_layer_data = {'data': [], 'attention_mask': None,
                            'position_ids': None, 'position_embeddings': None}

        def hook_fn(module, input, kwargs):
            first_layer_data['data'].append(input)
            first_layer_data['attention_mask'] = kwargs.get('attention_mask', None)
            first_layer_data['position_ids'] = kwargs.get('position_ids', None)
            first_layer_data['position_embeddings'] = kwargs.get('position_embeddings', None)
            # Abort the forward pass -- only the first layer's input is needed.
            raise RunnerStopExecution

        hook_handle = self.layers[0].register_forward_pre_hook(hook_fn, with_kwargs=True)

        try:
            for data in data_list:
                try:
                    # Generalized: dict samples are passed as keyword args,
                    # consistent with ModelStructureBridge.run_calibration.
                    if isinstance(data, dict):
                        self.model_bridge.model(**data)
                    else:
                        self.model_bridge.model(*data)
                except RunnerStopExecution:
                    pass
        finally:
            # Always detach the hook, even if the model raised.
            hook_handle.remove()

        return first_layer_data

    def load_layer_to_device(self, i, device: torch.device):
        """Move layer *i* to *device* in place."""
        self.layers[i].to(device)

    def run_layer(self, i, data, **kwargs):
        """Run layer *i* on *data*, forwarding extra kwargs to the layer."""
        layer = self.layers[i]
        return layer(data, **kwargs)


