"""
层级比较分析器：比较跳过某一层后各层输出的差异
"""
import torch
import numpy as np
from typing import List, Dict, Tuple
from env.llm.model_loader import ModelLoader
from llm.skipable_model import SkipableModel


class LayerComparisonAnalyzer:
    """Layer-skip comparison analyzer.

    Runs the same prompt through the model twice -- once normally and once
    with a single decoder layer skipped -- then compares the per-layer hidden
    states to measure how much each layer's output is affected by the skip.
    """
    
    def __init__(self, model_id="meta-llama/Llama-2-7b-chat-hf", cache_dir="./hf_cache/"):
        """
        Initialize the analyzer.
        
        Args:
            model_id: Hugging Face model identifier to load.
            cache_dir: Directory used to cache downloaded model files.
        """
        self.model_loader = ModelLoader(cache_dir)
        self.model, self.tokenizer = self.model_loader.load_model(model_id)
        self.skipable_model = SkipableModel(self.model, self.tokenizer)
        # Decoder-layer count as reported by the model config.
        self.num_layers = self.model.config.num_hidden_layers
        
    def _forward_with_layer_outputs(self, prompt: str, skip_layer: int = None) -> List[torch.Tensor]:
        """
        Run a manual forward pass and collect every decoder layer's output.

        Re-implements the Llama-style forward loop (embedding -> RoPE ->
        causal mask -> decoder stack) so that an individual layer can be
        skipped.  NOTE(review): this depends on transformers' Llama internals
        (``model.model``, ``rotary_emb``, the ``position_embeddings`` kwarg)
        -- verify against the installed transformers version.
        
        Args:
            prompt: Input text.
            skip_layer: Index of the layer to skip; None skips nothing.
            
        Returns:
            List[torch.Tensor]: One hidden-state tensor per decoder layer.
        """
        inputs = self.tokenizer(prompt, return_tensors="pt").to(self.model.device)
        input_ids = inputs.input_ids
        batch_size, seq_length = input_ids.shape
        attention_mask_2d = inputs.attention_mask
        
        # Position ids 0..seq_length-1 for a single, left-aligned sequence.
        position_ids = torch.arange(
            0, seq_length, dtype=torch.long, device=self.model.device
        ).unsqueeze(0)
        
        layer_outputs = []
        
        with torch.no_grad():
            # Token embedding lookup.
            hidden_states = self.model.model.embed_tokens(input_ids)
            
            # Rotary position embeddings (RoPE); the except branch covers the
            # older transformers signature that takes seq_len instead of
            # position_ids.
            try:
                rotary_emb = self.model.model.rotary_emb(hidden_states, position_ids)
            except TypeError:
                rotary_emb = self.model.model.rotary_emb(hidden_states, seq_len=seq_length)
            
            # Build an additive 4D attention mask: 0 where attention is
            # allowed, dtype-min where it is blocked (future tokens + padding).
            if seq_length > 1:
                final_attention_mask = torch.zeros(
                    (batch_size, 1, seq_length, seq_length), 
                    dtype=self.model.dtype, 
                    device=self.model.device
                )
                
                # True strictly above the diagonal: future positions to mask.
                causal_mask = torch.ones(
                    (seq_length, seq_length), dtype=torch.bool, device=self.model.device
                ).triu(diagonal=1)
                
                final_attention_mask.masked_fill_(
                    causal_mask[None, None, :, :], 
                    torch.finfo(self.model.dtype).min
                )
                
                # Also mask columns the tokenizer marked as padding.
                final_attention_mask.masked_fill_(
                    attention_mask_2d[:, None, None, :] == 0, 
                    torch.finfo(self.model.dtype).min
                )
            else:
                # Single-token input: no mask required.
                final_attention_mask = None
            
            # Walk the decoder stack.
            for i, decoder_layer in enumerate(self.model.model.layers):
                if skip_layer is not None and i == skip_layer:
                    # Skipped layer: record the unchanged hidden states as the
                    # layer's "output" (identity pass-through).
                    layer_outputs.append(hidden_states.clone())
                else:
                    # Execute the layer normally.
                    layer_outputs_tuple = decoder_layer(
                        hidden_states,
                        attention_mask=final_attention_mask,
                        position_embeddings=rotary_emb
                    )
                    hidden_states = layer_outputs_tuple[0]
                    layer_outputs.append(hidden_states.clone())
        
        return layer_outputs
    
    def compare_layers(self, prompt: str, skip_layer_idx: int) -> Dict:
        """
        Compare per-layer outputs with and without skipping one layer.
        
        Args:
            prompt: Input text.
            skip_layer_idx: Index of the layer to skip (0-based).
            
        Returns:
            Dict: Result with keys 'prompt', 'skipped_layer', 'total_layers'
                and 'layer_distances' (per-layer cosine similarity of the
                last token's hidden state between the two runs).

        Raises:
            ValueError: If skip_layer_idx is outside [0, num_layers).
        """
        if skip_layer_idx < 0 or skip_layer_idx >= self.num_layers:
            raise ValueError(f"层索引超出范围: {skip_layer_idx}, 模型总层数: {self.num_layers}")
        
        print(f"开始比较分析...")
        print(f"模型总层数: {self.num_layers}")
        print(f"跳过层索引: {skip_layer_idx}")
        print(f"输入文本: {prompt}")
        
        # Two forward passes: baseline, then with the layer skipped.
        print("正在进行完整推理（不跳过任何层）...")
        full_outputs = self._forward_with_layer_outputs(prompt)
        
        print(f"正在进行跳层推理（跳过第{skip_layer_idx}层）...")
        skip_outputs = self._forward_with_layer_outputs(prompt, skip_layer_idx)
        
        # Per-layer divergence between the two runs.
        print("正在计算层输出差异...")
        layer_distances = {}
        
        for layer_idx in range(self.num_layers):
            # Last token's hidden state only; the .item() below assumes
            # batch size 1 (single prompt).
            full_hidden = full_outputs[layer_idx][:, -1, :]  # [batch_size, hidden_size]
            skip_hidden = skip_outputs[layer_idx][:, -1, :]
            
            # Cosine similarity between the baseline and skip runs.
            cosine_sim = torch.nn.functional.cosine_similarity(
                full_hidden, skip_hidden, dim=-1
            ).item()
            
            layer_distances[layer_idx] = {
                'cosine_similarity': cosine_sim
            }
        
        result = {
            'prompt': prompt,
            'skipped_layer': skip_layer_idx,
            'total_layers': self.num_layers,
            'layer_distances': layer_distances
        }
        
        return result
    
    def print_comparison_summary(self, result: Dict) -> None:
        """
        Print a human-readable summary of one comparison result.
        
        Args:
            result: Result dict returned by compare_layers.
        """
        print(f"\n{'='*60}")
        print(f"层级比较分析结果")
        print(f"{'='*60}")
        print(f"输入文本: {result['prompt']}")
        print(f"跳过的层: 第{result['skipped_layer']}层")
        print(f"模型总层数: {result['total_layers']}")
        print(f"{'='*60}")
        
        print(f"{'层索引':<8} {'余弦相似度':<15}")
        print(f"{'-'*25}")
        
        for layer_idx in range(result['total_layers']):
            distances = result['layer_distances'][layer_idx]
            print(f"{layer_idx:<8} "
                  f"{distances['cosine_similarity']:<15.4f}")
        
        # Impact analysis for the layers after the skipped one.
        skip_layer = result['skipped_layer']
        if skip_layer < result['total_layers'] - 1:
            print(f"\n跳过第{skip_layer}层后各层的影响分析:")
            print(f"{'='*40}")
            
            after_skip_layers = []
            for i in range(skip_layer + 1, result['total_layers']):
                distances = result['layer_distances'][i]
                after_skip_layers.append({
                    'layer': i,
                    'cosine_sim': distances['cosine_similarity']
                })
            
            # Sort ascending by cosine similarity (lower = more affected).
            after_skip_layers.sort(key=lambda x: x['cosine_sim'])
            
            print("受影响最大的后续层（按余弦相似度排序，越小影响越大）:")
            for layer_info in after_skip_layers[:5]:  # top-5 most affected layers
                print(f"  第{layer_info['layer']}层: 余弦相似度 {layer_info['cosine_sim']:.4f}")

def print_aggregated_analysis(all_results: List[Dict]):
    """Print an aggregate report across several per-sample analysis results.

    Args:
        all_results: Result dicts as produced by
            LayerComparisonAnalyzer.compare_layers; all entries are expected
            to share the same skipped layer and total layer count.
    """
    if not all_results:
        print("没有可分析的结果")
        return

    sample_count = len(all_results)
    first = all_results[0]
    skipped = first['skipped_layer']
    n_layers = first['total_layers']

    print(f"\n{'='*80}")
    print(f"多样本聚合分析结果")
    print(f"{'='*80}")
    print(f"样本数量: {sample_count}")
    print(f"跳过的层: 第{skipped}层")
    print(f"模型总层数: {n_layers}")

    # Mean/std of the cosine similarity per layer, across all samples.
    layer_stats = {}
    for idx in range(n_layers):
        sims = [r['layer_distances'][idx]['cosine_similarity'] for r in all_results]
        layer_stats[idx] = {
            'avg_cosine': np.mean(sims),
            'std_cosine': np.std(sims)
        }

    print(f"\n平均指标统计:")
    print(f"{'='*40}")
    print(f"{'层索引':<8} {'平均余弦相似度':<15} {'标准差':<15}")
    print(f"{'-'*40}")

    for idx in range(n_layers):
        entry = layer_stats[idx]
        print(f"{idx:<8} "
              f"{entry['avg_cosine']:<15.4f} "
              f"{entry['std_cosine']:<15.4f}")

    # Rank the layers after the skipped one by mean similarity
    # (lower mean = more affected on average).
    if skipped < n_layers - 1:
        print(f"\n跳过第{skipped}层后各层的平均影响分析:")
        print(f"{'='*50}")

        ranked = sorted(
            (
                {'layer': i, 'avg_cosine': layer_stats[i]['avg_cosine']}
                for i in range(skipped + 1, n_layers)
            ),
            key=lambda entry: entry['avg_cosine'],
        )

        print("受影响最大的后续层（按平均余弦相似度排序，越小影响越大）:")
        for entry in ranked[:5]:  # five most affected layers on average
            print(f"  第{entry['layer']}层: 平均余弦相似度 {entry['avg_cosine']:.4f}")

    # Consistency across samples: coefficient of variation per layer.
    print(f"\n样本间差异一致性分析:")
    print(f"{'='*50}")

    # Only layers with a strictly positive mean similarity get a CV.
    cv_entries = [
        {
            'layer': i,
            'cv': layer_stats[i]['std_cosine'] / abs(layer_stats[i]['avg_cosine']),
            'avg_cosine': layer_stats[i]['avg_cosine'],
        }
        for i in range(skipped + 1, n_layers)
        if layer_stats[i]['avg_cosine'] > 0
    ]

    if cv_entries:
        cv_entries.sort(key=lambda entry: entry['cv'])
        print("最一致的层级影响（变异系数最小，表示样本间差异最稳定）:")
        for entry in cv_entries[:3]:  # three most consistent layers
            print(f"  第{entry['layer']}层: 变异系数 {entry['cv']:.3f}, "
                  f"平均余弦相似度 {entry['avg_cosine']:.4f}")




def main():
    """Demo entry point: run the layer-skip comparison over a prompt suite."""
    analyzer = LayerComparisonAnalyzer()

    # Prompt suite: a handful of short completions plus several long,
    # complex passages.
    prompts = [
        # short prompts
        "The capital of France is",
        "print('Hello",
        "1 + 2 + 3 + 4 = ",
        "Once upon a time",
        "Machine learning is",

        # long, complex prompts
        "In the rapidly evolving field of artificial intelligence, researchers have discovered that large language models exhibit emergent behaviors when scaling up their parameters and training data. These capabilities include few-shot learning, complex reasoning, and",

        "The implementation of a distributed computing system requires careful consideration of multiple factors including network latency, data consistency, fault tolerance, and load balancing. When designing such a system, engineers must evaluate trade-offs between",

        "Climate change represents one of the most pressing challenges of our time, with far-reaching consequences for ecosystems, human societies, and economic systems worldwide. The complex interactions between atmospheric greenhouse gases, ocean currents, and terrestrial carbon cycles create",

        "In quantum mechanics, the principle of superposition states that a quantum system can exist in multiple states simultaneously until measured. This fundamental concept underlies many quantum phenomena including quantum entanglement, quantum tunneling, and the uncertainty principle. The mathematical formulation of",

        "The development of modern neural networks has revolutionized the field of machine learning, enabling breakthrough applications in computer vision, natural language processing, and reinforcement learning. Deep learning architectures such as transformers, convolutional neural networks, and recurrent neural networks have demonstrated remarkable capabilities in"
    ]

    # Skip the decoder layer at 0-based index 3.
    skip_layer_idx = 3

    # Successful per-sample results, for the aggregate report at the end.
    collected = []

    for sample_no, prompt in enumerate(prompts, start=1):
        print(f"\n{'='*80}")
        print(f"测试样本 {sample_no}/{len(prompts)}: '{prompt}'")
        print(f"{'='*80}")

        try:
            outcome = analyzer.compare_layers(prompt, skip_layer_idx)
            analyzer.print_comparison_summary(outcome)
            collected.append(outcome)

        except Exception as e:
            # Report the failure and keep going with the remaining prompts.
            print(f"样本 '{prompt}' 分析过程中出现错误: {e}")
            import traceback
            traceback.print_exc()

    # Aggregate statistics over all successfully analyzed samples.
    if collected:
        print_aggregated_analysis(collected)


# Run the demo analysis when executed as a script (not on import).
if __name__ == "__main__":
    main()
