"""
可行性研究 v3：使用新的SkipableLLM验证环境实现
"""
from llm import ModelLoader
from env import SkipableLLM


def _predict_next_token(skipable_llm, input_ids, layer_mask):
    """Run one complete forward pass under ``layer_mask`` and return the
    predicted next token.

    Args:
        skipable_llm: SkipableLLM wrapper exposing the step-wise inference API
            (``start_inference`` / ``set_mask`` / ``inference_layers`` /
            ``get_next_token``).
        input_ids: Tokenized prompt tensor, already on the model's device.
        layer_mask: List of per-layer flags — 1 means execute the layer,
            0 means skip it.

    Returns:
        The next-token prediction produced once every layer has been processed.
    """
    skipable_llm.start_inference(input_ids)
    skipable_llm.set_mask(layer_mask)
    # inference_layers(k) advances up to k layers and returns False once the
    # final layer has run — loop until the pass is complete.
    has_more = True
    while has_more:
        has_more = skipable_llm.inference_layers(skipable_llm.num_layers)
    return skipable_llm.get_next_token()


def _evaluate_prompt(skipable_llm, prompt, prompt_idx):
    """Run the baseline and every single-layer-skip variant for one prompt.

    For each layer index, one forward pass is executed with exactly that
    layer masked out, and the prediction is compared against the all-layers
    baseline.

    Returns:
        A dict with keys ``prompt``, ``baseline_token``, ``skip_results``
        (per-layer prediction records) and ``error_layers`` (indices whose
        removal changed the prediction).
    """
    num_layers = skipable_llm.num_layers

    print("\n" + "="*50)
    print(f" 测试例子 {prompt_idx + 1}: '{prompt}' ")
    print("="*50)

    # Tokenize once; the same input_ids are reused for every masked pass.
    inputs = skipable_llm.tokenizer(prompt, return_tensors="pt").to(skipable_llm.device)
    input_ids = inputs.input_ids

    # Baseline: all layers enabled.
    print("\n执行完整推理 (基准)")
    print("-" * 30)
    baseline_token = _predict_next_token(skipable_llm, input_ids, [1] * num_layers)
    print(f"提示: '{prompt}'")
    print(f"基准模型预测的下一个词元: '{baseline_token}'")

    # Skip-one-layer ablation.
    print("\n执行跳层推理实验")
    print("-" * 30)

    prompt_results = {
        'prompt': prompt,
        'baseline_token': baseline_token,
        'skip_results': [],
        'error_layers': []
    }

    for layer_to_skip in range(num_layers):
        # Build a mask that executes every layer except `layer_to_skip`.
        layer_mask = [1] * num_layers
        layer_mask[layer_to_skip] = 0

        skipped_token = _predict_next_token(skipable_llm, input_ids, layer_mask)
        is_same = (skipped_token == baseline_token)

        prompt_results['skip_results'].append({
            'layer_skipped': layer_to_skip,
            'predicted_token': skipped_token,
            'is_correct': is_same
        })
        if not is_same:
            prompt_results['error_layers'].append(layer_to_skip)

        print(f"跳过第 {layer_to_skip:02d} 层 | 预测词元: '{skipped_token}' | "
              f"与基准一致: {'✅' if is_same else '❌'}")

    return prompt_results


def _report_statistics(all_results, num_layers, total_tests):
    """Print per-prompt and aggregate error statistics for the experiment.

    Args:
        all_results: List of result dicts produced by ``_evaluate_prompt``.
        num_layers: Number of transformer layers (tests per prompt).
        total_tests: Total number of skip tests across all prompts.
    """
    print("\n" + "="*60)
    print(" 综合统计分析 ")
    print("="*60)

    total_errors = 0
    all_error_layers = []

    for result in all_results:
        error_count = len(result['error_layers'])
        total_errors += error_count
        all_error_layers.extend(result['error_layers'])

        print(f"\n提示: '{result['prompt']}'")
        print(f"  基准预测: '{result['baseline_token']}'")
        print(f"  错误层数: {error_count}/{num_layers}")
        print(f"  错误率: {error_count/num_layers*100:.1f}%")
        if result['error_layers']:
            print(f"  错误层序号: {result['error_layers']}")

    overall_error_rate = total_errors / total_tests * 100

    print("\n" + "-"*40)
    print(" 总体统计 ")
    print("-"*40)
    print(f"总测试次数: {total_tests}")
    print(f"总错误次数: {total_errors}")
    print(f"总体错误率: {overall_error_rate:.2f}%")

    if all_error_layers:
        mean_error_layer = sum(all_error_layers) / len(all_error_layers)
        print(f"错误层序号均值: {mean_error_layer:.2f}")

        # Histogram of how often each layer index caused a mismatch.
        layer_error_count = {}
        for layer in all_error_layers:
            layer_error_count[layer] = layer_error_count.get(layer, 0) + 1

        print("\n错误层分布:")
        for layer in sorted(layer_error_count.keys()):
            count = layer_error_count[layer]
            percentage = count / len(all_error_layers) * 100
            print(f"  第{layer:02d}层: {count}次 ({percentage:.1f}%)")
    else:
        print("错误层序号均值: 无错误")
        print("所有跳层测试都与基准预测一致！")


def _demo_stepwise_interface(skipable_llm):
    """Demonstrate the step-wise inference and probability-query APIs.

    Runs inference a few layers at a time, printing the intermediate hidden
    state shape after each step, then queries the probabilities of a small
    set of target words.
    """
    num_layers = skipable_llm.num_layers

    print("\n" + "="*60)
    print(" 测试新接口功能 ")
    print("="*60)

    test_prompt = "What is 2 + 2? The answer is"
    inputs = skipable_llm.tokenizer(test_prompt, return_tensors="pt").to(skipable_llm.device)
    input_ids = inputs.input_ids

    print("\n测试分步推理功能")
    print(f"提示: '{test_prompt}'")

    # Initialize a fresh pass with every layer enabled.
    skipable_llm.start_inference(input_ids)
    skipable_llm.set_mask([1] * num_layers)

    # Advance the forward pass `step_size` layers per iteration.
    step_size = 5
    step_count = 0
    while True:
        print(f"\n步骤 {step_count + 1}: 推理 {step_size} 层")
        has_more = skipable_llm.inference_layers(step_size)

        # Inspect the intermediate hidden state between steps.
        current_state = skipable_llm.get_state()
        print(f"  当前状态形状: {current_state.shape if current_state is not None else None}")
        print(f"  当前推理到第 {skipable_llm.current_layer_index} 层")

        step_count += 1

        if not has_more:
            print("  推理完成!")
            break

    final_token = skipable_llm.get_next_token()
    print(f"\n最终预测词元: '{final_token}'")

    # Probability-query API: likelihood of each candidate word as next token.
    target_words = ["4", "four", "Four", "8"]
    probabilities = skipable_llm.get_token_probabilities(target_words)
    print("\n目标词概率:")
    for word, prob in probabilities.items():
        print(f"  '{word}': {prob:.4f}")


def main():
    """
    Run the layer-skipping feasibility experiment (v3) with SkipableLLM.

    Loads the model, then for each test prompt computes the all-layers
    baseline prediction and one prediction per skipped layer, reports
    aggregate error statistics, and finally demos the step-wise API.
    """
    # --- 1. Configuration ---
    MODEL_ID = "meta-llama/Llama-2-13b-chat-hf"
    CACHE_DIR = "./hf_cache/"

    # --- 2. Load the model ---
    loader = ModelLoader(cache_dir=CACHE_DIR)
    model, tokenizer = loader.load_model(MODEL_ID)

    # Wrap the model in the skip-capable inference environment.
    skipable_llm = SkipableLLM(model, tokenizer)

    # --- 3. Test prompts (one per reasoning category) ---
    test_prompts = [
        # Logical reasoning
        "Which name is longer? Bob has 3 letters, Alice has 5 letters. Therefore, Alice is",

        # Numeric comparison
        "Compare 8.11 and 8.9: Since 8.11 has more digits after decimal but 8.9 = 8.90, we see 8.9 is actually",

        # Arithmetic
        "What is 15 + 27? Let me calculate step by step: 15 + 27 = 10 + 5 + 20 + 7 = 30 + 12 = ",

        # Geometry
        "A circle has radius 5. What is its area? Using the formula A = πr², we get A = π × 5² = π × ",

        # Algebra
        "Solve for x: 2x + 6 = 14. First subtract 6 from both sides: 2x = 8. Then divide by 2: x = ",

        # Probability
        "A fair coin is flipped twice. What is the probability of getting at least one head? The probability is 1 - P(no heads) = 1 - (1/2)² = 1 - ",

        # Programming logic
        'In Python, what does this code print?\nfor i in range(3):\n    print(i)\nThe output will be: 0, 1, ',

        # Physics
        "An object falls from height h. Using v² = u² + 2as with u=0, a=g, s=h, the final velocity is v = √(2gh). If g=10 m/s² and h=5m, then v = √(2×10×5) = √(",

        # Chemistry
        "Balance this equation: C + O₂ → CO₂. We need 1 carbon atom and 2 oxygen atoms on each side, so the balanced equation is: C + ",

        # History
        "World War II ended in 1945. It started in 1939 when Germany invaded Poland. Therefore, the war lasted approximately"
    ]

    # --- 4. Run the experiment ---
    num_layers = skipable_llm.num_layers
    all_results = [
        _evaluate_prompt(skipable_llm, prompt, prompt_idx)
        for prompt_idx, prompt in enumerate(test_prompts)
    ]

    # --- 5. Statistics ---
    _report_statistics(all_results, num_layers, len(test_prompts) * num_layers)

    # --- 6. Step-wise API demo ---
    _demo_stepwise_interface(skipable_llm)


# Run the full experiment when invoked as a script (not on import).
if __name__ == "__main__":
    main()
