#!/usr/bin/env python3
import numpy as np
from typing import List
import torch
from env.llm.model_loader import ModelLoader
from env.llm.skipable_state_llm import SkipableStateLLM
from env.env import SkipLayerEnv

class InteractiveAgent:
    """Interactive agent that asks a human operator which layers to skip."""

    def __init__(self, num_layers: int):
        # Total number of transformer layers the skip mask must cover.
        self.num_layers = num_layers

    def get_action(self, state: np.ndarray) -> List[int]:
        """Prompt the user for layer indices to skip and return a 0/1 mask.

        The returned list has length ``num_layers``; a 1 at index ``i`` means
        layer ``i`` is skipped. Invalid input is reported and the user is
        re-prompted. ``state`` is accepted for interface compatibility with
        other agents but is not used here.

        Raises:
            EOFError: propagated when stdin is closed so the caller can
                terminate instead of spinning on a dead input stream.
        """
        while True:
            try:
                skip_layers_str = input(f"请输入要跳过的层数（空格分隔，范围0-{self.num_layers-1}，留空表示不跳过任何层）: ").strip()

                # All zeros by default: every layer executes.
                action = [0] * self.num_layers

                # If the user typed anything, parse the space-separated indices.
                if skip_layers_str:
                    skip_layers = [int(x) for x in skip_layers_str.split()]

                    # Validate each index before flipping its mask bit.
                    for layer_idx in skip_layers:
                        if layer_idx < 0 or layer_idx >= self.num_layers:
                            raise ValueError(f"层数 {layer_idx} 超出范围 0-{self.num_layers-1}")
                        action[layer_idx] = 1  # mark this layer as skipped

                print(f"生成的掩码: {''.join(map(str, action))} (跳过层: {[i for i, x in enumerate(action) if x == 1] if any(action) else '无'})")
                return action

            except EOFError:
                # BUG FIX: EOFError (closed stdin) is an Exception subclass and
                # was previously swallowed by the broad handler below, causing
                # an infinite error loop. Let it propagate.
                raise
            except ValueError as e:
                # int() failures carry "invalid literal"; our own range check
                # raises with a user-facing Chinese message instead.
                if "invalid literal" in str(e):
                    print("输入错误！请确保输入的都是数字，用空格分隔")
                else:
                    print(f"输入错误！{e}")
            except Exception as e:
                print(f"输入错误！{e}")

def run_interactive_demo(model_path: str = "meta-llama/Llama-2-7b-chat-hf"):
    """Run an interactive layer-skipping demo on *model_path*.

    Loads the model, wraps it in two SkipableStateLLM instances (a baseline
    that runs every layer and one that honors the skip mask), then steps a
    SkipLayerEnv one token at a time using masks typed in by the user,
    printing reward diagnostics after each step.
    """
    # Model loader with a local HF cache directory.
    model_loader = ModelLoader(cache_dir="./hf_cache/")

    print("正在加载模型...")
    model, tokenizer = model_loader.load_model(model_path)

    # Two wrappers around the same underlying model/tokenizer pair:
    # one serves as the full-execution baseline, the other applies skips.
    full_llm = SkipableStateLLM(model, tokenizer)
    skip_llm = SkipableStateLLM(model, tokenizer)

    # Dummy single-entry dataset; the real prompt is supplied manually below.
    env = SkipLayerEnv(
        full_model=full_llm,
        skip_model=skip_llm,
        dataset=[""],
        max_seq_length=128,
        top_k=5
    )

    # Human-in-the-loop agent sized to the model's layer count.
    agent = InteractiveAgent(num_layers=full_llm.num_layers)

    initial_prompt = input("\n请输入初始prompt（例如'The capital of France is'）: ").strip()

    print("\n初始化环境...")
    state, _ = env.reset()
    # NOTE(review): the prompt is overwritten AFTER reset(), so `state` may
    # still reflect the dummy "" prompt rather than the user's input —
    # confirm against SkipLayerEnv.reset semantics.
    env.current_prompt = initial_prompt
    env.generated_sequence = []

    done = False
    while not done:
        # Show the sequence generated so far.
        current_sequence = env.current_prompt + ''.join(env.generated_sequence)
        print(f"\n当前序列: {current_sequence}")

        # Baseline model's suggested next token (no gradients needed).
        with torch.no_grad():
            full_output = full_llm.full_inference(current_sequence)
        print(f"基线模型建议的下个token: {full_output}")

        # Top-k token probabilities from the skip model.
        tokens, probs = skip_llm.decode(k=5)
        print("\n当前token概率分布:")
        for i, (token, prob) in enumerate(zip(tokens, probs)):
            print(f"  {i+1}. {token} ({prob*100:.1f}%)")

        # Ask the user for the skip mask.
        action = agent.get_action(state)

        # Advance the environment one step with the chosen mask.
        next_state, reward, done, _, _ = env.step(action)

        # BUG FIX: _calculate_kl_divergence() was evaluated twice inside the
        # same print; compute the penalty once so both printed figures are
        # guaranteed consistent (and the KL computation runs only once).
        kl_penalty = env.kl_weight * env._calculate_kl_divergence()
        print(f"\n奖励值: {reward:.2f} (速度奖励: {reward + kl_penalty:.2f} - KL惩罚: {kl_penalty:.2f})")
        print(f"已跳过 {sum(action)} 层")
        print("="*60)

        state = next_state

    # Final summary once the episode terminates.
    final_sequence = env.current_prompt + ''.join(env.generated_sequence)
    print("\n生成完成！")
    print(f"最终序列: {final_sequence}")

if __name__ == "__main__":
    import argparse

    # Single CLI flag: which model checkpoint to load.
    cli = argparse.ArgumentParser()
    cli.add_argument("--model_path", type=str, default="meta-llama/Llama-2-7b-chat-hf")
    run_interactive_demo(cli.parse_args().model_path)
