# -*- coding: utf-8 -*-
"""
推理脚本 - 使用训练好的Transformer模型进行文本生成
包含模型加载、文本生成、交互式对话等功能
"""

import os
import json
import argparse
from typing import List, Optional, Dict, Any

import torch
import torch.nn.functional as F
from tqdm import tqdm

from config import config
from data_loader import Tokenizer
from model import TransformerLanguageModel


class TextGenerator:
    """Text generator — wraps a trained TransformerLanguageModel for inference.

    Loads a checkpoint and a vocabulary file, then offers sampling / greedy
    text generation, perplexity scoring, and an interactive REPL.
    """

    def __init__(self, model_path: str, vocab_path: Optional[str] = None, device: str = "auto"):
        """
        Args:
            model_path: path to a training checkpoint (.pt) produced by the trainer
            vocab_path: path to the vocabulary file; falls back to config.data.vocab_path
            device: "cuda", "cpu", or "auto" (pick CUDA when available)
        Raises:
            FileNotFoundError: if the checkpoint or vocabulary file is missing
        """
        self.model_path = model_path
        self.vocab_path = vocab_path or config.data.vocab_path

        # Resolve the torch device up front; everything is moved onto it.
        if device == "auto":
            self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        else:
            self.device = torch.device(device)

        # Populated by load_model() / load_tokenizer() below.
        self.model = None
        self.tokenizer = None
        self.config_dict = None

        self.load_model()
        self.load_tokenizer()

        print(f"文本生成器初始化完成，使用设备: {self.device}")
        print(f"模型参数数量: {self.model.get_model_size():,}")

    def load_model(self) -> None:
        """Load the trained model from the checkpoint file.

        Rebuilds the architecture from the checkpoint's 'model_info' when
        present (so inference matches training hyperparameters), otherwise
        falls back to the default TransformerLanguageModel configuration.
        """
        if not os.path.exists(self.model_path):
            raise FileNotFoundError(f"模型文件不存在: {self.model_path}")

        print(f"加载模型: {self.model_path}")
        # NOTE(review): torch>=2.6 defaults torch.load to weights_only=True,
        # which rejects pickled config objects stored in the checkpoint;
        # pass weights_only=False explicitly if loading fails — confirm torch version.
        checkpoint = torch.load(self.model_path, map_location=self.device)

        # Keep the training-time configuration around for reference.
        if 'config' in checkpoint:
            self.config_dict = checkpoint['config']
        else:
            print("警告: 检查点中没有配置信息，使用默认配置")
            # Shallow view of the default config object's attributes.
            self.config_dict = config.__dict__

        # Rebuild the model with the exact hyperparameters it was trained with.
        if 'model_info' in checkpoint:
            model_info = checkpoint['model_info']
            self.model = TransformerLanguageModel(
                vocab_size=model_info['vocab_size'],
                d_model=model_info['d_model'],
                n_heads=model_info['n_heads'],
                n_layers=model_info['n_layers'],
                d_ff=model_info['d_ff'],
                max_seq_length=model_info['max_seq_length'],
                dropout=model_info['dropout'],
                activation=model_info['activation']
            )
        else:
            self.model = TransformerLanguageModel()

        # Load weights and freeze the model in eval mode (disables dropout).
        self.model.load_state_dict(checkpoint['model_state_dict'])
        self.model.to(self.device)
        self.model.eval()

        print("模型加载完成")

    def load_tokenizer(self) -> None:
        """Load the tokenizer from the vocabulary file."""
        if not os.path.exists(self.vocab_path):
            raise FileNotFoundError(f"词汇表文件不存在: {self.vocab_path}")

        print(f"加载词汇表: {self.vocab_path}")
        self.tokenizer = Tokenizer(self.vocab_path)
        print(f"词汇表加载完成，词汇量: {self.tokenizer.vocab_size}")

    def _eos_token_id(self) -> int:
        """Vocabulary id of the end-of-sequence token, or -1 when absent."""
        return self.tokenizer.word2idx.get(config.data.eos_token, -1)

    def generate_text(self, prompt: str, max_length: int = 100, temperature: float = 1.0,
                     top_k: int = 50, top_p: float = 0.9, num_return_sequences: int = 1,
                     do_sample: bool = True) -> List[str]:
        """
        Generate continuations for a prompt.
        Args:
            prompt: input prompt text (must be non-empty)
            max_length: maximum number of tokens to generate
            temperature: softmax temperature; higher values increase randomness
            top_k: keep only the k most likely tokens (0 disables)
            top_p: nucleus-sampling cumulative-probability threshold (1.0 disables)
            num_return_sequences: how many independent continuations to produce
            do_sample: sample from the distribution; when False, use greedy decoding
        Returns:
            List of generated continuations (prompt prefix stripped).
        Raises:
            ValueError: if the prompt is empty or whitespace-only.
        """
        if not prompt.strip():
            raise ValueError("输入提示不能为空")

        print(f"生成文本，提示: '{prompt}'")

        # Encode the prompt once; each sequence starts from a clone of it.
        input_ids = self.tokenizer.encode(prompt, add_special_tokens=True)
        input_tensor = torch.tensor([input_ids], dtype=torch.long).to(self.device)

        generated_texts = []

        with torch.no_grad():
            for _ in range(num_return_sequences):
                if do_sample:
                    generated = self._generate_with_sampling(
                        input_tensor.clone(), max_length, temperature, top_k, top_p
                    )
                else:
                    generated = self._generate_greedy(input_tensor.clone(), max_length)

                # Decode the full sequence (prompt + continuation).
                generated_ids = generated[0].cpu().tolist()
                generated_text = self.tokenizer.decode(generated_ids, skip_special_tokens=True)

                # Strip the original prompt so only the continuation is returned.
                if generated_text.startswith(prompt):
                    generated_text = generated_text[len(prompt):].strip()

                generated_texts.append(generated_text)

        return generated_texts

    def _generate_with_sampling(self, input_ids: torch.Tensor, max_length: int,
                               temperature: float, top_k: int, top_p: float) -> torch.Tensor:
        """Autoregressive sampling with temperature, top-k and nucleus (top-p) filtering.

        Returns the full (1, seq_len) token tensor including the prompt.
        """
        # Hoisted out of the loop: the EOS id does not change between steps.
        eos_id = self._eos_token_id()

        for _ in range(max_length):
            # Full forward pass over the sequence so far (no KV cache).
            outputs = self.model(input_ids)
            logits = outputs['logits']

            # Only the last position predicts the next token; apply temperature.
            next_token_logits = logits[:, -1, :] / temperature

            # Top-k filtering: mask everything outside the k best logits.
            if top_k > 0:
                top_k_logits, top_k_indices = torch.topk(next_token_logits, min(top_k, next_token_logits.size(-1)))
                next_token_logits = torch.full_like(next_token_logits, float('-inf'))
                next_token_logits.scatter_(1, top_k_indices, top_k_logits)

            # Top-p (nucleus) filtering: drop the tail past cumulative prob top_p.
            if top_p < 1.0:
                sorted_logits, sorted_indices = torch.sort(next_token_logits, descending=True)
                cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)

                # Shift right by one so at least the single best token survives.
                sorted_indices_to_remove = cumulative_probs > top_p
                sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
                sorted_indices_to_remove[..., 0] = 0

                # Map the removal mask back from sorted order to vocab order.
                indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
                next_token_logits[indices_to_remove] = float('-inf')

            # Sample the next token from the filtered distribution.
            probs = F.softmax(next_token_logits, dim=-1)
            next_token = torch.multinomial(probs, num_samples=1)

            input_ids = torch.cat([input_ids, next_token], dim=-1)

            # Stop on EOS (assumes batch size 1 — .item() would fail otherwise).
            if next_token.item() == eos_id:
                break

            # Stop before exceeding the model's positional-encoding capacity.
            if input_ids.size(1) >= self.model.max_seq_length:
                break

        return input_ids

    def _generate_greedy(self, input_ids: torch.Tensor, max_length: int) -> torch.Tensor:
        """Greedy decoding: always take the argmax token at each step.

        Returns the full (1, seq_len) token tensor including the prompt.
        """
        eos_id = self._eos_token_id()

        for _ in range(max_length):
            outputs = self.model(input_ids)
            logits = outputs['logits']

            # Highest-probability token at the last position.
            next_token = torch.argmax(logits[:, -1, :], dim=-1, keepdim=True)

            input_ids = torch.cat([input_ids, next_token], dim=-1)

            # Stop on EOS (assumes batch size 1).
            if next_token.item() == eos_id:
                break

            # Stop before exceeding the model's maximum sequence length.
            if input_ids.size(1) >= self.model.max_seq_length:
                break

        return input_ids

    def calculate_perplexity(self, text: str) -> float:
        """Compute the model's perplexity on a piece of text.

        Uses the standard next-token cross-entropy: logits at position t are
        scored against the token at position t+1, then exp(mean loss).
        """
        input_ids = self.tokenizer.encode(text, add_special_tokens=True)
        input_tensor = torch.tensor([input_ids], dtype=torch.long).to(self.device)

        with torch.no_grad():
            outputs = self.model(input_tensor)
            logits = outputs['logits']

            # Shift so each position predicts the following token.
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = input_tensor[..., 1:].contiguous()

            loss_fct = torch.nn.CrossEntropyLoss()
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))

            perplexity = torch.exp(loss).item()

        return perplexity

    def interactive_chat(self) -> None:
        """Interactive REPL: read a prompt, ask for generation parameters,
        generate continuations and report their perplexity."""
        print("\n=== 交互式文本生成 ===")
        print("输入文本提示，模型将生成续写内容")
        print("输入 'quit' 或 'exit' 退出")
        print("输入 'help' 查看帮助")
        print("-" * 50)

        while True:
            try:
                prompt = input("\n请输入提示文本: ").strip()

                if prompt.lower() in ['quit', 'exit', '退出']:
                    print("再见!")
                    break

                if prompt.lower() == 'help':
                    self._show_help()
                    continue

                if not prompt:
                    print("请输入有效的提示文本")
                    continue

                # Collect generation parameters; empty input keeps the default.
                print("\n生成参数设置 (直接回车使用默认值):")

                try:
                    max_length = input("最大生成长度 (默认: 50): ").strip()
                    max_length = int(max_length) if max_length else 50

                    temperature = input("温度参数 (默认: 1.0): ").strip()
                    temperature = float(temperature) if temperature else 1.0

                    top_k = input("Top-k参数 (默认: 50): ").strip()
                    top_k = int(top_k) if top_k else 50

                    top_p = input("Top-p参数 (默认: 0.9): ").strip()
                    top_p = float(top_p) if top_p else 0.9

                    num_sequences = input("生成序列数量 (默认: 1): ").strip()
                    num_sequences = int(num_sequences) if num_sequences else 1

                except ValueError:
                    # Any malformed number resets ALL parameters to defaults.
                    print("参数格式错误，使用默认值")
                    max_length, temperature, top_k, top_p, num_sequences = 50, 1.0, 50, 0.9, 1

                print("\n生成中...")
                generated_texts = self.generate_text(
                    prompt=prompt,
                    max_length=max_length,
                    temperature=temperature,
                    top_k=top_k,
                    top_p=top_p,
                    num_return_sequences=num_sequences
                )

                print("\n=== 生成结果 ===")
                for i, text in enumerate(generated_texts, 1):
                    print(f"\n序列 {i}:")
                    print(f"完整文本: {prompt}{text}")
                    print(f"生成部分: {text}")

                    # Perplexity is informational only; failures are reported,
                    # not fatal. Narrowed from a bare `except:` so Ctrl-C and
                    # SystemExit still propagate.
                    try:
                        perplexity = self.calculate_perplexity(prompt + text)
                        print(f"困惑度: {perplexity:.2f}")
                    except Exception:
                        print("困惑度计算失败")

            except KeyboardInterrupt:
                print("\n操作被中断")
                break
            except Exception as e:
                print(f"发生错误: {str(e)}")

    def _show_help(self) -> None:
        """Print the interactive-mode help text."""
        help_text = """
=== 帮助信息 ===

参数说明:
- 最大生成长度: 控制生成文本的最大token数量
- 温度参数: 控制生成的随机性，值越大越随机 (0.1-2.0)
- Top-k参数: 只考虑概率最高的k个token (1-100)
- Top-p参数: 只考虑累积概率达到p的token (0.1-1.0)
- 生成序列数量: 生成多少个不同的续写版本

使用技巧:
- 温度较低(0.5-0.8)生成更连贯的文本
- 温度较高(1.0-1.5)生成更有创意的文本
- Top-k和Top-p可以组合使用来控制生成质量

命令:
- 'quit' 或 'exit': 退出程序
- 'help': 显示此帮助信息
"""
        print(help_text)


def batch_generate(generator: TextGenerator, prompts: List[str],
                  output_file: str, **generation_kwargs):
    """Generate continuations for a batch of prompts and dump results as JSON.

    Args:
        generator: an initialized TextGenerator
        prompts: prompt strings to generate from
        output_file: path of the JSON file to write (UTF-8, non-ASCII preserved)
        **generation_kwargs: forwarded to TextGenerator.generate_text
    Returns:
        List of result dicts with prompt/sequence ids, texts, and perplexity
        (None when perplexity computation fails).
    """
    results = []

    print(f"批量生成文本，共 {len(prompts)} 个提示")

    for i, prompt in enumerate(tqdm(prompts, desc="生成进度")):
        # Best-effort batch: a failing prompt is reported and skipped rather
        # than aborting the whole run. Only the generation call is guarded.
        try:
            generated_texts = generator.generate_text(prompt, **generation_kwargs)
        except Exception as e:
            print(f"生成第 {i} 个提示时出错: {str(e)}")
            continue

        for j, text in enumerate(generated_texts):
            result = {
                'prompt_id': i,
                'sequence_id': j,
                'prompt': prompt,
                'generated_text': text,
                'full_text': prompt + text
            }

            # Perplexity is optional metadata; narrowed from a bare `except:`
            # so KeyboardInterrupt/SystemExit are no longer swallowed.
            try:
                result['perplexity'] = generator.calculate_perplexity(result['full_text'])
            except Exception:
                result['perplexity'] = None

            results.append(result)

    # Persist all results in one JSON document.
    with open(output_file, 'w', encoding='utf-8') as f:
        json.dump(results, f, ensure_ascii=False, indent=2)

    print(f"批量生成完成，结果保存到: {output_file}")
    return results


def main():
    """CLI entry point: parse arguments, build a TextGenerator, and dispatch
    to the requested mode (interactive / single / batch)."""
    parser = argparse.ArgumentParser(description="Transformer语言模型推理")
    parser.add_argument("--model_path", type=str, required=True, help="模型文件路径")
    parser.add_argument("--vocab_path", type=str, help="词汇表文件路径")
    parser.add_argument("--device", type=str, default="auto", help="设备类型 (cuda/cpu/auto)")
    parser.add_argument("--mode", type=str, default="interactive",
                       choices=["interactive", "single", "batch"], help="运行模式")
    parser.add_argument("--prompt", type=str, help="单次生成的提示文本")
    parser.add_argument("--prompts_file", type=str, help="批量生成的提示文件")
    parser.add_argument("--output_file", type=str, help="输出文件路径")
    parser.add_argument("--max_length", type=int, default=100, help="最大生成长度")
    parser.add_argument("--temperature", type=float, default=1.0, help="温度参数")
    parser.add_argument("--top_k", type=int, default=50, help="Top-k参数")
    parser.add_argument("--top_p", type=float, default=0.9, help="Top-p参数")
    parser.add_argument("--num_sequences", type=int, default=1, help="生成序列数量")
    args = parser.parse_args()

    # Build the generator once; it is shared by every mode.
    generator = TextGenerator(
        model_path=args.model_path,
        vocab_path=args.vocab_path,
        device=args.device
    )

    # Generation parameters forwarded to generate_text in every mode.
    gen_params = dict(
        max_length=args.max_length,
        temperature=args.temperature,
        top_k=args.top_k,
        top_p=args.top_p,
        num_return_sequences=args.num_sequences,
    )

    if args.mode == "interactive":
        generator.interactive_chat()
        return

    if args.mode == "single":
        if not args.prompt:
            print("错误: 单次生成模式需要提供 --prompt 参数")
            return

        outputs = generator.generate_text(args.prompt, **gen_params)

        print(f"\n提示: {args.prompt}")
        print("=== 生成结果 ===")
        for seq_no, text in enumerate(outputs, 1):
            print(f"\n序列 {seq_no}: {text}")
            ppl = generator.calculate_perplexity(args.prompt + text)
            print(f"困惑度: {ppl:.2f}")
        return

    if args.mode == "batch":
        if not args.prompts_file:
            print("错误: 批量生成模式需要提供 --prompts_file 参数")
            return
        if not args.output_file:
            print("错误: 批量生成模式需要提供 --output_file 参数")
            return

        # One prompt per non-blank line of the prompts file.
        with open(args.prompts_file, 'r', encoding='utf-8') as f:
            prompts = [line.strip() for line in f if line.strip()]

        batch_generate(generator, prompts, args.output_file, **gen_params)

# Script entry point: only run the CLI when executed directly, not on import.
if __name__ == "__main__":
    main()