"""
推理模块
支持文本生成、对话等功能
"""

import torch
import torch.nn.functional as F
from typing import List, Optional, Union, Dict, Any
import numpy as np
from transformers import GenerationConfig

from model_config import LLMConfig
from transformer_model import LLMForCausalLM
from tokenizer import LLMTokenizer


class TextGenerator:
    """Autoregressive text generator wrapping a causal LM and its tokenizer."""

    def __init__(
        self,
        model: LLMForCausalLM,
        tokenizer: LLMTokenizer,
        device: str = "cuda"
    ):
        """
        Args:
            model: causal language model used for generation.
            tokenizer: tokenizer matching the model's vocabulary.
            device: device string the model is moved to (e.g. "cuda", "cpu").
        """
        self.model = model
        self.tokenizer = tokenizer
        self.device = device

        # Move the model to the target device and switch to eval mode
        # (disables dropout etc. for deterministic inference behavior).
        self.model.to(device)
        self.model.eval()

    def generate(
        self,
        prompt: str,
        max_length: int = 100,
        temperature: float = 1.0,
        top_p: float = 0.9,
        top_k: int = 50,
        repetition_penalty: float = 1.1,
        do_sample: bool = True,
        num_beams: int = 1,
        early_stopping: bool = True,
        pad_token_id: Optional[int] = None,
        eos_token_id: Optional[int] = None,
        **kwargs
    ) -> str:
        """Generate a continuation of ``prompt`` and return only the new text.

        Args:
            prompt: input text to continue.
            max_length: maximum total sequence length (prompt + continuation).
            temperature, top_p, top_k, repetition_penalty, do_sample,
                num_beams, early_stopping: standard decoding controls,
                collected into a ``GenerationConfig``.
            pad_token_id, eos_token_id: override the tokenizer's defaults
                when given.
            **kwargs: extra fields forwarded into ``GenerationConfig``.

        Returns:
            The generated continuation with the prompt portion removed.
        """
        # Fall back to the tokenizer's special-token ids when not supplied.
        if pad_token_id is None:
            pad_token_id = self.tokenizer.pad_token_id
        if eos_token_id is None:
            eos_token_id = self.tokenizer.eos_token_id

        # Encode the prompt and move it onto the model's device.
        input_ids = self.tokenizer.encode(prompt, add_special_tokens=True, return_tensors="pt")
        input_ids = input_ids.to(self.device)

        generation_config = GenerationConfig(
            max_length=max_length,
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
            repetition_penalty=repetition_penalty,
            do_sample=do_sample,
            num_beams=num_beams,
            early_stopping=early_stopping,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs
        )

        # FIX: kwargs were previously passed both into GenerationConfig and
        # again into model.generate(), applying the same options twice and
        # risking duplicate-argument errors. They are consumed only by the
        # GenerationConfig above now.
        with torch.no_grad():
            generated_ids = self.model.generate(
                input_ids=input_ids,
                generation_config=generation_config,
            )

        # FIX: stripping the prompt by string prefix is fragile — decoding
        # with skip_special_tokens=True may normalize whitespace so the
        # output no longer startswith(prompt), leaking the prompt into the
        # return value. Slice off the prompt *tokens* instead, which is exact.
        prompt_len = input_ids.shape[1]
        continuation_ids = generated_ids[0][prompt_len:]
        generated_text = self.tokenizer.decode(continuation_ids, skip_special_tokens=True)

        return generated_text.strip()

    def generate_batch(
        self,
        prompts: List[str],
        max_length: int = 100,
        temperature: float = 1.0,
        top_p: float = 0.9,
        top_k: int = 50,
        repetition_penalty: float = 1.1,
        do_sample: bool = True,
        num_beams: int = 1,
        early_stopping: bool = True,
        **kwargs
    ) -> List[str]:
        """Generate one continuation per prompt; sequential, same options as
        :meth:`generate`."""
        results = []
        for prompt in prompts:
            result = self.generate(
                prompt=prompt,
                max_length=max_length,
                temperature=temperature,
                top_p=top_p,
                top_k=top_k,
                repetition_penalty=repetition_penalty,
                do_sample=do_sample,
                num_beams=num_beams,
                early_stopping=early_stopping,
                **kwargs
            )
            results.append(result)
        return results


class ChatBot:
    """Multi-turn conversational wrapper around a :class:`TextGenerator`.

    Keeps a rolling message history and renders it into a single prompt
    string ("系统/用户/助手" transcript format) for each turn.
    """

    def __init__(
        self,
        model: LLMForCausalLM,
        tokenizer: LLMTokenizer,
        device: str = "cuda",
        system_prompt: str = "你是一个有用的AI助手。",
        max_history: int = 10
    ):
        """
        Args:
            model: causal LM used to produce replies.
            tokenizer: tokenizer matching the model.
            device: device the underlying generator runs on.
            system_prompt: instruction prepended to every rendered prompt.
            max_history: number of most-recent messages kept in the prompt.
        """
        self.generator = TextGenerator(model, tokenizer, device)
        self.system_prompt = system_prompt
        self.max_history = max_history
        self.history = []

    def format_conversation(self, user_input: str) -> str:
        """Render system prompt + recent history + current turn as one prompt."""
        role_labels = {'user': '用户', 'assistant': '助手'}
        lines = [f"系统：{self.system_prompt}"]

        # Only the most recent max_history messages are included; unknown
        # roles are silently skipped.
        for turn in self.history[-self.max_history:]:
            label = role_labels.get(turn.get('role', ''))
            if label is not None:
                lines.append(f"{label}：{turn.get('content', '')}")

        # Current user message, then an open assistant tag for the model
        # to continue from.
        lines.append(f"用户：{user_input}")
        return "\n".join(lines) + "\n助手："

    def chat(
        self,
        user_input: str,
        max_length: int = 200,
        temperature: float = 0.7,
        top_p: float = 0.9,
        top_k: int = 50,
        **kwargs
    ) -> str:
        """Generate a reply to ``user_input`` and record the exchange."""
        reply = self.generator.generate(
            prompt=self.format_conversation(user_input),
            max_length=max_length,
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
            **kwargs
        )

        # Record both sides of the turn so later prompts include it.
        self.history.extend([
            {'role': 'user', 'content': user_input},
            {'role': 'assistant', 'content': reply},
        ])
        return reply

    def clear_history(self):
        """Drop all recorded conversation turns."""
        self.history = []

    def set_system_prompt(self, system_prompt: str):
        """Replace the system instruction used for future prompts."""
        self.system_prompt = system_prompt


class ModelEvaluator:
    """Evaluation helpers for a causal LM: perplexity, generation quality,
    and exact-match accuracy."""

    def __init__(
        self,
        model: LLMForCausalLM,
        tokenizer: LLMTokenizer,
        device: str = "cuda"
    ):
        """
        Args:
            model: causal language model under evaluation.
            tokenizer: tokenizer matching the model.
            device: device string the model is moved to.
        """
        self.model = model
        self.tokenizer = tokenizer
        self.device = device

        # Move the model to the target device and freeze it in eval mode.
        self.model.to(device)
        self.model.eval()

    def compute_perplexity(
        self,
        text: str,
        max_length: int = 512
    ) -> float:
        """Return exp(mean LM loss) of ``text``, truncated to ``max_length``
        tokens."""
        input_ids = self.tokenizer.encode(text, add_special_tokens=True, return_tensors="pt")
        input_ids = input_ids.to(self.device)

        # Truncate overly long inputs to keep memory bounded.
        if input_ids.shape[1] > max_length:
            input_ids = input_ids[:, :max_length]

        # Teacher-forced loss with the input as its own labels.
        with torch.no_grad():
            outputs = self.model(input_ids=input_ids, labels=input_ids)
            loss = outputs.loss

        return torch.exp(loss).item()

    def compute_perplexity_batch(
        self,
        texts: List[str],
        max_length: int = 512
    ) -> List[float]:
        """Per-text perplexities; sequential, same semantics as
        :meth:`compute_perplexity`."""
        return [self.compute_perplexity(text, max_length) for text in texts]

    def evaluate_generation_quality(
        self,
        prompts: List[str],
        references: List[str],
        max_length: int = 100,
        temperature: float = 0.7,
        **kwargs
    ) -> Dict[str, Any]:
        """Generate from ``prompts`` and score against ``references``.

        Returns:
            Dict with 'bleu_score' (mean unigram precision — a simplified
            BLEU, no n-grams or brevity penalty), 'avg_length' (mean
            generated token count by whitespace split), and
            'generated_texts' (the raw generations).
        """
        # NOTE: return type was annotated Dict[str, float] but
        # 'generated_texts' is a list — annotation corrected to Dict[str, Any].
        generator = TextGenerator(self.model, self.tokenizer, self.device)

        generated_texts = generator.generate_batch(
            prompts=prompts,
            max_length=max_length,
            temperature=temperature,
            **kwargs
        )

        # Simplified BLEU: unigram precision of generated tokens against
        # the reference token bag.
        bleu_scores = []
        for generated, reference in zip(generated_texts, references):
            generated_tokens = generated.split()
            reference_tokens = set(reference.split())

            if not generated_tokens:
                bleu_scores.append(0.0)
            else:
                matches = sum(1 for token in generated_tokens if token in reference_tokens)
                bleu_scores.append(matches / len(generated_tokens))

        # FIX: np.mean([]) is NaN (with a warning); report 0.0 for empty input.
        avg_bleu = float(np.mean(bleu_scores)) if bleu_scores else 0.0
        avg_length = (
            float(np.mean([len(text.split()) for text in generated_texts]))
            if generated_texts else 0.0
        )

        return {
            'bleu_score': avg_bleu,
            'avg_length': avg_length,
            'generated_texts': generated_texts
        }

    def evaluate_accuracy(
        self,
        test_data: List[Dict[str, Any]],
        max_length: int = 100,
        temperature: float = 0.0,  # greedy decoding by default
        **kwargs
    ) -> Dict[str, float]:
        """Substring-match accuracy over ``test_data`` items, each a dict
        with 'prompt' and 'expected' keys.

        Returns:
            Dict with 'accuracy', 'correct', and 'total'.
        """
        generator = TextGenerator(self.model, self.tokenizer, self.device)

        # FIX: temperature=0.0 is intended as greedy decoding, but
        # TextGenerator defaults to do_sample=True and sampling with
        # temperature 0 is invalid in transformers. Unless the caller
        # overrides it, sample only when temperature is positive.
        kwargs.setdefault('do_sample', temperature > 0)

        correct = 0
        total = len(test_data)

        for item in test_data:
            generated = generator.generate(
                prompt=item['prompt'],
                max_length=max_length,
                temperature=temperature,
                **kwargs
            )

            # Case-insensitive substring match counts as correct.
            if item['expected'].lower().strip() in generated.lower().strip():
                correct += 1

        accuracy = correct / total if total > 0 else 0.0

        return {
            'accuracy': accuracy,
            'correct': correct,
            'total': total
        }