use crate::language_model::{DialogueDataset, LanguageModel};
use crate::nlp::Tokenizer;
use crate::transformer::TransformerConfig;

/// Generative dialogue manager: wraps a Transformer language model and a
/// rolling conversation history that is fed back as generation context.
pub struct GenerativeDialogueManager {
    // Underlying language model used for training and text generation.
    language_model: LanguageModel,
    // Alternating "用户:…" / "AI:…" entries, oldest first, newest last.
    conversation_history: Vec<String>,
    // Maximum number of dialogue turns kept (each turn stores 2 entries).
    max_history: usize,
}

impl GenerativeDialogueManager {
    /// Creates a new generative dialogue manager seeded with a built-in
    /// Chinese vocabulary and a language model built from `config`.
    pub fn new(config: TransformerConfig) -> Self {
        let mut tokenizer = Tokenizer::new();

        // Seed vocabulary grouped by category. A few entries repeat across
        // groups (e.g. "好", "训练", "帮助"); assumes `add_words` deduplicates —
        // TODO confirm against the Tokenizer implementation.
        let vocab = vec![
            // Basic words
            "你", "我", "他", "她", "它", "们", "的", "了", "吗", "呢", "啊", "是", "在", "有", "和", "与",
            "好", "很", "非常", "真", "太", "最", "更", "也", "都", "还", "就", "才", "只", "不", "没",

            // Greetings and politeness
            "你好", "您好", "早上好", "晚上好", "再见", "谢谢", "对不起", "请", "欢迎",

            // Question words
            "什么", "为什么", "怎么", "如何", "哪里", "谁", "哪", "几", "多少",

            // Verbs
            "做", "说", "看", "听", "想", "要", "能", "可以", "会", "知道", "理解", "学习", "帮助",
            "训练", "运行", "使用", "创建", "实现", "优化", "改进",

            // Nouns — technical
            "AI", "人工智能", "机器学习", "深度学习", "神经网络", "Transformer", "模型",
            "数据", "训练", "测试", "预测", "推理", "特征", "算法", "系统", "程序", "代码",
            "Linear", "Attention", "注意力", "机制", "复杂度", "性能", "优化",

            // Nouns — everyday
            "问题", "答案", "方法", "例子", "文档", "教程", "指南", "帮助",
            "天气", "时间", "地方", "人", "事情", "想法", "感觉",

            // Adjectives
            "好", "坏", "大", "小", "多", "少", "快", "慢", "强", "弱", "简单", "复杂",
            "聪明", "笨", "有趣", "无聊", "重要", "厉害", "棒", "赞",

            // Conjunctions and particles
            "但是", "不过", "然而", "所以", "因为", "如果", "虽然", "或者", "而且", "并且",
            "吧", "呀", "哦", "嗯", "啊", "哈",

            // Punctuation
            "。", "，", "！", "？", "、",
        ];

        tokenizer.add_words(&vocab);

        let language_model = LanguageModel::new(config, tokenizer);

        GenerativeDialogueManager {
            language_model,
            conversation_history: Vec::new(),
            max_history: 5,
        }
    }

    /// Trains the language model over `dataset` for `epochs` full passes.
    ///
    /// Prints the average loss for the first epoch and every 10th epoch.
    /// An empty dataset is reported and skipped instead of producing a
    /// NaN average loss (division by zero).
    pub fn train(&mut self, dataset: &DialogueDataset, epochs: usize, learning_rate: f64) {
        println!("\n🎓 开始训练生成式对话模型...");
        println!("数据集大小: {} 对话对", dataset.len());
        println!("训练轮数: {}", epochs);
        println!("学习率: {}\n", learning_rate);

        // Guard: avg_loss below divides by dataset.len(); an empty dataset
        // would yield 0.0 / 0 == NaN every epoch.
        if dataset.pairs.is_empty() {
            println!("\n✅ 训练完成！\n");
            return;
        }

        for epoch in 0..epochs {
            let mut total_loss = 0.0;

            for (input, target) in &dataset.pairs {
                let loss = self.language_model.train_step(input, target, learning_rate);
                total_loss += loss;
            }

            let avg_loss = total_loss / dataset.len() as f64;

            // Report on the first epoch and then every 10th epoch.
            if (epoch + 1) % 10 == 0 || epoch == 0 {
                println!("Epoch {}/{}: Loss = {:.6}", epoch + 1, epochs, avg_loss);
            }
        }

        println!("\n✅ 训练完成！\n");
    }

    /// Handles one user utterance: builds a context from recent history plus
    /// the new input, generates a reply, and records both in the history.
    pub fn chat(&mut self, user_input: &str) -> String {
        println!("\n=== 生成式对话处理 ===");
        println!("用户输入: {}", user_input);

        // Build the generation context from the last 3 history messages in
        // CHRONOLOGICAL order. The previous code iterated `.rev().take(3)`
        // directly, which concatenated them newest-first and scrambled the
        // conversation order; the trailing `.rev()` restores it.
        let mut context = String::new();
        let start = self.conversation_history.len().saturating_sub(3);
        for msg in &self.conversation_history[start..] {
            context.push_str(msg);
            context.push(' ');
        }

        // Append the current input after the historical context.
        context.push_str(user_input);

        println!("上下文: {}", context);

        // Sampling parameters: temperature 0.8 with top-k/top-p filtering.
        let response = self.language_model.generate(
            &context,
            30,        // max length
            0.8,       // temperature (0.8 reads fairly natural)
            Some(40),  // top-k
            Some(0.9), // top-p
        );

        println!("生成策略: 温度=0.8, top-k=40, top-p=0.9");
        println!("生成回复: {}\n", response);

        // Record the turn (two entries: user then AI).
        self.conversation_history.push(format!("用户:{}", user_input));
        self.conversation_history.push(format!("AI:{}", response));

        // Evict the oldest turn once the cap (max_history turns) is exceeded.
        if self.conversation_history.len() > self.max_history * 2 {
            self.conversation_history.drain(0..2);
        }

        response
    }

    /// Sets generation parameters. Currently a no-op reserved for future
    /// extension; the argument is accepted but intentionally unused.
    pub fn set_generation_params(&mut self, _max_history: usize) {
        // Reserved for future extension.
    }

    /// Clears the entire conversation history.
    pub fn clear_history(&mut self) {
        self.conversation_history.clear();
    }

    /// Returns the conversation history as a read-only slice.
    pub fn get_history(&self) -> &[String] {
        &self.conversation_history
    }

    /// Returns the tokenizer owned by the underlying language model.
    pub fn tokenizer(&self) -> &Tokenizer {
        self.language_model.tokenizer()
    }
}

/// Training helper — utilities for preparing and managing training data.
pub struct TrainingHelper;

impl TrainingHelper {
    /// Loads dialogue pairs from plain text (simplified format): consecutive
    /// line pairs are read as (input, response). A trailing unpaired line is
    /// ignored, and pairs with an empty side are skipped.
    pub fn load_dialogue_from_text(text: &str) -> DialogueDataset {
        let mut dataset = DialogueDataset::new();

        let lines: Vec<&str> = text.lines().collect();

        // chunks_exact(2) visits (0,1), (2,3), … and drops the odd leftover —
        // same pairing as the original index loop.
        for pair in lines.chunks_exact(2) {
            let input = pair[0].trim();
            let response = pair[1].trim();

            if !input.is_empty() && !response.is_empty() {
                dataset.add_pair(input.to_string(), response.to_string());
            }
        }

        dataset
    }

    /// Creates an enhanced training dataset (large-scale data).
    pub fn create_enhanced_dataset() -> DialogueDataset {
        // Delegate to the data_loader's large Chinese dataset builder.
        crate::data_loader::DataLoader::create_large_chinese_dataset()
    }

    /// Data augmentation: for statements that are not already questions,
    /// adds a question variant ending in "吗" with the same response.
    pub fn augment_dataset(dataset: &DialogueDataset) -> DialogueDataset {
        let mut augmented = dataset.clone();

        for (input, response) in &dataset.pairs {
            // Strip sentence-final punctuation first: the previous version
            // appended "吗" directly, turning "你好。" into the malformed
            // "你好。吗". Also skip inputs already ending in '?' (ASCII),
            // which the old '？'-only check missed.
            let base = input.trim_end_matches(|c| matches!(c, '。' | '！' | '!' | '.'));
            if !base.is_empty()
                && !base.ends_with('？')
                && !base.ends_with('?')
                && !base.ends_with('吗')
            {
                augmented.add_pair(format!("{}吗", base), response.clone());
            }
        }

        augmented
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    /// A freshly constructed manager must start with an empty history.
    #[test]
    fn test_generative_dialogue_creation() {
        let config = TransformerConfig {
            d_model: 64,
            n_heads: 4,
            d_ff: 256,
            n_layers: 1,
            max_seq_len: 128,
            dropout: 0.1,
            use_linear_attention: true,
        };

        let dialogue_manager = GenerativeDialogueManager::new(config);
        assert!(dialogue_manager.get_history().is_empty());
    }

    /// The enhanced dataset builder should produce a non-trivial corpus.
    #[test]
    fn test_training_helper() {
        let enhanced = TrainingHelper::create_enhanced_dataset();
        assert!(enhanced.len() > 20);
    }
}

