#!/usr/bin/env python3
"""
Fully runnable AI system - ultra-simplified version.

A self-contained demo: word-level tokenizer, small LSTM language model,
in-memory conversation dataset, and a training entry point. Written to
avoid path, dependency and model-loading problems.
"""
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import json
import os
import pickle
import re

class SimpleTokenizer:
    """Word-level tokenizer over a small fixed Chinese/English vocabulary.

    Encoding segments text by greedy longest-match (up to 4 characters);
    anything outside the vocabulary maps to ``<UNK>``. Ids are stable for
    a given vocabulary order.
    """

    def __init__(self):
        # word <-> id maps, populated from the base vocabulary below.
        self.word_to_id = {}
        self.id_to_word = {}
        self.vocab_size = 0

        # Special tokens.
        self.pad_token = '<PAD>'
        self.unk_token = '<UNK>'
        self.eos_token = '<EOS>'

        # Base vocabulary. NOTE: the four curly quotation marks are written
        # as unicode escapes (U+201C/201D/2018/2019); as literal characters
        # they had been mangled into duplicate ASCII quotes, corrupting the
        # vocabulary (word_to_id ended up smaller than vocab_size).
        base_vocab = [
            '<PAD>', '<UNK>', '<EOS>',
            '你', '好', '我', '是', '的', '了', '在', '有', '和', '人', '这', '中',
            '一', '个', '不', '也', '就', '会', '能', '说', '可', '以', '要', '他',
            '她', '它', '们', '吗', '呢', '啊', '哦', '嗯', '对', '很', '都', '没',
            'AI', '助手', '帮助', '问题', '回答', '谢谢', '不客气', '再见', '早上好',
            '你好', '是谁', '什么', '怎么', '为什么', '在哪', '多少', '几点',
            '！', '？', '。', '，', '：', '；',
            '\u201c', '\u201d', '\u2018', '\u2019', '（', '）'
        ]

        # Build the maps, skipping accidental duplicates so word_to_id and
        # id_to_word always agree in size and vocab_size is trustworthy.
        for word in base_vocab:
            if word not in self.word_to_id:
                idx = len(self.word_to_id)
                self.word_to_id[word] = idx
                self.id_to_word[idx] = word

        self.vocab_size = len(self.word_to_id)
        self.pad_token_id = self.word_to_id[self.pad_token]
        self.unk_token_id = self.word_to_id[self.unk_token]
        self.eos_token_id = self.word_to_id[self.eos_token]

    def tokenize(self, text):
        """Split ``text`` into vocabulary tokens by greedy longest match.

        Unknown non-whitespace characters become ``<UNK>``; whitespace is
        silently dropped.
        """
        tokens = []
        i = 0
        while i < len(text):
            # Try the longest possible match first (4 chars down to 1).
            found = False
            for length in range(min(4, len(text) - i), 0, -1):
                word = text[i:i + length]
                if word in self.word_to_id:
                    tokens.append(word)
                    i += length
                    found = True
                    break

            if not found:
                char = text[i]
                if char.strip():  # skip whitespace characters
                    tokens.append(self.unk_token)
                i += 1

        return tokens

    def encode(self, text, max_length=None, padding=False, truncation=True):
        """Encode ``text`` to a list of token ids.

        Truncates to ``max_length`` when ``truncation`` is set, then pads
        with ``<PAD>`` ids up to ``max_length`` when ``padding`` is set.
        """
        tokens = self.tokenize(text)
        token_ids = [self.word_to_id.get(token, self.unk_token_id) for token in tokens]

        if truncation and max_length and len(token_ids) > max_length:
            token_ids = token_ids[:max_length]

        if padding and max_length:
            while len(token_ids) < max_length:
                token_ids.append(self.pad_token_id)

        return token_ids

    def decode(self, token_ids, skip_special_tokens=True):
        """Decode ids back to a string; unknown ids are ignored."""
        if isinstance(token_ids, torch.Tensor):
            token_ids = token_ids.tolist()

        words = []
        for token_id in token_ids:
            if token_id in self.id_to_word:
                word = self.id_to_word[token_id]
                if skip_special_tokens and word in [self.pad_token, self.unk_token, self.eos_token]:
                    continue
                words.append(word)

        return ''.join(words)
class SimpleModel(nn.Module):
    """Small 2-layer LSTM language model over the tokenizer's vocabulary."""

    def __init__(self, vocab_size, embed_dim=64, hidden_dim=128):
        super().__init__()
        self.vocab_size = vocab_size
        self.embed_dim = embed_dim
        self.hidden_dim = hidden_dim

        self.embedding = nn.Embedding(vocab_size, embed_dim)
        self.lstm = nn.LSTM(embed_dim, hidden_dim, batch_first=True, num_layers=2)
        self.output = nn.Linear(hidden_dim, vocab_size)
        self.dropout = nn.Dropout(0.1)

    def forward(self, input_ids):
        """Return next-token logits of shape (batch, seq_len, vocab_size)."""
        embeds = self.embedding(input_ids)
        embeds = self.dropout(embeds)
        lstm_out, _ = self.lstm(embeds)
        logits = self.output(lstm_out)
        return logits

    def generate(self, tokenizer, prompt, max_length=20):
        """Sample a reply for ``prompt`` with temperature sampling.

        Fixes over the previous version:
        - Decodes only the newly generated ids instead of slicing the full
          decoded string by ``len(prompt)`` — that slice was wrong whenever
          the prompt contained characters the tokenizer drops (whitespace)
          or maps to ``<UNK>`` (which decode skips).
        - Guards against prompts that tokenize to nothing, which would
          otherwise crash the embedding with a float-typed empty tensor.
        """
        self.eval()

        input_ids = tokenizer.encode(prompt, max_length=50)
        if not input_ids:
            # Nothing to condition on; fall back to a canned reply.
            import random
            return random.choice(["我理解了", "好的", "明白了", "可以的", "没问题"])

        generated_ids = list(input_ids)

        with torch.no_grad():
            for _ in range(max_length):
                if len(generated_ids) > 50:  # cap total sequence length
                    break

                # Forward pass over the whole sequence so far; call the
                # module (not .forward) so hooks fire as expected.
                logits = self(torch.tensor([generated_ids]))

                # Logits for the last time step only.
                next_token_logits = logits[0, -1, :]

                # Temperature sampling (T=0.8) adds variety to replies.
                probabilities = torch.softmax(next_token_logits / 0.8, dim=-1)
                next_token = torch.multinomial(probabilities, 1).item()

                # Stop on end-of-sequence.
                if next_token == tokenizer.eos_token_id:
                    break

                generated_ids.append(next_token)

        # Decode only the tokens produced beyond the prompt.
        response = tokenizer.decode(generated_ids[len(input_ids):]).strip()

        # If nothing usable was generated, return a default reply.
        if not response:
            default_responses = ["我理解了", "好的", "明白了", "可以的", "没问题"]
            import random
            response = random.choice(default_responses)

        return response

class SimpleDataset(Dataset):
    """Tiny in-memory dataset of (prompt, reply) pairs for LM training.

    Each item is the concatenated prompt+reply, encoded and padded to a
    fixed length, with next-token labels shifted left by one position.
    """

    def __init__(self, tokenizer):
        # Built-in conversation data.
        self.conversations = [
            {"input": "你好", "output": "你好！我是AI助手"},
            {"input": "你是谁", "output": "我是AI助手，可以回答问题"},
            {"input": "再见", "output": "再见！有问题随时找我"},
            {"input": "谢谢", "output": "不客气！很高兴帮助你"},
            {"input": "早上好", "output": "早上好！今天也要加油哦"},
            {"input": "晚上好", "output": "晚上好！休息得好吗"},
            {"input": "你好吗", "output": "我很好！谢谢关心"},
            {"input": "帮助", "output": "我可以回答问题和聊天"},
            {"input": "什么", "output": "有什么问题吗"},
            {"input": "为什么", "output": "这是一个好问题"}
        ]
        self.tokenizer = tokenizer
        self.max_length = 32

    def __len__(self):
        return len(self.conversations)

    def __getitem__(self, idx):
        pair = self.conversations[idx]

        # Training text is simply prompt followed by reply.
        text = pair['input'] + pair['output']

        # Fixed-length encoding (truncated and padded to max_length).
        encoded = self.tokenizer.encode(
            text, max_length=self.max_length, padding=True, truncation=True)
        input_ids = torch.tensor(encoded)

        # Next-token labels: shift left one step, pad the final position.
        tail_pad = torch.tensor([self.tokenizer.pad_token_id])
        labels = torch.cat((input_ids[1:], tail_pad))

        return {'input_ids': input_ids, 'labels': labels}

def train_model(epochs=15, save_dir='models/working'):
    """Train the LSTM chat model on the built-in dataset and save artifacts.

    Args:
        epochs: number of passes over the dataset (default 15, the
            previously hard-coded value).
        save_dir: directory for the saved weights, tokenizer pickle and
            JSON config (default 'models/working', as before).

    Returns:
        Tuple of ``(model, tokenizer)`` after training and saving.
    """
    print("=== 开始训练AI模型 ===")

    # Make sure the output directory exists.
    os.makedirs(save_dir, exist_ok=True)

    # Build tokenizer, dataset and loader.
    tokenizer = SimpleTokenizer()
    dataset = SimpleDataset(tokenizer)
    dataloader = DataLoader(dataset, batch_size=2, shuffle=True)

    model = SimpleModel(vocab_size=tokenizer.vocab_size)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    # Padding positions carry no signal; exclude them from the loss.
    criterion = nn.CrossEntropyLoss(ignore_index=tokenizer.pad_token_id)

    print(f"词汇表大小: {tokenizer.vocab_size}")
    print(f"训练样本数: {len(dataset)}")

    # Training loop.
    model.train()
    for epoch in range(epochs):
        epoch_loss = 0.0

        for batch in dataloader:
            input_ids = batch['input_ids']
            labels = batch['labels']

            optimizer.zero_grad()
            # Call the module (not .forward) so hooks fire as expected.
            logits = model(input_ids)

            loss = criterion(logits.view(-1, logits.size(-1)), labels.view(-1))
            loss.backward()
            optimizer.step()

            epoch_loss += loss.item()

        avg_loss = epoch_loss / len(dataloader)
        print(f"Epoch {epoch+1}/{epochs}, Loss: {avg_loss:.4f}")

    # Save artifacts separately: weights, tokenizer, and config.
    print("保存模型...")

    torch.save(model.state_dict(), os.path.join(save_dir, 'model_weights.pth'))

    with open(os.path.join(save_dir, 'tokenizer.pkl'), 'wb') as f:
        pickle.dump(tokenizer, f)

    config = {
        'vocab_size': tokenizer.vocab_size,
        'embed_dim': model.embed_dim,
        'hidden_dim': model.hidden_dim
    }
    with open(os.path.join(save_dir, 'config.json'), 'w', encoding='utf-8') as f:
        json.dump(config, f, ensure_ascii=False, indent=2)

    # Smoke-test generation on a few prompts.
    print("\n=== 测试生成 ===")
    model.eval()
    test_prompts = ["你好", "你是谁", "谢谢"]

    for prompt in test_prompts:
        response = model.generate(tokenizer, prompt)
        print(f"输入: {prompt}")
        print(f"输出: {response}")
        print("-" * 30)

    print(f"训练完成！模型已保存到 {save_dir}/")
    return model, tokenizer

if __name__ == "__main__":
    # Script entry point: run training, and surface any failure with a
    # readable message plus the full traceback for debugging.
    try:
        train_model()
    except Exception as e:
        print(f"错误: {e}")
        import traceback
        traceback.print_exc()