#!/usr/bin/env python3
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import json
import os
import pickle

class SimpleTokenizer:
    """A minimal character-level tokenizer with a fixed built-in vocabulary.

    The vocabulary covers three special tokens (<PAD>, <UNK>, <EOS>),
    ASCII letters and digits, a set of common Chinese characters, and
    common CJK punctuation. Ids are contiguous in [0, vocab_size).
    """

    def __init__(self):
        self.vocab = {}          # token -> id
        self.idx_to_token = {}   # id -> token
        self.pad_token = '<PAD>'
        self.unk_token = '<UNK>'
        self.eos_token = '<EOS>'

        # Special tokens come first so their ids are stable (0, 1, 2).
        special_tokens = ['<PAD>', '<UNK>', '<EOS>']

        # Frequently used characters.
        chars = list('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
        chars.extend(['你', '好', '是', '谁', '我', '的', '了', '在', '有', '和', '人', '这', '中', '大', '为', '上', '个', '国', '以', '要', '他', '时', '来', '用', '们', '生', '到', '作', '地', '于', '出', '就', '分', '对', '成', '会', '可', '主', '发', '年', '动', '同', '工', '也', '能', '下', '过', '子', '说', '产', '种', '面', '而', '方', '后', '多', '定', '行', '学', '法', '所', '民', '得', '经', '三', '之', '进', '着', '等', '部', '度', '家', '电', '力', '里', '如', '水', '化', '高', '自', '二', '理', '起', '小', '物', '现', '实', '加', '量', '都', '两', '体', '制', '学', '机', '当', '使', '点', '从', '业', '本', '去', '把', '性', '应', '开', '它', '合', '还', '因', '由', '其', '些', '然', '前', '外', '天', '政', '四', '日', '那', '社', '义', '事', '平', '形', '相', '全', '表', '间', '样', '与', '关', '各', '重', '新', '线', '内', '数', '正', '心', '反', '明', '看', '原', '又', '么', '利', '比', '或', '但', '质', '气', '第', '向', '道', '命', '此', '变', '条', '只', '没', '结', '解', '问', '意', '建', '月', '公', '无', '系', '军', '很', '情', '者', '最', '立', '代', '想', '已', '通', '并', '提', '直', '题', '党', '程', '展', '五', '果', '料', '象', '员', '革', '位', '入', '常', '文', '总', '次', '品', '式', '活', '设', '及', '管', '特', '件', '长', '求', '老', '头', '基', '资', '边', '流', '路', '级', '少', '图', '山', '统', '接', '知', '较', '将', '组', '见', '计', '别', '她', '手', '角', '期', '根', '论', '运', '农', '指', '几', '九', '十'])
        # BUG FIX: the curly quotation marks were previously mangled into a
        # duplicated ASCII '"' and a stray "', '" string literal; restore the
        # intended CJK quote characters via explicit unicode escapes.
        chars.extend(['！', '？', '。', '，', '；', '：', '\u201c', '\u201d', '\u2018', '\u2019', '（', '）', '【', '】', '《', '》'])

        # BUG FIX: the raw character list contains duplicates (e.g. '学'),
        # which previously made len(self.vocab) smaller than the largest
        # assigned id, so late tokens (e.g. '》') encoded to ids >= vocab_size
        # and could index past the end of an embedding table sized by
        # vocab_size. De-duplicate preserving first-seen order so that ids
        # are contiguous 0..vocab_size-1.
        vocab_list = list(dict.fromkeys(special_tokens + chars))
        for i, token in enumerate(vocab_list):
            self.vocab[token] = i
            self.idx_to_token[i] = token

        self.vocab_size = len(self.vocab)
        self.pad_token_id = self.vocab[self.pad_token]
        self.unk_token_id = self.vocab[self.unk_token]
        self.eos_token_id = self.vocab[self.eos_token]

    def encode(self, text, max_length=None, truncation=True, padding=False, return_tensors=None):
        """Encode `text` character-by-character into token ids.

        Unknown characters map to the <UNK> id. Optionally truncates to
        `max_length`, right-pads with <PAD> up to `max_length`, and returns
        a (1, seq_len) tensor when return_tensors == 'pt'.
        """
        tokens = [self.vocab.get(char, self.unk_token_id) for char in text]

        if truncation and max_length and len(tokens) > max_length:
            tokens = tokens[:max_length]

        if padding and max_length:
            tokens.extend([self.pad_token_id] * (max_length - len(tokens)))

        if return_tensors == 'pt':
            return torch.tensor([tokens])

        return tokens

    def decode(self, token_ids, skip_special_tokens=True):
        """Decode token ids back to a string.

        Ids not present in the vocabulary are silently skipped; special
        tokens are dropped when skip_special_tokens is True.
        """
        if isinstance(token_ids, torch.Tensor):
            token_ids = token_ids.tolist()

        text = ""
        for token_id in token_ids:
            if token_id in self.idx_to_token:
                token = self.idx_to_token[token_id]
                if skip_special_tokens and token in ['<PAD>', '<UNK>', '<EOS>']:
                    continue
                text += token

        return text

class SimpleModel(nn.Module):
    """A small LSTM language model: embedding -> single-layer LSTM -> linear.

    Produces next-token logits for every position of the input sequence.
    """

    def __init__(self, vocab_size, embed_dim=128, hidden_dim=256):
        super().__init__()
        self.vocab_size = vocab_size
        self.embed_dim = embed_dim

        self.embedding = nn.Embedding(vocab_size, embed_dim)
        self.lstm = nn.LSTM(embed_dim, hidden_dim, batch_first=True)
        self.output = nn.Linear(hidden_dim, vocab_size)

    def forward(self, input_ids):
        """Return logits of shape (batch, seq_len, vocab_size)."""
        embeds = self.embedding(input_ids)
        lstm_out, _ = self.lstm(embeds)
        logits = self.output(lstm_out)
        return logits

    def generate(self, tokenizer, prompt, max_length=30):
        """Greedily generate a reply for `prompt`.

        Appends up to `max_length` argmax tokens, stopping early when the
        tokenizer's EOS id is produced, and returns only the newly
        generated text (stripped).
        """
        self.eval()
        prompt_ids = tokenizer.encode(prompt)
        input_ids = torch.tensor([prompt_ids])
        # Remember how many tokens belong to the prompt so we can cut it
        # off by token count below.
        prompt_len = input_ids.size(1)

        with torch.no_grad():
            for _ in range(max_length):
                # NOTE: re-runs the full sequence each step; fine for a toy
                # model, but an incremental hidden state would be faster.
                logits = self.forward(input_ids)
                next_token = torch.argmax(logits[0, -1, :]).item()

                if next_token == tokenizer.eos_token_id:
                    break

                input_ids = torch.cat([input_ids, torch.tensor([[next_token]])], dim=1)

        # BUG FIX: slice off the prompt by TOKEN count before decoding.
        # The old code decoded everything and sliced by character count
        # (len(prompt)), which misaligned whenever the prompt contained
        # characters that decode() drops (e.g. OOV chars -> <UNK>).
        return tokenizer.decode(input_ids[0, prompt_len:]).strip()

class SimpleDataset(Dataset):
    """Tiny in-memory dataset of chat pairs for next-token-prediction training."""

    def __init__(self, tokenizer):
        # Built-in training conversations (prompt + expected reply).
        self.conversations = [
            {"input": "你好", "output": "你好！我是AI助手"},
            {"input": "你是谁", "output": "我是AI助手"},
            {"input": "再见", "output": "再见！"},
            {"input": "谢谢", "output": "不客气"},
            {"input": "早上好", "output": "早上好！"}
        ]
        self.tokenizer = tokenizer
        self.max_length = 64

    def __len__(self):
        return len(self.conversations)

    def __getitem__(self, idx):
        """Return one training example.

        input_ids: (max_length,) token ids — text, then <EOS>, then <PAD>s.
        labels:    input_ids shifted left by one; the final slot is <PAD>
                   so CrossEntropyLoss(ignore_index=pad) skips it.
        """
        conv = self.conversations[idx]
        text = f"{conv['input']}{conv['output']}"

        # BUG FIX: append <EOS> so the model can learn when to stop —
        # generate() breaks on eos_token_id, which otherwise never appears
        # in the training targets. Truncate to max_length-1 to keep room.
        tokens = self.tokenizer.encode(text, max_length=self.max_length - 1,
                                       truncation=True)
        tokens.append(self.tokenizer.eos_token_id)
        tokens.extend([self.tokenizer.pad_token_id] * (self.max_length - len(tokens)))

        input_ids = torch.tensor(tokens)
        labels = input_ids.clone()
        labels[:-1] = input_ids[1:]
        labels[-1] = self.tokenizer.pad_token_id

        return {
            'input_ids': input_ids,
            'labels': labels
        }

def main():
    """Train the tiny character-level chat model end to end and demo it."""
    print("=== 超简化AI训练系统 ===")

    # Make sure the checkpoint directory exists before anything is saved.
    os.makedirs('models', exist_ok=True)
    os.makedirs('models/simple', exist_ok=True)

    # Build the tokenizer.
    print("初始化tokenizer...")
    tokenizer = SimpleTokenizer()
    print(f"词汇表大小: {tokenizer.vocab_size}")

    # Build the dataset and its loader.
    print("创建数据集...")
    dataset = SimpleDataset(tokenizer)
    loader = DataLoader(dataset, batch_size=2, shuffle=True)

    # Build the model.
    print("创建模型...")
    model = SimpleModel(vocab_size=tokenizer.vocab_size)

    # Optimizer and loss; padding positions are excluded from the loss.
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    criterion = nn.CrossEntropyLoss(ignore_index=tokenizer.pad_token_id)

    # Training loop.
    print("开始训练...")
    model.train()

    for epoch in range(10):
        running_loss = 0.0
        for batch in loader:
            optimizer.zero_grad()
            logits = model(batch['input_ids'])
            # Flatten (batch, seq, vocab) -> (batch*seq, vocab) for CE loss.
            loss = criterion(logits.view(-1, logits.size(-1)),
                             batch['labels'].view(-1))
            loss.backward()
            optimizer.step()
            running_loss += loss.item()

        print(f"Epoch {epoch+1}/10, Loss: {running_loss/len(loader):.4f}")

    # Persist weights together with the (picklable) tokenizer.
    print("保存模型...")
    checkpoint = {
        'model_state_dict': model.state_dict(),
        'tokenizer': tokenizer
    }
    torch.save(checkpoint, 'models/simple/model.pth')

    # Quick qualitative check of the trained model.
    print("\n测试生成:")
    for prompt in ("你好", "你是谁"):
        response = model.generate(tokenizer, prompt)
        print(f"输入: {prompt} -> 输出: {response}")

    print("\n✅ 训练完成！")

if __name__ == "__main__":
    # Script entry point: run training; on any failure, report the error
    # and dump a full traceback (broad catch is intentional here — this is
    # the top-level boundary of the program).
    try:
        main()
    except Exception as e:
        import traceback
        print(f"❌ 错误: {e}")
        traceback.print_exc()