#!/usr/bin/env python3
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import os
import sys

# 添加src目录到路径
sys.path.append('src')
from model import SimpleAIModel, ConversationDataset, save_model
from transformers import GPT2Tokenizer

def main():
    """Train SimpleAIModel on conversation data, save it, and demo generation.

    Side effects: creates the data/ and models/ directory layout, reads
    data/conversations.json, writes the trained model to models/trained,
    and prints progress to stdout. Returns None.
    """
    print("=== MiniAI 简化训练脚本 ===")

    # Prefer GPU when available.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")

    # Make sure the expected directory layout exists.
    os.makedirs('data', exist_ok=True)
    os.makedirs('models', exist_ok=True)
    os.makedirs('models/trained', exist_ok=True)

    # GPT-2 ships without a pad token; alias it to EOS so padding does not
    # add a new token id (keeps the embedding table size unchanged).
    print("初始化tokenizer...")
    tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    # Small batch size to fit modest hardware.
    print("创建数据集...")
    dataset = ConversationDataset('data/conversations.json', tokenizer)
    dataloader = DataLoader(dataset, batch_size=2, shuffle=True)
    print(f"数据集大小: {len(dataset)}")

    # len(tokenizer) counts added special tokens, unlike .vocab_size.
    # Identical here (pad aliases eos, nothing added), but robust if
    # special tokens are ever introduced.
    print("创建模型...")
    model = SimpleAIModel(vocab_size=len(tokenizer))
    model.to(device)

    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
    # Padding positions must not contribute to the loss.
    criterion = nn.CrossEntropyLoss(ignore_index=tokenizer.pad_token_id)

    print("开始训练...")
    model.train()
    num_epochs = 5  # kept small for a quick demo run

    for epoch in range(num_epochs):
        epoch_loss = 0.0
        batch_count = 0

        for batch in dataloader:
            # Move the batch to the training device.
            input_ids = batch['input_ids'].to(device)
            labels = batch['labels'].to(device)
            attention_mask = batch['attention_mask'].to(device)

            optimizer.zero_grad()
            logits = model(input_ids, attention_mask)

            # Flatten (batch, seq, vocab) -> (batch*seq, vocab) for CE loss.
            loss = criterion(logits.view(-1, logits.size(-1)), labels.view(-1))

            loss.backward()
            optimizer.step()

            epoch_loss += loss.item()
            batch_count += 1

            if batch_count % 5 == 0:
                print(f"  Epoch {epoch+1}/{num_epochs}, Batch {batch_count}, Loss: {loss.item():.4f}")

        # Use batch_count (with a floor of 1) instead of len(dataloader):
        # the original divided by len(dataloader), which raises
        # ZeroDivisionError when the dataset is empty.
        avg_loss = epoch_loss / max(batch_count, 1)
        print(f"Epoch {epoch+1}/{num_epochs} 完成, 平均损失: {avg_loss:.4f}")

    print("保存模型...")
    save_model(model, tokenizer, 'models/trained')

    print("\n测试生成:")
    model.eval()
    test_prompts = ["你好", "你是谁"]

    # Disable autograd for inference: gradients are not needed here and
    # tracking them wastes memory.
    with torch.no_grad():
        for prompt in test_prompts:
            response = model.generate(tokenizer, prompt, max_length=20)
            print(f"输入: {prompt}")
            print(f"输出: {response}")
            print("-" * 30)

    print("✅ 训练完成！")

if __name__ == "__main__":
    # Top-level boundary: report any failure with its full traceback
    # instead of letting the interpreter print a bare stack dump.
    import traceback

    try:
        main()
    except Exception as e:
        print(f"❌ 错误: {e}")
        traceback.print_exc()