import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from transformers import GPT2Tokenizer
import os
import json
from tqdm import tqdm
import argparse
from model import SimpleAIModel, ConversationDataset, save_model

def train_model(model, dataloader, optimizer, criterion, device, num_epochs=10):
    """Run the standard supervised training loop.

    Args:
        model: network invoked as ``model(input_ids, attention_mask) -> logits``.
        dataloader: iterable of dicts with ``'input_ids'``, ``'labels'`` and
            ``'attention_mask'`` tensors.
        optimizer: optimizer stepping over ``model.parameters()``.
        criterion: loss applied to (flattened logits, flattened labels).
        device: torch device the batch tensors are moved onto.
        num_epochs: number of full passes over ``dataloader``.

    Returns:
        The mean of the per-epoch average losses over all epochs.
    """
    model.train()
    loss_sum_over_epochs = 0.0

    for epoch in range(num_epochs):
        running_loss = 0.0
        bar = tqdm(dataloader, desc=f'Epoch {epoch+1}/{num_epochs}')

        for batch in bar:
            # Move this mini-batch onto the target device.
            input_ids = batch['input_ids'].to(device)
            labels = batch['labels'].to(device)
            attention_mask = batch['attention_mask'].to(device)

            optimizer.zero_grad()
            logits = model(input_ids, attention_mask)

            # Token-level loss on (batch*seq, vocab) vs (batch*seq,).
            # Padding positions are excluded by the caller's criterion
            # (configured with ignore_index), not here.
            loss = criterion(logits.view(-1, logits.size(-1)), labels.view(-1))

            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            bar.set_postfix({'loss': loss.item()})

        mean_epoch_loss = running_loss / len(dataloader)
        loss_sum_over_epochs += mean_epoch_loss
        print(f'Epoch {epoch+1}/{num_epochs}, Average Loss: {mean_epoch_loss:.4f}')

    return loss_sum_over_epochs / num_epochs

def fine_tune_model(model_path, data_path, output_path, num_epochs=5, learning_rate=1e-4, batch_size=4):
    """Fine-tune a previously trained model on new conversation data.

    Args:
        model_path: directory containing the pretrained checkpoint.
        data_path: path to the conversation data used for fine-tuning.
        output_path: directory where the fine-tuned model is saved.
        num_epochs: number of passes over the fine-tuning data.
        learning_rate: AdamW learning rate.
        batch_size: mini-batch size. Previously hard-coded to 4, which was
            inconsistent with the `train` mode honoring ``--batch_size``;
            the default preserves the old behavior.
    """
    print(f"开始微调模型: {model_path}")

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")

    # Local import keeps the original file's style (and avoids issues if
    # `model` in turn imports from this module at load time).
    from model import load_model
    model, tokenizer = load_model(model_path, device)

    # Build the fine-tuning dataset and loader.
    dataset = ConversationDataset(data_path, tokenizer)
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

    # AdamW + cross-entropy; padding tokens are excluded via ignore_index.
    optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
    criterion = nn.CrossEntropyLoss(ignore_index=tokenizer.pad_token_id)

    # Fine-tune with the shared training loop.
    avg_loss = train_model(model, dataloader, optimizer, criterion, device, num_epochs)

    # Persist the fine-tuned weights and tokenizer.
    save_model(model, tokenizer, output_path)
    print(f"微调完成！平均损失: {avg_loss:.4f}")
    print(f"模型已保存到: {output_path}")

def main():
    """CLI entry point: parse arguments, train or fine-tune, then smoke-test generation."""
    parser = argparse.ArgumentParser(description='训练或微调AI模型')
    parser.add_argument('--mode', type=str, choices=['train', 'finetune'], default='train',
                       help='模式：train=从头训练，finetune=微调已有模型')
    parser.add_argument('--data_path', type=str, default='data/conversations.json',
                       help='训练数据路径')
    parser.add_argument('--model_path', type=str, default='models/pretrained',
                       help='预训练模型路径（仅微调时使用）')
    parser.add_argument('--output_path', type=str, default='models/trained',
                       help='输出模型路径')
    parser.add_argument('--epochs', type=int, default=10,
                       help='训练轮数')
    parser.add_argument('--lr', type=float, default=1e-4,
                       help='学习率')
    parser.add_argument('--batch_size', type=int, default=4,
                       help='批大小')

    args = parser.parse_args()

    _ensure_output_dir(args.output_path)

    if args.mode == 'train':
        _train_from_scratch(args)
    elif args.mode == 'finetune':
        fine_tune_model(args.model_path, args.data_path, args.output_path, args.epochs, args.lr)

    _smoke_test_generation(args.output_path)


def _ensure_output_dir(output_path):
    """Make sure the directory that will hold the saved model exists."""
    output_dir = os.path.dirname(output_path)
    if output_dir:  # only create when the path has a directory component
        os.makedirs(output_dir, exist_ok=True)
    else:
        # Bare filename given: fall back to the default models directory.
        os.makedirs('models', exist_ok=True)


def _train_from_scratch(args):
    """Build a fresh model/tokenizer and train it on ``args.data_path``."""
    print("开始从头训练模型...")

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")

    # GPT-2's tokenizer ships without a pad token; reuse EOS for padding
    # so ignore_index-based loss masking works.
    tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    # Fresh, untrained model sized to the tokenizer's vocabulary.
    model = SimpleAIModel(vocab_size=tokenizer.vocab_size)
    model.to(device)

    dataset = ConversationDataset(args.data_path, tokenizer)
    dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True)

    print(f"数据集大小: {len(dataset)}")
    print(f"批数: {len(dataloader)}")

    # AdamW + cross-entropy; padding positions contribute no gradient.
    optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr)
    criterion = nn.CrossEntropyLoss(ignore_index=tokenizer.pad_token_id)

    avg_loss = train_model(model, dataloader, optimizer, criterion, device, args.epochs)

    save_model(model, tokenizer, args.output_path)
    print(f"训练完成！平均损失: {avg_loss:.4f}")


def _smoke_test_generation(output_path):
    """Reload the saved model and print generations for a few fixed prompts."""
    print("\n开始测试模型生成...")
    from model import load_model
    # NOTE(review): called without an explicit device, unlike the fine-tune
    # path at load_model(model_path, device) — confirm load_model's default
    # device handling is what we want here.
    model, tokenizer = load_model(output_path)

    test_prompts = ["你好", "你是谁", "今天天气怎么样"]
    for prompt in test_prompts:
        response = model.generate(tokenizer, prompt, max_length=30)
        print(f"输入: {prompt}")
        print(f"输出: {response}")
        print("-" * 50)

if __name__ == "__main__":
    main()