#!/usr/bin/env python3
"""
预训练脚本 - 使用真实数据集进行预训练
"""

import os
import sys
import json
import torch
import argparse
from pathlib import Path

# Make the script's own directory importable so the sibling modules below
# (simple_model, hf_tokenizer, ...) resolve even when run from elsewhere.
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

from simple_model import SimpleLLMForCausalLM
from hf_tokenizer import MiniMindTokenizer
from simple_dataset import SimplePretrainDataset
from model_config import (
    LLMConfig, 
    get_minimind2_small_config,
    get_minimind2_config,
    get_minimind2_moe_config,
    get_gpt2_small_config
)

def _resolve_device(spec):
    """Return the torch.device for *spec*; "auto" prefers CUDA when available."""
    if spec == "auto":
        return torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return torch.device(spec)


def _save_snapshot(model, tokenizer, config, args, save_path, extra=None):
    """Save model weights, tokenizer files, and a JSON training config.

    Args:
        model: model exposing ``save_pretrained(path)``.
        tokenizer: tokenizer exposing ``save_pretrained(path)``.
        config: model config object (``__dict__`` is serialized as-is).
        args: parsed argparse namespace with the training arguments.
        save_path: directory to write into (created if missing).
        extra: optional dict merged into the JSON (e.g. final loss).
    """
    os.makedirs(save_path, exist_ok=True)
    model.save_pretrained(save_path)
    tokenizer.save_pretrained(save_path)
    config_dict = {
        "model_type": "llm",
        "config": config.__dict__,
        "training_args": vars(args),
    }
    if extra:
        config_dict.update(extra)
    # ensure_ascii=False + utf-8 keeps the non-ASCII (Chinese) help strings in
    # training_args human-readable instead of \uXXXX-escaped.
    config_file = os.path.join(save_path, "training_config.json")
    with open(config_file, "w", encoding="utf-8") as f:
        json.dump(config_dict, f, indent=2, ensure_ascii=False)


def main():
    """Pre-train a causal LM on a JSONL dataset, with periodic checkpoints.

    Steps: parse CLI args, build config/tokenizer/model/dataset, run the
    training loop, and save intermediate + final checkpoints under
    ``args.output_dir``.
    """
    parser = argparse.ArgumentParser(description="运行预训练")
    parser.add_argument("--data_path", type=str, 
                       default="/Users/sd/Desktop/mycode/myalgo/milvus/minimind/my_llm_implementation/datasets/pretrain_hq.jsonl",
                       help="数据集路径")
    parser.add_argument("--config", type=str, default="minimind2-small", 
                       choices=["minimind2-small", "minimind2", "minimind2-moe", "gpt2-small"],
                       help="模型配置")
    parser.add_argument("--max_length", type=int, default=512, help="最大序列长度")
    parser.add_argument("--batch_size", type=int, default=2, help="批次大小")
    parser.add_argument("--learning_rate", type=float, default=1e-4, help="学习率")
    parser.add_argument("--num_epochs", type=int, default=1, help="训练轮数")
    parser.add_argument("--save_steps", type=int, default=100, help="保存步数")
    parser.add_argument("--log_steps", type=int, default=10, help="日志步数")
    parser.add_argument("--output_dir", type=str, default="./output", help="输出目录")
    parser.add_argument("--device", type=str, default="auto", help="设备")
    
    args = parser.parse_args()

    device = _resolve_device(args.device)
    print(f"使用设备: {device}")

    os.makedirs(args.output_dir, exist_ok=True)

    # 1. Load the model configuration selected on the command line.
    print("加载模型配置...")
    config_map = {
        "minimind2-small": get_minimind2_small_config,
        "minimind2": get_minimind2_config,
        "minimind2-moe": get_minimind2_moe_config,
        "gpt2-small": get_gpt2_small_config,
    }
    config = config_map[args.config]()
    print(f"模型配置: {args.config}")
    print(f"参数量: {config.get_model_size():.1f}M")

    # 2. Tokenizer.
    print("创建分词器...")
    tokenizer = MiniMindTokenizer()
    print(f"词汇表大小: {tokenizer.vocab_size}")

    # 3. Model, moved to the target device.
    print("创建模型...")
    model = SimpleLLMForCausalLM(config).to(device)
    print(f"模型参数量: {sum(p.numel() for p in model.parameters())/1e6:.1f}M")

    # 4. Dataset.
    print("创建数据集...")
    dataset = SimplePretrainDataset(
        data_path=args.data_path,
        tokenizer=tokenizer,
        max_length=args.max_length
    )
    print(f"数据集大小: {len(dataset)}")

    # 5. DataLoader. The collate_fn stacks per-sample tensors into a batch;
    # note a lambda is only safe here because num_workers defaults to 0.
    from torch.utils.data import DataLoader
    dataloader = DataLoader(
        dataset,
        batch_size=args.batch_size,
        shuffle=True,
        collate_fn=lambda x: {
            'input_ids': torch.stack([item['input_ids'] for item in x]),
            'labels': torch.stack([item['labels'] for item in x])
        }
    )

    # 6. Optimizer + cosine LR schedule over the epochs.
    optimizer = torch.optim.AdamW(model.parameters(), lr=args.learning_rate)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.num_epochs)

    # 7. Training loop.
    print("开始预训练...")
    model.train()
    total_loss = 0
    step = 0

    for epoch in range(args.num_epochs):
        print(f"\nEpoch {epoch + 1}/{args.num_epochs}")

        for batch_idx, batch in enumerate(dataloader):
            input_ids = batch['input_ids'].to(device)
            labels = batch['labels'].to(device)

            # Forward / backward / update.
            optimizer.zero_grad()
            outputs = model(input_ids=input_ids, labels=labels)
            loss = outputs['loss']
            loss.backward()
            optimizer.step()

            total_loss += loss.item()
            step += 1

            # Periodic logging (step > 0 here, so the division is safe).
            if step % args.log_steps == 0:
                avg_loss = total_loss / step
                print(f"Step {step}, Loss: {loss.item():.4f}, Avg Loss: {avg_loss:.4f}")

            # Periodic checkpoint.
            if step % args.save_steps == 0:
                save_path = os.path.join(args.output_dir, f"checkpoint-{step}")
                _save_snapshot(model, tokenizer, config, args, save_path)
                print(f"保存检查点到: {save_path}")

        # Step the schedule once per epoch (matches T_max=num_epochs above).
        scheduler.step()

    # 8. Final save. Guard against an empty dataset: if no batches ran,
    # step == 0 and the naive division would raise ZeroDivisionError.
    avg_loss = total_loss / step if step else float("nan")
    final_save_path = os.path.join(args.output_dir, "final_model")
    _save_snapshot(model, tokenizer, config, args, final_save_path,
                   extra={"final_loss": avg_loss})

    print(f"\n预训练完成!")
    print(f"最终平均损失: {avg_loss:.4f}")
    print(f"模型保存到: {final_save_path}")

# Script entry point: only run training when executed directly, not on import.
if __name__ == "__main__":
    main()
