#!/usr/bin/env python3
"""
简化的DPO训练脚本
"""

import os
import sys
import json
import torch
import argparse
from pathlib import Path

# 添加当前目录到Python路径
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

from simple_model import SimpleLLMForCausalLM
from hf_tokenizer import MiniMindTokenizer
from simple_dpo_dataset import SimpleDPODataset
from model_config import get_minimind2_small_config

def dpo_loss(logits_chosen, logits_rejected, chosen_labels, rejected_labels, beta=0.1):
    """Compute a simplified (reference-free) DPO loss for a chosen/rejected pair.

    Args:
        logits_chosen: (batch, seq_len, vocab) logits for the chosen sequence.
        logits_rejected: (batch, seq_len, vocab) logits for the rejected sequence.
        chosen_labels: (batch, seq_len) token ids of the chosen sequence.
        rejected_labels: (batch, seq_len) token ids of the rejected sequence.
        beta: DPO temperature; larger values sharpen the preference margin.

    Returns:
        Scalar tensor: mean loss over the batch.

    NOTE(review): this variant omits the reference-model term of standard DPO,
    and it scores labels[i] with logits[i] (no causal one-position shift) —
    confirm the dataset already aligns labels with logits this way.
    """
    # Per-position log-probabilities over the vocabulary.
    chosen_log_probs = torch.log_softmax(logits_chosen, dim=-1)
    rejected_log_probs = torch.log_softmax(logits_rejected, dim=-1)

    # Pick out the log-prob of each target token.
    chosen_log_probs = chosen_log_probs.gather(-1, chosen_labels.unsqueeze(-1)).squeeze(-1)
    rejected_log_probs = rejected_log_probs.gather(-1, rejected_labels.unsqueeze(-1)).squeeze(-1)

    # Mask out padding (assumes pad token id == 0 — TODO confirm with tokenizer).
    chosen_mask = (chosen_labels != 0).float()
    rejected_mask = (rejected_labels != 0).float()

    # Length-normalized sequence log-probs; clamp avoids 0/0 on all-pad rows.
    chosen_log_probs = (chosen_log_probs * chosen_mask).sum(dim=-1) / chosen_mask.sum(dim=-1).clamp(min=1)
    rejected_log_probs = (rejected_log_probs * rejected_mask).sum(dim=-1) / rejected_mask.sum(dim=-1).clamp(min=1)

    # DPO loss: -log(sigmoid(beta * (log p_chosen - log p_rejected))).
    # logsigmoid is numerically stable for large negative margins, unlike
    # log(sigmoid(x)), which underflows to log(0) = -inf.
    logits_diff = chosen_log_probs - rejected_log_probs
    loss = -torch.nn.functional.logsigmoid(beta * logits_diff).mean()

    return loss

def main():
    """Run a simplified DPO fine-tuning loop and save the final model.

    Pipeline: parse CLI args -> build config/tokenizer/model -> optionally
    load a pretrained (SFT) checkpoint -> train for --num_steps optimizer
    steps with the DPO loss -> save weights, config, and tokenizer under
    --output_dir/final_model.
    """
    parser = argparse.ArgumentParser(description="运行简化DPO训练")
    parser.add_argument("--data_path", type=str,
                        default="/Users/sd/Desktop/mycode/myalgo/milvus/minimind/my_llm_implementation/datasets/pretrain_hq.jsonl",
                        help="数据集路径")
    parser.add_argument("--pretrained_model", type=str,
                        default="./sft_output/checkpoint-2650",
                        help="预训练模型路径")
    parser.add_argument("--max_length", type=int, default=64, help="最大序列长度")
    parser.add_argument("--batch_size", type=int, default=1, help="批次大小")
    parser.add_argument("--learning_rate", type=float, default=1e-5, help="学习率")
    parser.add_argument("--num_steps", type=int, default=100, help="训练步数")
    parser.add_argument("--log_steps", type=int, default=10, help="日志步数")
    parser.add_argument("--output_dir", type=str, default="./dpo_simple_output", help="输出目录")
    parser.add_argument("--beta", type=float, default=0.1, help="DPO温度参数")

    args = parser.parse_args()

    # Prefer GPU when available; fall back to CPU.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"使用设备: {device}")

    os.makedirs(args.output_dir, exist_ok=True)

    # 1. Model configuration.
    print("加载模型配置...")
    config = get_minimind2_small_config()
    print(f"参数量: {config.get_model_size():.1f}M")

    # 2. Tokenizer.
    print("创建分词器...")
    tokenizer = MiniMindTokenizer()
    print(f"词汇表大小: {tokenizer.vocab_size}")

    # 3. Model.
    print("创建模型...")
    model = SimpleLLMForCausalLM(config)
    model = model.to(device)
    print(f"模型参数量: {sum(p.numel() for p in model.parameters())/1e6:.1f}M")

    # 4. Load pretrained (SFT) weights when the checkpoint exists; otherwise
    # continue with random initialization so the script still runs end to end.
    if os.path.exists(args.pretrained_model):
        print(f"加载预训练权重从: {args.pretrained_model}")
        model_path = os.path.join(args.pretrained_model, "pytorch_model.bin")
        if os.path.exists(model_path):
            model.load_state_dict(torch.load(model_path, map_location=device))
            print("预训练权重加载成功!")
        else:
            print("预训练权重文件不存在，使用随机初始化")
    else:
        print("预训练模型路径不存在，使用随机初始化")

    # 5. Preference (chosen/rejected) dataset.
    print("创建DPO数据集...")
    dataset = SimpleDPODataset(
        data_path=args.data_path,
        tokenizer=tokenizer,
        max_length=args.max_length
    )
    print(f"数据集大小: {len(dataset)}")

    # 6. DataLoader with a collate function that stacks the four per-sample
    # tensors into batched tensors.
    from torch.utils.data import DataLoader

    def collate_fn(items):
        # Stack per-sample tensors along a new batch dimension.
        return {
            'chosen_input_ids': torch.stack([item['chosen_input_ids'] for item in items]),
            'chosen_labels': torch.stack([item['chosen_labels'] for item in items]),
            'rejected_input_ids': torch.stack([item['rejected_input_ids'] for item in items]),
            'rejected_labels': torch.stack([item['rejected_labels'] for item in items])
        }

    dataloader = DataLoader(
        dataset,
        batch_size=args.batch_size,
        shuffle=True,
        collate_fn=collate_fn
    )

    # 7. Optimizer.
    optimizer = torch.optim.AdamW(model.parameters(), lr=args.learning_rate)

    # 8. Training loop: one optimizer step per batch, capped at --num_steps.
    print("开始DPO训练...")
    model.train()
    total_loss = 0
    step = 0

    for batch in dataloader:
        if step >= args.num_steps:
            break

        # Move the batch to the training device.
        chosen_input_ids = batch['chosen_input_ids'].to(device)
        chosen_labels = batch['chosen_labels'].to(device)
        rejected_input_ids = batch['rejected_input_ids'].to(device)
        rejected_labels = batch['rejected_labels'].to(device)

        # Forward passes for both preference branches.
        chosen_outputs = model(input_ids=chosen_input_ids, labels=chosen_labels)
        chosen_logits = chosen_outputs['logits']

        rejected_outputs = model(input_ids=rejected_input_ids, labels=rejected_labels)
        rejected_logits = rejected_outputs['logits']

        # Preference loss over the pair.
        loss = dpo_loss(
            chosen_logits, rejected_logits,
            chosen_labels, rejected_labels,
            beta=args.beta
        )

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_loss += loss.item()
        step += 1

        if step % args.log_steps == 0:
            avg_loss = total_loss / step
            print(f"Step {step}, Loss: {loss.item():.4f}, Avg Loss: {avg_loss:.4f}")

    # 9. Save the final model, config (including training args), and tokenizer.
    print("保存最终模型...")
    final_save_path = os.path.join(args.output_dir, "final_model")
    os.makedirs(final_save_path, exist_ok=True)

    model_path = os.path.join(final_save_path, "pytorch_model.bin")
    torch.save(model.state_dict(), model_path)

    config_dict = {
        "model_type": "llm",
        "config": config.__dict__,
        "training_args": vars(args)
    }
    with open(os.path.join(final_save_path, "config.json"), "w") as f:
        json.dump(config_dict, f, indent=2)

    tokenizer.save_pretrained(final_save_path)

    print(f"\nDPO训练完成!")
    # Guard against ZeroDivisionError when the dataloader yields no batches
    # (empty dataset or num_steps == 0).
    if step > 0:
        print(f"最终平均损失: {total_loss / step:.4f}")
    else:
        print("未执行任何训练步骤")
    print(f"模型保存到: {final_save_path}")

# Script entry point: run the DPO training loop when executed directly.
if __name__ == "__main__":
    main()
