#!/usr/bin/env python3
"""
PyTorch原生训练脚本
用于建立性能基线
"""

import os
import json
import time
import argparse
from pathlib import Path
from typing import Dict, Any, Optional
from dataclasses import dataclass, field

import torch
import torch.nn as nn
import torch.distributed as dist
from torch.utils.data import DataLoader, DistributedSampler
from torch.nn.parallel import DistributedDataParallel as DDP

from transformers import (
    GPT2LMHeadModel,
    GPT2Config,
    GPT2Tokenizer,
    get_linear_schedule_with_warmup,
    set_seed
)
from datasets import load_dataset, load_from_disk
import numpy as np
from tqdm import tqdm
import psutil


class PerformanceMonitor:
    """Collects per-step training performance metrics.

    Records loss, throughput (samples/sec and tokens/sec), CUDA memory
    usage and wall-clock step time, prints them every `log_interval`
    steps, and can dump everything (plus summary statistics) to JSON.
    """

    def __init__(self, log_interval: int = 10, output_dir: str = "./results"):
        """
        Args:
            log_interval: print metrics every this many steps.
            output_dir: default directory for saved metrics files.
        """
        self.log_interval = log_interval
        self.output_dir = Path(output_dir)
        self.output_dir.mkdir(parents=True, exist_ok=True)

        # Per-step series. "gpu_utilization" is kept for output-schema
        # stability but is never populated by this class.
        self.metrics = {
            "loss": [],
            "throughput_samples": [],
            "throughput_tokens": [],
            "memory_allocated": [],
            "memory_reserved": [],
            "step_time": [],
            "gpu_utilization": []
        }

        self.start_time = time.time()
        self.step_start_time = None

    def start_step(self):
        """Start timing one training step."""
        if torch.cuda.is_available():
            # Drain queued GPU work so the timer measures this step only.
            torch.cuda.synchronize()
        self.step_start_time = time.time()

    def end_step(self, loss: float, batch_size: int, seq_length: int, step: int):
        """Finish timing one training step and record its metrics.

        Args:
            loss: scalar training loss for this step.
            batch_size: per-device batch size.
            seq_length: sequence length of the batch.
            step: global step index (used for periodic printing).

        Raises:
            RuntimeError: if start_step() was not called first.
        """
        if self.step_start_time is None:
            raise RuntimeError("end_step() called before start_step()")
        if torch.cuda.is_available():
            torch.cuda.synchronize()

        # Clamp to avoid division by zero on timers with coarse resolution.
        step_time = max(time.time() - self.step_start_time, 1e-9)

        # Throughput.
        samples_per_sec = batch_size / step_time
        tokens_per_sec = (batch_size * seq_length) / step_time

        # CUDA memory in GB (zero on CPU-only hosts).
        if torch.cuda.is_available():
            memory_allocated = torch.cuda.memory_allocated() / (1024**3)
            memory_reserved = torch.cuda.memory_reserved() / (1024**3)
        else:
            memory_allocated = 0
            memory_reserved = 0

        self.metrics["loss"].append(loss)
        self.metrics["throughput_samples"].append(samples_per_sec)
        self.metrics["throughput_tokens"].append(tokens_per_sec)
        self.metrics["memory_allocated"].append(memory_allocated)
        self.metrics["memory_reserved"].append(memory_reserved)
        self.metrics["step_time"].append(step_time)

        # Periodic console report.
        if step % self.log_interval == 0:
            self._print_metrics(step, loss, samples_per_sec, tokens_per_sec,
                              memory_allocated, step_time)

    def _print_metrics(self, step: int, loss: float, samples_per_sec: float,
                      tokens_per_sec: float, memory_gb: float, step_time: float):
        """Print one step's metrics to stdout."""
        print(f"\n[Step {step}]")
        print(f"  Loss: {loss:.4f}")
        print(f"  Throughput: {samples_per_sec:.2f} samples/sec, {tokens_per_sec:.2f} tokens/sec")
        print(f"  Memory: {memory_gb:.2f} GB")
        print(f"  Step time: {step_time*1000:.2f} ms")

    @staticmethod
    def _mean(values) -> float:
        """Mean as a plain float; 0.0 for an empty series.

        Avoids numpy's empty-slice RuntimeWarning and NaN values, which
        json.dump would otherwise emit as non-standard JSON.
        """
        return float(np.mean(values)) if values else 0.0

    def save_metrics(self, filename: str = "metrics.json"):
        """Write all recorded metrics plus summary statistics to a JSON file.

        `filename` may be an absolute path, or a relative path containing
        separators, in which case it is used as given; a bare file name is
        placed inside `output_dir`.
        """
        if os.path.isabs(filename):
            filepath = Path(filename)
        elif "/" in filename or "\\" in filename:
            filepath = Path(filename)
        else:
            filepath = self.output_dir / filename

        # Summary statistics over the recorded series.
        summary = {
            "total_steps": len(self.metrics["loss"]),
            "avg_loss": self._mean(self.metrics["loss"]),
            "avg_throughput_samples": self._mean(self.metrics["throughput_samples"]),
            "avg_throughput_tokens": self._mean(self.metrics["throughput_tokens"]),
            "avg_memory_allocated": self._mean(self.metrics["memory_allocated"]),
            "avg_memory_reserved": self._mean(self.metrics["memory_reserved"]),
            "avg_step_time": self._mean(self.metrics["step_time"]),
            "total_time": time.time() - self.start_time
        }

        output = {
            "summary": summary,
            "detailed_metrics": {k: [float(v) for v in vals] for k, vals in self.metrics.items()}
        }

        # Make sure the target directory exists.
        filepath.parent.mkdir(parents=True, exist_ok=True)

        with open(filepath, 'w') as f:
            json.dump(output, f, indent=2)

        print(f"\n{'='*60}")
        print("Performance Summary:")
        print(f"{'='*60}")
        for key, value in summary.items():
            print(f"  {key}: {value}")
        print(f"{'='*60}\n")


def setup_distributed():
    """Initialize the (optionally) distributed training environment.

    Reads the rank variables exported by torchrun. Falls back to a
    single-process configuration when they are absent; LOCAL_RANK
    additionally defaults to 0 so launchers that only export
    RANK/WORLD_SIZE do not crash with a KeyError.

    Returns:
        Tuple (rank, world_size, local_rank).
    """
    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        rank = int(os.environ['RANK'])
        world_size = int(os.environ['WORLD_SIZE'])
        # LOCAL_RANK is not guaranteed to be set by every launcher.
        local_rank = int(os.environ.get('LOCAL_RANK', 0))
    else:
        rank = 0
        world_size = 1
        local_rank = 0

    if world_size > 1:
        # NCCL backend for multi-GPU; bind this process to its local GPU.
        dist.init_process_group(backend='nccl')
        torch.cuda.set_device(local_rank)

    return rank, world_size, local_rank


def load_config(config_path: str) -> Dict[str, Any]:
    """Read a JSON configuration file and return its contents as a dict."""
    return json.loads(Path(config_path).read_text())


def prepare_dataset(config: Dict[str, Any], tokenizer):
    """Build a tokenized, fixed-length-block language-modeling dataset.

    Loads the raw dataset (from a local path when available, otherwise
    from the HuggingFace hub), tokenizes it, and regroups the token
    stream into contiguous `block_size` chunks whose labels are a copy
    of the inputs (standard causal-LM setup).
    """
    data_config = config["data"]

    # Prefer a locally saved dataset when the configured path exists.
    local_path = data_config.get("dataset_path")
    if local_path and os.path.exists(local_path):
        print(f"Loading dataset from {data_config['dataset_path']}")
        dataset = load_from_disk(data_config["dataset_path"])
    else:
        print(f"Loading dataset {data_config['dataset_name']}")
        dataset = load_dataset(
            data_config["dataset_name"],
            data_config.get("dataset_config", None)
        )

    block_size = data_config.get("block_size", 1024)

    def _tokenize(batch):
        # Plain tokenization of the raw text column.
        return tokenizer(batch["text"])

    def _chunk(batch):
        # Flatten every column into one long sequence.
        joined = {key: sum(batch[key], []) for key in batch.keys()}
        first_key = list(batch.keys())[0]

        # Drop the trailing remainder so every chunk is exactly block_size.
        usable = (len(joined[first_key]) // block_size) * block_size

        chunked = {
            key: [seq[start : start + block_size] for start in range(0, usable, block_size)]
            for key, seq in joined.items()
        }
        # Causal LM: labels mirror the inputs.
        chunked["labels"] = chunked["input_ids"].copy()
        return chunked

    print("Tokenizing dataset...")
    # num_proc=1 avoids cache-file collisions in multi-process launches.
    tokenized = dataset.map(
        _tokenize,
        batched=True,
        num_proc=1,
        remove_columns=dataset["train"].column_names,
    )

    print("Grouping texts...")
    lm_dataset = tokenized.map(
        _chunk,
        batched=True,
        num_proc=1,
    )

    # Have __getitem__ return torch tensors rather than Python lists.
    lm_dataset.set_format(type="torch", columns=["input_ids", "attention_mask", "labels"])

    return lm_dataset


def train(config: Dict[str, Any], rank: int, world_size: int, local_rank: int):
    """Run the complete training loop.

    Loads the GPT-2 model/tokenizer, prepares the dataset, builds the
    optimizer and linear-warmup schedule, optionally wraps the model in
    DDP and enables mixed precision, then trains for the configured
    number of epochs. Rank 0 additionally handles progress display,
    checkpointing and metrics export.

    Args:
        config: parsed JSON config with "model", "data", "training" and
            "performance" sections.
        rank: global process rank.
        world_size: total number of distributed processes.
        local_rank: GPU index on the local node.
    """
    
    # Fix the random seed for reproducibility.
    set_seed(42)
    
    # Load tokenizer and model.
    model_config = config["model"]
    print(f"Loading model from {model_config['model_path']}")
    
    tokenizer = GPT2Tokenizer.from_pretrained(model_config["model_path"])
    # GPT-2 has no pad token; reuse EOS for padding.
    tokenizer.pad_token = tokenizer.eos_token
    
    model = GPT2LMHeadModel.from_pretrained(model_config["model_path"])
    
    # Move the model to this process's device (CPU fallback without CUDA).
    device = torch.device(f"cuda:{local_rank}" if torch.cuda.is_available() else "cpu")
    model = model.to(device)
    
    # Wrap in DistributedDataParallel for multi-process runs.
    if world_size > 1:
        model = DDP(model, device_ids=[local_rank], output_device=local_rank)
    
    # Prepare the dataset.
    dataset = prepare_dataset(config, tokenizer)
    train_dataset = dataset["train"]
    
    # Build the DataLoader; DistributedSampler shards data across ranks.
    training_config = config["training"]
    batch_size = training_config["per_device_train_batch_size"]
    
    sampler = DistributedSampler(train_dataset, num_replicas=world_size, rank=rank) if world_size > 1 else None
    
    train_loader = DataLoader(
        train_dataset,
        batch_size=batch_size,
        sampler=sampler,
        shuffle=(sampler is None),  # the distributed sampler shuffles itself
        num_workers=training_config.get("dataloader_num_workers", 4),
        pin_memory=training_config.get("dataloader_pin_memory", True),
    )
    
    # Optimizer.
    optimizer = torch.optim.AdamW(
        model.parameters(),
        lr=training_config["learning_rate"],
        betas=(training_config["adam_beta1"], training_config["adam_beta2"]),
        eps=training_config["adam_epsilon"],
        weight_decay=training_config["weight_decay"]
    )
    
    # Linear warmup + decay schedule over all optimizer steps.
    num_epochs = training_config["num_train_epochs"]
    num_training_steps = len(train_loader) * num_epochs
    num_warmup_steps = int(num_training_steps * training_config["warmup_ratio"])
    
    scheduler = get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=num_warmup_steps,
        num_training_steps=num_training_steps
    )
    
    # Performance monitoring (rank 0 only, when enabled in the config).
    perf_monitor = None
    if rank == 0 and config["performance"]["enable_monitoring"]:
        perf_monitor = PerformanceMonitor(
            log_interval=config["performance"]["log_interval"],
            output_dir=training_config["output_dir"]
        )
    
    # Mixed precision: GradScaler is only needed for fp16; bf16 runs under
    # autocast without loss scaling.
    use_amp = training_config.get("bf16", False) or training_config.get("fp16", False)
    scaler = torch.cuda.amp.GradScaler() if use_amp and training_config.get("fp16", False) else None
    
    # Training loop.
    model.train()
    global_step = 0
    
    for epoch in range(num_epochs):
        if sampler:
            # Re-seed the sampler so each epoch gets a different shuffle.
            sampler.set_epoch(epoch)
        
        epoch_iterator = tqdm(train_loader, desc=f"Epoch {epoch+1}/{num_epochs}", disable=rank!=0)
        
        for step, batch in enumerate(epoch_iterator):
            # Move the batch to the training device.
            batch = {k: v.to(device) for k, v in batch.items()}
            
            if perf_monitor:
                perf_monitor.start_step()
            
            # Forward pass (under autocast when mixed precision is on).
            if use_amp:
                with torch.cuda.amp.autocast(dtype=torch.bfloat16 if training_config.get("bf16") else torch.float16):
                    outputs = model(**batch)
                    loss = outputs.loss
            else:
                outputs = model(**batch)
                loss = outputs.loss
            
            # Backward pass. The fp16 path scales the loss and unscales the
            # gradients before clipping so clipping sees true magnitudes.
            if scaler:
                scaler.scale(loss).backward()
                scaler.unscale_(optimizer)
                torch.nn.utils.clip_grad_norm_(model.parameters(), training_config["max_grad_norm"])
                scaler.step(optimizer)
                scaler.update()
            else:
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), training_config["max_grad_norm"])
                optimizer.step()
            
            scheduler.step()
            optimizer.zero_grad()
            
            # Record step performance (loss.item() synchronizes with the GPU).
            if perf_monitor:
                seq_length = batch["input_ids"].shape[1]
                perf_monitor.end_step(loss.item(), batch_size, seq_length, global_step)
            
            global_step += 1
            
            # Periodic checkpointing (rank 0 only).
            if rank == 0 and global_step % training_config["save_steps"] == 0:
                save_dir = Path(training_config["output_dir"]) / f"checkpoint-{global_step}"
                save_dir.mkdir(parents=True, exist_ok=True)
                
                # Unwrap the DDP container before saving.
                model_to_save = model.module if hasattr(model, 'module') else model
                model_to_save.save_pretrained(save_dir)
                tokenizer.save_pretrained(save_dir)
                print(f"Checkpoint saved to {save_dir}")
    
    # Save the final model (rank 0 only).
    if rank == 0:
        final_dir = Path(training_config["output_dir"]) / "final"
        final_dir.mkdir(parents=True, exist_ok=True)
        
        model_to_save = model.module if hasattr(model, 'module') else model
        model_to_save.save_pretrained(final_dir)
        tokenizer.save_pretrained(final_dir)
        
        if perf_monitor:
            # Honor the metrics_file path from the config, if present.
            metrics_file = config["performance"].get("metrics_file", "metrics.json")
            perf_monitor.save_metrics(metrics_file)
        
        print(f"Training completed! Model saved to {final_dir}")
    
    # Tear down the process group.
    if world_size > 1:
        dist.destroy_process_group()


def main():
    """Entry point: parse CLI arguments, set up distributed state, train."""
    parser = argparse.ArgumentParser(description="PyTorch Baseline Training")
    parser.add_argument("--config", type=str, default="config_pytorch.json",
                       help="Path to config file")
    args = parser.parse_args()

    # Read the JSON configuration and initialize (possibly single-process)
    # distributed state.
    config = load_config(args.config)
    rank, world_size, local_rank = setup_distributed()

    # Only rank 0 prints the startup banner.
    if rank == 0:
        banner = "=" * 60
        print(banner)
        print("PyTorch Baseline Training")
        print(banner)
        print(f"World size: {world_size}")
        print(f"Model: {config['model']['name']}")
        print(f"Output: {config['training']['output_dir']}")
        print(banner)

    train(config, rank, world_size, local_rank)


if __name__ == "__main__":
    main()

