import argparse
import json
import math
import os
import signal
import time
import torch
import torch.distributed as dist
from contextlib import nullcontext
from torch.utils.data import DataLoader
from typing import Optional, Dict, Any

try:
    from accelerate import Accelerator
    ACCELERATE_AVAILABLE = True
except ImportError:
    ACCELERATE_AVAILABLE = False
    print("Accelerate is not available. Install it using 'pip install accelerate' for distributed training.")

try:
    from torch.utils.tensorboard import SummaryWriter
    TENSORBOARD_AVAILABLE = True
except ImportError:
    TENSORBOARD_AVAILABLE = False
    print("TensorBoard is not available. Install it using 'pip install tensorboard' for logging.")

# Module-level counter of tokens processed during training.
# Reset to 0 in BaseTrainer.__init__ and read by _log_training_stats
# to report totals and throughput; subclasses are expected to update it.
total_tokens_processed = 0

class BaseTrainer:
    """Base trainer.

    Handles distributed setup (plain DDP via torch.distributed, or
    Accelerate-managed), device and seed initialization, rank-0-only
    logging (stdout / TensorBoard / WandB), graceful interruption on
    SIGINT/SIGTERM, and the outer epoch loop.

    Subclasses must implement ``_init_components`` (model, data,
    optimizer) and ``_train_epoch``.
    """

    def __init__(self, config: Dict[str, Any]):
        """Build the trainer from a configuration dict.

        Args:
            config: Training configuration. Keys read by the base class
                include ``device``, ``seed``, ``epochs``, ``output_dir``,
                ``use_accelerate``, ``use_tensorboard``,
                ``tensorboard_log_dir``, ``use_wandb``, ``wandb_project``
                and ``wandb_name``.
        """
        self.config = config
        self.global_step = 0
        self.training_start_time = None
        self.training_interrupted = False
        # Reset the module-level token counter at the start of every run.
        global total_tokens_processed
        total_tokens_processed = 0

        # DDP launchers (e.g. torchrun) always export RANK; its absence
        # means we are running single-process.
        self.ddp = int(os.environ.get("RANK", -1)) != -1

        self._log_if_rank0(f"[INFO] 初始化训练器...")

        # Register handlers so an interrupted run still persists its stats.
        signal.signal(signal.SIGINT, self._signal_handler)
        signal.signal(signal.SIGTERM, self._signal_handler)

        # Process-group setup (no-op under Accelerate or single-process).
        self._init_distributed_mode()

        # Compute-device selection.
        self._init_device()

        # Random seeds (per-rank offset in plain DDP mode).
        self._init_seed()

        # Model, datasets, optimizer, etc. — supplied by the subclass.
        self._init_components()

        # TensorBoard / WandB writers (rank 0 only).
        self._init_loggers()

        self._log_if_rank0(f"[INFO] 训练器初始化完成")

    def _signal_handler(self, sig, frame):
        """Handle SIGINT/SIGTERM: save training stats, then exit cleanly."""
        self.training_interrupted = True
        self._log_if_rank0("[INFO] 训练被中断，正在保存训练统计信息...")
        self._log_training_stats()
        # raise SystemExit instead of the site-injected `exit` builtin,
        # which is not guaranteed to be available in all runtimes.
        raise SystemExit(0)

    def _init_distributed_mode(self):
        """Initialize the torch.distributed process group for plain DDP runs.

        Skipped entirely when not launched in distributed mode, or when
        Accelerate is enabled (Accelerate owns process-group setup).
        """
        self._log_if_rank0(f"[INFO] 初始化分布式训练模式，DDP状态: {self.ddp}")
        if not self.ddp:
            self._log_if_rank0(f"[INFO] 非分布式训练模式")
            return

        # Accelerate performs its own distributed initialization.
        if self.config.get('use_accelerate', False) and ACCELERATE_AVAILABLE:
            self._log_if_rank0(f"[INFO] 使用Accelerate，跳过手动分布式初始化")
            return

        self._log_if_rank0(f"[INFO] 初始化DDP训练环境")
        dist.init_process_group(backend="nccl")
        self.ddp_rank = int(os.environ["RANK"])
        self.ddp_local_rank = int(os.environ["LOCAL_RANK"])
        self.ddp_world_size = int(os.environ["WORLD_SIZE"])
        self.DEVICE = f"cuda:{self.ddp_local_rank}"
        torch.cuda.set_device(self.DEVICE)
        self._log_if_rank0(f"[INFO] DDP初始化完成，rank: {self.ddp_rank}, local_rank: {self.ddp_local_rank}, world_size: {self.ddp_world_size}")

    def _init_device(self):
        """Select the compute device and store it as ``self.device``."""
        self._log_if_rank0(f"[INFO] 初始化设备")
        if self.ddp and hasattr(self, 'DEVICE'):
            # Plain DDP: _init_distributed_mode already pinned a local GPU.
            self.device = torch.device(self.DEVICE)
            self._log_if_rank0(f"[INFO] 使用DDP设备: {self.device}")
        elif self.ddp:
            # Accelerate mode: no DEVICE attribute was set; fall back to
            # a generic CUDA/CPU device (Accelerate places tensors itself).
            self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            self._log_if_rank0(f"[INFO] 使用分布式设备: {self.device}")
        else:
            self.device = torch.device(
                self.config.get("device", "cuda:0") if torch.cuda.is_available() else "cpu"
            )
            self._log_if_rank0(f"[INFO] 使用设备: {self.device}")

    def _init_seed(self):
        """Seed torch RNGs; offset the seed by rank in plain DDP mode."""
        self._log_if_rank0(f"[INFO] 初始化随机种子")
        base_seed = self.config.get("seed", 1337)
        torch.manual_seed(base_seed)
        torch.cuda.manual_seed(base_seed)
        self._log_if_rank0(f"[INFO] 基础随机种子: {base_seed}")

        if self.ddp:
            # Only plain DDP needs a manual per-rank offset; under
            # Accelerate the rank bookkeeping is managed by Accelerate.
            if not (self.config.get('use_accelerate', False) and ACCELERATE_AVAILABLE):
                rank = self._safe_get_rank()
                torch.manual_seed(base_seed + rank)
                torch.cuda.manual_seed(base_seed + rank)
                self._log_if_rank0(f"[INFO] DDP模式下调整随机种子: {base_seed + rank}")
            else:
                torch.manual_seed(base_seed)
                torch.cuda.manual_seed(base_seed)
                self._log_if_rank0(f"[INFO] Accelerate模式下使用基础随机种子: {base_seed}")

    def _safe_get_rank(self):
        """Return this process's rank, or 0 if it cannot be determined.

        Tries the RANK environment variable first, then the
        torch.distributed API, and finally falls back to 0.
        """
        try:
            if "RANK" in os.environ:
                return int(os.environ["RANK"])

            if dist.is_available() and dist.is_initialized():
                return dist.get_rank()

            return 0
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are not swallowed here.
            return 0

    def _log_if_rank0(self, message):
        """Print ``message`` only on rank 0 (or when not distributed)."""
        if not self.ddp or self._safe_get_rank() == 0:
            print(message)

    def _init_components(self):
        """Initialize model, datasets, optimizer, etc. (subclass hook)."""
        self._log_if_rank0(f"[INFO] 初始化模型组件（需在子类中实现）")
        raise NotImplementedError("子类必须实现 _init_components 方法")

    def _init_loggers(self):
        """Create TensorBoard and WandB loggers on rank 0, if enabled."""
        self._log_if_rank0(f"[INFO] 初始化日志记录器")
        self.tensorboard_writer = None
        if self.config.get('use_tensorboard', False) and TENSORBOARD_AVAILABLE and (not self.ddp or self._safe_get_rank() == 0):
            log_dir = self.config.get('tensorboard_log_dir', '../tensorboard_logs')
            os.makedirs(log_dir, exist_ok=True)
            self.tensorboard_writer = SummaryWriter(log_dir=log_dir)
            self._log_if_rank0(f"[INFO] TensorBoard日志启用，日志目录: {log_dir}")

        self.wandb_logger = None
        if self.config.get('use_wandb', False) and (not self.ddp or self._safe_get_rank() == 0):
            try:
                import wandb
                wandb_project = self.config.get('wandb_project', 'PoXiao-LLM')
                wandb_name = self.config.get('wandb_name', 'poxiao-training')
                wandb.init(project=wandb_project, name=wandb_name)
                self.wandb_logger = wandb
                self._log_if_rank0(f"[INFO] WandB日志启用，项目: {wandb_project}，名称: {wandb_name}")
            except ImportError:
                self._log_if_rank0("[WARNING] WandB不可用，请使用 'pip install wandb' 安装")

    def _log_training_stats(self):
        """Print and persist throughput statistics (rank 0 only)."""
        self._log_if_rank0(f"[INFO] 记录训练统计信息")
        global total_tokens_processed
        if not self.ddp or self._safe_get_rank() == 0:
            elapsed_time = time.time() - self.training_start_time if self.training_start_time else 0
            self._log_if_rank0(f"=== 训练统计信息 ===")
            self._log_if_rank0(f"总处理token数: {total_tokens_processed:,}")
            self._log_if_rank0(f"训练时长: {elapsed_time:.2f} 秒")
            self._log_if_rank0(f"平均处理速度: {total_tokens_processed / elapsed_time if elapsed_time > 0 else 0:.2f} tokens/秒")

            stats = {
                "total_tokens_processed": total_tokens_processed,
                "training_duration_seconds": elapsed_time,
                "average_tokens_per_second": total_tokens_processed / elapsed_time if elapsed_time > 0 else 0,
                "training_interrupted": self.training_interrupted
            }

            output_dir = self.config.get('output_dir', '../out')
            stats_file = os.path.join(output_dir, "training_stats.json")
            try:
                # Fix: the output directory may not exist yet on a fresh run.
                os.makedirs(output_dir, exist_ok=True)
                with open(stats_file, 'w', encoding='utf-8') as f:
                    json.dump(stats, f, indent=2, ensure_ascii=False)
                self._log_if_rank0(f"[INFO] 训练统计信息已保存到: {stats_file}")
            except Exception as e:
                self._log_if_rank0(f"[ERROR] 保存训练统计信息失败: {e}")

    def train(self):
        """Run the outer training loop over ``config['epochs']`` epochs.

        Delegates each epoch to ``_train_epoch``; an epoch returning a
        truthy value triggers early stopping. Stats are logged on normal
        completion and on error; loggers are closed in all cases.
        """
        self._log_if_rank0(f"[INFO] 开始训练主循环")
        self.training_start_time = time.time()

        # Echo the full configuration once, on rank 0.
        if not self.ddp or self._safe_get_rank() == 0:
            self._log_if_rank0("=== 训练配置参数 ===")
            for key, value in self.config.items():
                self._log_if_rank0(f"{key}: {value}")

        try:
            epochs = self.config.get('epochs', 1)
            self._log_if_rank0(f"[INFO] 计划训练轮数: {epochs}")
            for epoch in range(epochs):
                self._log_if_rank0(f"[INFO] 开始训练第 {epoch + 1}/{epochs} 轮")
                should_early_stop = self._train_epoch(epoch)
                if should_early_stop:
                    self._log_if_rank0(f"[INFO] 触发早停机制，结束训练")
                    break  # early stop: leave the epoch loop
                self._log_if_rank0(f"[INFO] 完成第 {epoch + 1}/{epochs} 轮训练")

            # Training finished normally.
            if not self.ddp or self._safe_get_rank() == 0:
                self._log_if_rank0("训练正常完成!")
                self._log_training_stats()

        except Exception as e:
            self._log_if_rank0(f"[ERROR] 训练过程中发生错误: {e}")
            if not self.ddp or self._safe_get_rank() == 0:
                self._log_training_stats()
            raise

        finally:
            # getattr-guard: if training failed before _init_loggers ran,
            # don't mask the original exception with an AttributeError.
            if getattr(self, 'tensorboard_writer', None) is not None:
                self.tensorboard_writer.close()
                self._log_if_rank0(f"[INFO] 关闭TensorBoard日志记录器")

    def _train_epoch(self, epoch):
        """Train one epoch; return truthy to early-stop (subclass hook)."""
        self._log_if_rank0(f"[INFO] 训练单个epoch（需在子类中实现）")
        raise NotImplementedError("子类必须实现 _train_epoch 方法")

    def _get_lr(self, current_step, total_steps, lr):
        """Cosine learning-rate schedule with a warm floor.

        Decays from ``1.1 * lr`` (step 0) to ``0.1 * lr`` (final step);
        the ``lr / 10`` term keeps the rate from reaching zero.

        Args:
            current_step: Current optimizer step.
            total_steps: Total steps in the schedule (clamped to >= 1).
            lr: Base learning rate.

        Returns:
            The learning rate for ``current_step``.
        """
        # Fix: no per-call logging here — this runs every optimizer step
        # and the old print flooded stdout.
        total_steps = max(total_steps, 1)  # fix: avoid ZeroDivisionError
        return lr / 10 + 0.5 * lr * (1 + math.cos(math.pi * current_step / total_steps))