import os
import torch
import logging
from tqdm import tqdm
import torch.nn as nn
from abc import ABC, abstractmethod
import numpy as np

logger = logging.getLogger(__name__)

class BaseTrainer(ABC):
    """Abstract base trainer.

    Owns the generic training loop: per-epoch iteration, rolling/best
    checkpointing, periodic visualization, evaluation scheduling,
    LR-scheduler stepping and early stopping.  Subclasses supply the
    task-specific pieces: ``train_step``, ``evaluate`` and ``visualize``.
    """

    def __init__(self, model, train_loader, eval_loader, optimizer, scheduler, device, args):
        """
        Initialize the base trainer.

        Args:
            model: model to train
            train_loader: training data loader
            eval_loader: evaluation data loader (falsy to skip evaluation)
            optimizer: optimizer
            scheduler: learning-rate scheduler, or None
            device: target device
            args: training hyper-parameters; fields read here/by train():
                output_dir, num_epochs, vis_interval, do_eval, eval_interval
                and (optionally) early_stopping_patience
        """
        self.model = model
        self.train_loader = train_loader
        self.eval_loader = eval_loader
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.device = device
        self.args = args

        # Training progress counters.
        self.global_step = 0
        self.start_epoch = 0

        # Early-stopping state.
        self.best_loss = float('inf')  # best validation loss seen so far
        self.patience_counter = 0      # consecutive evals without improvement
        self.early_stopped = False     # set once early stopping triggers

        # Make sure the output directories exist.
        os.makedirs(args.output_dir, exist_ok=True)
        os.makedirs(os.path.join(args.output_dir, "checkpoints"), exist_ok=True)
        # Visualization directory is still required by visualize().
        os.makedirs(os.path.join(args.output_dir, "visualizations"), exist_ok=True)

    @abstractmethod
    def train_step(self, batch):
        """Run one optimization step; must return the batch loss as a float."""
        pass

    @abstractmethod
    def evaluate(self):
        """Evaluate the model; must be implemented by subclasses.

        Returns:
            float: loss on the validation set
        """
        pass

    @abstractmethod
    def visualize(self, batch, step):
        """Produce visualizations for `batch` at global step `step`."""
        pass

    def check_early_stopping(self, current_loss):
        """
        Decide whether training should stop early.

        Saves a "best.pth" checkpoint whenever `current_loss` improves on the
        best validation loss seen so far; otherwise increments the patience
        counter and triggers early stopping once it reaches
        ``args.early_stopping_patience``.

        Args:
            current_loss: current validation loss

        Returns:
            bool: True if training should stop
        """
        if current_loss < self.best_loss:
            # Improvement: remember it, reset patience, snapshot the model.
            self.best_loss = current_loss
            self.patience_counter = 0
            logger.info(f"验证损失改善至 {current_loss:.6f}，保存最佳模型")

            # Imported lazily to avoid a hard dependency at module-import time.
            from utils.checkpoint import save_checkpoint
            save_checkpoint(
                self.model,
                self.optimizer,
                None,  # epoch=None distinguishes this from regular checkpoints
                self.global_step,
                self.args.output_dir,
                filename="best.pth"
            )
            return False

        # No improvement: bump the patience counter.
        self.patience_counter += 1
        logger.info(f"验证损失未改善，当前耐心值: {self.patience_counter}/{self.args.early_stopping_patience}")

        # Stop once the no-improvement streak reaches the patience threshold.
        if self.patience_counter >= self.args.early_stopping_patience:
            logger.info(f"触发早停! 验证损失已连续 {self.args.early_stopping_patience} 次未改善")
            return True
        return False

    def train(self):
        """Main training loop: iterates epochs, saving a rolling checkpoint,
        stepping the LR scheduler, and periodically evaluating with optional
        early stopping."""
        for epoch in range(self.start_epoch, self.args.num_epochs):
            self.model.train()
            epoch_loss = 0.0
            num_batches = 0  # batches that completed successfully

            with tqdm(self.train_loader, desc=f"Epoch {epoch+1}/{self.args.num_epochs}") as pbar:
                for batch in pbar:
                    try:
                        # Run one training step.
                        loss = self.train_step(batch)

                        # BUGFIX: average over *successful* batches only;
                        # the old epoch_loss/(pbar.n+1) also counted batches
                        # skipped by the except branch below.
                        epoch_loss += loss
                        num_batches += 1
                        pbar.set_postfix(loss=loss, avg_loss=epoch_loss / num_batches)

                        # Periodically visualize results.
                        if self.global_step % self.args.vis_interval == 0:
                            self.visualize(batch, self.global_step)

                        self.global_step += 1

                    except Exception as e:
                        # Best-effort training: log and skip the bad batch.
                        logger.error(f"Error processing batch: {str(e)}")
                        continue

            # Save a rolling "latest.pth" checkpoint at the end of each epoch.
            from utils.checkpoint import save_checkpoint
            save_checkpoint(
                self.model,
                self.optimizer,
                epoch,
                self.global_step,
                self.args.output_dir,
                filename="latest.pth"  # overwritten every epoch
            )

            # BUGFIX: epoch-based schedulers must advance once per epoch.
            # Previously scheduler.step() lived inside the evaluation branch,
            # so it only ran on eval epochs and never ran when do_eval was
            # off.  ReduceLROnPlateau needs the validation loss and is still
            # stepped inside the evaluation branch below.
            if self.scheduler and not isinstance(self.scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
                self.scheduler.step()

            # Evaluate and check early stopping.
            if self.args.do_eval and self.eval_loader:
                if (epoch + 1) % self.args.eval_interval == 0:
                    # Evaluate the model and obtain the validation loss.
                    val_loss = self.evaluate()

                    # Plateau scheduler is driven by the validation loss.
                    if self.scheduler and isinstance(self.scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
                        self.scheduler.step(val_loss)

                    # If early stopping is configured, check whether to stop.
                    if hasattr(self.args, 'early_stopping_patience') and self.args.early_stopping_patience > 0:
                        self.early_stopped = self.check_early_stopping(val_loss)
                        if self.early_stopped:
                            logger.info("早停机制触发，停止训练")
                            break

        logger.info("训练完成!")