import os
import torch
import torch.distributed as dist
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel as DDP
from tensorboardX import SummaryWriter
from tqdm import tqdm
import logging
from dataset.dataset_dataloader import CustomImageDataset
from utils import PerformanceMeter, TqdmHandler, set_random_seed, AverageMeter, accuracy, Timer
from metrics import calculate_precision_recall_f1, calculate_accuracy, calculate_top_k_accuracy, calculate_confusion_matrix
import datetime
try:
    # Newer PyTorch (>=1.10): torch.amp.autocast takes a device-type argument.
    from torch.amp import autocast, GradScaler
    AUTOCAST_DEVICE = 'cuda'
except ImportError:
    # Older PyTorch: torch.cuda.amp.autocast takes no device argument,
    # so AUTOCAST_DEVICE=None signals callers to invoke autocast() bare.
    from torch.cuda.amp import autocast, GradScaler
    AUTOCAST_DEVICE = None


class Trainer:
    """Single- or multi-GPU (DDP) image-classification trainer.

    Responsibilities: mixed-precision training, LR scheduling (warmup-aware
    and ReduceLROnPlateau-aware), periodic validation, TensorBoard logging,
    and checkpointing (regular snapshots plus a single rolling best-accuracy
    file). TensorBoard writing and all checkpoint I/O happen on rank 0 only;
    every rank keeps its own console/file logger.
    """

    def __init__(self, args, criterion, optimizer, scheduler, model, train_loader, val_loader, log_dir=None):
        self.config = args
        self.criterion = criterion
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.model = model
        self.train_loader = train_loader
        self.val_loader = val_loader
        self.log_dir = log_dir or args.log_dir

        # Distributed setup: rank/world info comes from torchrun's env vars.
        self.is_distributed = dist.is_initialized()
        self.local_rank = int(os.environ.get('LOCAL_RANK', 0)) if self.is_distributed else 0
        self.world_size = int(os.environ.get('WORLD_SIZE', 1)) if self.is_distributed else 1
        self.rank = int(os.environ.get('RANK', 0)) if self.is_distributed else 0

        # Logging: every rank configures a logger (get_logger also ensures the
        # log directory exists — see BUGFIX note there); only rank 0 gets a
        # TensorBoard writer.
        self.logger = self.get_logger()
        self.tb_writer = SummaryWriter(self.config.log_dir) if self.rank == 0 else None

        # Checkpoint directories exist only on rank 0; other ranks keep
        # weight_save_dir=None so the save_* guards short-circuit.
        if self.rank == 0:
            os.makedirs(self.config.log_dir, exist_ok=True)
            os.makedirs(self.config.weight_dir, exist_ok=True)

            # Per-run checkpoint directory, e.g. "<weight_dir>/web400_resnet50_06211530".
            current_time = datetime.datetime.now().strftime("%m%d%H%M")
            dataset_name = os.path.basename(self.config.data_dir).split('_')[0]  # e.g., web400
            self.weight_save_dir = os.path.join(self.config.weight_dir, f"{dataset_name}_{self.config.model_name}_{current_time}")
            os.makedirs(self.weight_save_dir, exist_ok=True)
            self.logger.info(f"Weight save directory created: {self.weight_save_dir}")
        else:
            self.weight_save_dir = None

        # Device selection: in DDP each process pins its own local GPU.
        if self.is_distributed:
            self.device = torch.device(f'cuda:{self.local_rank}')
            torch.cuda.set_device(self.local_rank)
        else:
            self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

        if self.rank == 0:
            self.logger.info(f'Using device: {self.device} (Distributed: {self.is_distributed})')

        set_random_seed(self.config.random_seed)

        # Best validation accuracy seen so far, and the on-disk path of the
        # checkpoint that achieved it (rolling: old best is deleted).
        self.best_val_accuracy = 0.0
        self.best_weight_path = None

        # Mixed precision: a GradScaler exists only when CUDA is available
        # and the config opts in; elsewhere self.scaler stays None.
        self.use_mixed_precision = getattr(self.config, 'use_mixed_precision', False)
        self.scaler = GradScaler() if (torch.cuda.is_available() and self.use_mixed_precision) else None

        # Log configuration once from rank 0, consistent with the rest of
        # the class (previously every rank emitted these lines).
        if self.rank == 0:
            if hasattr(self.scheduler, 'warmup_epochs'):
                self.logger.info(f"Warmup enabled: {self.scheduler.warmup_epochs} epochs, start_lr={self.scheduler.warmup_start_lr:.2e}, type={self.scheduler.warmup_type}")
            else:
                self.logger.info("Warmup disabled")
            if self.use_mixed_precision:
                self.logger.info("Mixed precision training enabled")
            else:
                self.logger.info("Mixed precision training disabled (using FP32)")

        # Move the model to its device and wrap with DDP for multi-GPU runs.
        self.model = self.model.to(self.device)
        if self.is_distributed:
            self.model = DDP(self.model, device_ids=[self.local_rank], output_device=self.local_rank)
            if self.rank == 0:
                self.logger.info(f"Model wrapped with DDP (world_size: {self.world_size})")

    def get_logger(self):
        """Configure and return the root logger (tqdm console + file handler)."""
        # BUGFIX: the FileHandler below opens config.log_dir/train.log at
        # construction time, but the directory used to be created only later
        # in __init__ (and only on rank 0), so a fresh run crashed here.
        # Create it up front on every rank; exist_ok makes this race-safe.
        os.makedirs(self.config.log_dir, exist_ok=True)
        logger = logging.getLogger()
        logger.handlers = []  # drop stale handlers from any previous setup
        logger.setLevel(logging.INFO)
        screen_handler = TqdmHandler()
        screen_handler.setFormatter(logging.Formatter('[%(asctime)s] %(message)s'))
        logger.addHandler(screen_handler)
        file_handler = logging.FileHandler(os.path.join(self.config.log_dir, 'train.log'), encoding='utf8')
        file_handler.setFormatter(logging.Formatter('[%(asctime)s][%(levelname)s] %(message)s'))
        logger.addHandler(file_handler)
        return logger

    def _state_dict(self):
        """Return the underlying model's state dict, unwrapping DDP if present."""
        raw_model = self.model.module if hasattr(self.model, 'module') else self.model
        return raw_model.state_dict()

    def save_weights(self, epoch):
        """Save a regular checkpoint as epoch{epoch}.pth (rank 0 only)."""
        # Only the main process persists weights; others have no save dir.
        if self.rank != 0 or self.weight_save_dir is None:
            return

        weight_path = os.path.join(self.weight_save_dir, f"epoch{epoch}.pth")
        torch.save(self._state_dict(), weight_path)
        self.logger.info(f"Model weights saved to {weight_path}")

    def save_best_weights(self, epoch, accuracy):
        """Save the best validation accuracy weights and delete previous best weight.

        Rank 0 only; keeps at most one best-accuracy file on disk.
        """
        # BUGFIX: mirror save_weights' guard — without it a call from a
        # non-zero rank would crash on weight_save_dir=None.
        if self.rank != 0 or self.weight_save_dir is None:
            return

        # Remove the previous best checkpoint so only one is kept.
        if self.best_weight_path is not None and os.path.exists(self.best_weight_path):
            try:
                os.remove(self.best_weight_path)
                self.logger.info(f"Deleted previous best weight: {self.best_weight_path}")
            except Exception as e:
                # Best-effort cleanup: a leftover file is not fatal.
                self.logger.warning(f"Failed to delete previous best weight {self.best_weight_path}: {str(e)}")

        best_weight_path = os.path.join(self.weight_save_dir, f"best_accuracy_epoch{epoch}_acc{accuracy:.2f}.pth")
        torch.save(self._state_dict(), best_weight_path)
        self.best_weight_path = best_weight_path  # track for the next cleanup
        self.logger.info(f"Best validation accuracy weights saved to {best_weight_path} (Accuracy: {accuracy:.2f}%)")

    def train(self):
        """Run the full training loop over config.epochs epochs.

        Validates every 3 epochs (plus epoch 0), steps the scheduler
        appropriately for its type, and checkpoints on rank 0. A
        KeyboardInterrupt triggers a final "interrupted" checkpoint.
        """
        # Classify the scheduler once instead of per-epoch.
        is_warmup_scheduler = hasattr(self.scheduler, 'warmup_epochs')
        is_plateau_scheduler = isinstance(self.scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau)

        try:
            for epoch in range(self.config.epochs):
                current_lr = self.optimizer.param_groups[0]['lr']

                # Announce the LR each epoch, tagging warmup progress if active.
                warmup_info = ""
                if is_warmup_scheduler and self.scheduler.current_epoch < self.scheduler.warmup_epochs:
                    warmup_info = f" [Warmup {self.scheduler.current_epoch + 1}/{self.scheduler.warmup_epochs}]"
                self.logger.info(f'Epoch {epoch + 1}/{self.config.epochs} - LR: {current_lr:.6f}{warmup_info}')

                train_loss = self.train_one_epoch(epoch)

                # Keep ranks in lockstep before validation/checkpointing.
                if self.is_distributed:
                    dist.barrier()

                # Validate on the first epoch and then every 3rd epoch.
                val_loss, val_accuracy = 0.0, 0.0
                if (epoch + 1) % 3 == 0 or epoch == 0:
                    val_loss, val_accuracy = self.validate(epoch)

                # Step the scheduler with whatever signal it expects.
                if is_warmup_scheduler:
                    self.scheduler.step(val_loss if val_loss > 0 else None)
                elif is_plateau_scheduler:
                    self.scheduler.step(val_loss)
                else:
                    self.scheduler.step()

                new_lr = self.optimizer.param_groups[0]['lr']

                # Throttled TensorBoard writes, rank 0 only.
                if (epoch + 1) % 5 == 0 and self.rank == 0:
                    self.tb_writer.add_scalar('Learning_Rate', new_lr, epoch)

                # Regular checkpoint every 3 epochs (rank 0 only).
                if (epoch + 1) % 3 == 0 and self.rank == 0:
                    self.save_weights(epoch + 1)

                # Rolling best-accuracy checkpoint (rank 0 only).
                if val_accuracy > self.best_val_accuracy and self.rank == 0:
                    self.best_val_accuracy = val_accuracy
                    self.save_best_weights(epoch + 1, val_accuracy)
        except KeyboardInterrupt:
            self.logger.info('Training interrupted. Saving model weights...')
            self.save_weights('interrupted')
            self.logger.info('Model weights saved after interruption.')

    def train_one_epoch(self, epoch):
        """Train for one epoch and return the (distributed-averaged) mean loss."""
        self.model.train()
        all_outputs = []
        all_labels = []
        total_loss = 0.0

        # Resolve per-batch configuration once, outside the loop.
        use_mixed_precision = self.scaler is not None
        check_nan = getattr(self.config, 'check_nan_loss', True)
        grad_clip = getattr(self.config, 'grad_clip_norm', 0) > 0

        pbar = tqdm(self.train_loader, desc=f'Epoch {epoch + 1}')

        for i, (inputs, labels) in enumerate(pbar):
            inputs, labels = inputs.to(self.device), labels.to(self.device)
            self.optimizer.zero_grad()

            # Forward pass, under autocast when mixed precision is enabled.
            if use_mixed_precision:
                autocast_context = autocast(AUTOCAST_DEVICE) if AUTOCAST_DEVICE else autocast()
                with autocast_context:
                    outputs = self.model(inputs)
                    loss = self.criterion(outputs, labels)
            else:
                outputs = self.model(inputs)
                loss = self.criterion(outputs, labels)

            # Silently skip batches with non-finite loss to keep training alive.
            if check_nan and (torch.isnan(loss) or torch.isinf(loss)):
                continue

            # Backward pass; with AMP the scaler handles scale/unscale/step.
            if use_mixed_precision:
                self.scaler.scale(loss).backward()
                if grad_clip:
                    self.scaler.unscale_(self.optimizer)
                    torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=self.config.grad_clip_norm)
                self.scaler.step(self.optimizer)
                self.scaler.update()
            else:
                loss.backward()
                if grad_clip:
                    torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=self.config.grad_clip_norm)
                self.optimizer.step()

            total_loss += loss.item()

            # Refresh the progress bar every 500 batches.
            if (i + 1) % 500 == 0:
                batch_accuracy = calculate_accuracy(outputs, labels)
                pbar.set_postfix({'Loss': f'{loss.item():.4f}', 'Acc': f'{batch_accuracy:.4f}'})

            # Collect predictions only on metric epochs (every 3rd).
            if (epoch + 1) % 3 == 0:
                # BUGFIX: detach — appending the raw outputs kept every
                # batch's autograd graph alive for the whole epoch,
                # steadily growing GPU memory.
                all_outputs.append(outputs.detach())
                all_labels.append(labels)

        avg_loss = total_loss / len(self.train_loader)

        # Distributed: average the mean loss across processes.
        if self.is_distributed:
            loss_tensor = torch.tensor(avg_loss, device=self.device)
            dist.all_reduce(loss_tensor, op=dist.ReduceOp.SUM)
            avg_loss = loss_tensor.item() / self.world_size

        # Every 3rd epoch: compute detailed metrics on the collected outputs.
        if (epoch + 1) % 3 == 0 and len(all_outputs) > 0:
            all_outputs = torch.cat(all_outputs)
            all_labels = torch.cat(all_labels)
            epoch_accuracy = calculate_accuracy(all_outputs, all_labels)
            epoch_precision, epoch_recall, epoch_f1 = calculate_precision_recall_f1(all_outputs, all_labels)
            top_5_acc = calculate_top_k_accuracy(all_outputs, all_labels, k=5)

            # Distributed: average each rank's per-shard metrics.
            if self.is_distributed:
                metrics = torch.tensor([epoch_accuracy, epoch_precision, epoch_recall, epoch_f1, top_5_acc], device=self.device)
                dist.all_reduce(metrics, op=dist.ReduceOp.SUM)
                metrics = metrics / self.world_size
                epoch_accuracy, epoch_precision, epoch_recall, epoch_f1, top_5_acc = metrics.cpu().numpy()

            if self.rank == 0:
                self.logger.info(f"Epoch {epoch+1} - Loss: {avg_loss:.4f}, Acc: {epoch_accuracy:.4f}, F1: {epoch_f1:.4f}, Top-5: {top_5_acc:.4f}")

                if self.tb_writer:
                    self.tb_writer.add_scalar('Training/Accuracy', epoch_accuracy, epoch)
                    self.tb_writer.add_scalar('Training/Loss', avg_loss, epoch)
        else:
            # Non-metric epochs log loss only.
            if self.rank == 0:
                self.logger.info(f"Epoch {epoch+1} - Loss: {avg_loss:.4f}")

        return avg_loss

    def validate(self, epoch):
        """Evaluate on the validation set; return (avg_loss, accuracy_percent).

        Under DDP, loss and accuracy are aggregated across all processes
        before being logged/returned.
        """
        self.model.eval()
        correct = 0
        total = 0
        total_loss = 0.0

        use_mixed_precision = self.scaler is not None

        with torch.no_grad():
            for inputs, labels in tqdm(self.val_loader, desc=f'Val {epoch + 1}', leave=False):
                inputs, labels = inputs.to(self.device), labels.to(self.device)

                # Mirror the training forward path (autocast when enabled).
                if use_mixed_precision:
                    autocast_context = autocast(AUTOCAST_DEVICE) if AUTOCAST_DEVICE else autocast()
                    with autocast_context:
                        outputs = self.model(inputs)
                        loss = self.criterion(outputs, labels)
                else:
                    outputs = self.model(inputs)
                    loss = self.criterion(outputs, labels)

                total_loss += loss.item()
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()

        avg_loss = total_loss / len(self.val_loader)

        # Distributed: aggregate counts/loss across processes FIRST.
        if self.is_distributed:
            correct_tensor = torch.tensor(correct, device=self.device)
            total_tensor = torch.tensor(total, device=self.device)
            loss_tensor = torch.tensor(avg_loss, device=self.device)

            dist.all_reduce(correct_tensor, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_tensor, op=dist.ReduceOp.SUM)
            dist.all_reduce(loss_tensor, op=dist.ReduceOp.SUM)

            correct = correct_tensor.item()
            total = total_tensor.item()
            avg_loss = loss_tensor.item() / self.world_size

        # BUGFIX: compute accuracy AFTER aggregation — it was previously
        # derived from the local rank's counts only, so under DDP the
        # logged/returned accuracy ignored every other process's samples.
        accuracy = 100 * correct / total

        if self.rank == 0:
            self.logger.info(f'Val {epoch + 1} - Acc: {accuracy:.2f}%, Loss: {avg_loss:.4f}')

            # Throttled TensorBoard writes.
            if (epoch + 1) % 5 == 0 and self.tb_writer:
                self.tb_writer.add_scalar('Validation/Accuracy', accuracy, epoch)
                self.tb_writer.add_scalar('Validation/Loss', avg_loss, epoch)

        return avg_loss, accuracy

if __name__ == "__main__":
    # Library module: no standalone entry point (the old main was removed).
    pass
