#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
简化版训练器 - Epoch-based训练
基于兄弟项目的优化策略，使用tqdm进度条和epoch-based训练逻辑
"""

import time
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm
from collections import deque

from lib.utils.tools.average_meter import AverageMeter
from lib.datasets.data_loader import DataLoader
from lib.loss.loss_manager import LossManager
from lib.models.model_manager import ModelManager
from lib.utils.tools.logger import Logger as Log
from lib.vis.seg_visualizer import SegVisualizer
from segmentor.tools.module_runner import ModuleRunner
from segmentor.tools.optim_scheduler import OptimScheduler
from segmentor.tools.data_helper import DataHelper
from segmentor.tools.evaluator import get_evaluator
from lib.utils.distributed import get_world_size, get_rank, is_distributed


class SimplifiedTrainer(object):
    """
    Simplified epoch-based trainer for semantic segmentation.

    Wraps model/data/optimizer setup and runs an epoch-based training loop
    with a tqdm progress bar, optional CUDA mixed precision (GradScaler +
    autocast), gradient clipping, NaN/Inf-loss skipping, epoch-level early
    stopping and periodic checkpointing.
    """

    def __init__(self, configer):
        # `configer` is the project-wide configuration object; this class
        # relies on its .get(...), .exists(...) and .to_dict() methods.
        self.configer = configer
        self.batch_time = AverageMeter()
        self.loss_meter = AverageMeter()

        # Rolling windows of recent loss/accuracy values (informational).
        self.loss_queue = deque(maxlen=50)
        self.acc_queue = deque(maxlen=50)

        # Early-stopping state: stop once the validation loss has not
        # improved by at least `min_delta` for `patience` consecutive epochs.
        self.best_val_loss = float('inf')
        self.patience_counter = 0
        self.patience = 20      # generous, to avoid stopping too early
        self.min_delta = 0.001  # minimum improvement counted as progress

        self._init_model()
        self._init_data()
        self._init_train_tools()

    def _init_model(self):
        """Build the segmentation network, loss, optimizer and scheduler."""
        self.module_runner = ModuleRunner(self.configer)
        self.model_manager = ModelManager(self.configer)
        self.seg_net = self.model_manager.semantic_segmentor()
        self.seg_net = self.module_runner.load_net(self.seg_net)

        self.loss_manager = LossManager(self.configer)
        self.pixel_loss = self.loss_manager.get_seg_loss()

        self.optim_scheduler = OptimScheduler(self.configer)
        self.optimizer, self.scheduler = self.optim_scheduler.init_optimizer(
            self._get_parameters()
        )

    def _init_data(self):
        """Create the training and validation data loaders."""
        self.data_loader = DataLoader(self.configer)
        self.train_loader = self.data_loader.get_trainloader()
        self.val_loader = self.data_loader.get_valloader(dataset='val')

    def _init_train_tools(self):
        """Create auxiliary helpers (data preparation, evaluator, visualizer)."""
        self.data_helper = DataHelper(self.configer, self)
        self.evaluator = get_evaluator(self.configer, self)
        self.seg_visualizer = SegVisualizer(self.configer)

    def _get_parameters(self):
        """Return optimizer parameter groups according to the config.

        With group_method == 'decay', biases and normalization parameters
        are excluded from weight decay; otherwise a single flat group is used.
        """
        if self.configer.get('optim', 'group_method') == 'decay':
            return self.group_weight(self.seg_net)
        return self._get_parameters_simple()

    def _get_parameters_simple(self):
        """Single parameter group at the configured base learning rate."""
        return [{'params': self.seg_net.parameters(), 'lr': self.configer.get('lr', 'base_lr')}]

    def group_weight(self, module):
        """Split parameters into decay / no-decay groups.

        Conv/Linear weights receive weight decay; all biases and the affine
        parameters of BatchNorm/GroupNorm layers do not.
        """
        group_decay = []
        group_no_decay = []
        for m in module.modules():
            if isinstance(m, nn.Linear):
                group_decay.append(m.weight)
                if m.bias is not None:
                    group_no_decay.append(m.bias)
            elif isinstance(m, (nn.Conv2d, nn.Conv3d)):
                group_decay.append(m.weight)
                if m.bias is not None:
                    group_no_decay.append(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm3d, nn.GroupNorm)):
                if m.weight is not None:
                    group_no_decay.append(m.weight)
                if m.bias is not None:
                    group_no_decay.append(m.bias)

        # Every parameter must land in exactly one group; this trips if the
        # model contains layer types not covered above.
        assert len(list(module.parameters())) == len(group_decay) + len(group_no_decay)
        groups = [dict(params=group_decay), dict(params=group_no_decay, weight_decay=.0)]
        return groups

    def _calculate_accuracy(self, outputs, targets):
        """Pixel accuracy of `outputs` against `targets`, honoring ignore_index.

        Returns a float in [0, 1]; falls back to 1/num_classes when no valid
        pixels exist and 0.0 when the targets have an unexpected rank.
        """
        with torch.no_grad():
            pred = outputs['seg'] if isinstance(outputs, dict) else outputs

            # Some data pipelines wrap targets in a single-element list/tuple.
            if isinstance(targets, (list, tuple)):
                targets = targets[0]

            # Normalize targets to (N, H, W); anything else is unexpected.
            if len(targets.shape) == 4:
                targets = targets.squeeze(1)
            elif len(targets.shape) != 3:
                return 0.0

            # Upsample logits to label resolution when they disagree.
            if pred.shape[2:] != targets.shape[1:]:
                pred = F.interpolate(pred, size=targets.shape[1:], mode='bilinear', align_corners=True)

            pred_labels = torch.argmax(pred, dim=1)

            # Resolve the ignore index from the loss config (defaults to -1).
            ignore_index = -1
            if self.configer.exists('loss', 'params') and 'ce_ignore_index' in self.configer.get('loss', 'params'):
                ignore_index = self.configer.get('loss', 'params')['ce_ignore_index']

            # Mask out pixels that the loss also ignores.
            valid_mask = (targets != ignore_index)

            if valid_mask.sum() > 0:
                correct = (pred_labels == targets) & valid_mask
                accuracy = correct.sum().float() / valid_mask.sum().float()
                return accuracy.item()
            # No valid pixels: report chance-level accuracy.
            return 1.0 / pred.shape[1]

    def train(self):
        """Run the full epoch-based training loop with early stopping."""
        self.seg_net.train()
        self.pixel_loss.train()

        max_iters = self.configer.get('solver', 'max_iters')
        display_interval = self.configer.get('solver', 'display_iter')
        test_interval = self.configer.get('solver', 'test_interval')

        train_dataset_size = len(self.train_loader.dataset)
        batch_size = self.configer.get('train', 'batch_size')
        steps_per_epoch = len(self.train_loader)
        # Clamp to at least one epoch: max_iters < steps_per_epoch would
        # otherwise make the floor division zero and silently train nothing.
        num_epochs = max(1, max_iters // steps_per_epoch)

        print(f"🚀 开始Epoch-based训练")
        print(f"📊 最大迭代数: {max_iters}")
        print(f"📈 显示间隔: {display_interval}")
        print(f"🔍 验证间隔: {test_interval}")
        print(f"📚 训练集大小: {train_dataset_size}")
        print(f"📦 批次大小: {batch_size}")
        print(f"🔄 每epoch步数: {steps_per_epoch}")
        print(f"📅 总epoch数: {num_epochs}")
        print("=" * 60)

        start_time = time.time()

        # Mixed precision only when CUDA is available; fp32 otherwise.
        scaler = torch.cuda.amp.GradScaler() if torch.cuda.is_available() else None

        # Convert the iteration-based save interval to whole epochs, clamped
        # to 1 so save_iters < steps_per_epoch cannot cause a modulo-by-zero.
        save_every_epochs = max(
            1, self.configer.get('checkpoints', 'save_iters') // steps_per_epoch
        )

        for epoch in range(num_epochs):
            epoch_start = time.time()

            print(f"\n🔄 开始 Epoch {epoch+1}/{num_epochs}")

            # One pass over the training set.
            train_loss, train_acc = self._train_epoch(epoch, num_epochs, scaler, display_interval)

            val_loss = self._validate()

            # Epoch-level LR schedule; iteration-level stepping happens
            # inside _train_epoch.
            if self.configer.get('lr', 'metric') == 'epochs':
                self.scheduler.step()

            epoch_time = time.time() - epoch_start

            print(f"\n📊 Epoch {epoch+1}/{num_epochs} 完成:")
            print(f"  训练 - Loss: {train_loss:.4f}, Acc: {train_acc:.2%}")
            print(f"  验证 - Loss: {val_loss:.4f}")
            print(f"  时间: {epoch_time:.1f}s, 学习率: {self.optimizer.param_groups[0]['lr']:.6f}")

            # Early-stopping bookkeeping: only a drop larger than min_delta
            # counts as an improvement.
            improvement = self.best_val_loss - val_loss
            if improvement > self.min_delta:
                self.best_val_loss = val_loss
                self.patience_counter = 0
                print(f"  ✅ 新的最佳验证损失: {val_loss:.4f} (改善: {improvement:.4f})")
            else:
                self.patience_counter += 1
                print(f"  📉 验证损失未显著改善，patience: {self.patience_counter}/{self.patience}")

            # Never stop during the first 10 warm-up epochs.
            if epoch >= 10 and self.patience_counter >= self.patience:
                print(f"\n⏹️ 早停触发！训练了{epoch+1}个epoch")
                break

            if (epoch + 1) % save_every_epochs == 0:
                # (epoch + 1) full epochs have completed by this point.
                self._save_checkpoint((epoch + 1) * steps_per_epoch)

        # Always persist the final state so an early stop between periodic
        # checkpoints does not lose the trained weights.
        self._save_checkpoint((epoch + 1) * steps_per_epoch)

        total_time = time.time() - start_time

        print(f"\n🎉 训练完成!")
        print(f"⏱️ 总训练时间: {total_time/60:.1f}分钟")
        print(f"📊 最佳验证损失: {self.best_val_loss:.4f}")

    def _train_epoch(self, epoch, num_epochs, scaler, display_interval):
        """Train for one epoch; returns (mean loss, mean pixel accuracy)."""
        self.seg_net.train()
        self.pixel_loss.train()

        total_loss = 0
        total_accuracy = 0
        num_batches = 0

        pbar = tqdm(self.train_loader, desc=f'训练 Epoch {epoch+1}/{num_epochs}', unit="batch")

        for batch_idx, data_dict in enumerate(pbar):
            sequences, batch_size = self.data_helper.prepare_data(data_dict)
            inputs, targets = sequences[0], sequences[1]

            # The data helper may wrap tensors in single-element containers;
            # unwrap both (matches the handling in _calculate_accuracy).
            if isinstance(inputs, list):
                inputs = inputs[0]
            if isinstance(targets, (list, tuple)):
                targets = targets[0]

            # Forward pass, under autocast when mixed precision is active.
            if scaler is not None:
                with torch.cuda.amp.autocast():
                    outputs = self.seg_net(inputs)
            else:
                outputs = self.seg_net(inputs)

            model_outputs = outputs['seg'] if isinstance(outputs, dict) else outputs

            # Upsample logits to label resolution when they disagree.
            if model_outputs.shape[2:] != targets.shape[1:]:
                model_outputs = F.interpolate(model_outputs, size=targets.shape[1:], mode='bilinear', align_corners=True)

            backward_loss = display_loss = self.pixel_loss(model_outputs, targets)

            # Average the loss across workers for distributed training.
            if self.configer.get('network', 'loss_balance'):
                backward_loss = backward_loss / get_world_size()

            # Skip the whole update on a NaN/Inf loss instead of corrupting
            # the weights with invalid gradients.
            if torch.isnan(backward_loss) or torch.isinf(backward_loss):
                print(f"\n⚠️  检测到异常损失值: {backward_loss.item()}, 跳过此次更新")
                self.optimizer.zero_grad()
                continue

            # Backward + optimizer step with gradient clipping in both paths.
            if scaler is not None:
                scaler.scale(backward_loss).backward()
                scaler.unscale_(self.optimizer)  # clip on unscaled gradients
                torch.nn.utils.clip_grad_norm_(self.seg_net.parameters(), max_norm=1.0)
                scaler.step(self.optimizer)
                scaler.update()
            else:
                backward_loss.backward()
                torch.nn.utils.clip_grad_norm_(self.seg_net.parameters(), max_norm=1.0)
                self.optimizer.step()

            self.optimizer.zero_grad()

            accuracy = self._calculate_accuracy(outputs, targets)

            total_loss += display_loss.item()
            total_accuracy += accuracy
            num_batches += 1

            # Refresh the progress-bar postfix every `display_interval` batches.
            if batch_idx % display_interval == 0:
                current_lr = self.optimizer.param_groups[0]['lr']
                pbar.set_postfix({
                    'Loss': f'{display_loss.item():.4f}',
                    'Acc': f'{accuracy:.2%}',
                    'LR': f'{current_lr:.6f}'
                })

            # Iteration-level LR schedule.
            if self.configer.get('lr', 'metric') == 'iters':
                self.scheduler.step()

        avg_loss = total_loss / num_batches if num_batches > 0 else 0.0
        avg_accuracy = total_accuracy / num_batches if num_batches > 0 else 0.0

        return avg_loss, avg_accuracy

    def _validate(self):
        """Compute the mean validation loss over at most 50 batches."""
        self.seg_net.eval()
        self.pixel_loss.eval()

        val_losses = []
        max_val_batches = 50  # cap validation cost per epoch

        with torch.no_grad():
            for i, data_dict in enumerate(self.val_loader):
                if i >= max_val_batches:
                    break
                try:
                    sequences, _ = self.data_helper.prepare_data(data_dict)
                    inputs, targets = sequences[0], sequences[1]

                    # Unwrap single-element containers (see _train_epoch).
                    if isinstance(inputs, list):
                        inputs = inputs[0]
                    if isinstance(targets, (list, tuple)):
                        targets = targets[0]

                    outputs = self.seg_net(inputs)
                    model_outputs = outputs['seg'] if isinstance(outputs, dict) else outputs

                    # Upsample logits to label resolution when they disagree.
                    if model_outputs.shape[2:] != targets.shape[1:]:
                        model_outputs = F.interpolate(model_outputs, size=targets.shape[1:], mode='bilinear', align_corners=True)

                    loss = self.pixel_loss(model_outputs, targets)
                    val_losses.append(loss.item())

                except Exception as e:
                    # Best-effort validation: report the failing batch, keep going.
                    print(f"验证批次 {i} 出错: {e}")
                    continue

        # Restore training mode for the next epoch.
        self.seg_net.train()
        self.pixel_loss.train()

        avg_loss = np.mean(val_losses) if val_losses else 0.0
        print(f"📊 验证损失: {avg_loss:.4f}")

        return avg_loss

    def _save_checkpoint(self, iteration):
        """Save model/optimizer/scheduler state for `iteration` to disk."""
        checkpoint_dir = self.configer.get('checkpoints', 'checkpoints_dir')
        os.makedirs(checkpoint_dir, exist_ok=True)

        checkpoint_name = self.configer.get('checkpoints', 'checkpoints_name')
        checkpoint_path = os.path.join(checkpoint_dir, f"{checkpoint_name}_iter_{iteration}.pth")

        checkpoint = {
            'iteration': iteration,
            'model_state_dict': self.seg_net.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'scheduler_state_dict': self.scheduler.state_dict(),
            'config': self.configer.to_dict()
        }

        torch.save(checkpoint, checkpoint_path)
        Log.info(f"检查点已保存: {checkpoint_path}")
