#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
增强版训练器
支持进度条、详细监控和训练参数展示
"""

import time
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm
import colorsys
from collections import deque

from lib.utils.tools.average_meter import AverageMeter
from lib.datasets.data_loader import DataLoader
from lib.loss.loss_manager import LossManager
from lib.models.model_manager import ModelManager
from lib.utils.tools.logger import Logger as Log
from lib.utils.tools.enhanced_logger import enhanced_logger, Colors
from lib.vis.seg_visualizer import SegVisualizer
from segmentor.tools.module_runner import ModuleRunner
from segmentor.tools.optim_scheduler import OptimScheduler
from segmentor.tools.data_helper import DataHelper
from segmentor.tools.evaluator import get_evaluator
from lib.utils.distributed import get_world_size, get_rank, is_distributed


class EnhancedTrainer(object):
    """
    Enhanced trainer with progress bars and detailed monitoring.

    Tracks per-iteration timing/loss/accuracy statistics, supports early
    stopping on validation loss/mIoU, and saves best and periodic
    checkpoints during training.
    """

    def __init__(self, configer):
        """Set up meters, statistics buffers, model, data and training tools.

        Args:
            configer: project configuration object (accessed via
                .get/.exists/.update/.to_dict).
        """
        self.configer = configer
        # Running meters for timing and loss.
        self.batch_time = AverageMeter()
        self.foward_time = AverageMeter()  # NOTE(review): 'foward' typo kept — renaming could break external users
        self.backward_time = AverageMeter()
        self.loss_time = AverageMeter()
        self.data_time = AverageMeter()
        self.loss_meter = AverageMeter()

        # Per-iteration training statistics, appended to during train().
        self.training_stats = {
            'iterations': [],
            'losses': [],
            'learning_rates': [],
            'forward_times': [],
            'backward_times': [],
            'data_times': [],
            'accuracies': [],
            'grad_norms': [],
            'val_losses': [],
            'val_accuracies': [],
            'val_mious': []
        }

        # Early-stopping state.
        self.best_val_loss = float('inf')
        self.best_val_miou = 0.0
        self.patience_counter = 0
        self.patience = 10  # validations without improvement before stopping
        self.min_delta = 0.001  # minimum change that counts as improvement

        # Bounded queues used for moving-average displays.
        self.loss_queue = deque(maxlen=50)
        self.acc_queue = deque(maxlen=50)
        self.lr_queue = deque(maxlen=20)

        self._init_model()
        self._init_data()
        self._init_train_tools()

    def _init_model(self):
        """Build the segmentation network, loss function, optimizer and scheduler."""
        self.module_runner = ModuleRunner(self.configer)
        self.model_manager = ModelManager(self.configer)
        self.seg_net = self.model_manager.semantic_segmentor()
        # load_net places the network on the configured device / loads weights.
        self.seg_net = self.module_runner.load_net(self.seg_net)

        self.loss_manager = LossManager(self.configer)
        self.pixel_loss = self.loss_manager.get_seg_loss()

        self.optim_scheduler = OptimScheduler(self.configer)
        self.optimizer, self.scheduler = self.optim_scheduler.init_optimizer(
            self._get_parameters()
        )

    def _init_data(self):
        """Create the training and validation data loaders."""
        self.data_loader = DataLoader(self.configer)
        self.train_loader = self.data_loader.get_trainloader()
        self.val_loader = self.data_loader.get_valloader(dataset='val')

    def _init_train_tools(self):
        """Create training helpers: data helper, evaluator and visualizer."""
        self.data_helper = DataHelper(self.configer, self)
        self.evaluator = get_evaluator(self.configer, self)
        self.seg_visualizer = SegVisualizer(self.configer)

    def _get_parameters(self):
        """获取模型参数"""
        if self.configer.get('optim', 'group_method') == 'decay':
            params_group = self.group_weight(self.seg_net)
        else:
            params_group = self._get_parameters_simple()
        return params_group

    def _get_parameters_simple(self):
        """简单参数获取"""
        return [{'params': self.seg_net.parameters(), 'lr': self.configer.get('lr', 'base_lr')}]

    def group_weight(self, module):
        """参数分组"""
        group_decay = []
        group_no_decay = []
        for m in module.modules():
            if isinstance(m, nn.Linear):
                group_decay.append(m.weight)
                if m.bias is not None:
                    group_no_decay.append(m.bias)
            elif isinstance(m, (nn.Conv2d, nn.Conv3d)):
                group_decay.append(m.weight)
                if m.bias is not None:
                    group_no_decay.append(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm3d, nn.GroupNorm)):
                if m.weight is not None:
                    group_no_decay.append(m.weight)
                if m.bias is not None:
                    group_no_decay.append(m.bias)

        assert len(list(module.parameters())) == len(group_decay) + len(group_no_decay)
        groups = [dict(params=group_decay), dict(params=group_no_decay, weight_decay=.0)]
        return groups

    def _calculate_accuracy(self, outputs, targets):
        """计算像素准确率"""
        with torch.no_grad():
            if isinstance(outputs, dict):
                pred = outputs['seg']
            else:
                pred = outputs
            
            # 确保targets是正确的格式
            if isinstance(targets, (list, tuple)):
                targets = targets[0]  # 取第一个target
            
            # 调整targets的维度以匹配pred
            if len(targets.shape) == 4:  # [B, C, H, W]
                targets = targets.squeeze(1)  # 移除通道维度
            elif len(targets.shape) == 3:  # [B, H, W]
                pass  # 已经是正确格式
            else:
                return 0.0
            
            # 调整pred的尺寸以匹配targets
            if pred.shape[2:] != targets.shape[1:]:
                pred = F.interpolate(pred, size=targets.shape[1:], mode='bilinear', align_corners=True)
            
            # 获取预测类别
            pred_labels = torch.argmax(pred, dim=1)
            
            # 计算准确率（忽略ignore_index）
            ignore_index = -1
            if self.configer.exists('loss', 'params') and 'ce_ignore_index' in self.configer.get('loss', 'params'):
                ignore_index = self.configer.get('loss', 'params')['ce_ignore_index']
            
            # 创建有效像素掩码
            valid_mask = (targets != ignore_index)
            
            if valid_mask.sum() > 0:
                correct = (pred_labels == targets) & valid_mask
                accuracy = correct.sum().float() / valid_mask.sum().float()
                return accuracy.item()
            else:
                # 如果没有有效像素，返回随机准确率（1/num_classes）
                num_classes = pred.shape[1]
                return 1.0 / num_classes

    def _calculate_grad_norm(self):
        """计算梯度范数"""
        total_norm = 0.0
        param_count = 0
        for param in self.seg_net.parameters():
            if param.grad is not None:
                param_norm = param.grad.data.norm(2)
                total_norm += param_norm.item() ** 2
                param_count += 1
        if param_count > 0:
            total_norm = total_norm ** (1. / 2)
        return total_norm
    
    def _calculate_miou(self, confusion_matrix):
        """计算mIoU (mean Intersection over Union)"""
        # 计算每个类别的IoU
        ious = []
        for i in range(confusion_matrix.shape[0]):
            # 真正例 + 假正例 + 假负例
            intersection = confusion_matrix[i, i]
            union = confusion_matrix[i, :].sum() + confusion_matrix[:, i].sum() - intersection
            
            if union > 0:
                iou = intersection / union
                ious.append(iou)
            else:
                ious.append(0.0)
        
        # 计算平均IoU
        return np.mean(ious) if ious else 0.0
    
    def _check_early_stopping(self, val_loss, val_miou):
        """Update early-stopping state; return True when training should stop.

        A validation-loss improvement resets the patience counter and saves
        the best model (best mIoU is intentionally not updated in that branch,
        matching the original flow). Without a loss improvement the counter
        increments, and an mIoU improvement forgives one missed step.
        """
        if val_loss < self.best_val_loss - self.min_delta:
            # Loss improved: record it, reset patience, persist the model.
            self.best_val_loss = val_loss
            self.patience_counter = 0
            self._save_best_model(val_loss, val_miou)
            return False

        self.patience_counter += 1

        if val_miou > self.best_val_miou + self.min_delta:
            self.best_val_miou = val_miou
            # mIoU progress partially offsets the stalled loss.
            if self.patience_counter > 0:
                self.patience_counter = max(0, self.patience_counter - 1)

        return self.patience_counter >= self.patience
    
    def _save_best_model(self, val_loss, val_miou):
        """Persist the current network as 'best_model.pth' in the checkpoint dir.

        Args:
            val_loss: validation loss that triggered this save.
            val_miou: validation mIoU at the same step.
        """
        checkpoint_dir = self.configer.get('checkpoints', 'checkpoints_dir')
        os.makedirs(checkpoint_dir, exist_ok=True)
        
        best_model_path = os.path.join(checkpoint_dir, 'best_model.pth')
        
        # Bundle model/optimizer/scheduler state plus metrics and running
        # statistics so the run can be analyzed or resumed later.
        checkpoint = {
            'model_state_dict': self.seg_net.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'scheduler_state_dict': self.scheduler.state_dict() if self.scheduler else None,
            'val_loss': val_loss,
            'val_miou': val_miou,
            'best_val_loss': self.best_val_loss,
            'best_val_miou': self.best_val_miou,
            'training_stats': self.training_stats
        }
        
        torch.save(checkpoint, best_model_path)
        print(f"💾 保存最佳模型: Val Loss={val_loss:.4f}, Val mIoU={val_miou:.4f}")

    def _get_color_for_value(self, value, min_val=0.0, max_val=1.0, color_scheme='hsv'):
        """Map a scalar metric to a hex color string.

        'hsv': low values map to red (hue 0) and high values to green
        (hue 120) — suited to metrics where higher is better (accuracy).
        'loss': the inverse mapping — low values green, high values red.
        NaN/Inf always yields '#FF0000'; unknown schemes yield '#FFFFFF'.

        Args:
            value: metric value to colorize.
            min_val: lower bound of the normalization range.
            max_val: upper bound of the normalization range.
            color_scheme: 'hsv', 'loss', or anything else for plain white.

        Returns:
            A '#rrggbb' hex color string.
        """
        if np.isnan(value) or np.isinf(value):
            return '#FF0000'  # red flags an abnormal value

        # Normalize into [0, 1]; guard the degenerate range
        # (min_val == max_val previously raised ZeroDivisionError).
        span = max_val - min_val
        if span <= 0:
            normalized = 0.0
        else:
            normalized = max(0.0, min(1.0, (value - min_val) / span))

        if color_scheme == 'hsv':
            # Hue sweeps from 0 degrees (red, poor) to 120 degrees (green, good).
            hue = normalized * 120
        elif color_scheme == 'loss':
            # Inverted: low loss is green, high loss is red.
            hue = (1.0 - normalized) * 120
        else:
            return '#FFFFFF'  # default: white

        rgb = colorsys.hsv_to_rgb(hue / 360, 1.0, 1.0)
        return f"#{int(rgb[0]*255):02x}{int(rgb[1]*255):02x}{int(rgb[2]*255):02x}"

    def train(self):
        """Run the main training loop.

        Iterates over the train loader up to solver.max_iters, stepping the
        LR scheduler each iteration, logging metrics every display_iter
        iterations, validating every test_interval iterations (with early
        stopping on validation loss/mIoU) and saving periodic checkpoints.
        """
        self.seg_net.train()
        self.pixel_loss.train()
        
        max_iters = self.configer.get('solver', 'max_iters')
        display_interval = self.configer.get('solver', 'display_iter')
        test_interval = self.configer.get('solver', 'test_interval')
        
        # Cap an overly large test_interval to a more reasonable value.
        if test_interval > max_iters // 2:
            test_interval = max(1, max_iters // 2)  # validate at mid-run at the latest
        
        # Start the enhanced logging system.
        enhanced_logger.start_training(max_iters, 1)
        
        start_time = time.time()  # NOTE(review): currently unused
        # Mixed-precision setup: GradScaler is only usable on CUDA.
        # NOTE(review): torch.cuda.amp.GradScaler/autocast are deprecated in
        # newer torch in favor of torch.amp — consider migrating when the
        # minimum supported torch version allows.
        if torch.cuda.is_available():
            scaler = torch.cuda.amp.GradScaler()
        elif torch.backends.mps.is_available():
            # MPS does not support mixed precision; no scaler.
            scaler = None
        else:
            # CPU mode: no scaler.
            scaler = None
        
        if hasattr(self.train_loader.sampler, 'set_epoch'):
            self.train_loader.sampler.set_epoch(self.configer.get('epoch'))

        for i, data_dict in enumerate(self.train_loader):
            iteration = self.configer.get('iters')
            
            # Learning-rate scheduling (per-iteration or per-epoch).
            if self.configer.get('lr', 'metric') == 'iters':
                self.scheduler.step(iteration)
            else:
                self.scheduler.step(self.configer.get('epoch'))

            # Timer start; 'data_time' below actually spans data prep AND
            # the forward/backward work — TODO confirm intended semantics.
            data_time = time.time()
            
            # Forward pass.
            forward_start = time.time()
            sequences, batch_size = self.data_helper.prepare_data(data_dict)
            inputs, targets = sequences[0], sequences[1]
            
            # Normalize inputs to a tensor (prepare_data may return a list).
            if isinstance(inputs, list) and len(inputs) == 1:
                inputs = inputs[0]
            elif isinstance(inputs, list):
                # Multi-element list: take the first entry.
                inputs = inputs[0]
            
            # Use autocast only on CUDA devices.
            if torch.cuda.is_available():
                with torch.cuda.amp.autocast():
                    outputs = self.seg_net(inputs)
            else:
                # MPS and CPU run in full precision.
                outputs = self.seg_net(inputs)
                
            # Normalize the model output format.
            if isinstance(outputs, dict):
                # ProtoSeg-style models expose the 'seg' head.
                model_outputs = outputs['seg']
            else:
                model_outputs = outputs
                
            # Resize logits to the label resolution when they differ.
            if model_outputs.shape[2:] != targets.shape[1:]:
                model_outputs = F.interpolate(model_outputs, size=targets.shape[1:], mode='bilinear', align_corners=True)
            
            backward_loss = display_loss = self.pixel_loss(model_outputs, targets)
            
            forward_time = time.time() - forward_start
            
            # Backward pass.
            backward_start = time.time()
            if self.configer.get('network', 'loss_balance'):
                backward_loss = backward_loss / get_world_size()
            
            # Skip the parameter update on NaN/Inf loss.
            if torch.isnan(backward_loss) or torch.isinf(backward_loss):
                print(f"\n⚠️  检测到异常损失值: {backward_loss.item()}, 跳过此次更新")
                self.optimizer.zero_grad()
                continue
            
            # Backward/step: scaled (CUDA mixed precision) or plain.
            if scaler is not None:
                # CUDA path with gradient scaling; unscale before clipping.
                scaler.scale(backward_loss).backward()
                scaler.unscale_(self.optimizer)
                torch.nn.utils.clip_grad_norm_(self.seg_net.parameters(), max_norm=1.0)
                grad_norm = self._calculate_grad_norm()
                scaler.step(self.optimizer)
                scaler.update()
            else:
                # MPS/CPU path with a plain backward.
                backward_loss.backward()
                torch.nn.utils.clip_grad_norm_(self.seg_net.parameters(), max_norm=1.0)
                grad_norm = self._calculate_grad_norm()
                self.optimizer.step()
            
            self.optimizer.zero_grad()
            
            backward_time = time.time() - backward_start
            data_time = time.time() - data_time
            
            # Pixel accuracy of this batch.
            accuracy = self._calculate_accuracy(outputs, targets)
            
            # (Debug output intentionally removed to keep the progress bar clean.)
            
            # Record per-iteration statistics.
            self.training_stats['iterations'].append(iteration)
            self.training_stats['losses'].append(display_loss.item())
            self.training_stats['learning_rates'].append(self.optimizer.param_groups[0]['lr'])
            self.training_stats['forward_times'].append(forward_time)
            self.training_stats['backward_times'].append(backward_time)
            self.training_stats['data_times'].append(data_time)
            self.training_stats['accuracies'].append(accuracy)
            self.training_stats['grad_norms'].append(grad_norm)
            
            # Update the moving-average queues.
            self.loss_queue.append(display_loss.item())
            self.acc_queue.append(accuracy)
            self.lr_queue.append(self.optimizer.param_groups[0]['lr'])
            
            # Periodic metric logging.
            if iteration % display_interval == 0:
                # Moving averages over the last 10 iterations.
                avg_forward = np.mean(self.training_stats['forward_times'][-10:])
                avg_backward = np.mean(self.training_stats['backward_times'][-10:])
                avg_data = np.mean(self.training_stats['data_times'][-10:])
                
                # Moving-average loss and accuracy (NaN losses excluded).
                valid_losses = [l for l in self.loss_queue if not np.isnan(l)]
                avg_loss = np.mean(valid_losses) if valid_losses else 0.0
                avg_acc = np.mean(list(self.acc_queue)) if self.acc_queue else 0.0
                current_lr = self.optimizer.param_groups[0]['lr']
                current_grad_norm = grad_norm
                
                # Assemble and log the training metrics.
                train_metrics = {
                    'loss': avg_loss,
                    'accuracy': avg_acc,
                    'learning_rate': current_lr,
                    'grad_norm': current_grad_norm,
                    'forward_time': avg_forward,
                    'backward_time': avg_backward,
                    'data_time': avg_data
                }
                
                enhanced_logger.log_metrics(train_metrics, "🏋️  ")
            
            # Advance the enhanced logger's progress display.
            enhanced_logger.update_iteration(iteration)
            
            # Advance the global iteration counter.
            self.configer.update(['iters'], iteration + 1)
            
            # Periodic validation.
            if iteration % test_interval == 0 and iteration > 0:
                enhanced_logger.log_info("开始验证...", Colors.BRIGHT_MAGENTA)
                val_metrics = self._validate_comprehensive()
                val_loss = val_metrics['loss']
                val_acc = val_metrics['accuracy']
                val_miou = val_metrics['miou']
                
                # Record validation statistics.
                self.training_stats['val_losses'].append(val_loss)
                self.training_stats['val_accuracies'].append(val_acc)
                self.training_stats['val_mious'].append(val_miou)
                
                # Log the validation results.
                enhanced_logger.log_validation(val_metrics)
                
                # Early-stopping check (also saves the best model on improvement).
                should_stop = self._check_early_stopping(val_loss, val_miou)
                
                if should_stop:
                    enhanced_logger.log_warning(f"早停触发！验证损失在{self.patience}次验证中未改善")
                    break
            
            # Periodic checkpointing.
            if iteration % self.configer.get('checkpoints', 'save_iters') == 0 and iteration > 0:
                self._save_checkpoint(iteration)
            
            if iteration >= max_iters:
                break
        
        # Close out the logging session.
        enhanced_logger.finish_training()
        
        # Print the final training summary.
        self._print_training_summary()

    def _validate_comprehensive(self):
        """Validate on (a capped number of batches of) the validation set.

        Returns:
            dict with 'loss' (mean batch loss), 'accuracy' (mean pixel
            accuracy) and 'miou' (from an accumulated confusion matrix).
        """
        self.seg_net.eval()
        self.pixel_loss.eval()
        
        val_losses = []
        val_accuracies = []
        val_mious = []  # NOTE(review): never filled — mIoU comes from the confusion matrix
        
        # Confusion matrix accumulated over batches for the mIoU computation.
        num_classes = self.configer.get('data', 'num_classes')
        confusion_matrix = np.zeros((num_classes, num_classes), dtype=np.int64)
        
        # Cap validation work to keep it fast (marked "for testing only" by
        # the original author). NOTE(review): despite the name this counts
        # loader *batches*, not samples — confirm intent.
        max_val_samples = 50
        
        with torch.no_grad():
            for i, data_dict in enumerate(self.val_loader):
                # Stop once the batch cap is reached.
                if i >= max_val_samples:
                    break
                try:
                    sequences, batch_size = self.data_helper.prepare_data(data_dict)
                    inputs, targets = sequences[0], sequences[1]
                    
                    # Normalize inputs to a tensor (may arrive as a list).
                    if isinstance(inputs, list) and len(inputs) == 1:
                        inputs = inputs[0]
                    elif isinstance(inputs, list):
                        inputs = inputs[0]
                    outputs = self.seg_net(inputs)
                    
                    # Normalize the model output format.
                    if isinstance(outputs, dict):
                        model_outputs = outputs['seg']
                    else:
                        model_outputs = outputs
                    
                    # Resize logits to the label resolution when they differ.
                    if model_outputs.shape[2:] != targets.shape[1:]:
                        model_outputs = F.interpolate(model_outputs, size=targets.shape[1:], mode='bilinear', align_corners=True)
                    
                    # Batch loss.
                    loss = self.pixel_loss(model_outputs, targets)
                    val_losses.append(loss.item())
                    
                    # Pixel accuracy.
                    pred_labels = torch.argmax(model_outputs, dim=1)
                    accuracy = self._calculate_accuracy(outputs, targets)
                    val_accuracies.append(accuracy)
                    
                    # Flatten predictions/targets for the confusion matrix.
                    pred_np = pred_labels.cpu().numpy().flatten()
                    target_np = targets.cpu().numpy().flatten()
                    
                    # Keep only valid labels. NOTE(review): ignore label is
                    # hardcoded to -1 here while _calculate_accuracy reads it
                    # from the loss config — confirm they should agree.
                    valid_mask = target_np != -1
                    if valid_mask.sum() > 0:
                        pred_valid = pred_np[valid_mask]
                        target_valid = target_np[valid_mask]
                        
                        # Accumulate into the confusion matrix (rows = truth).
                        for p, t in zip(pred_valid, target_valid):
                            if 0 <= p < num_classes and 0 <= t < num_classes:
                                confusion_matrix[t, p] += 1
                except Exception as e:
                    print(f"验证批次 {i} 出错: {e}")
                    continue
        
        # Restore training mode.
        self.seg_net.train()
        self.pixel_loss.train()
        
        # Mean metrics over the evaluated batches.
        avg_loss = np.mean(val_losses) if val_losses else 0.0
        avg_accuracy = np.mean(val_accuracies) if val_accuracies else 0.0
        
        # mIoU from the accumulated confusion matrix.
        miou = self._calculate_miou(confusion_matrix)
        
        return {
            'loss': avg_loss,
            'accuracy': avg_accuracy,
            'miou': miou
        }
    
    def _validate(self):
        """Backward-compatible simple validation: return only the mean loss."""
        return self._validate_comprehensive()['loss']

    def _save_checkpoint(self, iteration):
        """Save a periodic training checkpoint tagged with the iteration number.

        Args:
            iteration: current global iteration, used in the file name.
        """
        checkpoint_dir = self.configer.get('checkpoints', 'checkpoints_dir')
        os.makedirs(checkpoint_dir, exist_ok=True)
        
        checkpoint_name = self.configer.get('checkpoints', 'checkpoints_name')
        checkpoint_path = os.path.join(checkpoint_dir, f"{checkpoint_name}_iter_{iteration}.pth")
        
        # NOTE(review): unlike _save_best_model, scheduler.state_dict() is
        # called unguarded here — confirm the scheduler can never be None.
        checkpoint = {
            'iteration': iteration,
            'model_state_dict': self.seg_net.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'scheduler_state_dict': self.scheduler.state_dict(),
            'config': self.configer.to_dict()
        }
        
        torch.save(checkpoint, checkpoint_path)
        Log.info(f"检查点已保存: {checkpoint_path}")

    def _print_training_summary(self):
        """Print a final summary report of the entire training run.

        Covers timing, loss/accuracy/LR/gradient statistics, validation
        results, early-stopping state and a simple first-vs-last-10
        trend analysis. Prints nothing beyond headers if no iterations ran.
        """
        print("\n" + "=" * 100)
        print("🎯 增强版训练总结报告")
        print("=" * 100)
        
        if self.training_stats['iterations']:
            # Basic counters.
            print("📊 基本统计:")
            print(f"   总迭代数: {len(self.training_stats['iterations'])}")
            print(f"   总训练时间: {sum(self.training_stats['forward_times']) + sum(self.training_stats['backward_times']) + sum(self.training_stats['data_times']):.2f}秒")
            
            # Timing statistics.
            print("\n⚡ 性能统计:")
            print(f"   平均前向时间: {np.mean(self.training_stats['forward_times']):.3f}秒")
            print(f"   平均反向时间: {np.mean(self.training_stats['backward_times']):.3f}秒")
            print(f"   平均数据加载时间: {np.mean(self.training_stats['data_times']):.3f}秒")
            print(f"   平均总时间/迭代: {np.mean([f+b+d for f,b,d in zip(self.training_stats['forward_times'], self.training_stats['backward_times'], self.training_stats['data_times'])]):.3f}秒")
            
            # Loss statistics (NaN entries excluded).
            valid_losses = [l for l in self.training_stats['losses'] if not np.isnan(l)]
            if valid_losses:
                print("\n📉 损失统计:")
                print(f"   初始损失: {valid_losses[0]:.4f}")
                print(f"   最终损失: {valid_losses[-1]:.4f}")
                print(f"   平均损失: {np.mean(valid_losses):.4f}")
                print(f"   最小损失: {np.min(valid_losses):.4f}")
                print(f"   最大损失: {np.max(valid_losses):.4f}")
                print(f"   损失标准差: {np.std(valid_losses):.4f}")
            
            # Accuracy statistics (NaN entries excluded).
            valid_accs = [a for a in self.training_stats['accuracies'] if not np.isnan(a)]
            if valid_accs:
                print("\n🎯 准确率统计:")
                print(f"   初始准确率: {valid_accs[0]:.4f}")
                print(f"   最终准确率: {valid_accs[-1]:.4f}")
                print(f"   平均准确率: {np.mean(valid_accs):.4f}")
                print(f"   最高准确率: {np.max(valid_accs):.4f}")
                print(f"   最低准确率: {np.min(valid_accs):.4f}")
                print(f"   准确率标准差: {np.std(valid_accs):.4f}")
            
            # Learning-rate statistics.
            print("\n📚 学习率统计:")
            print(f"   初始学习率: {self.training_stats['learning_rates'][0]:.6f}")
            print(f"   最终学习率: {self.training_stats['learning_rates'][-1]:.6f}")
            print(f"   平均学习率: {np.mean(self.training_stats['learning_rates']):.6f}")
            
            # Gradient-norm statistics (NaN and zero entries excluded).
            valid_grads = [g for g in self.training_stats['grad_norms'] if not np.isnan(g) and g > 0]
            if valid_grads:
                print("\n📊 梯度统计:")
                print(f"   平均梯度范数: {np.mean(valid_grads):.4f}")
                print(f"   最大梯度范数: {np.max(valid_grads):.4f}")
                print(f"   最小梯度范数: {np.min(valid_grads):.4f}")
                print(f"   梯度标准差: {np.std(valid_grads):.4f}")
            
            # Validation statistics.
            if self.training_stats['val_losses']:
                print("\n🔍 验证统计:")
                print(f"   最佳验证损失: {self.best_val_loss:.4f}")
                print(f"   最佳验证mIoU: {self.best_val_miou:.4f}")
                print(f"   平均验证损失: {np.mean(self.training_stats['val_losses']):.4f}")
                print(f"   平均验证准确率: {np.mean(self.training_stats['val_accuracies']):.4f}")
                print(f"   平均验证mIoU: {np.mean(self.training_stats['val_mious']):.4f}")
                print(f"   验证次数: {len(self.training_stats['val_losses'])}")
            
            # Early-stopping state at the end of training.
            print("\n🛑 早停机制:")
            print(f"   耐心值: {self.patience}")
            print(f"   当前耐心计数: {self.patience_counter}")
            print(f"   最小改善阈值: {self.min_delta}")
            
            # Trend analysis: compare the first and last 10 entries.
            print("\n📈 训练趋势分析:")
            if len(valid_losses) > 10:
                recent_losses = valid_losses[-10:]
                early_losses = valid_losses[:10]
                loss_trend = np.mean(recent_losses) - np.mean(early_losses)
                if loss_trend < -0.01:
                    print("   ✅ 损失呈下降趋势，训练正常")
                elif loss_trend > 0.01:
                    print("   ⚠️  损失呈上升趋势，可能需要调整学习率")
                else:
                    print("   ➡️  损失相对稳定")
            
            if len(valid_accs) > 10:
                recent_accs = valid_accs[-10:]
                early_accs = valid_accs[:10]
                acc_trend = np.mean(recent_accs) - np.mean(early_accs)
                if acc_trend > 0.01:
                    print("   ✅ 准确率呈上升趋势，模型在学习")
                elif acc_trend < -0.01:
                    print("   ⚠️  准确率呈下降趋势，可能存在过拟合")
                else:
                    print("   ➡️  准确率相对稳定")
        
        print("=" * 100)