"""
Original work Copyright 2019 Davy Neven,  KU Leuven (licensed under CC BY-NC 4.0 (https://github.com/davyneven/SpatialEmbeddings/blob/master/license.txt))
Modified work Copyright 2021 Manan Lalit, Max Planck Institute of Molecular Cell Biology and Genetics  (MIT License https://github.com/juglab/EmbedSeg/blob/main/LICENSE)
Modified work Copyright 2022 Katharina Löffler, Karlsruhe Institute of Technology (MIT License)
Modifications: remove 3d parts; change lr scheduler; change visualization; train/ eval on image pairs
"""
import shutil
import os
import time
import psutil

import torch


from embedtrack.criterions.loss import EmbedTrackLoss
from embedtrack.utils.utils import get_indices_pandas
from embedtrack.models.net import TrackERFNet
from embedtrack.datasets.dataset import get_dataset
from embedtrack.utils.logging import (
    AverageMeter,
    Logger,
)
from embedtrack.utils.clustering import Cluster
from embedtrack.utils.visualize import VisualizeTraining
from embedtrack.utils.collate_fn import safe_collate_fn
from tqdm import tqdm


# CUDA optimization settings
torch.backends.cudnn.benchmark = True  # let cuDNN auto-tune the fastest kernels
torch.backends.cudnn.deterministic = False  # allow non-deterministic algorithms for speed
torch.backends.cudnn.enabled = True  # enable cuDNN

# CUDA memory allocation policy
if torch.cuda.is_available():
    torch.cuda.empty_cache()  # clear the GPU memory cache
    # cap this process's share of GPU memory
    torch.cuda.set_per_process_memory_fraction(0.9)  # use up to 90% of GPU memory
import matplotlib
import matplotlib.pyplot as plt

# NOTE(review): the backend is selected AFTER pyplot is imported; matplotlib
# recommends calling use() before importing pyplot — confirm "Agg" takes effect.
matplotlib.use("Agg")
import numpy as np


# import GPU configuration helpers
import sys
sys.path.append('.')
from gpu_config import setup_gpu_parallel, get_optimal_workers, setup_multi_gpu_model, monitor_gpu_memory

# set up the GPU parallel environment (import-time side effect)
setup_gpu_parallel()

# select the CUDA device if available, else fall back to CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


class AverageMeter(object):
    """Keeps a weighted running average of a scalar metric.

    NOTE(review): this definition shadows the ``AverageMeter`` imported from
    ``embedtrack.utils.logging`` at the top of the file — confirm intended.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Discard all accumulated statistics."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Record ``val`` with weight ``n`` and refresh the running mean."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count


def get_memory_usage():
    """Return this process's current memory footprint.

    Returns:
        dict: ``rss`` and ``vms`` in megabytes plus ``percent`` of system
        memory used, as reported by psutil for the current PID.
    """
    proc = psutil.Process(os.getpid())
    info = proc.memory_info()
    mb = 1024 * 1024
    return {
        'rss': info.rss / mb,      # resident set size, MB
        'vms': info.vms / mb,      # virtual memory size, MB
        'percent': proc.memory_percent(),
    }


def print_training_stats(epoch, batch_idx, total_batches, loss, loss_parts, lr, memory_info, elapsed_time):
    """Print a formatted block of per-batch training statistics.

    Args:
        epoch: epoch index (may be None when unknown at the call site).
        batch_idx: current batch index.
        total_batches: number of batches in the epoch.
        loss: total loss value for this batch.
        loss_parts: mapping of loss-component name -> value.
        lr: current learning rate.
        memory_info: dict with at least ``rss`` (MB) and ``percent`` keys.
        elapsed_time: seconds spent on this batch.
    """
    sep = '=' * 80
    lines = [
        "",
        sep,
        f"Epoch {epoch} | Batch {batch_idx}/{total_batches} | Time: {elapsed_time:.2f}s",
        f"Learning Rate: {lr:.6f}",
        f"Total Loss: {loss:.6f}",
        "Loss Components:",
    ]
    lines.extend(f"  - {name}: {value:.6f}" for name, value in loss_parts.items())
    lines.append(f"Memory Usage: {memory_info['rss']:.1f}MB (RSS) | {memory_info['percent']:.1f}%")
    lines.append(sep)
    lines.append("")
    print("\n".join(lines))


# https://medium.com/huggingface/training-larger-batches-practical-tips-on-1-gpu-multi-gpu-distributed-setups-ec88c3e51255
def train(virtual_batch_multiplier, n_sigma, args):
    # define meters
    loss_meter = AverageMeter()
    loss_parts_meter = {
        key: AverageMeter() for key in ["instance", "variance", "seed", "track"]
    }

    # put model into training mode
    model.train()
    
    # 添加调试信息
    print(f"\n{'='*60}")
    print(f"开始训练 - 虚拟批次倍数: {virtual_batch_multiplier}")
    print(f"设备: {device}")
    print(f"总批次数: {len(train_dataset_it)}")
    print(f"{'='*60}\n")

    for param_group in optimizer.param_groups:
        print("learning rate: {}".format(param_group["lr"]))
    
    start_time = time.time()
    optimizer.zero_grad()  # Reset gradients tensors
    
    for i, sample in enumerate(tqdm(train_dataset_it, desc="Training")):
        batch_start_time = time.time()
        
        # 获取内存使用情况
        memory_info = get_memory_usage()
        
        curr_frames = sample["image_curr"]  # curr frames
        prev_frames = sample["image_prev"]  # prev frames
        offset = sample["flow"].squeeze(1).to(device)  # 1YX
        
        # 添加数据形状调试信息
        if i == 0:
            print(f"\n数据形状调试:")
            print(f"curr_frames: {curr_frames.shape}")
            print(f"prev_frames: {prev_frames.shape}")
            print(f"offset: {offset.shape}")
            print(f"instance_curr: {sample['instance_curr'].shape}")
            print(f"label_curr: {sample['label_curr'].shape}")
            print(f"center_image_curr: {sample['center_image_curr'].shape}")
        
        seg_curr, seg_prev, tracking = model(
            curr_frames, prev_frames
        )  # B 5 Y X, B 5 Y X, B 2 Y X
        
        # 添加模型输出形状调试信息
        if i == 0:
            print(f"模型输出形状:")
            print(f"seg_curr: {seg_curr.shape}")
            print(f"seg_prev: {seg_prev.shape}")
            print(f"tracking: {tracking.shape}")
        
        output = (torch.cat([seg_curr, seg_prev], dim=0), tracking)
        instances = torch.cat(
            [sample["instance_curr"], sample["instance_prev"]], dim=0
        ).squeeze(1)
        class_labels = torch.cat(
            [sample["label_curr"], sample["label_prev"]], dim=0
        ).squeeze(1)
        center_images = torch.cat(
            [sample["center_image_curr"], sample["center_image_prev"]], dim=0
        ).squeeze(1)
        
        loss, loss_parts = loss_fcn(
            output, instances, class_labels, center_images, offset, **args
        )
        loss = loss.mean()
        loss = loss / virtual_batch_multiplier  # Normalize our loss (if averaged)
        
        # 梯度检查
        if torch.isnan(loss) or torch.isinf(loss):
            print(f"警告: 第{i}批次损失为NaN或Inf: {loss.item()}")
            continue
            
        loss.backward()  # Backward pass

        if (i + 1) % virtual_batch_multiplier == 0:  # Wait for several backward steps
            # 梯度裁剪
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
            
            optimizer.step()  # Now we can do an optimizer step
            scheduler.step()
            optimizer.zero_grad()  # Reset gradients tensors

        loss_meter.update(loss.item() * virtual_batch_multiplier)
        for key in loss_parts_meter.keys():
            loss_parts_meter[key].update(loss_parts[key].mean().item() * virtual_batch_multiplier)
        
        # 每50个批次打印详细统计信息
        if i % 50 == 0:
            elapsed_time = time.time() - batch_start_time
            current_lr = optimizer.param_groups[0]['lr']
            print_training_stats(
                epoch=None,  # 这里暂时设为None，在外部调用时传入
                batch_idx=i,
                total_batches=len(train_dataset_it),
                loss=loss.item() * virtual_batch_multiplier,
                loss_parts={k: v.mean().item() * virtual_batch_multiplier for k, v in loss_parts.items()},
                lr=current_lr,
                memory_info=memory_info,
                elapsed_time=elapsed_time
            )

    total_time = time.time() - start_time
    print(f"\n训练完成 - 总时间: {total_time:.2f}s")
    print(f"平均每批次时间: {total_time/len(train_dataset_it):.3f}s")
    
    loss_part_avg = {key: meter.avg for key, meter in loss_parts_meter.items()}
    return loss_meter.avg, loss_part_avg


def train_vanilla(
    display,
    display_it,
    grid_x,
    grid_y,
    pixel_x,
    pixel_y,
    n_sigma,
    args,
):  # this is without virtual batches!
    """Run one training epoch without gradient accumulation.

    Performs one optimizer step per mini-batch. Relies on the module-level
    globals created by ``begin_training``: ``model``, ``optimizer``,
    ``loss_fcn``, ``train_dataset_it``, ``visualize_training`` and ``device``.

    Args:
        display: unused here; visualization is gated only on ``display_it``.
        display_it: visualize every ``display_it``-th batch when not None.
        grid_x, grid_y, pixel_x, pixel_y: unused here; kept for signature
            parity with the caller.
        n_sigma: unused here; kept for signature parity with the caller.
        args: loss-weight keyword arguments forwarded to ``loss_fcn``.

    Returns:
        tuple: (average total loss, dict of average per-component losses).
    """

    # define meters
    loss_meter = AverageMeter()
    loss_parts_meter = {
        key: AverageMeter() for key in ["instance", "variance", "seed", "track"]
    }

    # put model into training mode
    model.train()

    # debug banner
    print(f"\n{'='*60}")
    print(f"开始训练 (vanilla模式)")
    print(f"设备: {device}")
    print(f"总批次数: {len(train_dataset_it)}")
    print(f"{'='*60}\n")

    for param_group in optimizer.param_groups:
        print("learning rate: {}".format(param_group["lr"]))

    start_time = time.time()
    for i, sample in enumerate(tqdm(train_dataset_it, desc="Training")):
        batch_start_time = time.time()

        # snapshot memory usage for the periodic stats printout
        memory_info = get_memory_usage()

        # move all tensors in the sample to the training device
        for k in sample:
            if isinstance(sample[k], torch.Tensor):
                sample[k] = sample[k].to(device)

        curr_frames = sample["image_curr"]  # curr frames
        prev_frames = sample["image_prev"]  # prev frames
        offset = sample["flow"].squeeze(1)  # 1YX

        # shape debugging on the first batch only
        if i == 0:
            print(f"\n数据形状调试:")
            print(f"curr_frames: {curr_frames.shape}")
            print(f"prev_frames: {prev_frames.shape}")
            print(f"offset: {offset.shape}")
            print(f"instance_curr: {sample['instance_curr'].shape}")
            print(f"label_curr: {sample['label_curr'].shape}")
            print(f"center_image_curr: {sample['center_image_curr'].shape}")

        seg_curr, seg_prev, tracking = model(
            curr_frames, prev_frames
        )  # B 5 Y X, B 5 Y X, B 2 Y X

        # model output shapes on the first batch only
        if i == 0:
            print(f"模型输出形状:")
            print(f"seg_curr: {seg_curr.shape}")
            print(f"seg_prev: {seg_prev.shape}")
            print(f"tracking: {tracking.shape}")

        # stack curr/prev predictions and targets along the batch dimension
        output = (torch.cat([seg_curr, seg_prev], dim=0), tracking)
        instances = torch.cat(
            [sample["instance_curr"], sample["instance_prev"]], dim=0
        ).squeeze(1)
        class_labels = torch.cat(
            [sample["label_curr"], sample["label_prev"]], dim=0
        ).squeeze(1)
        center_images = torch.cat(
            [sample["center_image_curr"], sample["center_image_prev"]], dim=0
        ).squeeze(1)

        loss, loss_parts = loss_fcn(
            output, instances, class_labels, center_images, offset, **args
        )
        loss = loss.mean()

        # guard against a corrupted batch poisoning the weights
        if torch.isnan(loss) or torch.isinf(loss):
            print(f"警告: 第{i}批次损失为NaN或Inf: {loss.item()}")
            continue

        optimizer.zero_grad()
        loss.backward()

        # clip gradients to stabilize training
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)

        optimizer.step()
        # scheduler.step() intentionally not called here; the caller steps the
        # scheduler once per epoch instead.
        # NOTE(review): OneCycleLR's total_steps is sized per optimizer step
        # in begin_training, so once-per-epoch stepping advances the schedule
        # far slower than budgeted — confirm the intended LR schedule.

        loss_meter.update(loss.item())
        for key in loss_parts_meter.keys():
            loss_parts_meter[key].update(loss_parts[key].mean().item())

        # print detailed stats every 50 batches
        if i % 50 == 0:
            elapsed_time = time.time() - batch_start_time
            current_lr = optimizer.param_groups[0]['lr']
            print_training_stats(
                epoch=None,  # epoch index is not known inside this helper
                batch_idx=i,
                total_batches=len(train_dataset_it),
                loss=loss.item(),
                loss_parts={k: v.mean().item() for k, v in loss_parts.items()},
                lr=current_lr,
                memory_info=memory_info,
                elapsed_time=elapsed_time
            )

        # visualize data
        if display_it is not None:
            if i % display_it == 0 and i != 0:
                prediction = (output[0][0], output[1][0])
                ground_truth = (
                    instances[0].to(device),
                    center_images[0].to(device),
                    offset[0].to(device),
                )
                # instances holds curr then prev frames; the midpoint is the
                # first prev-frame instance map
                prev_instance = instances[len(instances) // 2].to(device)
                image_pair = (sample["image_curr"][0], sample["image_prev"][0])
                visualize_training(prediction, ground_truth, prev_instance, image_pair)

    total_time = time.time() - start_time
    print(f"\n训练完成 - 总时间: {total_time:.2f}s")
    print(f"平均每批次时间: {total_time/len(train_dataset_it):.3f}s")

    loss_part_avg = {key: meter.avg for key, meter in loss_parts_meter.items()}
    return loss_meter.avg, loss_part_avg


def val(virtual_batch_multiplier, n_sigma, calc_iou, args):
    """Run one validation epoch (virtual-batch counterpart of ``train``).

    No gradients are computed. The per-batch loss is divided by
    ``virtual_batch_multiplier`` before being averaged and the division is
    undone in the return value, so the reported loss is comparable to
    ``train``'s.

    Relies on the module-level globals created by ``begin_training``:
    ``model``, ``loss_fcn``, ``val_dataset_it`` and ``device``.

    Args:
        virtual_batch_multiplier: normalization factor matching the train side.
        n_sigma: unused here; kept for signature parity with the caller.
        calc_iou: when True, ``loss_fcn`` also accumulates IoU into the meter.
        args: loss-weight keyword arguments forwarded to ``loss_fcn``.

    Returns:
        tuple: (average total loss, average IoU, dict of average
        per-component losses).
        NOTE(review): unlike the total loss, the per-component averages are
        not multiplied back by ``virtual_batch_multiplier`` — confirm intended.
    """
    # define meters
    loss_meter, iou_meter = AverageMeter(), AverageMeter()
    loss_parts_meter = {
        key: AverageMeter() for key in ["instance", "variance", "seed", "track"]
    }
    # put model into eval mode
    model.eval()

    # debug banner
    print(f"\n{'='*60}")
    print(f"开始验证 - 虚拟批次倍数: {virtual_batch_multiplier}")
    print(f"计算IoU: {calc_iou}")
    print(f"设备: {device}")
    print(f"总批次数: {len(val_dataset_it)}")
    print(f"{'='*60}\n")

    start_time = time.time()
    with torch.no_grad():
        for i, sample in enumerate(tqdm(val_dataset_it, desc="Validation")):
            batch_start_time = time.time()

            # snapshot memory usage for the periodic printout
            memory_info = get_memory_usage()

            # move all tensors in the sample to the device
            for k in sample:
                if isinstance(sample[k], torch.Tensor):
                    sample[k] = sample[k].to(device)

            curr_frames = sample["image_curr"]  # curr frames
            prev_frames = sample["image_prev"]  # prev frames
            offset = sample["flow"].squeeze(1)  # 1YX
            seg_curr, seg_prev, tracking = model(
                curr_frames, prev_frames
            )  # B 5 Y X, B 5 Y X, B 2 Y X
            # stack curr/prev predictions and targets along the batch dim
            output = (torch.cat([seg_curr, seg_prev], dim=0), tracking)
            instances = torch.cat(
                [sample["instance_curr"], sample["instance_prev"]], dim=0
            ).squeeze(1)
            class_labels = torch.cat(
                [sample["label_curr"], sample["label_prev"]], dim=0
            ).squeeze(1)
            center_images = torch.cat(
                [sample["center_image_curr"], sample["center_image_prev"]], dim=0
            ).squeeze(1)
            loss, loss_parts = loss_fcn(
                output,
                instances,
                class_labels,
                center_images,
                offset,
                **args,
                iou=calc_iou,
                iou_meter=iou_meter,
            )
            loss = loss.mean()
            # normalize like the train side; undone in the return statement
            loss = loss / virtual_batch_multiplier
            loss_meter.update(loss.item())
            for key in loss_parts_meter.keys():
                loss_parts_meter[key].update(loss_parts[key].mean().item())

            # print detailed stats every 50 batches
            if i % 50 == 0:
                elapsed_time = time.time() - batch_start_time
                print(f"\n验证批次 {i}/{len(val_dataset_it)} | 时间: {elapsed_time:.2f}s")
                print(f"当前损失: {loss.item():.6f} | 内存使用: {memory_info['rss']:.1f}MB")

    total_time = time.time() - start_time
    print(f"\n验证完成 - 总时间: {total_time:.2f}s")
    print(f"平均每批次时间: {total_time/len(val_dataset_it):.3f}s")

    loss_part_avg = {key: meter.avg for key, meter in loss_parts_meter.items()}
    return loss_meter.avg * virtual_batch_multiplier, iou_meter.avg, loss_part_avg


def val_vanilla(
    display,
    display_it,
    grid_x,
    grid_y,
    pixel_x,
    pixel_y,
    n_sigma,
    calc_iou,
    args,
):
    """Run one validation epoch (counterpart of ``train_vanilla``).

    No gradients are computed and no loss normalization is applied.
    Relies on the module-level globals created by ``begin_training``:
    ``model``, ``loss_fcn``, ``val_dataset_it`` and ``device``.

    Args:
        display, display_it: unused here; kept for signature parity with the
            caller (no visualization on the validation path).
        grid_x, grid_y, pixel_x, pixel_y: unused here; kept for signature
            parity with the caller.
        n_sigma: unused here; kept for signature parity with the caller.
        calc_iou: when True, ``loss_fcn`` also accumulates IoU into the meter.
        args: loss-weight keyword arguments forwarded to ``loss_fcn``.

    Returns:
        tuple: (average total loss, average IoU, dict of average
        per-component losses).
    """
    # define meters
    loss_meter, iou_meter = AverageMeter(), AverageMeter()
    loss_parts_meter = {
        key: AverageMeter() for key in ["instance", "variance", "seed", "track"]
    }

    # put model into eval mode
    model.eval()

    # debug banner
    print(f"\n{'='*60}")
    print(f"开始验证 (vanilla模式)")
    print(f"计算IoU: {calc_iou}")
    print(f"设备: {device}")
    print(f"总批次数: {len(val_dataset_it)}")
    print(f"{'='*60}\n")

    start_time = time.time()
    with torch.no_grad():
        for i, sample in enumerate(tqdm(val_dataset_it, desc="Validation")):
            batch_start_time = time.time()

            # snapshot memory usage for the periodic printout
            memory_info = get_memory_usage()

            # move all tensors in the sample to the device
            for k in sample:
                if isinstance(sample[k], torch.Tensor):
                    sample[k] = sample[k].to(device)

            curr_frames = sample["image_curr"]  # curr frames
            prev_frames = sample["image_prev"]  # prev frames
            offset = sample["flow"].squeeze(1)  # 1YX
            seg_curr, seg_prev, tracking = model(
                curr_frames, prev_frames
            )  # B 5 Y X, B 5 Y X, B 2 Y X
            # stack curr/prev predictions and targets along the batch dim
            output = (torch.cat([seg_curr, seg_prev], dim=0), tracking)
            instances = torch.cat(
                [sample["instance_curr"], sample["instance_prev"]], dim=0
            ).squeeze(1)
            class_labels = torch.cat(
                [sample["label_curr"], sample["label_prev"]], dim=0
            ).squeeze(1)
            center_images = torch.cat(
                [sample["center_image_curr"], sample["center_image_prev"]], dim=0
            ).squeeze(1)
            loss, loss_parts = loss_fcn(
                output,
                instances,
                class_labels,
                center_images,
                offset,
                **args,
                iou=calc_iou,
                iou_meter=iou_meter,
            )

            loss = loss.mean()

            loss_meter.update(loss.item())
            for key in loss_parts_meter.keys():
                loss_parts_meter[key].update(loss_parts[key].mean().item())

            # print detailed stats every 50 batches
            if i % 50 == 0:
                elapsed_time = time.time() - batch_start_time
                print(f"\n验证批次 {i}/{len(val_dataset_it)} | 时间: {elapsed_time:.2f}s")
                print(f"当前损失: {loss.item():.6f} | 内存使用: {memory_info['rss']:.1f}MB")

    total_time = time.time() - start_time
    print(f"\n验证完成 - 总时间: {total_time:.2f}s")
    print(f"平均每批次时间: {total_time/len(val_dataset_it):.3f}s")

    loss_part_avg = {key: meter.avg for key, meter in loss_parts_meter.items()}
    return loss_meter.avg, iou_meter.avg, loss_part_avg


def save_checkpoint(state, is_best, epoch, save_dir, name="checkpoint.pth"):
    """Persist a training checkpoint to ``save_dir``.

    Always overwrites ``name``; additionally keeps an epoch-stamped copy
    every 10th epoch, and copies the checkpoint to ``best_iou_model.pth``
    when ``is_best`` is True.
    """
    print("=> saving checkpoint")
    checkpoint_path = os.path.join(save_dir, name)
    torch.save(state, checkpoint_path)
    if epoch % 10 == 0:
        # periodic snapshot that later epochs will not overwrite
        torch.save(state, os.path.join(save_dir, "{}_{}".format(epoch, name)))
    if is_best:
        shutil.copyfile(checkpoint_path, os.path.join(save_dir, "best_iou_model.pth"))


def begin_training(
    train_dataset_dict,
    val_dataset_dict,
    model_dict,
    loss_dict,
    configs,
):
    """Build loaders, model, loss, optimizer and LR scheduler, then run the
    full train/validate loop for ``configs["n_epochs"]`` epochs, logging
    metrics and checkpointing the best-IoU model.

    Populates the module-level globals (``train_dataset_it``,
    ``val_dataset_it``, ``model``, ``loss_fcn``, ``optimizer``, ``cluster``,
    ``visualize_training``, ``scheduler``) consumed by
    ``train``/``train_vanilla``/``val``/``val_vanilla``.

    Args:
        train_dataset_dict (dict): train dataset name/kwargs, batch size,
            worker count and ``virtual_batch_multiplier``.
        val_dataset_dict (dict): same structure for validation.
        model_dict (dict): keyword arguments for ``TrackERFNet``.
        loss_dict (dict): ``lossOpts`` (loss configuration, incl. ``n_sigma``)
            and ``lossW`` (loss weights).
        configs (dict): run configuration: ``save``, ``save_dir``,
            ``display``, ``display_it``, ``cuda``, ``grid_x/y``,
            ``pixel_x/y``, ``train_lr``, ``n_epochs``, ``resume_path``.
    """

    if configs["save"]:
        if not os.path.exists(configs["save_dir"]):
            os.makedirs(configs["save_dir"])

    if configs["display"]:
        plt.ion()
    else:
        plt.ioff()
        plt.switch_backend("agg")

    # define global variables
    global train_dataset_it, val_dataset_it, model, loss_fcn, optimizer, cluster, visualize_training, scheduler

    # train dataloader
    train_dataset = get_dataset(
        train_dataset_dict["name"], train_dataset_dict["kwargs"]
    )
    train_dataset_it = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=train_dataset_dict["batch_size"],
        shuffle=True,
        drop_last=True,
        num_workers=train_dataset_dict["workers"],
        pin_memory=True if configs["cuda"] else False,
        collate_fn=safe_collate_fn,  # custom collate function
    )

    # val dataloader
    val_dataset = get_dataset(val_dataset_dict["name"], val_dataset_dict["kwargs"])
    val_dataset_it = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=val_dataset_dict["batch_size"],
        shuffle=True,
        drop_last=True,
        num_workers=val_dataset_dict["workers"],
        # FIX: use the same config flag as the train loader; the previous
        # `device == "cuda"` compared a torch.device against a str
        pin_memory=True if configs["cuda"] else False,
        collate_fn=safe_collate_fn,  # custom collate function
    )

    # set model
    model = TrackERFNet(**model_dict["kwargs"])
    model.init_output(loss_dict["lossOpts"]["n_sigma"])

    # wrap for multi-GPU execution via the gpu_config helper, then move to device
    model = setup_multi_gpu_model(model)
    model = model.to(device)

    cluster = Cluster(
        configs["grid_y"],
        configs["grid_x"],
        configs["pixel_y"],
        configs["pixel_x"],
    )

    cluster = cluster.to(device)
    loss = EmbedTrackLoss(
        grid_y=configs["grid_y"],
        grid_x=configs["grid_x"],
        pixel_y=configs["pixel_y"],
        pixel_x=configs["pixel_x"],
        cluster=cluster,
        **loss_dict["lossOpts"],
    )

    loss_fcn = torch.nn.DataParallel(loss).to(device)

    # set optimizer
    optimizer = torch.optim.Adam(
        model.parameters(), lr=configs["train_lr"], weight_decay=1e-4
    )

    # total_steps budgets one scheduler step per optimizer step
    # (epochs * batches / virtual_batch_multiplier)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(
        optimizer,
        max_lr=configs["train_lr"],
        total_steps=configs["n_epochs"]
        * len(train_dataset_it)
        // train_dataset_dict["virtual_batch_multiplier"],
    )

    visualize_training = VisualizeTraining(
        cluster,
        configs["save_dir"],
        grid_x=configs["grid_x"],
        grid_y=configs["grid_y"],
        pixel_x=configs["pixel_x"],
        pixel_y=configs["pixel_y"],
        n_sigma=loss_dict["lossOpts"]["n_sigma"],
    )

    # Logger
    logger = Logger(("train", "val", "iou"), "loss")

    # resume
    start_epoch = 0
    best_iou = 0
    if configs["resume_path"] is not None and os.path.exists(configs["resume_path"]):
        print("Resuming model from {}".format(configs["resume_path"]))
        state = torch.load(configs["resume_path"])
        start_epoch = state["epoch"] + 1
        best_iou = state["best_iou"]
        model.load_state_dict(state["model_state_dict"], strict=True)
        optimizer.load_state_dict(state["optim_state_dict"])
        # NOTE(review): the scheduler state is not saved/restored, so the LR
        # schedule restarts from step 0 on resume — confirm intended.
        logger.data = state["logger_data"]

    for epoch in range(start_epoch, configs["n_epochs"]):
        epoch_start_time = time.time()
        print(f"\n{'='*80}")
        print(f"开始第 {epoch} 轮训练")
        print(f"当前时间: {time.strftime('%Y-%m-%d %H:%M:%S')}")
        print(f"设备: {device}")
        print(f"学习率: {optimizer.param_groups[0]['lr']:.6f}")
        print(f"{'='*80}\n")

        if train_dataset_dict["virtual_batch_multiplier"] > 1:
            train_loss, train_loss_parts = train(
                virtual_batch_multiplier=train_dataset_dict["virtual_batch_multiplier"],
                n_sigma=loss_dict["lossOpts"]["n_sigma"],
                args=loss_dict["lossW"],
            )
        else:
            train_loss, train_loss_parts = train_vanilla(
                display=configs["display"],
                display_it=configs["display_it"],
                n_sigma=loss_dict["lossOpts"]["n_sigma"],
                grid_x=configs["grid_x"],
                grid_y=configs["grid_y"],
                pixel_x=configs["pixel_x"],
                pixel_y=configs["pixel_y"],
                args=loss_dict["lossW"],
            )

        train_time = time.time() - epoch_start_time
        print(f"\n训练阶段完成 - 耗时: {train_time:.2f}s")

        # compute IoU from the first epoch on, to monitor training progress
        calc_iou = True
        if val_dataset_dict["virtual_batch_multiplier"] > 1:
            val_loss, val_iou, val_loss_parts = val(
                virtual_batch_multiplier=val_dataset_dict["virtual_batch_multiplier"],
                calc_iou=calc_iou,
                n_sigma=loss_dict["lossOpts"]["n_sigma"],
                args=loss_dict["lossW"],
            )
        else:
            val_loss, val_iou, val_loss_parts = val_vanilla(
                display=configs["display"],
                display_it=configs["display_it"],
                n_sigma=loss_dict["lossOpts"]["n_sigma"],
                grid_x=configs["grid_x"],
                grid_y=configs["grid_y"],
                pixel_x=configs["pixel_x"],
                pixel_y=configs["pixel_y"],
                calc_iou=calc_iou,
                args=loss_dict["lossW"],
            )

        val_time = time.time() - epoch_start_time - train_time
        epoch_total_time = time.time() - epoch_start_time

        # snapshot memory usage for the epoch summary
        memory_info = get_memory_usage()

        print(f"\n{'='*80}")
        print(f"第 {epoch} 轮训练完成")
        print(f"训练损失: {train_loss:.6f}")
        print(f"验证损失: {val_loss:.6f}")
        print(f"验证IoU: {val_iou:.6f}")
        print(f"训练时间: {train_time:.2f}s")
        print(f"验证时间: {val_time:.2f}s")
        print(f"总时间: {epoch_total_time:.2f}s")
        print(f"内存使用: {memory_info['rss']:.1f}MB (RSS) | {memory_info['percent']:.1f}%")
        print(f"损失分解:")
        print(f"  训练 - Instance: {train_loss_parts['instance']:.6f}, Variance: {train_loss_parts['variance']:.6f}, Seed: {train_loss_parts['seed']:.6f}, Track: {train_loss_parts['track']:.6f}")
        print(f"  验证 - Instance: {val_loss_parts['instance']:.6f}, Variance: {val_loss_parts['variance']:.6f}, Seed: {val_loss_parts['seed']:.6f}, Track: {val_loss_parts['track']:.6f}")
        print(f"{'='*80}\n")

        logger.add("train", train_loss)
        logger.add("val", val_loss)
        logger.add("iou", val_iou)
        for key, value in train_loss_parts.items():
            name = "train_" + key
            if name not in logger.data:
                logger.data[name] = []
            logger.add(name, train_loss_parts[key])

        for key, value in val_loss_parts.items():
            name = "val_" + key
            if name not in logger.data:
                logger.data[name] = []
            logger.add(name, val_loss_parts[key])

        logger.plot(save=configs["save"], save_dir=configs["save_dir"])

        is_best = val_iou > best_iou
        prev_best = best_iou
        best_iou = max(val_iou, best_iou)

        if is_best:
            # FIX: report the previous best, not the value just stored twice
            print(f"🎉 新的最佳IoU: {best_iou:.6f} (之前: {prev_best:.6f})")

        # checkpoint payload (built regardless of saving, as before)
        state = {
            "epoch": epoch,
            "best_iou": best_iou,
            "model_state_dict": model.state_dict(),
            "optim_state_dict": optimizer.state_dict(),
            "logger_data": logger.data,
        }
        if configs["save"]:
            save_checkpoint(state, is_best, epoch, save_dir=configs["save_dir"])

        # every 5 epochs, print a detailed summary
        if epoch % 5 == 0:
            print(f"\n📊 第 {epoch} 轮详细统计:")
            print(f"  学习率变化: {optimizer.param_groups[0]['lr']:.8f}")
            print(f"  最佳IoU: {best_iou:.6f}")
            print(f"  当前轮次IoU: {val_iou:.6f}")
            print(f"  训练损失趋势: {train_loss:.6f}")
            print(f"  验证损失趋势: {val_loss:.6f}")
            print(f"  内存使用趋势: {memory_info['rss']:.1f}MB")
            print(f"  预计剩余时间: {(configs['n_epochs'] - epoch - 1) * epoch_total_time / 3600:.1f}小时")

        # FIX: step the epoch-level scheduler only on the vanilla path. On the
        # virtual-batch path `train()` already steps the scheduler once per
        # optimizer step, and stepping again here would advance OneCycleLR
        # past its total_steps budget (which eventually raises).
        if train_dataset_dict["virtual_batch_multiplier"] <= 1:
            # NOTE(review): one step per epoch is far fewer than the
            # per-optimizer-step total_steps budget above, so the LR barely
            # changes on this path — confirm the intended schedule.
            scheduler.step()

        print(f"🔄 准备进入第 {epoch + 1} 轮训练...")
        print(f"   当前epoch: {epoch}, 总epochs: {configs['n_epochs']}")
        print(f"   循环条件: {epoch + 1} < {configs['n_epochs']} = {epoch + 1 < configs['n_epochs']}")

    print(f"\n🎉 训练完成！总共完成了 {configs['n_epochs']} 轮训练")
    print(f"   最终最佳IoU: {best_iou:.6f}")
    print(f"   模型已保存到: {configs['save_dir']}")
