# Ultralytics YOLO 🚀, AGPL-3.0 license
"""
Train a model on a dataset.

Usage:
    $ yolo mode=train model=yolov8n.pt data=coco128.yaml imgsz=640 epochs=100 batch=16
"""

import math
import os
import subprocess
import time
import warnings
from copy import deepcopy
from datetime import datetime, timedelta
from pathlib import Path

import numpy as np
import torch
from torch import distributed as dist
from torch import nn, optim
import torch.nn.functional as F

from ultralytics.cfg import get_cfg, get_save_dir
from ultralytics.data.utils import check_cls_dataset, check_det_dataset
from ultralytics.nn.tasks import attempt_load_one_weight, attempt_load_weights
from ultralytics.utils import (DEFAULT_CFG, LOGGER, RANK, TQDM, __version__, callbacks, clean_url, colorstr, emojis,
                               yaml_save)
from ultralytics.utils.autobatch import check_train_batch_size
from ultralytics.utils.checks import check_amp, check_file, check_imgsz, check_model_file_from_stem, print_args
from ultralytics.utils.dist import ddp_cleanup, generate_ddp_command
from ultralytics.utils.files import get_latest_run
from ultralytics.utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, init_seeds, one_cycle, select_device,
                                           strip_optimizer)

from ultralytics.utils.tal import TaskAlignedAssigner, make_anchors, dist2bbox, RotatedTaskAlignedAssigner
from ultralytics.utils.loss import RotatedBboxLoss
from ultralytics.utils.ops import xywh2xyxy, xyxy2xywh


class CWDLoss(nn.Module):
    """Channel-wise Distillation (CWD) loss.

    Treats each channel's spatial map as a probability distribution (softmax
    over H*W) and computes the temperature-scaled KL divergence between the
    teacher's and student's distributions.

    Args:
        channels_s (list): Student channel counts per stage (unused; kept for
            interface parity with the other distillation losses).
        channels_t (list): Teacher channel counts per stage (unused).
        tau (float): Softmax temperature. Defaults to 1.0.
    """

    def __init__(self, channels_s, channels_t, tau=1.0):
        super(CWDLoss, self).__init__()
        self.tau = tau

    def forward(self, y_s, y_t):
        """Forward computation.

        Args:
            y_s (list): Student predictions with shape (N, C, H, W) per stage.
            y_t (list): Teacher predictions with shape (N, C, H, W) per stage.

        Returns:
            torch.Tensor: The summed loss value over all stages.
        """
        assert len(y_s) == len(y_t)
        losses = []

        for s, t in zip(y_s, y_t):
            assert s.shape == t.shape
            N, C, H, W = s.shape

            # Normalize each channel's spatial map into a distribution.
            # Uses the module-level F import (the original re-imported
            # torch.nn.functional and rebuilt nn.LogSoftmax every iteration).
            softmax_pred_T = F.softmax(t.view(-1, W * H) / self.tau, dim=1)  # [N*C, H*W]

            # KL(teacher || student), scaled by tau^2 (standard KD scaling).
            cost = torch.sum(
                softmax_pred_T * F.log_softmax(t.view(-1, W * H) / self.tau, dim=1) -
                softmax_pred_T * F.log_softmax(s.view(-1, W * H) / self.tau, dim=1)) * (self.tau ** 2)

            losses.append(cost / (C * N))

        return sum(losses)


class PKDLoss(nn.Module):
    """PKD loss: 1 - Pearson correlation between student and teacher features.

    Args:
        channels_s (list): Student channel counts (unused; kept for interface
            parity with the other distillation losses).
        channels_t (list): Teacher channel counts (unused).
    """

    def __init__(self, channels_s, channels_t):
        super(PKDLoss, self).__init__()

    def forward(self, y_s, y_t):
        """Forward computation.

        Args:
            y_s (list): Student predictions with shape (N, C, H, W) per stage.
            y_t (list): Teacher predictions with shape (N, C, H, W) per stage.

        Returns:
            torch.Tensor: The summed loss value over all stages.
        """
        assert len(y_s) == len(y_t)
        losses = []

        for s, t in zip(y_s, y_t):
            assert s.shape == t.shape
            N, C, H, W = s.shape

            # Pearson correlation coefficient (PCC) per (sample, channel).
            s_flat = s.view(N, C, -1)
            t_flat = t.view(N, C, -1)

            sm = s_flat - torch.mean(s_flat, dim=-1, keepdim=True)
            tm = t_flat - torch.mean(t_flat, dim=-1, keepdim=True)

            pcc_num = torch.sum(sm * tm, dim=-1)
            pcc_den = torch.sqrt(torch.sum(sm ** 2, dim=-1) * torch.sum(tm ** 2, dim=-1))
            # eps guards against NaN when a feature map is spatially constant
            # (zero variance made the original compute 0/0).
            pcc = pcc_num / (pcc_den + 1e-8)

            # PKD loss: perfectly correlated features yield zero loss.
            losses.append(1 - torch.mean(pcc))

        return sum(losses)


class CrossAttentionLoss(nn.Module):
    """Cross-modal attention distillation loss.

    Projects each student stage to the teacher's channel dimension with a 1x1
    conv, normalizes both streams with affine-free BatchNorm, then scores the
    pair with CADLoss scaled by `loss_weight`.
    """

    def __init__(self, channels_s, channels_t, loss_weight=1.0):
        super(CrossAttentionLoss, self).__init__()
        self.loss_weight = loss_weight

        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        # 1x1 convs mapping each student stage onto the teacher channel count.
        self.align_module = nn.ModuleList(
            nn.Conv2d(c_s, c_t, kernel_size=1, stride=1, padding=0).to(device)
            for c_s, c_t in zip(channels_s, channels_t))
        # Affine-free BatchNorm for teacher-sized features.
        self.norm_t = [nn.BatchNorm2d(c_t, affine=False).to(device) for c_t in channels_t]
        # Affine-free BatchNorm for student-sized features (kept for parity;
        # unused in forward because aligned student features are teacher-sized).
        self.norm_s = [nn.BatchNorm2d(c_s, affine=False).to(device) for c_s in channels_s]

        self.cross_attention_loss = CADLoss(channels_s, channels_t)

    def forward(self, y_s, y_t):
        assert len(y_s) == len(y_t)
        aligned_students = []
        normed_teachers = []

        for idx, (feat_s, feat_t) in enumerate(zip(y_s, y_t)):
            feat_s = self.norm_t[idx](self.align_module[idx](feat_s))
            feat_t = self.norm_t[idx](feat_t)
            normed_teachers.append(feat_t)
            aligned_students.append(feat_s)

        return self.loss_weight * self.cross_attention_loss(aligned_students, normed_teachers)


class FeatureLoss(nn.Module):
    """Feature-map distillation loss dispatching to a concrete distiller.

    Aligns student features to teacher channels with 1x1 convs, normalizes
    both streams with affine-free BatchNorm, then applies the selected
    distiller (CWD or PKD).

    Args:
        channels_s (list): Per-stage student channel counts.
        channels_t (list): Per-stage teacher channel counts.
        distiller (str): 'CWD' or 'PKD', case-insensitive. Defaults to 'cwd'.
        loss_weight (float): Scale applied to the final loss. Defaults to 1.0.

    Raises:
        NotImplementedError: If `distiller` names an unsupported method.
    """

    def __init__(self, channels_s, channels_t, distiller='cwd', loss_weight=1.0):
        super(FeatureLoss, self).__init__()
        self.loss_weight = loss_weight
        self.distiller = distiller

        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        # 1x1 convs aligning student feature channels with the teacher's.
        self.align_module = nn.ModuleList([
            nn.Conv2d(student_channel, teacher_channel, kernel_size=1, stride=1, padding=0).to(device)
            for student_channel, teacher_channel in zip(channels_s, channels_t)
        ])
        # Affine-free BatchNorm for teacher-sized features.
        self.norm_t = [
            nn.BatchNorm2d(teacher_channel, affine=False).to(device)
            for teacher_channel in channels_t
        ]
        # Affine-free BatchNorm for student-sized features (kept for parity;
        # unused in forward because aligned student features are teacher-sized).
        self.norm_s = [
            nn.BatchNorm2d(student_channel, affine=False).to(device)
            for student_channel in channels_s
        ]

        # Select the distillation method. The comparison is case-insensitive so
        # the default 'cwd' works (previously only the exact string 'CWD'
        # matched and the default raised NotImplementedError).
        if distiller.upper() == 'CWD':
            self.feature_loss = CWDLoss(channels_s, channels_t)
        elif distiller.upper() == 'PKD':
            self.feature_loss = PKDLoss(channels_s, channels_t)
        else:
            raise NotImplementedError

    def forward(self, y_s, y_t):
        """Compute the weighted distillation loss over paired feature lists."""
        assert len(y_s) == len(y_t)
        teacher_features = []
        student_features = []

        for i, (s, t) in enumerate(zip(y_s, y_t)):

            # Both supported distillers compare features in the teacher's
            # channel space, so align + normalize the student first.
            if self.distiller.upper() in ('CWD', 'PKD'):
                s = self.align_module[i](s)
                s = self.norm_t[i](s)

            t = self.norm_t[i](t)
            teacher_features.append(t)
            student_features.append(s)

        loss = self.feature_loss(student_features, teacher_features)
        return self.loss_weight * loss


class CADLoss(nn.Module):
    """Cross-attention distillation loss.

    Weights student and teacher features by a SimAM-style attention map
    derived from the teacher, then penalizes 1 - Pearson correlation between
    the weighted features.

    Args:
        channels_s (list): Student channel counts (unused; interface parity).
        channels_t (list): Teacher channel counts (unused; interface parity).
        e_lambda (float): SimAM stability term. Defaults to 1e-4.
    """

    def __init__(self, channels_s, channels_t, e_lambda=1e-4):
        super(CADLoss, self).__init__()
        self.activation = nn.Sigmoid()
        self.e_lambda = e_lambda

    def attentionMAP(self, x):
        """Return the SimAM attention map of `x` (same shape as `x`)."""
        b, c, h, w = x.size()
        n = w * h - 1  # spatial population size minus one (SimAM energy term)

        x_minus_mu_square = (x - x.mean(dim=[2, 3], keepdim=True)).pow(2)
        y = x_minus_mu_square / (4 * (x_minus_mu_square.sum(dim=[2, 3], keepdim=True) / n + self.e_lambda)) + 0.5

        return self.activation(y)

    def SimAM(self, x):
        """Reweight `x` by its own SimAM attention map (deduplicates the shared math)."""
        return x * self.attentionMAP(x)

    def forward(self, y_s, y_t):
        # When extra hooks produce duplicated teacher outputs (observed after
        # adding MRL hooks), keep only the first three stages so the pairing
        # with the student stays one-to-one.
        if len(y_t) == 6:
            y_t = y_t[:3]
        assert len(y_s) == len(y_t)
        losses = []

        for s, t in zip(y_s, y_t):
            assert s.shape == t.shape
            # Weight both features by the teacher-derived attention map.
            attention_map = self.attentionMAP(t)
            s = s * attention_map
            t = t * attention_map
            N, C, H, W = s.shape

            # Pearson correlation coefficient (PCC) per (sample, channel).
            s_flat = s.view(N, C, -1)
            t_flat = t.view(N, C, -1)

            sm = s_flat - torch.mean(s_flat, dim=-1, keepdim=True)
            tm = t_flat - torch.mean(t_flat, dim=-1, keepdim=True)

            pcc_num = torch.sum(sm * tm, dim=-1)
            pcc_den = torch.sqrt(torch.sum(sm ** 2, dim=-1) * torch.sum(tm ** 2, dim=-1))
            # eps guards against 0/0 NaN for spatially constant feature maps.
            pcc = pcc_num / (pcc_den + 1e-8)

            # PKD-style loss: perfectly correlated features yield zero loss.
            losses.append(1 - torch.mean(pcc))

        return sum(losses)


class Distillation_loss(nn.Module):
    """Single-modality distillation loss between one student and one teacher.

    Hooks the `cv2` submodules of layers 15/18/21 in both models, captures
    their forward outputs, and scores them with a FeatureLoss distiller.
    """

    def __init__(self, student_model, teacher_model, distiller="CWD"):
        super(Distillation_loss, self).__init__()
        self.distiller = distiller

        layers_t = ["15", "18", "21"]  # teacher layer indices used for distillation
        layers_s = ["15", "18", "21"]  # student layer indices used for distillation
        assert len(layers_t) == len(layers_s)
        channels_s = [64, 128, 256]
        channels_t = [128, 256, 512]

        self.D_loss_fn = FeatureLoss(channels_s=channels_s, channels_t=channels_t, distiller=distiller)

        # Feature-extraction layers (the `cv2` submodules) of each model.
        self.teacher_module_pairs = self._collect_cv2_modules(teacher_model, layers_t)
        self.student_module_pairs = self._collect_cv2_modules(student_model, layers_s)
        self.remove_handle = []

    @staticmethod
    def _collect_cv2_modules(model, layer_ids):
        """Return the `cv2` submodules of `model` whose layer index is in `layer_ids`."""
        picked = []
        for mname, module in model.named_modules():
            parts = mname.split(".")
            if parts[0] == "module":  # strip DDP wrapper prefix
                parts = parts[1:]
            if len(parts) == 3 and parts[1] in layer_ids and "cv2" in mname:
                picked.append(module)
        return picked

    def register_hook(self):
        """Attach forward hooks that record per-layer features on each forward pass."""
        self.teacher_outputs = []
        self.student_outputs = []

        def make_layer_forward_hook(store):
            def forward_hook(m, input, output):
                store.append(output)

            return forward_hook

        for t_mod, s_mod in zip(self.teacher_module_pairs, self.student_module_pairs):
            self.remove_handle.append(t_mod.register_forward_hook(make_layer_forward_hook(self.teacher_outputs)))
            self.remove_handle.append(s_mod.register_forward_hook(make_layer_forward_hook(self.student_outputs)))

    def get_loss(self):
        """Compute the distillation loss from the captured features, then clear the buffers."""
        quant_loss = 0
        quant_loss += self.D_loss_fn(y_t=self.teacher_outputs, y_s=self.student_outputs)
        if self.distiller == 'MGD':
            quant_loss *= 0.3
        self.teacher_outputs.clear()
        self.student_outputs.clear()
        return quant_loss

    def remove_handle_(self):
        """Remove all registered forward hooks."""
        for handle in self.remove_handle:
            handle.remove()


class Multimodal_Distillation_loss(nn.Module):
    """Distillation loss for a multimodal (RGB + IR) student model.

    A two-stream student is distilled by two single-modality teachers: one
    trained on infrared (IR) data and one trained on visible-light (RGB)
    data. Each teacher distills the corresponding backbone branch of the
    student; an optional cross-attention term distills across modalities.
    """

    def __init__(self, student_model, teacher_model_rgb, teacher_model_ir, distiller="PKD",
                 cross_attention=True,
                 normal_distillation=True):
        super(Multimodal_Distillation_loss, self).__init__()
        self.distiller = distiller
        self.cross_attention = cross_attention
        self.normal_distillation = normal_distillation
        # Derive the device from the student's parameters instead of
        # hard-coding 'cuda', so get_loss() also works on CPU-only machines.
        try:
            self.device = next(student_model.parameters()).device
        except StopIteration:  # parameter-less model; fall back
            self.device = 'cuda' if torch.cuda.is_available() else 'cpu'

        # Student layer indices shift by one when the LIF module is inserted
        # (39-layer model); teachers always distill backbone layers 4/6/8.
        if len(student_model.model) == 39:  # LIF variant
            layers_s_rgb = ["12", "17", "22"]
            layers_s_ir = ["13", "18", "23"]
        else:
            layers_s_rgb = ["11", "16", "21"]
            layers_s_ir = ["12", "17", "22"]
        layers_t_rgb = ["4", "6", "8"]
        layers_t_ir = ["4", "6", "8"]

        assert len(layers_t_rgb) == len(layers_s_rgb) and len(layers_t_ir) == len(layers_s_ir)

        # Per-stage channel counts (yolov8m).
        channels_s_rgb = [192, 384, 576]
        channels_s_ir = [192, 384, 576]
        channels_t_rgb = [192, 384, 576]
        channels_t_ir = [192, 384, 576]

        self.D_loss_fn_rgb = FeatureLoss(channels_s=channels_s_rgb, channels_t=channels_t_rgb, distiller=distiller)
        self.D_loss_fn_ir = FeatureLoss(channels_s=channels_s_ir, channels_t=channels_t_ir, distiller=distiller)

        self.Cross_loss_rgb_to_ir = CrossAttentionLoss(channels_s=channels_s_ir, channels_t=channels_t_rgb)
        self.Cross_loss_ir_to_rgb = CrossAttentionLoss(channels_s=channels_s_rgb, channels_t=channels_t_ir)

        # Feature-extraction layers (the `cv2` submodules) of each model.
        self.teacher_module_pairs_rgb = self._find_cv2_modules(teacher_model_rgb, layers_t_rgb)
        self.teacher_module_pairs_ir = self._find_cv2_modules(teacher_model_ir, layers_t_ir)
        self.student_module_pairs_rgb = self._find_cv2_modules(student_model, layers_s_rgb)
        self.student_module_pairs_ir = self._find_cv2_modules(student_model, layers_s_ir)

        self.remove_handle = []

    @staticmethod
    def _find_cv2_modules(model, layer_ids):
        """Return the `cv2` submodules of `model` whose layer index is in `layer_ids`."""
        picked = []
        for mname, module in model.named_modules():
            parts = mname.split(".")
            if parts[0] == "module":  # strip DDP wrapper prefix
                parts.pop(0)
            if len(parts) == 3 and parts[1] in layer_ids and "cv2" in mname:
                picked.append(module)
        return picked

    def register_hook(self):
        """Attach forward hooks that record per-layer features for both modalities."""
        self.teacher_outputs_rgb = []
        self.teacher_outputs_ir = []
        self.student_outputs_rgb = []
        self.student_outputs_ir = []

        def make_layer_forward_hook(store):
            def forward_hook(m, input, output):
                store.append(output)

            return forward_hook

        for tm, sm in zip(self.teacher_module_pairs_rgb, self.student_module_pairs_rgb):
            self.remove_handle.append(tm.register_forward_hook(make_layer_forward_hook(self.teacher_outputs_rgb)))
            self.remove_handle.append(sm.register_forward_hook(make_layer_forward_hook(self.student_outputs_rgb)))

        for tm, sm in zip(self.teacher_module_pairs_ir, self.student_module_pairs_ir):
            self.remove_handle.append(tm.register_forward_hook(make_layer_forward_hook(self.teacher_outputs_ir)))
            self.remove_handle.append(sm.register_forward_hook(make_layer_forward_hook(self.student_outputs_ir)))

    def get_loss(self):
        """Compute the distillation losses from the captured features, then clear the buffers.

        Returns:
            torch.Tensor: Shape (2,). Index 0 is the per-modality feature
                distillation loss, index 1 the cross-modal attention loss
                (each zero when the corresponding flag is disabled).
        """
        quant_loss = torch.zeros(2, device=self.device)
        if self.normal_distillation:
            quant_loss[0] += self.D_loss_fn_rgb(y_t=self.teacher_outputs_rgb, y_s=self.student_outputs_rgb)
            quant_loss[0] += self.D_loss_fn_ir(y_t=self.teacher_outputs_ir, y_s=self.student_outputs_ir)
        if self.cross_attention:
            quant_loss[1] += self.Cross_loss_ir_to_rgb(y_t=self.teacher_outputs_ir, y_s=self.student_outputs_rgb)
            quant_loss[1] += self.Cross_loss_rgb_to_ir(y_t=self.teacher_outputs_rgb, y_s=self.student_outputs_ir)

        self.teacher_outputs_rgb.clear()
        self.teacher_outputs_ir.clear()
        self.student_outputs_rgb.clear()
        self.student_outputs_ir.clear()
        return quant_loss

    def remove_handle_(self):
        """Remove all registered forward hooks."""
        for rm in self.remove_handle:
            rm.remove()

    @staticmethod
    def _df_loss(pred_dist, target):
        """Return sum of left and right DFL losses.

        Distribution Focal Loss (DFL) proposed in Generalized Focal Loss
        https://ieeexplore.ieee.org/document/9792391

        NOTE(review): not referenced by this class's other methods — appears
        copied from the detection loss; kept for compatibility.
        """
        tl = target.long()  # target left
        tr = tl + 1  # target right
        wl = tr - target  # weight left
        wr = 1 - wl  # weight right
        return (F.cross_entropy(pred_dist, tl.view(-1), reduction='none').view(tl.shape) * wl +
                F.cross_entropy(pred_dist, tr.view(-1), reduction='none').view(tl.shape) * wr).mean(-1, keepdim=True)


class BaseTrainer:
    """
    BaseTrainer.

    A base class for creating trainers.

    Attributes:
        args (SimpleNamespace): Configuration for the trainer.
        check_resume (method): Method to check if training should be resumed from a saved checkpoint.
        validator (BaseValidator): Validator instance.
        model (nn.Module): Model instance.
        callbacks (defaultdict): Dictionary of callbacks.
        save_dir (Path): Directory to save results.
        wdir (Path): Directory to save weights.
        last (Path): Path to the last checkpoint.
        best (Path): Path to the best checkpoint.
        save_period (int): Save checkpoint every x epochs (disabled if < 1).
        batch_size (int): Batch size for training.
        epochs (int): Number of epochs to train for.
        start_epoch (int): Starting epoch for training.
        device (torch.device): Device to use for training.
        amp (bool): Flag to enable AMP (Automatic Mixed Precision).
        scaler (amp.GradScaler): Gradient scaler for AMP.
        data (str): Path to data.
        trainset (torch.utils.data.Dataset): Training dataset.
        testset (torch.utils.data.Dataset): Testing dataset.
        ema (nn.Module): EMA (Exponential Moving Average) of the model.
        resume (bool): Resume training from a checkpoint.
        lf (callable): Learning-rate lambda passed to the LR scheduler (set in _setup_scheduler).
        scheduler (torch.optim.lr_scheduler._LRScheduler): Learning rate scheduler.
        best_fitness (float): The best fitness value achieved.
        fitness (float): Current fitness value.
        loss (float): Current loss value.
        tloss (float): Total loss value.
        loss_names (list): List of loss names.
        csv (Path): Path to results CSV file.
    """

    def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
        """
        Initializes the BaseTrainer class.

        Args:
            cfg (str, optional): Path to a configuration file. Defaults to DEFAULT_CFG.
            overrides (dict, optional): Configuration overrides. Defaults to None.
            _callbacks (dict, optional): Callback collection used instead of the defaults.
        """
        if overrides is None:
            overrides = {}  # previously crashed with TypeError on the 'in' check below
        overrides.setdefault("Distillation", None)

        if overrides["Distillation"] is not None:  # distillation requested
            self.Distillation = overrides["Distillation"]
            self.loss_type = overrides['loss_type']
            self.distill_weight = overrides['distill_weight']
            self.online = overrides['online']
            if self.Distillation == "MultiDistillation":
                # Multimodal distillation: one RGB teacher and one IR teacher.
                self.Teacher_Model_RGB = overrides.pop("Teacher_Model_RGB")
                self.Teacher_Model_IR = overrides.pop("Teacher_Model_IR")
            else:  # single-modality distillation
                self.Teacher_Model = overrides.pop("Teacher_Model")

            # Remove distillation-only keys so get_cfg() doesn't reject them.
            for key in ("loss_type", "Distillation", "online", "distill_weight"):
                overrides.pop(key)
        else:  # no distillation: drop any stray distillation-only args
            self.Distillation = None
            for key in ("Distillation", "loss_type", "online", "Teacher_Model_RGB",
                        "Teacher_Model_IR", "Teacher_Model", "distill_weight"):
                overrides.pop(key, None)

        self.args = get_cfg(cfg, overrides)
        self.check_resume(overrides)
        self.device = select_device(self.args.device, self.args.batch)
        self.validator = None
        self.model = None
        self.metrics = None
        self.plots = {}
        init_seeds(self.args.seed + 1 + RANK, deterministic=self.args.deterministic)

        # Dirs
        self.save_dir = get_save_dir(self.args)
        self.args.name = self.save_dir.name  # update name for loggers
        self.wdir = self.save_dir / 'weights'  # weights dir
        if RANK in (-1, 0):
            self.wdir.mkdir(parents=True, exist_ok=True)  # make dir
            self.args.save_dir = str(self.save_dir)
            yaml_save(self.save_dir / 'args.yaml', vars(self.args))  # save run args
        self.last, self.best = self.wdir / 'last.pt', self.wdir / 'best.pt'  # checkpoint paths
        self.save_period = self.args.save_period

        self.batch_size = self.args.batch
        self.epochs = self.args.epochs
        self.start_epoch = 0
        if RANK == -1:
            print_args(vars(self.args))

        # Device
        if self.device.type in ('cpu', 'mps'):
            self.args.workers = 0  # faster CPU training as time dominated by inference, not dataloading

        # Model and Dataset
        self.model = check_model_file_from_stem(self.args.model)  # add suffix, i.e. yolov8n -> yolov8n.pt
        try:
            if self.args.task == 'classify':
                self.data = check_cls_dataset(self.args.data)
            elif self.args.data.split('.')[-1] in ('yaml', 'yml') or self.args.task in ('detect', 'segment', 'pose'):
                self.data = check_det_dataset(self.args.data)
                if 'yaml_file' in self.data:
                    self.args.data = self.data['yaml_file']  # for validating 'yolo train data=url.zip' usage
        except Exception as e:
            raise RuntimeError(emojis(f"Dataset '{clean_url(self.args.data)}' error ❌ {e}")) from e

        self.trainset, self.testset = self.get_dataset(self.data)
        self.ema = None

        # Optimization utils init
        self.lf = None
        self.scheduler = None

        # Epoch level metrics
        self.best_fitness = None
        self.fitness = None
        self.loss = None
        self.tloss = None
        self.loss_names = ['Loss']
        self.csv = self.save_dir / 'results.csv'
        self.plot_idx = [0, 1, 2]

        # LIF: models whose file name contains 'LIF' enable the FIA branch
        # with an 8x8 average-pooling layer.
        # NOTE(review): assumes self.model is a str here; a pathlib.Path would
        # raise TypeError on the 'in' check — confirm check_model_file_from_stem.
        if 'LIF' in self.model:
            self.FIA = True
            self.pool_for_FIA = nn.AvgPool2d(kernel_size=8, stride=8)
        else:
            self.FIA = False

        # Callbacks
        self.callbacks = _callbacks or callbacks.get_default_callbacks()
        if RANK in (-1, 0):
            callbacks.add_integration_callbacks(self)

    def add_callback(self, event: str, callback):
        """Appends the given callback."""
        self.callbacks[event].append(callback)

    def set_callback(self, event: str, callback):
        """Overrides the existing callbacks with the given callback."""
        self.callbacks[event] = [callback]

    def run_callbacks(self, event: str):
        """Run all existing callbacks associated with a particular event."""
        for callback in self.callbacks.get(event, []):
            callback(self)

    def train(self):
        """Resolve the world size, then launch DDP subprocess training or train in-process."""
        # Allow device='', device=None on Multi-GPU systems to default to device=0.
        if isinstance(self.args.device, str) and len(self.args.device):  # i.e. device='0' or device='0,1,2,3'
            world_size = len(self.args.device.split(','))
        elif isinstance(self.args.device, (tuple, list)):  # i.e. device=[0, 1, 2, 3] (multi-GPU from CLI is list)
            world_size = len(self.args.device)
        elif torch.cuda.is_available():  # i.e. device=None or device='' or device=number
            world_size = 1  # default to device 0
        else:  # i.e. device='cpu' or 'mps'
            world_size = 0

        # Run subprocess if DDP training, else train normally
        if world_size > 1 and 'LOCAL_RANK' not in os.environ:
            # Argument checks
            if self.args.rect:
                LOGGER.warning("WARNING ⚠️ 'rect=True' is incompatible with Multi-GPU training, setting 'rect=False'")
                self.args.rect = False
            if self.args.batch == -1:
                LOGGER.warning("WARNING ⚠️ 'batch=-1' for AutoBatch is incompatible with Multi-GPU training, setting "
                               "default 'batch=16'")
                self.args.batch = 16

            # Command
            cmd, file = generate_ddp_command(world_size, self)
            # The original 'except Exception as e: raise e' was a no-op;
            # try/finally alone still guarantees cleanup on failure.
            try:
                LOGGER.info(f'{colorstr("DDP:")} debug command {" ".join(cmd)}')
                subprocess.run(cmd, check=True)
            finally:
                ddp_cleanup(self, str(file))

        else:
            self._do_train(world_size)

    def _setup_scheduler(self):
        """Initialize training learning rate scheduler."""
        if self.args.cos_lr:
            self.lf = one_cycle(1, self.args.lrf, self.epochs)  # cosine 1->hyp['lrf']
        else:
            self.lf = lambda x: max(1 - x / self.epochs, 0) * (1.0 - self.args.lrf) + self.args.lrf  # linear
        self.scheduler = optim.lr_scheduler.LambdaLR(self.optimizer, lr_lambda=self.lf)

    def _setup_ddp(self, world_size):
        """Initializes and sets the DistributedDataParallel parameters for training.

        Args:
            world_size (int): Total number of DDP processes in the group.
        """
        # Bind this process to the GPU matching its global rank.
        torch.cuda.set_device(RANK)
        self.device = torch.device('cuda', RANK)
        # LOGGER.info(f'DDP info: RANK {RANK}, WORLD_SIZE {world_size}, DEVICE {self.device}')
        os.environ['NCCL_BLOCKING_WAIT'] = '1'  # set to enforce timeout
        dist.init_process_group(
            'nccl' if dist.is_nccl_available() else 'gloo',  # fall back to gloo when NCCL is unavailable
            timeout=timedelta(seconds=10800),  # 3 hours
            rank=RANK,
            world_size=world_size)

    def _setup_train(self, world_size):
        """Builds dataloaders and optimizer on correct rank process."""

        # Model
        self.run_callbacks('on_pretrain_routine_start')
        ckpt = self.setup_model()
        self.model = self.model.to(self.device)
        self.set_model_attributes()

        # Freeze layers
        freeze_list = self.args.freeze if isinstance(
            self.args.freeze, list) else range(self.args.freeze) if isinstance(self.args.freeze, int) else []
        always_freeze_names = ['.dfl']  # always freeze these layers
        freeze_layer_names = [f'model.{x}.' for x in freeze_list] + always_freeze_names
        for k, v in self.model.named_parameters():
            # v.register_hook(lambda x: torch.nan_to_num(x))  # NaN to 0 (commented for erratic training results)
            if any(x in k for x in freeze_layer_names):
                LOGGER.info(f"Freezing layer '{k}'")
                v.requires_grad = False
            elif not v.requires_grad:
                LOGGER.info(f"WARNING ⚠️ setting 'requires_grad=True' for frozen layer '{k}'. "
                            'See ultralytics.engine.trainer for customization of frozen layers.')
                v.requires_grad = True

        # 是否蒸馏
        if self.Distillation is not None:
            """
            self.Distillation : 教师模型

            v.requires_grad = False 离线蒸馏

            v.requires_grad = True  在线蒸馏

            """

            self.__hidden__ = torch.nn.Linear(1, 1, bias=False)
            if self.Distillation == "MultiDistillation":  # 多模态蒸馏
                for k, v in self.Teacher_Model_IR.model.named_parameters():
                    v.requires_grad = self.online
                self.Teacher_Model_IR = self.Teacher_Model_IR.to(self.device)
                for k, v in self.Teacher_Model_RGB.model.named_parameters():
                    v.requires_grad = self.online
                self.Teacher_Model_RGB = self.Teacher_Model_RGB.to(self.device)
            else:  # 单模态蒸馏
                for k, v in self.Teacher_Model.model.named_parameters():
                    v.requires_grad = self.online
                self.Teacher_Model = self.Teacher_Model.to(self.device)

        else:  # 不蒸馏
            self.distillation_loss = None

        # self.set_model_attributes()

        # Check AMP
        self.amp = torch.tensor(self.args.amp).to(self.device)  # True or False
        if self.amp and RANK in (-1, 0):  # Single-GPU and DDP
            callbacks_backup = callbacks.default_callbacks.copy()  # backup callbacks as check_amp() resets them
            self.amp = torch.tensor(check_amp(self.model), device=self.device)
            callbacks.default_callbacks = callbacks_backup  # restore callbacks
        if RANK > -1 and world_size > 1:  # DDP
            dist.broadcast(self.amp, src=0)  # broadcast the tensor from rank 0 to all other ranks (returns None)
        self.amp = bool(self.amp)  # as boolean
        self.scaler = torch.cuda.amp.GradScaler(enabled=self.amp)
        if world_size > 1:
            self.model = nn.parallel.DistributedDataParallel(self.model, device_ids=[RANK])

            # 是否蒸馏
            if self.Distillation is not None:
                if self.Distillation == "MultiDistillation":  # 多模态蒸馏
                    self.Teacher_Model_IR = nn.parallel.DistributedDataParallel(self.Teacher_Model_IR,
                                                                                device_ids=[RANK])
                    self.Teacher_Model_IR.eval()
                    self.Teacher_Model_RGB = nn.parallel.DistributedDataParallel(self.Teacher_Model_RGB,
                                                                                 device_ids=[RANK])
                    self.Teacher_Model_RGB.eval()
                else:  # 单模态蒸馏
                    self.Teacher_Model = nn.parallel.DistributedDataParallel(self.Teacher_Model, device_ids=[RANK])
                    self.Teacher_Model.eval()

        # Check imgsz
        gs = max(int(self.model.stride.max() if hasattr(self.model, 'stride') else 32), 32)  # grid size (max stride)
        self.args.imgsz = check_imgsz(self.args.imgsz, stride=gs, floor=gs, max_dim=1)
        self.stride = gs  # for multi-scale training

        # Batch size
        if self.batch_size == -1 and RANK == -1:  # single-GPU only, estimate best batch size
            self.args.batch = self.batch_size = check_train_batch_size(self.model, self.args.imgsz, self.amp)

        # Dataloaders
        batch_size = self.batch_size // max(world_size, 1)
        self.train_loader = self.get_dataloader(self.trainset, batch_size=batch_size, rank=RANK, mode='train')
        if RANK in (-1, 0):
            # NOTE: When training DOTA dataset, double batch size could get OOM cause some images got more than 2000 objects.
            self.test_loader = self.get_dataloader(self.testset,
                                                   batch_size=batch_size if self.args.task == 'obb' else batch_size * 2,
                                                   rank=-1,
                                                   mode='val')
            self.validator = self.get_validator()
            metric_keys = self.validator.metrics.keys + self.label_loss_items(prefix='val')
            self.metrics = dict(zip(metric_keys, [0] * len(metric_keys)))
            self.ema = ModelEMA(self.model)
            if self.args.plots:
                self.plot_training_labels()

        # 是否蒸馏
        if self.Distillation is not None:
            if self.Distillation == "MultiDistillation":  # 多模态蒸馏
                self.distillation_loss = Multimodal_Distillation_loss(self.model, self.Teacher_Model_RGB,
                                                                      self.Teacher_Model_IR, distiller=self.loss_type)
            else:  # 单模态蒸馏
                self.distillation_loss = Distillation_loss(self.model, self.Teacher_Model, distiller=self.loss_type)

        # Optimizer
        self.accumulate = max(round(self.args.nbs / self.batch_size), 1)  # accumulate loss before optimizing
        weight_decay = self.args.weight_decay * self.batch_size * self.accumulate / self.args.nbs  # scale weight_decay
        iterations = math.ceil(len(self.train_loader.dataset) / max(self.batch_size, self.args.nbs)) * self.epochs

        if self.Distillation == "MultiDistillation":
            self.optimizer = self.build_optimizer_multi(model=self.model,
                                                        teacher_model_ir=self.Teacher_Model_IR,
                                                        teacher_model_rgb=self.Teacher_Model_RGB,
                                                        distill_loss=self.distillation_loss,
                                                        name=self.args.optimizer,
                                                        lr=self.args.lr0,
                                                        momentum=self.args.momentum,
                                                        decay=weight_decay,
                                                        iterations=iterations)
        else:
            self.optimizer = self.build_optimizer(model=self.model,
                                                  teacher_model=self.Distillation,
                                                  distill_loss=self.distillation_loss,
                                                  name=self.args.optimizer,
                                                  lr=self.args.lr0,
                                                  momentum=self.args.momentum,
                                                  decay=weight_decay,
                                                  iterations=iterations)
        # Scheduler
        self._setup_scheduler()
        self.stopper, self.stop = EarlyStopping(patience=self.args.patience), False
        self.resume_training(ckpt)
        self.scheduler.last_epoch = self.start_epoch - 1  # do not move
        self.run_callbacks('on_pretrain_routine_end')

    def _do_train(self, world_size=1):
        """
        Run the main training loop, then evaluate and plot if specified by arguments.

        Args:
            world_size (int): Number of DDP processes; 1 means single-GPU/CPU training.
        """
        if world_size > 1:
            self._setup_ddp(world_size)
        self._setup_train(world_size)

        # Distillation enabled? (Re)build the distillation loss module.
        # NOTE(review): _setup_train already constructed self.distillation_loss; this rebuilds it — confirm intended.
        if self.Distillation is not None:
            if self.Distillation == "MultiDistillation":  # multimodal distillation (RGB + IR teachers)
                self.distillation_loss = Multimodal_Distillation_loss(self.model, self.Teacher_Model_RGB,
                                                                      self.Teacher_Model_IR, distiller=self.loss_type, )
            else:  # single-modality distillation
                self.distillation_loss = Distillation_loss(self.model, self.Teacher_Model, distiller=self.loss_type)

        ##################################### distillation #####################################
        # Rebuild the optimizer so the (new) distillation-loss parameters are registered.
        # NOTE(review): any optimizer state loaded by resume_training() inside _setup_train is
        # discarded here because a fresh optimizer replaces it — verify resuming still works as intended.
        weight_decay = self.args.weight_decay * self.batch_size * self.accumulate / self.args.nbs  # scale weight_decay
        iterations = math.ceil(len(self.train_loader.dataset) / max(self.batch_size, self.args.nbs)) * self.epochs
        if self.Distillation == "MultiDistillation":
            self.optimizer = self.build_optimizer_multi(model=self.model,
                                                        teacher_model_ir=self.Teacher_Model_IR,
                                                        teacher_model_rgb=self.Teacher_Model_RGB,
                                                        distill_loss=self.distillation_loss,
                                                        name=self.args.optimizer,
                                                        lr=self.args.lr0,
                                                        momentum=self.args.momentum,
                                                        decay=weight_decay,
                                                        iterations=iterations)
        else:
            self.optimizer = self.build_optimizer(model=self.model,
                                                  teacher_model=self.Distillation,
                                                  distill_loss=self.distillation_loss,
                                                  name=self.args.optimizer,
                                                  lr=self.args.lr0,
                                                  momentum=self.args.momentum,
                                                  decay=weight_decay,
                                                  iterations=iterations)
        # Scheduler (rebuilt on top of the new optimizer)
        if self.args.cos_lr:
            self.lf = one_cycle(1, self.args.lrf, self.epochs)  # cosine 1->hyp['lrf']
        else:
            self.lf = lambda x: (1 - x / self.epochs) * (1.0 - self.args.lrf) + self.args.lrf  # linear
        self.scheduler = optim.lr_scheduler.LambdaLR(self.optimizer, lr_lambda=self.lf)
        self.stopper, self.stop = EarlyStopping(patience=self.args.patience), False
        self.scheduler.last_epoch = self.start_epoch - 1  # do not move
        self.run_callbacks('on_pretrain_routine_end')

        nb = len(self.train_loader)  # number of batches
        nw = max(round(self.args.warmup_epochs * nb), 100) if self.args.warmup_epochs > 0 else -1  # warmup iterations
        last_opt_step = -1
        self.epoch_time = None
        self.epoch_time_start = time.time()
        self.train_time_start = time.time()
        self.run_callbacks('on_train_start')
        # NOTE(review): the ternary binds to the whole implicitly-concatenated f-string, so when
        # self.args.time is falsy only the final '{self.epochs} epochs...' fragment is logged — confirm intended.
        LOGGER.info(f'Image sizes {self.args.imgsz} train, {self.args.imgsz} val\n'
                    f'Using {self.train_loader.num_workers * (world_size or 1)} dataloader workers\n'
                    f"Logging results to {colorstr('bold', self.save_dir)}\n"
                    f'Starting training for '
                    f'{self.args.time} hours...' if self.args.time else f'{self.epochs} epochs...')
        if self.args.close_mosaic:
            base_idx = (self.epochs - self.args.close_mosaic) * nb
            self.plot_idx.extend([base_idx, base_idx + 1, base_idx + 2])
        epoch = self.epochs  # predefine for resume fully trained model edge cases
        for epoch in range(self.start_epoch, self.epochs):
            # Per-epoch accumulators for distillation / illumination loss components
            self.total_mecd1 = torch.zeros(1, device=self.device)
            self.total_mecd2 = torch.zeros(1, device=self.device)
            self.total_mrl_1 = torch.zeros(1, device=self.device)
            self.total_mrl_2 = torch.zeros(1, device=self.device)
            self.total_FIA = torch.zeros(1, device=self.device)
            self.epoch = epoch
            self.run_callbacks('on_train_epoch_start')
            self.model.train()
            if RANK != -1:
                self.train_loader.sampler.set_epoch(epoch)
            pbar = enumerate(self.train_loader)
            # Update dataloader attributes (optional)
            if epoch == (self.epochs - self.args.close_mosaic):
                self._close_dataloader_mosaic()
                self.train_loader.reset()

            if RANK in (-1, 0):
                LOGGER.info(self.progress_string())
                pbar = TQDM(enumerate(self.train_loader), total=nb)
            self.tloss = None
            self.optimizer.zero_grad()

            # Distillation enabled? Register the feature-capture hooks for this epoch.
            if self.Distillation is not None:
                self.distillation_loss.register_hook()

            for i, batch in pbar:
                self.run_callbacks('on_train_batch_start')
                # Warmup

                ni = i + nb * epoch  # number of integrated batches since training start
                if ni <= nw:
                    xi = [0, nw]  # x interp
                    self.accumulate = max(1, int(np.interp(ni, xi, [1, self.args.nbs / self.batch_size]).round()))
                    for j, x in enumerate(self.optimizer.param_groups):
                        # Bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
                        x['lr'] = np.interp(
                            ni, xi, [self.args.warmup_bias_lr if j == 0 else 0.0, x['initial_lr'] * self.lf(epoch)])
                        if 'momentum' in x:
                            x['momentum'] = np.interp(ni, xi, [self.args.warmup_momentum, self.args.momentum])

                # Forward
                with torch.cuda.amp.autocast(self.amp):
                    batch = self.preprocess_batch(batch)
                    self.loss, self.loss_items = self.model(batch)
                    if RANK != -1:
                        self.loss *= world_size
                    if self.FIA:
                        RGB_img = batch['img'][:, :3, :, :]  # assumes channels 0-2 are RGB — TODO confirm layout
                        ###### LIF ######
                        ###### LIF ######
                        ###### LIF ######

                        # # HSV
                        # Per-pixel max over RGB (HSV 'V' channel) serves as the illumination target
                        img_v, _ = torch.max(RGB_img, dim=1, keepdim=True)
                        img_v = img_v.detach()
                        gt = self.pool_for_FIA(img_v)

                        # YCbCr
                        # r = RGB_img[:, 0, :, :]
                        # g = RGB_img[:, 1, :, :]
                        # b = RGB_img[:, 2, :, :]
                        # y = 0.299 * r + 0.587 * g + 0.114 * b
                        # gt = self.pool_for_FIA(y)

                        # NOTE(review): assumes self.model.model[2] is the FIA illumination module — confirm for this architecture
                        FIA_module = self.model.model[2]
                        weight = FIA_module(RGB_img)
                        illumination_loss = torch.abs(gt - weight).mean()  # no_sup
                        illumination_loss *= 1.3  # weight
                        self.loss += illumination_loss
                        self.loss_items = torch.cat(
                            (
                                self.loss_items, illumination_loss.unsqueeze(0).detach()),
                            dim=0)
                        ###### LIF ######
                        ###### LIF ######
                        ###### LIF ######

                    # Distillation enabled? Run teachers and add distillation losses.
                    if self.Distillation is not None:
                        # distill_weight = ((1 - math.cos(i * math.pi / len(self.train_loader))) / 2) * (0.1 - 1) + 1
                        # Cosine schedule: factor decays from 1 to 0.1 over the training run
                        distill_weight = ((1 - math.cos(self.epoch * math.pi / self.epochs)) / 2) * (0.1 - 1) + 1
                        # distill_weight = 1
                        if self.Distillation == "MultiDistillation":
                            # Teacher outputs are not used directly — presumably the forward passes
                            # populate features captured by the hooks registered above. TODO confirm.
                            with torch.no_grad():
                                RGB_img = batch['img'][:, :3, :, :]
                                IR_img = batch['img'][:, 3:, :, :]
                                pred_rgb = self.Teacher_Model_RGB(RGB_img)
                                pred_ir = self.Teacher_Model_IR(IR_img)
                        else:
                            with torch.no_grad():
                                pred = self.Teacher_Model(batch['img'])
                        self.d_loss, self.c_loss = self.distillation_loss.get_loss()
                        self.d_loss = self.d_loss * (
                                self.distill_weight * distill_weight)  # distill_weight initiated 0.1 and decay
                        self.c_loss = self.c_loss * (
                                self.distill_weight * distill_weight)  # cross_weight initiated 0.1 and decay
                        self.loss += self.d_loss
                        self.loss += self.c_loss
                        self.loss_items = torch.cat(
                            (self.loss_items, self.d_loss.unsqueeze(0).detach(), self.c_loss.unsqueeze(0).detach()),
                            dim=0)

                    # Running mean of per-item losses across the epoch
                    self.tloss = (self.tloss * i + self.loss_items) / (i + 1) if self.tloss is not None \
                        else self.loss_items
                # Backward
                self.scaler.scale(self.loss).backward()

                # Optimize - https://pytorch.org/docs/master/notes/amp_examples.html
                if ni - last_opt_step >= self.accumulate:
                    self.optimizer_step()
                    last_opt_step = ni

                    # Timed stopping
                    if self.args.time:
                        self.stop = (time.time() - self.train_time_start) > (self.args.time * 3600)
                        if RANK != -1:  # if DDP training
                            broadcast_list = [self.stop if RANK == 0 else None]
                            dist.broadcast_object_list(broadcast_list, 0)  # broadcast 'stop' to all ranks
                            self.stop = broadcast_list[0]
                        if self.stop:  # training time exceeded
                            break

                # Log
                mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G'  # (GB)
                loss_len = self.tloss.shape[0] if len(self.tloss.size()) else 1
                losses = self.tloss if loss_len > 1 else torch.unsqueeze(self.tloss, 0)
                losses_list = losses.tolist()
                if RANK in (-1, 0):
                    pbar.set_description(
                        ('%11s' * 2 + '%11.4g' * (2 + len(losses_list))) %
                        (
                            f'{epoch + 1}/{self.epochs}', mem, *losses_list, batch['cls'].shape[0],
                            batch['img'].shape[-1]))
                    # pbar.set_description(
                    #     ('%11s' * 2 + '%11.4g' * (2 + len(losses_list))) %
                    #     (
                    #         f'{epoch + 1}/{self.epochs}', mem, *losses_list, batch['cls'].shape[0],
                    #         self.args.imgsz))
                    self.run_callbacks('on_batch_end')
                    if self.args.plots and ni in self.plot_idx:
                        self.plot_training_samples(batch, ni)

                self.run_callbacks('on_train_batch_end')

            # Distillation enabled? Remove the feature-capture hooks registered for this epoch.
            if self.Distillation is not None:
                self.distillation_loss.remove_handle_()

            self.lr = {f'lr/pg{ir}': x['lr'] for ir, x in enumerate(self.optimizer.param_groups)}  # for loggers
            self.run_callbacks('on_train_epoch_end')
            if RANK in (-1, 0):
                final_epoch = epoch + 1 == self.epochs
                self.ema.update_attr(self.model, include=['yaml', 'nc', 'args', 'names', 'stride', 'class_weights'])

                # Validation
                if self.args.val or final_epoch or self.stopper.possible_stop or self.stop:
                    self.metrics, self.fitness = self.validate()
                self.save_metrics(metrics={**self.label_loss_items(self.tloss), **self.metrics, **self.lr})
                self.stop |= self.stopper(epoch + 1, self.fitness)
                if self.args.time:
                    self.stop |= (time.time() - self.train_time_start) > (self.args.time * 3600)

                # Save model
                if self.args.save or final_epoch:
                    self.save_model()
                    self.run_callbacks('on_model_save')

            # Scheduler
            t = time.time()
            self.epoch_time = t - self.epoch_time_start
            self.epoch_time_start = t
            with warnings.catch_warnings():
                warnings.simplefilter('ignore')  # suppress 'Detected lr_scheduler.step() before optimizer.step()'
                if self.args.time:
                    # Re-derive the epoch budget from the measured mean epoch time to fit the time limit
                    mean_epoch_time = (t - self.train_time_start) / (epoch - self.start_epoch + 1)
                    self.epochs = self.args.epochs = math.ceil(self.args.time * 3600 / mean_epoch_time)
                    self._setup_scheduler()
                    self.scheduler.last_epoch = self.epoch  # do not move
                    self.stop |= epoch >= self.epochs  # stop if exceeded epochs
                self.scheduler.step()
            self.run_callbacks('on_fit_epoch_end')
            torch.cuda.empty_cache()  # clear GPU memory at end of epoch, may help reduce CUDA out of memory errors

            # Early Stopping
            if RANK != -1:  # if DDP training
                broadcast_list = [self.stop if RANK == 0 else None]
                dist.broadcast_object_list(broadcast_list, 0)  # broadcast 'stop' to all ranks
                self.stop = broadcast_list[0]
            if self.stop:
                break  # must break all DDP ranks

        if RANK in (-1, 0):
            # Do final val with best.pt
            LOGGER.info(f'\n{epoch - self.start_epoch + 1} epochs completed in '
                        f'{(time.time() - self.train_time_start) / 3600:.3f} hours.')
            self.final_eval()
            if self.args.plots:
                self.plot_metrics()
            self.run_callbacks('on_train_end')
        torch.cuda.empty_cache()
        self.run_callbacks('teardown')

    def save_model(self):
        """
        Save model training checkpoints with additional metadata.

        Writes 'last' every call, 'best' when the current fitness matches the best
        fitness, and periodic 'epochN.pt' snapshots when save_period is set.
        """
        import pandas as pd  # scope for faster startup
        metrics = {**self.metrics, **{'fitness': self.fitness}}
        results = {k.strip(): v for k, v in pd.read_csv(self.csv).to_dict(orient='list').items()}
        ckpt = {
            'epoch': self.epoch,
            'best_fitness': self.best_fitness,
            'model': deepcopy(de_parallel(self.model)).half(),  # strip DDP wrapper, store fp16
            'ema': deepcopy(self.ema.ema).half(),
            'updates': self.ema.updates,
            'optimizer': self.optimizer.state_dict(),
            'train_args': vars(self.args),  # save as dict
            'train_metrics': metrics,
            'train_results': results,
            'date': datetime.now().isoformat(),
            'version': __version__}

        # Use dill (if exists) to serialize the lambda functions where pickle does not do this
        try:
            import dill as pickle
        except ImportError:
            import pickle

        # Save last and best. BUGFIX: pickle_module was never passed, so the dill import above
        # had no effect and lambda-containing checkpoints fell back to plain pickle.
        torch.save(ckpt, self.last, pickle_module=pickle)
        if self.best_fitness == self.fitness:
            torch.save(ckpt, self.best, pickle_module=pickle)
        if (self.save_period > 0) and (self.epoch > 0) and (self.epoch % self.save_period == 0):
            torch.save(ckpt, self.wdir / f'epoch{self.epoch}.pt', pickle_module=pickle)  # periodic snapshot

    @staticmethod
    def get_dataset(data):
        """
        Get train, val path from data dict if it exists.

        Returns None if data format is not recognized.
        """
        return data['train'], data.get('val') or data.get('test')

    def setup_model(self):
        """Load/create/download model for any task; returns the checkpoint dict if one was loaded."""
        if isinstance(self.model, torch.nn.Module):  # model instance already provided; nothing to set up
            return

        cfg, weights, ckpt = self.model, None, None
        if str(cfg).endswith('.pt'):
            # Load weights from a checkpoint and reuse its stored model yaml as cfg
            weights, ckpt = attempt_load_one_weight(cfg)
            cfg = ckpt['model'].yaml

            # Distillation: adopt the checkpoint's model as the student directly
            if self.Distillation is not None:
                self.model = ckpt['model']
                self.model.info()

        self.model = self.get_model(cfg=cfg, weights=weights, verbose=RANK == -1)  # calls Model(cfg, weights)

        return ckpt

    def optimizer_step(self):
        """Apply one optimizer update: unscale gradients, clip them, step, and refresh the EMA."""
        scaler = self.scaler
        scaler.unscale_(self.optimizer)  # restore fp32-scale gradients before clipping
        torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=10.0)  # guard against exploding gradients
        scaler.step(self.optimizer)
        scaler.update()
        self.optimizer.zero_grad()
        if self.ema:
            self.ema.update(self.model)

    def preprocess_batch(self, batch):
        """Hook for task-specific input/target preprocessing; the base trainer is a no-op passthrough."""
        return batch

    def validate(self):
        """
        Run validation on the test set using self.validator.

        The validator's metrics dict is expected to contain a 'fitness' key; when it
        does not, the negated training loss is used as the fitness measure instead.
        self.best_fitness is updated whenever the new fitness is an improvement.
        """
        metrics = self.validator(self)
        fallback = -self.loss.detach().cpu().numpy()  # evaluated unconditionally, matching dict.pop default semantics
        fitness = metrics.pop('fitness', fallback)
        improved = (not self.best_fitness) or (self.best_fitness < fitness)
        if improved:
            self.best_fitness = fitness
        return metrics, fitness

    def get_model(self, cfg=None, weights=None, verbose=True):
        """Abstract hook: subclasses must construct and return the task model."""
        msg = "This task trainer doesn't support loading cfg files"
        raise NotImplementedError(msg)

    def get_validator(self):
        """Abstract hook: subclasses must return a task-specific validator."""
        msg = 'get_validator function not implemented in trainer'
        raise NotImplementedError(msg)

    def get_dataloader(self, dataset_path, batch_size=16, rank=0, mode='train'):
        """Abstract hook: subclasses must return a torch DataLoader for the given dataset path."""
        msg = 'get_dataloader function not implemented in trainer'
        raise NotImplementedError(msg)

    def build_dataset(self, img_path, mode='train', batch=None):
        """Abstract hook: subclasses must build and return a task dataset."""
        msg = 'build_dataset function not implemented in trainer'
        raise NotImplementedError(msg)

    def label_loss_items(self, loss_items=None, prefix='train'):
        """
        Return a labelled loss dict, or just the label keys when no values are given.

        Not needed for classification but necessary for segmentation & detection.
        """
        if loss_items is None:
            return ['loss']
        return {'loss': loss_items}

    def set_model_attributes(self):
        """Propagate dataset class names onto the model before training starts."""
        class_names = self.data['names']
        self.model.names = class_names

    def build_targets(self, preds, targets):
        """Hook for building training targets; the base trainer has nothing to build."""
        return None

    def progress_string(self):
        """Return the per-epoch progress header string; empty for the base trainer."""
        return str()

    # TODO: may need to put these following functions into callback
    def plot_training_samples(self, batch, ni):
        """Hook for plotting training samples; no-op in the base trainer."""
        return None

    def plot_training_labels(self):
        """Hook for plotting training labels; no-op in the base trainer."""
        return None

    def save_metrics(self, metrics):
        """Append one row of training metrics to the results CSV, writing a header row on first use."""
        keys = list(metrics.keys())
        vals = list(metrics.values())
        n = len(metrics) + 1  # +1 for the leading epoch column
        header = ''
        if not self.csv.exists():
            header = ('%23s,' * n % tuple(['epoch'] + keys)).rstrip(',') + '\n'
        row = ('%23.5g,' * n % tuple([self.epoch + 1] + vals)).rstrip(',') + '\n'
        with open(self.csv, 'a') as f:
            f.write(header + row)

    def plot_metrics(self):
        """Hook for plotting final metrics; no-op in the base trainer."""
        return None

    def on_plot(self, name, data=None):
        """Record a plot artifact and its creation time for later consumption by callbacks."""
        self.plots[Path(name)] = {'data': data, 'timestamp': time.time()}

    def final_eval(self):
        """Strip optimizer state from saved checkpoints and run a final validation on best.pt."""
        for ckpt_file in (self.last, self.best):
            if not ckpt_file.exists():
                continue
            strip_optimizer(ckpt_file)  # shrink checkpoint by removing optimizer state
            if ckpt_file is self.best:
                LOGGER.info(f'\nValidating {ckpt_file}...')
                self.validator.args.plots = self.args.plots
                self.metrics = self.validator(model=ckpt_file)
                self.metrics.pop('fitness', None)
                self.run_callbacks('on_fit_epoch_end')

    def check_resume(self, overrides):
        """
        Validate a resume request and rebuild training args from the checkpoint.

        When resume is requested, locates the checkpoint (explicit path or latest run),
        restores its recorded training arguments, and re-applies any imgsz/batch
        overrides. Raises FileNotFoundError when no valid checkpoint can be found.
        """
        resume = self.args.resume
        if resume:
            try:
                is_explicit_path = isinstance(resume, (str, Path)) and Path(resume).exists()
                last = Path(check_file(resume) if is_explicit_path else get_latest_run())

                # Ensure the dataset YAML recorded in the checkpoint still exists;
                # otherwise fall back to the currently configured dataset
                ckpt_args = attempt_load_weights(last).args
                if not Path(ckpt_args['data']).exists():
                    ckpt_args['data'] = self.args.data

                resume = True
                self.args = get_cfg(ckpt_args)
                self.args.model = str(last)  # reinstate model
                # Allow reducing memory-sensitive settings when resuming after a CUDA OOM crash
                for key in ('imgsz', 'batch'):
                    if key in overrides:
                        setattr(self.args, key, overrides[key])

            except Exception as e:
                raise FileNotFoundError('Resume checkpoint not found. Please pass a valid checkpoint to resume from, '
                                        "i.e. 'yolo train resume model=path/to/last.pt'") from e
        self.resume = resume

    def resume_training(self, ckpt):
        """Restore optimizer, EMA and epoch counters from a checkpoint dict (no-op when ckpt is None)."""
        if ckpt is None:
            return
        restored_fitness = 0.0
        resume_epoch = ckpt['epoch'] + 1
        if ckpt['optimizer'] is not None:
            # Restore optimizer state along with the best fitness achieved so far
            self.optimizer.load_state_dict(ckpt['optimizer'])
            restored_fitness = ckpt['best_fitness']
        if self.ema and ckpt.get('ema'):
            # Restore the exponential-moving-average weights and their update counter
            self.ema.ema.load_state_dict(ckpt['ema'].float().state_dict())
            self.ema.updates = ckpt['updates']
        if self.resume:
            assert resume_epoch > 0, (
                f'{self.args.model} training to {self.epochs} epochs is finished, nothing to resume.\n'
                f"Start a new training without resuming, i.e. 'yolo train model={self.args.model}'")
            LOGGER.info(
                f'Resuming training from {self.args.model} from epoch {resume_epoch + 1} to {self.epochs} total epochs')
        if self.epochs < resume_epoch:
            # Checkpoint already fully trained: extend the schedule to fine-tune for extra epochs
            LOGGER.info(
                f"{self.model} has been trained for {ckpt['epoch']} epochs. Fine-tuning for {self.epochs} more epochs.")
            self.epochs += ckpt['epoch']
        self.best_fitness = restored_fitness
        self.start_epoch = resume_epoch
        if resume_epoch > (self.epochs - self.args.close_mosaic):
            # Resuming inside the mosaic-free tail of training: disable mosaic immediately
            self._close_dataloader_mosaic()

    def _close_dataloader_mosaic(self):
        """Update dataloaders to stop using mosaic augmentation."""
        if hasattr(self.train_loader.dataset, 'mosaic'):
            self.train_loader.dataset.mosaic = False
        if hasattr(self.train_loader.dataset, 'close_mosaic'):
            LOGGER.info('Closing dataloader mosaic')
            self.train_loader.dataset.close_mosaic(hyp=self.args)

    def build_optimizer_multi(self, model, teacher_model_rgb, teacher_model_ir, distill_loss, name='auto', lr=0.001,
                              momentum=0.9, decay=1e-5, iterations=1e5):
        """
        Build the optimizer for multimodal (RGB + IR teacher) distillation training.

        Student parameters are always included; when online distillation is enabled
        (self.online), both teacher models' and the distillation-loss module's
        parameters are optimized too. Parameters are split into three groups:
        g[0] weights (with decay), g[1] normalization weights (no decay), g[2] biases (no decay).

        Args:
            model (torch.nn.Module): Student model.
            teacher_model_rgb (torch.nn.Module): RGB-modality teacher model.
            teacher_model_ir (torch.nn.Module): IR-modality teacher model.
            distill_loss (torch.nn.Module | None): Distillation loss with learnable parameters.
            name (str): Optimizer name, or 'auto' to pick one from the iteration count.
            lr (float): Initial learning rate.
            momentum (float): Momentum / beta1 factor.
            decay (float): Weight decay applied to the g[0] group.
            iterations (float): Total training iterations; drives the 'auto' selection.

        Returns:
            (torch.optim.Optimizer): The constructed optimizer.
        """
        g = [], [], []  # optimizer parameter groups: (decayed weights, norm weights, biases)
        bn = tuple(v for k, v in nn.__dict__.items() if 'Norm' in k)  # normalization layers, i.e. BatchNorm2d()
        if name == 'auto':
            LOGGER.info(f"{colorstr('optimizer:')} 'optimizer=auto' found, "
                        f"ignoring 'lr0={self.args.lr0}' and 'momentum={self.args.momentum}' and "
                        f"determining best 'optimizer', 'lr0' and 'momentum' automatically... ")
            nc = getattr(model, 'nc', 10)  # number of classes
            lr_fit = round(0.002 * 5 / (4 + nc), 6)  # lr0 fit equation to 6 decimal places
            name, lr, momentum = ('SGD', 0.01, 0.9) if iterations > 10000 else ('AdamW', lr_fit, 0.9)
            self.args.warmup_bias_lr = 0.0  # no higher than 0.01 for Adam

        for module_name, module in model.named_modules():
            for param_name, param in module.named_parameters(recurse=False):
                fullname = f'{module_name}.{param_name}' if module_name else param_name
                if 'bias' in fullname:  # bias (no decay)
                    g[2].append(param)
                elif isinstance(module, bn):  # weight (no decay)
                    g[1].append(param)
                else:  # weight (with decay)
                    g[0].append(param)

        ############################# distillation ############################
        if self.online:  # online distillation: teachers are updated jointly with the student
            for v in teacher_model_rgb.modules():
                if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):  # bias (no decay)
                    g[2].append(v.bias)
                if isinstance(v, bn):  # weight (no decay)
                    g[1].append(v.weight)
                elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):  # weight (with decay)
                    g[0].append(v.weight)

            for v in teacher_model_ir.modules():
                if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):  # bias (no decay)
                    g[2].append(v.bias)
                if isinstance(v, bn):  # weight (no decay)
                    g[1].append(v.weight)
                elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):  # weight (with decay)
                    g[0].append(v.weight)

            if self.Distillation is not None and distill_loss is not None:
                # BUGFIX: named_parameters() yields (name, Parameter) pairs, not modules, so the
                # previous module-style checks (v.bias / v.weight / isinstance(v, bn)) never matched
                # and the distillation-loss parameters were silently excluded from the optimizer
                # (or raised AttributeError for parameter names containing 'bn', since nn.Parameter
                # has no .weight attribute). Group the parameters by name instead.
                for k, v in distill_loss.named_parameters():
                    if 'bias' in k:  # bias (no decay)
                        g[2].append(v)
                    elif 'bn' in k:  # normalization weight (no decay)
                        g[1].append(v)
                    else:  # weight (with decay)
                        g[0].append(v)
        ############################# distillation ############################

        if name in ('Adam', 'Adamax', 'AdamW', 'NAdam', 'RAdam'):
            optimizer = getattr(optim, name, optim.Adam)(g[2], lr=lr, betas=(momentum, 0.999), weight_decay=0.0)
        elif name == 'RMSProp':
            optimizer = optim.RMSprop(g[2], lr=lr, momentum=momentum)
        elif name == 'SGD':
            optimizer = optim.SGD(g[2], lr=lr, momentum=momentum, nesterov=True)
        else:
            raise NotImplementedError(
                f"Optimizer '{name}' not found in list of available optimizers "
                f'[Adam, AdamW, NAdam, RAdam, RMSProp, SGD, auto].'
                'To request support for addition optimizers please visit https://github.com/ultralytics/ultralytics.')

        optimizer.add_param_group({'params': g[0], 'weight_decay': decay})  # add g0 with weight_decay
        optimizer.add_param_group({'params': g[1], 'weight_decay': 0.0})  # add g1 (BatchNorm2d weights)
        LOGGER.info(
            f"{colorstr('optimizer:')} {type(optimizer).__name__}(lr={lr}, momentum={momentum}) with parameter groups "
            f'{len(g[1])} weight(decay=0.0), {len(g[0])} weight(decay={decay}), {len(g[2])} bias(decay=0.0)')
        return optimizer

    def build_optimizer(self, model, teacher_model, distill_loss, name='auto', lr=0.001, momentum=0.9, decay=1e-5,
                        iterations=1e5):
        """
        Constructs an optimizer for the given model, based on the specified optimizer name, learning rate, momentum,
        weight decay, and number of iterations. When distillation is active, the teacher model's and the distillation
        loss module's learnable parameters are registered with the same optimizer.

        Args:
            model (torch.nn.Module): The (student) model for which to build an optimizer.
            teacher_model (torch.nn.Module): Teacher model; its parameters are added to the optimizer only when
                online distillation is enabled (self.Distillation is set and self.online is True).
            distill_loss (torch.nn.Module | None): Distillation loss module (e.g. feature-alignment layers); its
                parameters are added to the optimizer when self.Distillation is set and distill_loss is not None.
            name (str, optional): The name of the optimizer to use. If 'auto', the optimizer is selected
                based on the number of iterations. Default: 'auto'.
            lr (float, optional): The learning rate for the optimizer. Default: 0.001.
            momentum (float, optional): The momentum factor for the optimizer. Default: 0.9.
            decay (float, optional): The weight decay for the optimizer. Default: 1e-5.
            iterations (float, optional): The number of iterations, which determines the optimizer if
                name is 'auto'. Default: 1e5.

        Returns:
            (torch.optim.Optimizer): The constructed optimizer with three parameter groups:
                g[0] weights with decay, g[1] normalization weights without decay, g[2] biases without decay.
        """

        g = [], [], []  # optimizer parameter groups: (decayed weights, norm weights, biases)
        bn = tuple(v for k, v in nn.__dict__.items() if 'Norm' in k)  # normalization layers, i.e. BatchNorm2d()
        if name == 'auto':
            LOGGER.info(f"{colorstr('optimizer:')} 'optimizer=auto' found, "
                        f"ignoring 'lr0={self.args.lr0}' and 'momentum={self.args.momentum}' and "
                        f"determining best 'optimizer', 'lr0' and 'momentum' automatically... ")
            nc = getattr(model, 'nc', 10)  # number of classes
            lr_fit = round(0.002 * 5 / (4 + nc), 6)  # lr0 fit equation to 6 decimal places
            name, lr, momentum = ('SGD', 0.01, 0.9) if iterations > 10000 else ('AdamW', lr_fit, 0.9)
            self.args.warmup_bias_lr = 0.0  # no higher than 0.01 for Adam

        for module_name, module in model.named_modules():
            for param_name, param in module.named_parameters(recurse=False):
                fullname = f'{module_name}.{param_name}' if module_name else param_name
                if 'bias' in fullname:  # bias (no decay)
                    g[2].append(param)
                elif isinstance(module, bn):  # weight (no decay)
                    g[1].append(param)
                else:  # weight (with decay)
                    g[0].append(param)

        ############################# Distillation ############################

        # Online distillation: the teacher is trained jointly, so its parameters join the optimizer too.
        if self.Distillation is not None and self.online:
            for v in teacher_model.modules():
                if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):  # bias (no decay)
                    g[2].append(v.bias)
                if isinstance(v, bn):  # weight (no decay)
                    g[1].append(v.weight)
                elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):  # weight (with decay)
                    g[0].append(v.weight)

        # The distillation loss module may own learnable parameters (e.g. channel-alignment convs).
        # named_parameters() yields (name, nn.Parameter) pairs, so classify by the parameter NAME and append
        # the parameter itself. NOTE: the previous code probed .bias/.weight on the Parameter object (attributes
        # a tensor never has), so these parameters were silently skipped, or it crashed when 'bn' was in the name.
        if self.Distillation is not None and distill_loss is not None:
            for k, v in distill_loss.named_parameters():
                if 'bias' in k:  # bias (no decay)
                    g[2].append(v)
                elif 'bn' in k:  # normalization weight (no decay)
                    g[1].append(v)
                else:  # weight (with decay)
                    g[0].append(v)

        ############################# Distillation ############################

        if name in ('Adam', 'Adamax', 'AdamW', 'NAdam', 'RAdam'):
            # getattr fallback keeps a usable optimizer even if a listed name is missing from this torch build
            optimizer = getattr(optim, name, optim.Adam)(g[2], lr=lr, betas=(momentum, 0.999), weight_decay=0.0)
        elif name == 'RMSProp':
            optimizer = optim.RMSprop(g[2], lr=lr, momentum=momentum)
        elif name == 'SGD':
            optimizer = optim.SGD(g[2], lr=lr, momentum=momentum, nesterov=True)
        else:
            raise NotImplementedError(
                f"Optimizer '{name}' not found in list of available optimizers "
                f'[Adam, Adamax, AdamW, NAdam, RAdam, RMSProp, SGD, auto].'
                'To request support for additional optimizers please visit https://github.com/ultralytics/ultralytics.')

        # Biases were used to construct the optimizer; now attach the two weight groups with their own decay.
        optimizer.add_param_group({'params': g[0], 'weight_decay': decay})  # add g0 with weight_decay
        optimizer.add_param_group({'params': g[1], 'weight_decay': 0.0})  # add g1 (BatchNorm2d weights)
        LOGGER.info(
            f"{colorstr('optimizer:')} {type(optimizer).__name__}(lr={lr}, momentum={momentum}) with parameter groups "
            f'{len(g[1])} weight(decay=0.0), {len(g[0])} weight(decay={decay}), {len(g[2])} bias(decay=0.0)')
        return optimizer
