# Ultralytics YOLO 🚀, AGPL-3.0 license

from copy import copy
import os
import numpy as np
import torch
from ultralytics.data import build_dataloader, build_yolo_dataset, build_llltData
from ultralytics.engine.trainer import BaseTrainer
from ultralytics.models import yolo
from ultralytics.nn.tasks import DetectionModel, MultiDetectionModel, MultiLIFDetectionModel
from ultralytics.utils.plotting import plot_images, plot_labels, plot_results
from ultralytics.utils.torch_utils import de_parallel, torch_distributed_zero_first
import torch.distributed as dist
from ultralytics.nn.tasks import attempt_load_one_weight, attempt_load_weights
from ultralytics.utils.dist import ddp_cleanup, generate_ddp_command
from ultralytics.utils import (DEFAULT_CFG, LOGGER, RANK, TQDM, __version__, callbacks, clean_url, colorstr, emojis,
                               yaml_save)
from ultralytics.utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, init_seeds, one_cycle, select_device,
                                           strip_optimizer)
from ultralytics.utils.autobatch import check_train_batch_size
from ultralytics.utils.checks import check_amp, check_file, check_imgsz, print_args
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.cuda import amp
from torch import nn, optim
import math
import subprocess
import time
import warnings
import swanlab
class DetectionTrainer(BaseTrainer):
    """
    Trainer for YOLO object-detection models, extending BaseTrainer.

    Example:
        ```python
        from ultralytics.models.yolo.detect import DetectionTrainer

        args = dict(model='yolov8n.pt', data='coco8.yaml', epochs=3)
        trainer = DetectionTrainer(overrides=args)
        trainer.train()
        ```
    """

    def build_dataset(self, img_path, mode='train', batch=None):
        """
        Build a YOLO dataset for the given image folder.

        Args:
            img_path (str): Path to the folder containing images.
            mode (str): Either `train` or `val`; augmentations may differ per mode.
            batch (int, optional): Batch size, used for `rect`. Defaults to None.
        """
        if self.model:
            model_stride = int(de_parallel(self.model).stride.max())
        else:
            model_stride = 0
        gs = max(model_stride, 32)  # grid size floor of 32
        return build_yolo_dataset(self.args, img_path, batch, self.data, mode=mode, rect=mode == 'val', stride=gs)

    def get_dataloader(self, dataset_path, batch_size=16, rank=0, mode='train'):
        """Construct and return a dataloader for the requested split."""
        assert mode in ['train', 'val']
        with torch_distributed_zero_first(rank):  # init dataset *.cache only once if DDP
            dataset = self.build_dataset(dataset_path, mode, batch_size)
        shuffle = mode == 'train'
        if shuffle and getattr(dataset, 'rect', False):
            LOGGER.warning("WARNING ⚠️ 'rect=True' is incompatible with DataLoader shuffle, setting shuffle=False")
            shuffle = False
        # Validation doubles the worker count since there is no backward pass.
        n_workers = self.args.workers if mode == 'train' else self.args.workers * 2
        return build_dataloader(dataset, batch_size, n_workers, shuffle, rank)

    def preprocess_batch(self, batch):
        """Move batch images to the training device and scale uint8 pixels to float [0, 1]."""
        images = batch['img'].to(self.device, non_blocking=True)
        batch['img'] = images.float() / 255
        return batch

    def set_model_attributes(self):
        """Attach dataset-derived attributes (nc, names) and hyperparameters to the model."""
        # NOTE: hyperparameter re-scaling by detection-layer count (box/cls gains)
        # is intentionally disabled here.
        self.model.nc = self.data['nc']  # number of classes
        self.model.names = self.data['names']  # class names
        self.model.args = self.args  # hyperparameters
        # TODO: self.model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc

    def get_model(self, cfg=None, weights=None, verbose=True):
        """Return a YOLO detection model, optionally initialized from weights."""
        detection_model = DetectionModel(cfg, nc=self.data['nc'], verbose=verbose and RANK == -1)
        if weights:
            detection_model.load(weights)
        return detection_model

    def get_validator(self):
        """Return a DetectionValidator configured for this trainer's test loader."""
        self.loss_names = 'box_loss', 'cls_loss', 'dfl_loss'
        return yolo.detect.DetectionValidator(self.test_loader, save_dir=self.save_dir, args=copy(self.args))

    def label_loss_items(self, loss_items=None, prefix='train'):
        """
        Return a dict of labelled loss items, or the bare label keys when no values are given.

        Not needed for classification but necessary for segmentation & detection.
        """
        keys = [f'{prefix}/{name}' for name in self.loss_names]
        if loss_items is None:
            return keys
        rounded = [round(float(v), 5) for v in loss_items]  # tensors -> 5-decimal floats
        return dict(zip(keys, rounded))

    def progress_string(self):
        """Return a formatted header string for training progress output."""
        headers = ('Epoch', 'GPU_mem', *self.loss_names, 'Instances', 'Size')
        return ('\n' + '%11s' * len(headers)) % headers

    def plot_training_samples(self, batch, ni):
        """Plot a batch of training samples with their annotations."""
        plot_images(
            images=batch['img'],
            batch_idx=batch['batch_idx'],
            cls=batch['cls'].squeeze(-1),
            bboxes=batch['bboxes'],
            paths=batch['im_file'],
            fname=self.save_dir / f'train_batch{ni}.jpg',
            on_plot=self.on_plot,
        )

    def plot_metrics(self):
        """Plot metrics recorded in the results CSV (saves results.png)."""
        plot_results(file=self.csv, on_plot=self.on_plot)

    def plot_training_labels(self):
        """Plot a labelled summary of the training set's boxes and classes."""
        labels = self.train_loader.dataset.labels
        boxes = np.concatenate([lb['bboxes'] for lb in labels], 0)
        cls = np.concatenate([lb['cls'] for lb in labels], 0)
        plot_labels(boxes, cls.squeeze(), names=self.data['names'], save_dir=self.save_dir, on_plot=self.on_plot)


class llltTrainer(DetectionTrainer):
    """DetectionTrainer variant for the lllt dataset.

    Builds datasets from JSON split files ('train_div.json' / 'val_div.json')
    located under ``json_path``, optionally enables an FIA illumination branch
    (``isLIF=True``), and overrides the training loop to log losses, learning
    rates and validation metrics to swanlab.
    """

    def __init__(self, json_path, isLIF=False,  **kwargs):
        """Initialize the trainer.

        Args:
            json_path (str): Directory containing the per-split JSON files.
            isLIF (bool): If True, enable the FIA illumination-aware branch.
            **kwargs: Forwarded to DetectionTrainer/BaseTrainer.
        """
        super().__init__(**kwargs)
        self.json_path = json_path

        if isLIF:
            self.FIA = True
            # 8x average pooling of the brightness (V) map; presumably matches
            # the FIA module's output resolution — TODO confirm.
            self.pool_for_FIA = nn.AvgPool2d(kernel_size=8, stride=8)
        else:
            self.FIA = False


    def get_dataloader(self, dataset_path, batch_size=16, rank=0, mode='train'):
        # return self.lllt_dataloader
        """Construct and return a dataloader backed by the lllt JSON split files."""
        assert self.json_path is not None, "JSON path must be provided for llltTrainer."
        assert mode in ['train', 'val']

        # Select the per-split annotation file that build_dataset() will consume.
        if mode == 'val':
            self.json_file = self.json_path + '/val_div.json'
        else:
            self.json_file = self.json_path + '/train_div.json'
        with torch_distributed_zero_first(rank):  # init dataset *.cache only once if DDP
            dataset = self.build_dataset(dataset_path, mode, batch_size)
        shuffle = mode == 'train'
        if getattr(dataset, 'rect', False) and shuffle:
            LOGGER.warning("WARNING ⚠️ 'rect=True' is incompatible with DataLoader shuffle, setting shuffle=False")
            shuffle = False
        workers = self.args.workers if mode == 'train' else self.args.workers * 2
        return build_dataloader(dataset, batch_size, workers, shuffle, rank)  # return dataloader


    def _setup_ddp(self, world_size):
        """Initialize DDP via the parent class, but only once per process."""
        if not dist.is_initialized():  # only call when not already initialized
            super()._setup_ddp(world_size)

    def build_dataset(self, img_path, mode='train', batch=None):
        """
        Build YOLO Dataset from the lllt JSON annotation file.

        Args:
            img_path (str): Path to the folder containing images.
            mode (str): `train` mode or `val` mode, users are able to customize different augmentations for each mode.
            batch (int, optional): Size of batches, this is for `rect`. Defaults to None.
        """
        gs = max(int(de_parallel(self.model).stride.max() if self.model else 0), 32)
        # self.json_file is set by get_dataloader() before this is called.
        assert self.json_file is not None, "JSON file must be provided for llltTrainer."
        return build_llltData(self.args, img_path, self.json_file, batch, mode=mode, rect=mode == 'val', stride=gs)

    def preprocess_batch(self, batch):
        # TODO: multi-modal input could be injected here
        """Preprocesses a batch of images by scaling and converting to float."""
        batch['img'] = batch['img'].to(self.device, non_blocking=True).float() / 255
        # LOGGER.info('Preprocessing batch: {}'.format(batch['ratio_pad'])) # Log the shape of the preprocessed images
        return batch


    def get_validator(self):
        """Returns a validator for YOLO model validation; loss names gain an
        extra 'illumination_loss' entry when the FIA branch is enabled."""
        # self.loss_names = 'box_loss', 'cls_loss', 'dfl_loss', 'BCE_loss', 'Neg_ent' # --multi
        self.loss_names = 'box_loss', 'cls_loss', 'dfl_loss' # --single
        if self.FIA:
            self.loss_names = self.loss_names + ('illumination_loss',)
        return yolo.detect.lltValidatorForTrain(self.test_loader, save_dir=self.save_dir, args=copy(self.args)) # --multi
        # return yolo.detect.DetectionValidator(self.test_loader, save_dir=self.save_dir, args=copy(self.args)) # --single

    def get_model(self, cfg=None, weights=None, verbose=True):
        """Return a YOLO Multi detection model."""
        LOGGER.info("self.data['nc'] is {}".format(self.data['nc']))
        # model = DetectionModel(cfg, ch = 3, nc=self.data['nc'], verbose=verbose and RANK == -1) # --single
        # NOTE(review): ch=6 implies a 6-channel input (the FIA branch below
        # treats channels 0-2 as RGB) — confirm against the dataset layout.
        model = MultiLIFDetectionModel(cfg, ch = 6, nc=self.data['nc'], verbose=verbose and RANK == -1) # --multi
        if weights:
            model.load(weights)
        return model

    def setup_model(self, force_cfg=None):
        """Load or build ``self.model``; an externally supplied ``force_cfg``
        yaml overrides the one stored in a .pt checkpoint.

        Returns:
            (dict | None): The loaded checkpoint when model was a .pt path.
        """
        if isinstance(self.model, torch.nn.Module):
            return

        model, weights = self.model, None
        ckpt = None
        if str(model).endswith('.pt'):
            weights, ckpt = attempt_load_one_weight(model)
            if force_cfg is not None:
                cfg = force_cfg        # prefer the externally supplied yaml
                ckpt['model'].yaml = force_cfg  # replace model in checkpoint
            else:
                cfg = ckpt['model'].yaml
        else:
            cfg = model
        self.model = self.get_model(cfg=cfg, weights=weights, verbose=RANK == -1)
        return ckpt

    def train(self, force_cfg=None):
        """Allow device='', device=None on Multi-GPU systems to default to device=0."""
        if isinstance(self.args.device, str) and len(self.args.device):  # i.e. device='0' or device='0,1,2,3'
            world_size = len(self.args.device.split(','))
        elif isinstance(self.args.device, (tuple, list)):  # i.e. device=[0, 1, 2, 3] (multi-GPU from CLI is list)
            world_size = len(self.args.device)
        elif torch.cuda.is_available():  # i.e. device=None or device='' or device=number
            world_size = 1  # default to device 0
        else:  # i.e. device='cpu' or 'mps'
            world_size = 0

        # Run subprocess if DDP training, else train normally
        if world_size > 1 and 'LOCAL_RANK' not in os.environ:
            # Argument checks
            if self.args.rect:
                LOGGER.warning("WARNING ⚠️ 'rect=True' is incompatible with Multi-GPU training, setting 'rect=False'")
                self.args.rect = False
            if self.args.batch == -1:
                LOGGER.warning("WARNING ⚠️ 'batch=-1' for AutoBatch is incompatible with Multi-GPU training, setting "
                               "default 'batch=16'")
                self.args.batch = 16

            # Command
            cmd, file = generate_ddp_command(world_size, self)
            try:
                LOGGER.info(f'{colorstr("DDP:")} debug command {" ".join(cmd)}')
                subprocess.run(cmd, check=True)
            except Exception as e:
                raise e
            finally:
                ddp_cleanup(self, str(file))

        else:
            self._do_train(world_size, force_cfg)


    def _do_train(self, world_size=1, force_cfg=None):
        """Run the training loop; evaluate and plot at the end if requested.

        Differences from the stock BaseTrainer loop: swanlab logging of per-epoch
        losses / lr / metrics, and an optional FIA illumination loss term.
        """
        if world_size > 1:
            self._setup_ddp(world_size)
        self._setup_train(world_size, force_cfg)

        self.epoch_time = None
        self.epoch_time_start = time.time()
        self.train_time_start = time.time()
        nb = len(self.train_loader)  # number of batches
        nw = max(round(self.args.warmup_epochs * nb), 100) if self.args.warmup_epochs > 0 else -1  # warmup iterations
        last_opt_step = -1
        self.run_callbacks('on_train_start')
        LOGGER.info(f'Image sizes {self.args.imgsz} train, {self.args.imgsz} val\n'
                    f'Using {self.train_loader.num_workers * (world_size or 1)} dataloader workers\n'
                    f"Logging results to {colorstr('bold', self.save_dir)}\n"
                    f'Starting training for {self.epochs} epochs...')
        if self.args.close_mosaic:
            base_idx = (self.epochs - self.args.close_mosaic) * nb
            self.plot_idx.extend([base_idx, base_idx + 1, base_idx + 2])
        epoch = self.epochs  # predefine for resume fully trained model edge cases
        for epoch in range(self.start_epoch, self.epochs):
            self.epoch = epoch
            self.run_callbacks('on_train_epoch_start')
            self.model.train()
            if RANK != -1:
                self.train_loader.sampler.set_epoch(epoch)
            pbar = enumerate(self.train_loader)
            # Update dataloader attributes (optional)
            if epoch == (self.epochs - self.args.close_mosaic):
                LOGGER.info('Closing dataloader mosaic')
                if hasattr(self.train_loader.dataset, 'mosaic'):
                    self.train_loader.dataset.mosaic = False
                if hasattr(self.train_loader.dataset, 'close_mosaic'):
                    self.train_loader.dataset.close_mosaic(hyp=self.args)

                self.train_loader.reset()
            if RANK in (-1, 0):
                LOGGER.info(self.progress_string())
                pbar = TQDM(enumerate(self.train_loader), total=nb)
            self.tloss = None
            self.optimizer.zero_grad()
            swanlab_loss = {i: [] for i, x in enumerate(self.loss_names)}  # for swanlab logging
            # LOGGER.info(f'loss_names: {self.loss_names}')
            for i, batch in pbar:
                self.run_callbacks('on_train_batch_start')
                # Warmup
                ni = i + nb * epoch
                if ni <= nw:
                    xi = [0, nw]  # x interp
                    self.accumulate = max(1, np.interp(ni, xi, [1, self.args.nbs / self.batch_size]).round())
                    for j, x in enumerate(self.optimizer.param_groups):
                        # Bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
                        x['lr'] = np.interp(
                            ni, xi, [self.args.warmup_bias_lr if j == 0 else 0.0, x['initial_lr'] * self.lf(epoch)])
                        if 'momentum' in x:
                            x['momentum'] = np.interp(ni, xi, [self.args.warmup_momentum, self.args.momentum])

                # Forward
                with torch.cuda.amp.autocast(self.amp):
                    batch = self.preprocess_batch(batch)
                    self.loss, self.loss_items = self.model(batch)

                    if RANK != -1:
                        self.loss *= world_size

                    if self.FIA:
                        # Take the first 3 channels as the RGB image.
                        RGB_img = batch['img'][:, :3, :, :]

                        # Compute brightness (the V channel) and detach it.
                        img_v, _ = torch.max(RGB_img, dim=1, keepdim=True)
                        img_v = img_v.detach()

                        # Build the FIA target (also detached).
                        gt = self.pool_for_FIA(img_v).detach()

                        # Pick the correct FIA module (unwrap DDP if needed).
                        if isinstance(self.model, torch.nn.parallel.DistributedDataParallel):
                            FIA_module = self.model.module.model[2]
                        else:
                            FIA_module = self.model.model[2]

                        # FIA module output; no_grad avoids building a graph.
                        with torch.no_grad():
                            weight = FIA_module(RGB_img)

                        # Compute the illumination loss, detached from the graph.
                        # NOTE(review): because both the FIA output and the loss
                        # are detached, this term is reported/logged but carries
                        # NO gradient into backward() — confirm this is intended.
                        illumination_loss = torch.abs(gt - weight).mean() * 1.3
                        self.loss += illumination_loss.detach()  # ensure only a leaf tensor is accumulated into loss

                        # Update loss_items, making sure every tensor is a leaf.
                        illumination_loss_item = illumination_loss.detach().unsqueeze(0)
                        if self.loss_items.numel() == 0:
                            self.loss_items = illumination_loss_item.clone()
                        else:
                            self.loss_items = torch.cat((self.loss_items, illumination_loss_item), dim=0)


                    self.tloss = (self.tloss * i + self.loss_items) / (i + 1) if self.tloss is not None \
                        else self.loss_items  # running mean of loss items, used only for display

                # Backward
                # with torch.autograd.set_detect_anomaly(True):
                self.scaler.scale(self.loss).backward()

                # Optimize - https://pytorch.org/docs/master/notes/amp_examples.html
                if ni - last_opt_step >= self.accumulate:
                    self.optimizer_step()
                    last_opt_step = ni

                # Log
                mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G'  # (GB)
                # Reserved GPU memory in GB; 0 when no GPU is available.

                loss_len = self.tloss.shape[0] if len(self.tloss.size()) else 1
                # Length of the loss tensor; 1 when it is a zero-dim scalar.

                losses = self.tloss if loss_len > 1 else torch.unsqueeze(self.tloss, 0)

                # Multi-element losses are used as-is; a scalar is expanded to a 1-D tensor.
                for i, x in enumerate(losses):
                    if i not in swanlab_loss:
                        swanlab_loss[i] = []  # lazily add keys (e.g. FIA appends an extra loss item)
                    swanlab_loss[i].append(float(x))
                if RANK in (-1, 0):
                    # Main process only (RANK is -1 or 0):

                    pbar.set_description(
                        ('%11s' * 2 + '%11.4g' * (2 + loss_len)) %
                        (f'{epoch + 1}/{self.epochs}', mem, *losses, batch['cls'].shape[0], batch['img'].shape[-1]))
                    # Progress-bar description: epoch/total, GPU memory, loss
                    # values, instance count in this batch, and image size.

                    self.run_callbacks('on_batch_end')
                    # "on_batch_end" callback hook, run at the end of each batch.

                    if self.args.plots and ni in self.plot_idx:
                        # When plotting is enabled and this batch index is scheduled:

                        self.plot_training_samples(batch, ni)
                        # Plot this batch's training samples.

                self.run_callbacks('on_train_batch_end')
                # "on_train_batch_end" callback hook, run after every training batch.

            self.lr = {f'lr/pg{ir}': x['lr'] for ir, x in enumerate(self.optimizer.param_groups)}  # for loggers
            swanlab.log(self.lr, step=epoch + 1)  # log learning rate
            if len(swanlab_loss) == len(self.loss_names):  # skip logging when key counts diverge
                swanlab_loss = {"train/" + self.loss_names[k]: np.mean(v) for k, v in swanlab_loss.items()}  # mean loss for swanlab
                swanlab.log(swanlab_loss, step=epoch + 1)
            else:
                LOGGER.warning(f"Mismatch between swanlab_loss: {len(swanlab_loss)} and loss_names:{len(self.loss_names)} lengths. Skipping logging.")

            with warnings.catch_warnings():
                warnings.simplefilter('ignore')  # suppress 'Detected lr_scheduler.step() before optimizer.step()'
                self.scheduler.step()
            self.run_callbacks('on_train_epoch_end')

            if RANK in (-1, 0):

                # Validation
                self.ema.update_attr(self.model, include=['yaml', 'nc', 'args', 'names', 'stride', 'class_weights'])
                final_epoch = (epoch + 1 == self.epochs) or self.stopper.possible_stop

                if self.args.val or final_epoch:
                    self.metrics, self.fitness = self.validate()
                    # LOGGER.info(f'metrics: {self.metrics}')
                    swanlab.log(self.metrics, step=epoch + 1)

                self.save_metrics(metrics={**self.label_loss_items(self.tloss), **self.metrics, **self.lr})
                self.stop = self.stopper(epoch + 1, self.fitness)

                # Save model
                if self.args.save or (epoch + 1 == self.epochs):
                    # bad = self.find_non_leaf_tensors(self.model)
                    # if bad:
                    #     print("Found non-leaf tensors saved in model modules:")
                    #     for mod_name, attr, shape, req_grad, has_grad_fn in bad:
                    #         print(f" module={mod_name:30} attr={attr:20} shape={shape} requires_grad={req_grad} has_grad_fn={has_grad_fn}")
                    # else:
                    #     print("No non-leaf tensors found in module attributes.")
                    self.save_model()

                    self.run_callbacks('on_model_save')

            tnow = time.time()
            self.epoch_time = tnow - self.epoch_time_start
            self.epoch_time_start = tnow
            self.run_callbacks('on_fit_epoch_end')
            torch.cuda.empty_cache()  # clears GPU vRAM at end of epoch, can help with out of memory errors

            # Early Stopping
            if RANK != -1:  # if DDP training
                broadcast_list = [self.stop if RANK == 0 else None]
                dist.broadcast_object_list(broadcast_list, 0)  # broadcast 'stop' to all ranks
                if RANK != 0:
                    self.stop = broadcast_list[0]
            if self.stop:
                break  # must break all DDP ranks

        if RANK in (-1, 0):
            # Do final val with best.pt
            LOGGER.info(f'\n{epoch - self.start_epoch + 1} epochs completed in '
                        f'{(time.time() - self.train_time_start) / 3600:.3f} hours.')
            self.final_eval()
            if self.args.plots:
                self.plot_metrics()
            self.run_callbacks('on_train_end')
        torch.cuda.empty_cache()
        self.run_callbacks('teardown')

    def find_non_leaf_tensors(self, model):
        """Debug helper: list module tensor attributes that are non-leaf (and
        would therefore carry a graph into torch.save).

        Returns:
            (list[tuple]): (module_name, attr, shape, requires_grad, has_grad_fn) per offender.
        """
        bad = []
        m = model.module if isinstance(model, torch.nn.parallel.DistributedDataParallel) else model
        for name, module in m.named_modules():
            for attr, val in vars(module).items():
                # Only inspect Tensor-typed attributes.
                if isinstance(val, torch.Tensor):
                    # Non-leaf: has a grad_fn, or requires_grad but is not a leaf.
                    is_non_leaf = (val.grad_fn is not None) or (not val.is_leaf)
                    if is_non_leaf:
                        bad.append((name or "root", attr, val.shape, val.requires_grad, bool(val.grad_fn)))
        return bad

    def _setup_train(self, world_size, force_cfg=None):
        """Builds dataloaders and optimizer on correct rank process."""

        # Model
        self.run_callbacks('on_pretrain_routine_start')
        ckpt = self.setup_model(force_cfg)
        self.model = self.model.to(self.device)
        self.set_model_attributes()

        # Freeze layers
        freeze_list = self.args.freeze if isinstance(
            self.args.freeze, list) else range(self.args.freeze) if isinstance(self.args.freeze, int) else []
        always_freeze_names = ['.dfl']  # always freeze these layers
        freeze_layer_names = [f'model.{x}.' for x in freeze_list] + always_freeze_names
        for k, v in self.model.named_parameters():
            # v.register_hook(lambda x: torch.nan_to_num(x))  # NaN to 0 (commented for erratic training results)
            if any(x in k for x in freeze_layer_names):
                LOGGER.info(f"Freezing layer '{k}'")
                v.requires_grad = False
            elif not v.requires_grad:
                LOGGER.info(f"WARNING ⚠️ setting 'requires_grad=True' for frozen layer '{k}'. "
                            'See ultralytics.engine.trainer for customization of frozen layers.')
                v.requires_grad = True

        # Check AMP
        self.amp = torch.tensor(self.args.amp).to(self.device)  # True or False
        if self.amp and RANK in (-1, 0):  # Single-GPU and DDP
            callbacks_backup = callbacks.default_callbacks.copy()  # backup callbacks as check_amp() resets them
            self.amp = torch.tensor(check_amp(self.model), device=self.device)
            callbacks.default_callbacks = callbacks_backup  # restore callbacks
        if RANK > -1 and world_size > 1:  # DDP
            dist.broadcast(self.amp, src=0)  # broadcast the tensor from rank 0 to all other ranks (returns None)
        self.amp = bool(self.amp)  # as boolean
        self.scaler = amp.GradScaler(enabled=self.amp)
        if world_size > 1:
            self.model = DDP(self.model, device_ids=[RANK])
        LOGGER.info("check AMP is over, amp is {}".format(self.amp))


        # Check imgsz
        gs = max(int(self.model.stride.max() if hasattr(self.model, 'stride') else 32), 32)  # grid size (max stride)
        self.args.imgsz = check_imgsz(self.args.imgsz, stride=gs, floor=gs, max_dim=1)
        LOGGER.info(f'Image sizes {self.args.imgsz} train, {self.args.imgsz} val\n')

        # Batch size
        if self.batch_size == -1 and RANK == -1:  # single-GPU only, estimate best batch size
            self.args.batch = self.batch_size = check_train_batch_size(self.model, self.args.imgsz, self.amp)
        LOGGER.info(f'Using batch size {self.batch_size} for training')

        # Dataloaders (per-rank batch size under DDP)
        batch_size = self.batch_size // max(world_size, 1)
        LOGGER.info(f'Print trainSet info: {self.trainset}')
        self.train_loader = self.get_dataloader(self.trainset, batch_size=batch_size, rank=RANK, mode='train')
        LOGGER.info("RANK is {}, train_loader num_workers is {}".format(RANK, self.train_loader.num_workers))
        if RANK in (-1, 0):
            # LOGGER.info('RANK is -1 or 0, setting up validation dataloader')
            self.test_loader = self.get_dataloader(self.testset, batch_size=batch_size * 2, rank=-1, mode='val')
            self.validator = self.get_validator()
            metric_keys = self.validator.metrics.keys + self.label_loss_items(prefix='val')
            self.metrics = dict(zip(metric_keys, [0] * len(metric_keys)))
            self.ema = ModelEMA(self.model)
            if self.args.plots:
                self.plot_training_labels()
        LOGGER.info(f'Using {self.train_loader.num_workers * (world_size or 1)} dataloader workers\n'
                    f'Logging results to {colorstr("bold", self.save_dir)}\n')

        # Optimizer
        self.accumulate = max(round(self.args.nbs / self.batch_size), 1)  # accumulate loss before optimizing
        weight_decay = self.args.weight_decay * self.batch_size * self.accumulate / self.args.nbs  # scale weight_decay
        iterations = math.ceil(len(self.train_loader.dataset) / max(self.batch_size, self.args.nbs)) * self.epochs
        self.optimizer = self.build_optimizer(model=self.model,
                                              name=self.args.optimizer,
                                              lr=self.args.lr0,
                                              momentum=self.args.momentum,
                                              decay=weight_decay,
                                              iterations=iterations)
        LOGGER.info(f'Optimizer {self.optimizer.__class__.__name__} with lr={self.args.lr0}, '
                    f'momentum={self.args.momentum}, weight_decay={weight_decay} and {len(self.optimizer.param_groups)} parameter groups')
        # Scheduler
        if self.args.cos_lr:
            self.lf = one_cycle(1, self.args.lrf, self.epochs)  # cosine 1->hyp['lrf']
        else:
            self.lf = lambda x: (1 - x / self.epochs) * (1.0 - self.args.lrf) + self.args.lrf  # linear
        self.scheduler = optim.lr_scheduler.LambdaLR(self.optimizer, lr_lambda=self.lf)
        self.stopper, self.stop = EarlyStopping(patience=self.args.patience), False
        self.resume_training(ckpt)
        self.scheduler.last_epoch = self.start_epoch - 1  # do not move
        self.run_callbacks('on_pretrain_routine_end')



