# Ultralytics YOLO 🚀, AGPL-3.0 license
import contextlib
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from collections import defaultdict
import cv2
import numpy as np
import torch
import torchvision
import json
from ultralytics.utils import LOCAL_RANK, NUM_THREADS, TQDM, colorstr, is_dir_writeable
import math
from .augment import Compose, Format, Instances, LetterBox, classify_albumentations, classify_transforms, v8_transforms, v8_multi_transforms, LetterBox6Channel
from .base import BaseDataset
from .utils import HELP_URL, LOGGER, get_hash, img2label_paths, verify_image, verify_image_label
from copy import deepcopy


# Ultralytics dataset *.cache version, >= 1.0.0 for YOLOv8
DATASET_CACHE_VERSION = '1.0.3'


class YOLODataset(BaseDataset):
    """
    Dataset class for loading object detection and/or segmentation labels in YOLO format.

    Args:
        data (dict, optional): A dataset YAML dictionary. Defaults to None.
        use_segments (bool, optional): If True, segmentation masks are used as labels. Defaults to False.
        use_keypoints (bool, optional): If True, keypoints are used as labels. Defaults to False.

    Returns:
        (torch.utils.data.Dataset): A PyTorch dataset object that can be used for training an object detection model.
    """

    def __init__(self, *args, data=None, use_segments=False, use_keypoints=False, **kwargs):
        """Initializes the YOLODataset with optional configurations for segments and keypoints."""
        self.use_segments = use_segments  # whether labels carry segmentation polygons
        self.use_keypoints = use_keypoints  # whether labels carry keypoints
        self.data = data  # dataset YAML dictionary (expects 'names', optionally 'kpt_shape')
        assert not (self.use_segments and self.use_keypoints), 'Can not use both segments and keypoints.'
        super().__init__(*args, **kwargs)

    def cache_labels(self, path=Path('./labels.cache')):
        """
        Cache dataset labels, checking images and reading their shapes.

        Args:
            path (Path): Path where the cache file is saved (default: Path('./labels.cache')).
        Returns:
            (dict): Labels dictionary with 'labels', 'hash', 'results' and 'msgs' entries.
        """
        x = {'labels': []}  # initialize the labels dictionary
        nm, nf, ne, nc, msgs = 0, 0, 0, 0, []  # counters: missing, found, empty, corrupt; plus warning messages
        desc = f'{self.prefix}Scanning {path.parent / path.stem}...'  # progress-bar description
        total = len(self.im_files)  # total number of image files
        nkpt, ndim = self.data.get('kpt_shape', (0, 0))  # keypoint shape (count, dimension) from the dataset dict

        # Keypoints requested but kpt_shape missing/malformed -> fail fast
        if self.use_keypoints and (nkpt <= 0 or ndim not in (2, 3)):
            raise ValueError("'kpt_shape' 在 data.yaml 中缺失或不正确。应该是一个列表，例如 [关键点数量, 维度]，如 'kpt_shape: [17, 3]'")

        # Verify image/label pairs in parallel with a thread pool
        with ThreadPool(NUM_THREADS) as pool:
            results = pool.imap(
                func=verify_image_label,  # function that validates one image/label pair
                iterable=zip(
                    self.im_files,  # image file list
                    self.label_files,  # label file list
                    repeat(self.prefix),  # logging prefix
                    repeat(self.use_keypoints),  # whether keypoints are expected
                    repeat(len(self.data['names'])),  # number of class names
                    repeat(nkpt),  # number of keypoints
                    repeat(ndim)  # keypoint dimension
                )
            )
            # Progress bar over the verification results
            pbar = TQDM(results, desc=desc, total=total)

            # Accumulate per-file verification results
            for im_file, lb, shape, segments, keypoint, nm_f, nf_f, ne_f, nc_f, msg in pbar:
                nm += nm_f  # missing labels
                nf += nf_f  # found labels
                ne += ne_f  # empty labels
                nc += nc_f  # corrupt labels

                # Keep a label entry only if the image passed verification
                if im_file:
                    x['labels'].append(
                        dict(
                            im_file=im_file,  # image file path
                            shape=shape,  # image shape
                            cls=lb[:, 0:1],  # class labels (n, 1)
                            bboxes=lb[:, 1:],  # bounding boxes (n, 4)
                            segments=segments,  # segmentation polygons
                            keypoints=keypoint,  # keypoints
                            normalized=True,  # coordinates are normalized
                            bbox_format='xywh'  # bounding-box format
                        )
                    )

                # Collect any per-file warning message
                if msg:
                    msgs.append(msg)

                # Refresh the progress-bar description with running totals
                pbar.desc = f'{desc} {nf} images, {nm + ne} backgrounds, {nc} corrupt'

            # Close the progress bar
            pbar.close()

        # Emit collected warnings, if any
        if msgs:
            LOGGER.info('\n'.join(msgs))

        # Warn when no labels were found at all
        if nf == 0:
            LOGGER.warning(f'{self.prefix}WARNING ⚠️ No labels found in {path}. {HELP_URL}')

        # Hash of label + image paths, used to detect dataset changes on reload
        x['hash'] = get_hash(self.label_files + self.im_files)

        # Summary statistics: found, missing, empty, corrupt, total
        x['results'] = nf, nm, ne, nc, len(self.im_files)

        # Warning messages
        x['msgs'] = msgs

        # Persist the cache file for the next run
        save_dataset_cache_file(self.prefix, path, x)

        # Return the labels dictionary
        return x

    def get_labels(self):
        """Returns the dictionary of labels for YOLO training."""
        # Derive label file paths from the image file paths
        self.label_files = img2label_paths(self.im_files)
        # Cache file path: the labels directory with a '.cache' suffix
        cache_path = Path(self.label_files[0]).parent.with_suffix('.cache')
        try:
            # Attempt to load an existing cache file and mark it as found
            cache, exists = load_dataset_cache_file(cache_path), True
            # Cache version must match the current version
            assert cache['version'] == DATASET_CACHE_VERSION
            # Cache hash must match the current image/label file hash
            assert cache['hash'] == get_hash(self.label_files + self.im_files)
        except (FileNotFoundError, AssertionError, AttributeError):
            # Cache missing or stale: rebuild it and mark it as not previously existing
            cache, exists = self.cache_labels(cache_path), False

        # Display cache information
        nf, nm, ne, nc, n = cache.pop('results')  # found, missing, empty, corrupt, total
        if exists and LOCAL_RANK in (-1, 0):  # only on the main process when the cache already existed
            # Progress-bar description
            d = f'Scanning {cache_path}... {nf} images, {nm + ne} backgrounds, {nc} corrupt'
            # Show an already-completed progress bar
            TQDM(None, desc=self.prefix + d, total=n, initial=n)
            # Replay cached warnings, if any
            if cache['msgs']:
                LOGGER.info('\n'.join(cache['msgs']))

        # Read cache contents
        [cache.pop(k) for k in ('hash', 'version', 'msgs')]  # drop bookkeeping keys
        labels = cache['labels']  # label entries
        if not labels:  # warn if no labels remain
            LOGGER.warning(f'WARNING ⚠️ No images found in {cache_path}, training may not work correctly. {HELP_URL}')
        # Keep only the image files that have a label entry
        self.im_files = [lb['im_file'] for lb in labels]

        # Check whether the dataset mixes box-only and segment labels
        lengths = ((len(lb['cls']), len(lb['bboxes']), len(lb['segments'])) for lb in labels)  # per-entry counts
        len_cls, len_boxes, len_segments = (sum(x) for x in zip(*lengths))  # dataset-wide totals
        if len_segments and len_boxes != len_segments:  # segment/box counts disagree
            # Warn and fall back to boxes only
            LOGGER.warning(
                f'WARNING ⚠️ Box and segment counts should be equal, but got len(segments) = {len_segments}, '
                f'len(boxes) = {len_boxes}. To resolve this only boxes will be used and all segments will be removed. '
                'To avoid this please supply either a detect or segment dataset, not a detect-segment mixed dataset.')
            # Drop all segments, keeping only bounding boxes
            for lb in labels:
                lb['segments'] = []
        if len_cls == 0:  # no class labels at all
            LOGGER.warning(f'WARNING ⚠️ No labels found in {cache_path}, training may not work correctly. {HELP_URL}')
        return labels  # list of per-image label dictionaries

    def build_transforms(self, hyp=None):
        """Builds and appends augmentation and formatting transforms."""
        if self.augment:  # training-time augmentation
            hyp.mosaic = hyp.mosaic if self.augment and not self.rect else 0.0  # mosaic only when not rect training
            hyp.mixup = hyp.mixup if self.augment and not self.rect else 0.0  # mixup only when not rect training
            transforms = v8_transforms(self, self.imgsz, hyp)  # full YOLOv8 augmentation pipeline
            LOGGER.info("Using YOLOv8 data augmentation transforms.")  # log which pipeline is active
        else:  # no augmentation: resize only
            transforms = Compose([LetterBox(new_shape=(self.imgsz, self.imgsz), scaleup=False)])  # letterbox resize
            LOGGER.info("Just using LetterBox transform.")  # log which pipeline is active

        # Final formatting transform producing model-ready tensors
        transforms.append(
            Format(bbox_format='xywh',  # bounding-box format
                   normalize=True,  # normalize box coordinates
                   return_mask=self.use_segments,  # return segmentation masks
                   return_keypoint=self.use_keypoints,  # return keypoints
                   batch_idx=True,  # return batch indices
                   mask_ratio=hyp.mask_ratio,  # mask downsample ratio
                   mask_overlap=hyp.overlap_mask))  # whether masks may overlap
        return transforms  # composed transform list

    def close_mosaic(self, hyp):
        """Sets mosaic, copy_paste and mixup options to 0.0 and builds transformations."""
        hyp.mosaic = 0.0  # set mosaic ratio=0.0
        hyp.copy_paste = 0.0  # keep the same behavior as previous v8 close-mosaic
        hyp.mixup = 0.0  # keep the same behavior as previous v8 close-mosaic
        self.transforms = self.build_transforms(hyp)

    def update_labels_info(self, label):
        """Custom your label format here."""
        # NOTE: cls is not with bboxes now, classification and semantic segmentation need an independent cls label
        # We can make it also support classification and semantic segmentation by add or remove some dict keys there.
        bboxes = label.pop('bboxes')
        segments = label.pop('segments', None)
        keypoints = label.pop('keypoints', None)
        bbox_format = label.pop('bbox_format')
        normalized = label.pop('normalized')
        label['instances'] = Instances(bboxes, segments, keypoints, bbox_format=bbox_format, normalized=normalized)
        return label

    @staticmethod
    def collate_fn(batch):
        """Collates data samples into batches."""
        new_batch = {}  # collated batch dictionary
        keys = batch[0].keys()  # field names of each sample
        values = list(zip(*[list(b.values()) for b in batch]))  # group sample values by field
        for i, k in enumerate(keys):  # per field
            value = values[i]  # this field's values across the batch
            if k == 'img':  # image tensors
                value = torch.stack(value, 0)  # stack into one batched tensor
            if k in ['masks', 'keypoints', 'bboxes', 'cls']:  # variable-length target tensors
                value = torch.cat(value, 0)  # concatenate along the first axis
            new_batch[k] = value  # store the collated value
        new_batch['batch_idx'] = list(new_batch['batch_idx'])  # make batch_idx mutable
        for i in range(len(new_batch['batch_idx'])):  # per sample
            new_batch['batch_idx'][i] += i  # add target image index for build_targets()
        new_batch['batch_idx'] = torch.cat(new_batch['batch_idx'], 0)  # concatenate into one tensor
        return new_batch  # collated batch


# Classification dataloaders -------------------------------------------------------------------------------------------
class ClassificationDataset(torchvision.datasets.ImageFolder):
    """
    YOLO Classification Dataset.

    Args:
        root (str): Dataset path.

    Attributes:
        cache_ram (bool): True if images should be cached in RAM, False otherwise.
        cache_disk (bool): True if images should be cached on disk, False otherwise.
        samples (list): List of samples containing file, index, npy, and im.
        torch_transforms (callable): torchvision transforms applied to the dataset.
        album_transforms (callable, optional): Albumentations transforms applied to the dataset if augment is True.
    """

    def __init__(self, root, args, augment=False, cache=False, prefix=''):
        """
        Initialize YOLO object with root, image size, augmentations, and cache settings.

        Args:
            root (str): Dataset path.
            args (Namespace): Argument parser containing dataset related settings.
            augment (bool, optional): True if dataset should be augmented, False otherwise. Defaults to False.
            cache (bool | str | optional): Cache setting, can be True, False, 'ram' or 'disk'. Defaults to False.
        """
        super().__init__(root=root)
        if augment and args.fraction < 1.0:  # reduce training fraction
            self.samples = self.samples[:round(len(self.samples) * args.fraction)]
        self.prefix = colorstr(f'{prefix}: ') if prefix else ''
        self.cache_ram = cache is True or cache == 'ram'  # cache decoded images in RAM
        self.cache_disk = cache == 'disk'  # cache decoded images as *.npy files on disk
        self.samples = self.verify_images()  # filter out bad images
        self.samples = [list(x) + [Path(x[0]).with_suffix('.npy'), None] for x in self.samples]  # file, index, npy, im
        self.torch_transforms = classify_transforms(args.imgsz, rect=args.rect)
        self.album_transforms = classify_albumentations(
            augment=augment,
            size=args.imgsz,
            scale=(1.0 - args.scale, 1.0),  # (0.08, 1.0)
            hflip=args.fliplr,
            vflip=args.flipud,
            hsv_h=args.hsv_h,  # HSV-Hue augmentation (fraction)
            hsv_s=args.hsv_s,  # HSV-Saturation augmentation (fraction)
            hsv_v=args.hsv_v,  # HSV-Value augmentation (fraction)
            mean=(0.0, 0.0, 0.0),  # IMAGENET_MEAN
            std=(1.0, 1.0, 1.0),  # IMAGENET_STD
            auto_aug=False) if augment else None

    def __getitem__(self, i):
        """Returns subset of data and targets corresponding to given indices."""
        f, j, fn, im = self.samples[i]  # filename, index, filename.with_suffix('.npy'), image
        if self.cache_ram:
            # BUGFIX: was `if self.cache_ram and im is None`, which fell through to the
            # uncached `else` branch and re-read the image from disk whenever it WAS cached,
            # defeating the RAM cache entirely. Two nested ifs are required here.
            if im is None:
                im = self.samples[i][3] = cv2.imread(f)
        elif self.cache_disk:
            if not fn.exists():  # decode once, persist as *.npy, then always load from it
                np.save(fn.as_posix(), cv2.imread(f), allow_pickle=False)
            im = np.load(fn)
        else:  # read image
            im = cv2.imread(f)  # BGR
        if self.album_transforms:
            sample = self.album_transforms(image=cv2.cvtColor(im, cv2.COLOR_BGR2RGB))['image']
        else:
            sample = self.torch_transforms(im)
        return {'img': sample, 'cls': j}

    def __len__(self) -> int:
        """Return the total number of samples in the dataset."""
        return len(self.samples)

    def verify_images(self):
        """Verify all images in dataset."""
        desc = f'{self.prefix}Scanning {self.root}...'
        path = Path(self.root).with_suffix('.cache')  # *.cache file path

        with contextlib.suppress(FileNotFoundError, AssertionError, AttributeError):
            cache = load_dataset_cache_file(path)  # attempt to load a *.cache file
            assert cache['version'] == DATASET_CACHE_VERSION  # matches current version
            assert cache['hash'] == get_hash([x[0] for x in self.samples])  # identical hash
            nf, nc, n, samples = cache.pop('results')  # found, missing, empty, corrupt, total
            if LOCAL_RANK in (-1, 0):
                d = f'{desc} {nf} images, {nc} corrupt'
                TQDM(None, desc=d, total=n, initial=n)
                if cache['msgs']:
                    LOGGER.info('\n'.join(cache['msgs']))  # display warnings
            return samples

        # Run scan if *.cache retrieval failed
        nf, nc, msgs, samples, x = 0, 0, [], [], {}
        with ThreadPool(NUM_THREADS) as pool:
            results = pool.imap(func=verify_image, iterable=zip(self.samples, repeat(self.prefix)))
            pbar = TQDM(results, desc=desc, total=len(self.samples))
            for sample, nf_f, nc_f, msg in pbar:
                if nf_f:
                    samples.append(sample)
                if msg:
                    msgs.append(msg)
                nf += nf_f
                nc += nc_f
                pbar.desc = f'{desc} {nf} images, {nc} corrupt'
            pbar.close()
        if msgs:
            LOGGER.info('\n'.join(msgs))
        x['hash'] = get_hash([x[0] for x in self.samples])
        x['results'] = nf, nc, len(samples), samples
        x['msgs'] = msgs  # warnings
        save_dataset_cache_file(self.prefix, path, x)
        return samples


def load_dataset_cache_file(path):
    """
    Load an Ultralytics *.cache dictionary from path.

    Args:
        path (str | Path): Path to a *.cache file written by `save_dataset_cache_file`.

    Returns:
        (dict): The cached dataset dictionary.
    """
    import gc

    gc.disable()  # reduce pickle load time https://github.com/ultralytics/ultralytics/pull/1585
    try:
        cache = np.load(str(path), allow_pickle=True).item()  # load dict
    finally:
        # BUGFIX: re-enable GC even when np.load raises (e.g. FileNotFoundError for a missing
        # cache); previously a failed load left garbage collection disabled process-wide.
        gc.enable()
    return cache


def save_dataset_cache_file(prefix, path, x):
    """
    Save an Ultralytics dataset *.cache dictionary `x` to `path`.

    Args:
        prefix (str): Logging prefix.
        path (Path): Destination *.cache path.
        x (dict): Cache dictionary to persist; a 'version' key is stamped onto it.
    """
    x['version'] = DATASET_CACHE_VERSION  # add cache version
    if not is_dir_writeable(path.parent):
        # Destination directory is read-only; skip saving rather than failing.
        LOGGER.warning(f'{prefix}WARNING ⚠️ Cache directory {path.parent} is not writeable, cache not saved.')
        return
    if path.exists():
        path.unlink()  # remove *.cache file if exists
    np.save(str(path), x)  # save cache for next time
    path.with_suffix('.cache.npy').rename(path)  # remove .npy suffix
    LOGGER.info(f'{prefix}New cache created: {path}')


# TODO: support semantic segmentation
class SemanticDataset(BaseDataset):
    """
    Dataset placeholder for semantic segmentation tasks.

    All loading behavior comes from the BaseDataset parent; no segmentation-specific
    functionality is implemented here yet.

    Note:
        Currently a stub — methods and attributes supporting semantic segmentation
        still need to be added.
    """

    def __init__(self):
        """Construct the SemanticDataset by delegating entirely to BaseDataset."""
        super().__init__()


class llltDataset(YOLODataset):
    """Handles object detection tasks by loading annotations from a specified JSON file, supporting YOLO format."""

    def __init__(self, *args, task="detect", json_file, **kwargs):
        """Initializes a GroundingDataset for object detection, loading annotations from a specified JSON file."""
        assert task == "detect", "`GroundingDataset` only support `detect` task for now!"
        self.json_file = json_file  # path to the COCO-style annotation JSON file
        # Pop the test-mode flag from kwargs (NOTE: defaults to False, i.e. train mode)
        self.isTest = kwargs.pop('isTest', False)  # 获取模式参数，默认为 'train'
        super().__init__(*args, data={}, **kwargs)
        self.img_sts, self.img_nds = [None] * self.ni, [None] * self.ni  # per-index RAM caches for the two image streams

    def get_labels(self):
        """
        Load annotations from the JSON file, filtering and normalizing the bounding boxes for each image.

        Returns:
            (List[dict]): A list of dictionaries, one per image, with its annotation information.
        """
        self.im_files = []  # image file path list
        labels = []  # label entries
        LOGGER.info("Loading annotation file...")  # log that annotation loading has started
        with open(self.json_file) as f:  # open the JSON annotation file
            annotations = json.load(f)  # parse the JSON content into a dict
        images = {f"{x['id']:d}": x for x in annotations["images"]}  # map image id (as string) -> image info
        # print(images)  # optional: debug
        img_to_anns = defaultdict(list)  # map image id -> list of annotations
        for ann in annotations["annotations"]:  # iterate all annotations
            img_to_anns[ann["image_id"]].append(ann)  # group annotations by image id
        # LOGGER.info("img_to_anns: %s", img_to_anns)
        if self.isTest:  # test mode
            # swap the 'train' directory for 'test' in the image path
            self.img_path = self.img_path.replace('train', 'test')  # replace 'train' with 'test' in the path
        for img_id, anns in TQDM(img_to_anns.items(), desc=f"Reading annotations {self.json_file}"):  # per image

            img = images[f"{img_id:d}"]  # image info record
            h, w, f = img["height"], img["width"], img["file_name"]  # image height, width and file name

            # LOGGER.info(f"image path: {self.img_path}")  # log the image path
            im_file = Path(self.img_path) / f  # full path to the image

            if not im_file.exists():  # image missing on disk
                LOGGER.info(f"Image {im_file} not found, skipping...")  # log and skip
                continue  # skip this image
            self.im_files.append(str(im_file))  # record the image file path
            bboxes = []  # bounding boxes for this image
            cat2id = {}  # category-to-id mapping (unused)
            texts = []  # text list (unused)
            for ann in anns:  # per annotation of this image
                box = np.array(ann["bbox"], dtype=np.float32)  # COCO bbox [x, y, w, h], top-left corner
                # box[:2] += box[2:] / 2  # convert top-left corner to center coordinates
                box[0] = box[0] + box[2] / 2  # x center
                box[1] = box[1] + box[3] / 2  # y center
                box[[0, 2]] /= float(w)  # normalize x coordinates to [0, 1]
                box[[1, 3]] /= float(h)  # normalize y coordinates to [0, 1]
                if box[2] <= 0 or box[3] <= 0:  # degenerate width or height
                    continue  # skip this box
                cls = ann["category_id"]  # class id
                box = [cls] + box.tolist()  # prepend the class id to the box
                bboxes.append(box)  # collect the box
            lb = np.array(bboxes, dtype=np.float32) if len(bboxes) else np.zeros(
                (0, 5), dtype=np.float32)  # (n, 5) array of [cls, x, y, w, h]
            labels.append(  # append this image's label entry
                dict(
                    # NOTE(review): im_file is a Path here, while YOLODataset stores str — confirm downstream use
                    im_file=im_file,  # image file path
                    shape=(h, w),  # image height and width
                    cls=lb[:, 0:1],  # class ids (n, 1)
                    bboxes=lb[:, 1:],  # bounding boxes (n, 4)
                    normalized=True,  # coordinates are normalized
                    bbox_format="xywh",  # center-x, center-y, width, height
                )
            )
        # LOGGER.info(f'print labels: {labels}')  # debug
        return labels  # list of label entries

    def build_transforms(self, hyp=None):
        """Configures augmentations for training with optional text loading; `hyp` adjusts augmentation intensity."""
        transforms = super().build_transforms(hyp)
        return transforms

    def load_image(self, i, rect_mode=True):
        """Load one image from dataset index i, returning (im, original hw, resized hw)."""
        im, ft, fn = self.ims[i], self.im_files[i], self.npy_files[i]
        # cached image im, image file path ft, and npy file path fn for this index

        fll = Path(str(ft).replace('channel2', 'channel').replace('t.jpg', 'll.jpg'))
        # derive the companion stream's image path by string substitution (presumably the low-light image)

        if im is None:  # image not cached in RAM yet
            img_st = cv2.imread(str(ft), cv2.IMREAD_GRAYSCALE)
            # read the primary grayscale image (channel 1)
            img_nd = cv2.imread(str(fll), cv2.IMREAD_GRAYSCALE)
            # read the low-light grayscale image (channel 2)

            if img_st is None or img_nd is None:
                raise ValueError("Failed to read ft or fll image.")
                # fail if either image could not be read

            img_rd = cv2.addWeighted(img_st, 0.5, img_nd, 0.5, 0)
            # fuse the two grayscale images 50/50 to form channel 3

            im = cv2.merge((img_st, img_nd, img_rd))
            # merge the three grayscale planes into one 3-channel image (st, nd, rd)

            if im is None:
                raise FileNotFoundError(f'Image Not Found {ft}')
                # defensive check on the merged result

            h0, w0 = im.shape[:2]  # original image height and width

            if rect_mode:  # resize the long side to imgsz while keeping the aspect ratio
                r = self.imgsz / max(h0, w0)  # scale factor so the long side becomes imgsz
                if r != 1:  # resize needed
                    w, h = (min(math.ceil(w0 * r), self.imgsz), min(math.ceil(h0 * r), self.imgsz))
                    # scaled width/height, aspect ratio preserved
                    im = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR)
                    # resize proportionally
            elif not (h0 == w0 == self.imgsz):  # otherwise, unless already the target square
                im = cv2.resize(im, (self.imgsz, self.imgsz), interpolation=cv2.INTER_LINEAR)
                # stretch directly to a square of the target size

            # during augmented training, cache the image to avoid re-reading it
            if self.augment:
                self.ims[i], self.im_hw0[i], self.im_hw[i] = im, (h0, w0), im.shape[:2]
                # cache the image, original hw and resized hw for this index
                self.buffer.append(i)
                if len(self.buffer) >= self.max_buffer_length:
                    # buffer full: evict the oldest cached image
                    j = self.buffer.pop(0)
                    self.ims[j], self.im_hw0[j], self.im_hw[j] = None, None, None

            return im, (h0, w0), im.shape[:2]
            # image, original hw, resized hw

        return self.ims[i], self.im_hw0[i], self.im_hw[i]
        # already cached: return the cached image and sizes

    def close_mosaic(self, hyp):
        """Sets mosaic, copy_paste and mixup options to 0.0 and builds transformations."""
        hyp.mosaic = 0.0  # set mosaic ratio=0.0
        hyp.copy_paste = 0.0  # keep the same behavior as previous v8 close-mosaic
        hyp.mixup = 0.0  # keep the same behavior as previous v8 close-mosaic
        self.transforms = self.build_transforms(hyp)


# Single-modality low-light/infrared dataset loading
# NOTE(review): the indented string below is a stray no-op statement that Python parses as
# part of this class's suite (dedent to column 4); it is harmless but should be removed.
    """Handles object detection tasks by loading annotations from a specified JSON file, supporting YOLO format."""


class llltDataset_single(llltDataset):
    """Single-modality variant of llltDataset: loads only the primary image file for each sample."""

    def load_image(self, i, rect_mode=True):
        """
        Load one image from dataset index `i`.

        Args:
            i (int): Dataset index.
            rect_mode (bool): If True, resize the long side to `imgsz` keeping the aspect
                ratio; otherwise stretch to a square of `imgsz` x `imgsz`.

        Returns:
            (np.ndarray, tuple, tuple): Image, original (h, w), resized (h, w).

        Raises:
            FileNotFoundError: If the image file cannot be read.
        """
        # NOTE: the parent class also derives a companion low-light path (`fll`); the
        # single-modality variant deliberately reads only the primary file `ft`.
        im, ft = self.ims[i], self.im_files[i]
        if im is None:  # not cached in RAM
            im = cv2.imread(str(ft))  # single-modality image (BGR)
            if im is None:
                raise FileNotFoundError(f'Image Not Found {ft}')
            h0, w0 = im.shape[:2]  # orig hw
            if rect_mode:  # resize long side to imgsz while maintaining aspect ratio
                r = self.imgsz / max(h0, w0)  # ratio
                if r != 1:  # if sizes are not equal
                    w, h = (min(math.ceil(w0 * r), self.imgsz), min(math.ceil(h0 * r), self.imgsz))
                    im = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR)
            elif not (h0 == w0 == self.imgsz):  # resize by stretching image to square imgsz
                im = cv2.resize(im, (self.imgsz, self.imgsz), interpolation=cv2.INTER_LINEAR)

            # Add to buffer if training with augmentations
            if self.augment:
                self.ims[i], self.im_hw0[i], self.im_hw[i] = im, (h0, w0), im.shape[:2]  # im, hw_original, hw_resized
                self.buffer.append(i)
                if len(self.buffer) >= self.max_buffer_length:
                    j = self.buffer.pop(0)  # evict the oldest buffered image
                    self.ims[j], self.im_hw0[j], self.im_hw[j] = None, None, None

            return im, (h0, w0), im.shape[:2]

        return self.ims[i], self.im_hw0[i], self.im_hw[i]

class llltDataset_lif(llltDataset):
    """Dual-stream variant of llltDataset: loads two modality images per sample and stacks them channel-wise."""

    def load_image(self, i, rect_mode=True):
        """
        Load the dual-stream image pair for dataset index `i`.

        Args:
            i (int): Dataset index.
            rect_mode (bool): If True, resize the long side to `imgsz` keeping the aspect
                ratio; otherwise stretch both images to a square of `imgsz` x `imgsz`.

        Returns:
            (np.ndarray, np.ndarray, tuple, tuple): First-stream image, second-stream image,
            original (h, w), resized (h, w).

        Raises:
            FileNotFoundError: If either image file cannot be read.
        """
        img_st, img_nd, ft = self.img_sts[i], self.img_nds[i], self.im_files[i]
        # Derive the companion stream's path from the primary path by string substitution.
        fll = Path(str(ft).replace('channel2', 'channel').replace('t.jpg', 'll.jpg'))
        if img_st is None:  # not cached in RAM
            img_st = cv2.imread(str(ft))
            img_nd = cv2.imread(str(fll))
            if img_st is None or img_nd is None:
                raise FileNotFoundError(f'Image Not Found {ft} and {fll}')
            h0, w0 = img_st.shape[:2]  # orig hw
            if rect_mode:  # resize long side to imgsz while maintaining aspect ratio
                r = self.imgsz / max(h0, w0)  # ratio
                if r != 1:  # if sizes are not equal
                    w, h = (min(math.ceil(w0 * r), self.imgsz), min(math.ceil(h0 * r), self.imgsz))
                    img_st = cv2.resize(img_st, (w, h), interpolation=cv2.INTER_LINEAR)
                    img_nd = cv2.resize(img_nd, (w, h), interpolation=cv2.INTER_LINEAR)
            elif not (h0 == w0 == self.imgsz):  # resize by stretching image to square imgsz
                img_st = cv2.resize(img_st, (self.imgsz, self.imgsz), interpolation=cv2.INTER_LINEAR)
                img_nd = cv2.resize(img_nd, (self.imgsz, self.imgsz), interpolation=cv2.INTER_LINEAR)

            # Cache during augmented training to avoid re-reading from disk
            if self.augment:
                self.img_sts[i], self.img_nds[i], self.im_hw0[i], self.im_hw[i] = img_st, img_nd, (h0, w0), img_st.shape[:2]
                self.buffer.append(i)
                if len(self.buffer) >= self.max_buffer_length:
                    j = self.buffer.pop(0)
                    # BUGFIX: evict the OLDEST buffered entry (index j), not index i. The old
                    # code nulled index i, wiping the image just loaded and never freeing the
                    # evicted one, so the buffer leaked and the current sample lost its cache.
                    self.img_sts[j], self.img_nds[j], self.im_hw0[j], self.im_hw[j] = None, None, None, None

            return img_st, img_nd, (h0, w0), img_st.shape[:2]

        return self.img_sts[i], self.img_nds[i], self.im_hw0[i], self.im_hw[i]

    def get_image_and_label(self, index):
        """Get and return label information from the dataset."""
        label = deepcopy(self.labels[index])  # requires deepcopy() https://github.com/ultralytics/ultralytics/pull/1948
        label.pop('shape', None)  # shape is for rect, remove it
        img_st, img_nd, label['ori_shape'], label['resized_shape'] = self.load_image(index)  # still original aspect
        label['img'] = np.concatenate([img_st, img_nd], axis=2)  # stack both streams channel-wise (6-channel image)
        label['ratio_pad'] = (label['resized_shape'][0] / label['ori_shape'][0],
                              label['resized_shape'][1] / label['ori_shape'][1])  # for evaluation
        if self.rect:
            label['rect_shape'] = self.batch_shapes[self.batch[index]]
        return self.update_labels_info(label)

    def build_transforms(self, hyp=None):
        """Build and append augmentation and formatting transforms for the 6-channel pipeline."""
        if self.augment:  # training-time augmentation
            hyp.mosaic = hyp.mosaic if self.augment and not self.rect else 0.0  # mosaic only when not rect training
            hyp.mixup = hyp.mixup if self.augment and not self.rect else 0.0  # mixup only when not rect training
            transforms = v8_multi_transforms(self, self.imgsz, hyp)  # multi-stream YOLOv8 augmentation pipeline
            LOGGER.info("Using YOLOv8 data augmentation transforms.")
        else:  # no augmentation: 6-channel letterbox resize only
            transforms = Compose([LetterBox6Channel(new_shape=(self.imgsz, self.imgsz), scaleup=False)])
            LOGGER.info("Just using LetterBox transform.")

        # Final formatting transform producing model-ready tensors
        transforms.append(
            Format(bbox_format='xywh',  # bounding-box format
                   normalize=True,  # normalize box coordinates
                   return_mask=self.use_segments,  # return segmentation masks
                   return_keypoint=self.use_keypoints,  # return keypoints
                   batch_idx=True,  # return batch indices
                   mask_ratio=hyp.mask_ratio,  # mask downsample ratio
                   mask_overlap=hyp.overlap_mask))  # whether masks may overlap
        return transforms



class llltDataset_6ch(llltDataset):
    """
    Multimodal 6-channel low-light/infrared dataset loader.

    Loads a thermal image and its low-light companion for each index and stacks them
    channel-wise into a single HxWx6 array for downstream transforms.
    """

    def load_image(self, i, rect_mode=True):
        """
        Load the image pair for dataset index `i`.

        Returns:
            (tuple): (img_st, img_nd, (h0, w0), resized hw) — thermal image, low-light image,
            original height/width, and resized height/width.
        """
        img_st, img_nd, ft, fn = self.img_sts[i], self.img_nds[i], self.im_files[i], self.npy_files[i]
        # Derive the low-light companion path from the thermal image path by string substitution.
        fll = Path(str(ft).replace('channel2', 'channel').replace('t.jpg', 'll.jpg'))
        if img_st is None:  # not cached in RAM
            img_st = cv2.imread(str(ft))
            img_nd = cv2.imread(str(fll))
            # cv2.imread returns None (no exception) on a missing/unreadable file.
            if img_st is None or img_nd is None:
                raise FileNotFoundError(f'Image Not Found {ft} and {fll}')
            h0, w0 = img_st.shape[:2]  # orig hw
            if rect_mode:  # resize long side to imgsz while maintaining aspect ratio
                r = self.imgsz / max(h0, w0)  # ratio
                if r != 1:  # if sizes are not equal
                    w, h = (min(math.ceil(w0 * r), self.imgsz), min(math.ceil(h0 * r), self.imgsz))
                    img_st = cv2.resize(img_st, (w, h), interpolation=cv2.INTER_LINEAR)
                    img_nd = cv2.resize(img_nd, (w, h), interpolation=cv2.INTER_LINEAR)
            elif not (h0 == w0 == self.imgsz):  # resize by stretching image to square imgsz
                img_st = cv2.resize(img_st, (self.imgsz, self.imgsz), interpolation=cv2.INTER_LINEAR)
                img_nd = cv2.resize(img_nd, (self.imgsz, self.imgsz), interpolation=cv2.INTER_LINEAR)

            # Cache images when training with augmentation to avoid re-reading from disk.
            if self.augment:
                self.img_sts[i], self.img_nds[i], self.im_hw0[i], self.im_hw[i] = img_st, img_nd, (h0, w0), img_st.shape[:2]
                self.buffer.append(i)
                if len(self.buffer) >= self.max_buffer_length:
                    # Evict the OLDEST cached entry (index j). The previous code nulled index i,
                    # which dropped the image just cached and leaked the evicted one.
                    j = self.buffer.pop(0)
                    self.img_sts[j], self.img_nds[j], self.im_hw0[j], self.im_hw[j] = None, None, None, None

            return img_st, img_nd, (h0, w0), img_st.shape[:2]

        return self.img_sts[i], self.img_nds[i], self.im_hw0[i], self.im_hw[i]

    def __getitem__(self, index):
        """Return transformed label information (plus attention masks) for the given index."""
        label = self.transforms(self.get_image_and_label(index))
        return self.add_mask(label)

    def add_mask(self, label):
        """
        Attach binary foreground masks downsampled at strides 8/16/32 and a padded labels tensor.

        Args:
            label (dict): transformed sample with 'img' (6-channel tensor), 'bboxes' and 'cls'.

        Returns:
            (dict): same dict with 'mask8', 'mask16', 'mask32' and 'labels_out' added.
        """
        # Split the stacked 6-channel tensor back into its two 3-channel halves for a sanity check.
        img_st, img_nd = label['img'].split(3, dim=0)
        assert img_st.shape == img_nd.shape
        nl = label["bboxes"].shape[0]  # number of labels in this image
        from ultralytics.utils import ops
        from PIL import Image
        # labels_out layout: [cls, ?, x, y, w, h]; column 1 is left zero here —
        # presumably the per-image batch index filled in later by the collate fn (TODO confirm).
        labels_out = torch.zeros((nl, 6))
        if nl:
            labels_out[:, 2:] = label["bboxes"]  # normalized xywh boxes
            labels_out[:, 0] = label["cls"].squeeze(-1)  # class ids
        mask = torch.zeros(img_st.shape[1], img_st.shape[2]).numpy()
        mask_h, mask_w = np.shape(mask)
        if nl:
            # Convert normalized xywh to pixel xyxy and paint each box region with 255.
            labels_per_img = ops.xywhn2xyxy(label["bboxes"], mask_w, mask_h)
            for boxp in labels_per_img:
                mask[int(boxp[1]):int(boxp[3]) + 1, int(boxp[0]):int(boxp[2]) + 1] = 255
        # Nearest-neighbor downsample of the mask by a stride factor, with a leading channel dim.
        pil_ds = lambda inputs, dsr: np.expand_dims(
            np.array(
                Image.fromarray(inputs).resize((mask_w // dsr, mask_h // dsr), Image.NEAREST),
                inputs.dtype), 0)
        label['mask8'] = torch.from_numpy(np.ascontiguousarray(pil_ds(mask, 8)))
        label['mask16'] = torch.from_numpy(np.ascontiguousarray(pil_ds(mask, 16)))
        label['mask32'] = torch.from_numpy(np.ascontiguousarray(pil_ds(mask, 32)))
        label['labels_out'] = labels_out
        return label

    def get_image_and_label(self, index):
        """Get and return label information from the dataset, with the 6-channel image attached."""
        label = deepcopy(self.labels[index])  # requires deepcopy() https://github.com/ultralytics/ultralytics/pull/1948
        label.pop('shape', None)  # shape is for rect, remove it
        img_st, img_nd, label['ori_shape'], label['resized_shape'] = self.load_image(index)
        # Stack the two 3-channel images into a single HxWx6 array.
        label['img'] = np.concatenate([img_st, img_nd], axis=2)
        label['ratio_pad'] = (label['resized_shape'][0] / label['ori_shape'][0],
                              label['resized_shape'][1] / label['ori_shape'][1])  # for evaluation
        if self.rect:
            label['rect_shape'] = self.batch_shapes[self.batch[index]]
        return self.update_labels_info(label)

    def build_transforms(self, hyp=None):
        """Build and append the augmentation and formatting transforms for this dataset."""
        if self.augment:
            # Mosaic/mixup are disabled when rectangular training is active.
            hyp.mosaic = hyp.mosaic if self.augment and not self.rect else 0.0
            hyp.mixup = hyp.mixup if self.augment and not self.rect else 0.0
            transforms = v8_multi_transforms(self, self.imgsz, hyp)
            LOGGER.info("Using YOLOv8 data augmentation transforms.")
        else:
            # Validation/inference path: only letterbox-resize the 6-channel image, never upscale.
            transforms = Compose([LetterBox6Channel(new_shape=(self.imgsz, self.imgsz), scaleup=False)])
            LOGGER.info("Just using LetterBox transform.")

        # Final formatting: normalized xywh boxes, optional masks/keypoints, batch indices.
        transforms.append(
            Format(bbox_format='xywh',
                   normalize=True,
                   return_mask=self.use_segments,
                   return_keypoint=self.use_keypoints,
                   batch_idx=True,
                   mask_ratio=hyp.mask_ratio,
                   mask_overlap=hyp.overlap_mask))
        return transforms

    def close_mosaic(self, hyp):
        """Set mosaic, copy_paste and mixup options to 0.0 and rebuild the transform pipeline."""
        hyp.mosaic = 0.0  # set mosaic ratio=0.0
        hyp.copy_paste = 0.0  # keep the same behavior as previous v8 close-mosaic
        hyp.mixup = 0.0  # keep the same behavior as previous v8 close-mosaic
        self.transforms = self.build_transforms(hyp)

class MutliCOCODataset(llltDataset_6ch):
    """
    COCO-style dataset for the 6-channel pipeline.

    Annotations are read from a COCO JSON file; the second modality is an all-zero
    placeholder image so the 6-channel machinery can be reused on single-modality data.
    """

    def load_image(self, i, rect_mode=True):
        """
        Load 1 image from dataset index `i`; the second modality is an all-zero image.

        Returns:
            (tuple): (img_st, img_nd, (h0, w0), resized hw).
        """
        img_st, img_nd, ft, fn = self.img_sts[i], self.img_nds[i], self.im_files[i], self.npy_files[i]
        if img_st is None:  # not cached in RAM
            img_st = cv2.imread(str(ft))
            # Check BEFORE np.zeros_like: cv2.imread returns None on failure, and
            # zeros_like(None) would misbehave before the old check could fire.
            if img_st is None:
                raise FileNotFoundError(f'Image Not Found {ft}')
            img_nd = np.zeros_like(img_st)  # placeholder second modality
            h0, w0 = img_st.shape[:2]  # orig hw
            if rect_mode:  # resize long side to imgsz while maintaining aspect ratio
                r = self.imgsz / max(h0, w0)  # ratio
                if r != 1:  # if sizes are not equal
                    w, h = (min(math.ceil(w0 * r), self.imgsz), min(math.ceil(h0 * r), self.imgsz))
                    img_st = cv2.resize(img_st, (w, h), interpolation=cv2.INTER_LINEAR)
                    img_nd = cv2.resize(img_nd, (w, h), interpolation=cv2.INTER_LINEAR)
            elif not (h0 == w0 == self.imgsz):  # resize by stretching image to square imgsz
                img_st = cv2.resize(img_st, (self.imgsz, self.imgsz), interpolation=cv2.INTER_LINEAR)
                img_nd = cv2.resize(img_nd, (self.imgsz, self.imgsz), interpolation=cv2.INTER_LINEAR)

            # Cache images when training with augmentation to avoid re-reading from disk.
            if self.augment:
                self.img_sts[i], self.img_nds[i], self.im_hw0[i], self.im_hw[i] = img_st, img_nd, (h0, w0), img_st.shape[:2]
                self.buffer.append(i)
                if len(self.buffer) >= self.max_buffer_length:
                    # Evict the OLDEST cached entry (index j). The previous code nulled index i,
                    # which dropped the image just cached and leaked the evicted one.
                    j = self.buffer.pop(0)
                    self.img_sts[j], self.img_nds[j], self.im_hw0[j], self.im_hw[j] = None, None, None, None

            return img_st, img_nd, (h0, w0), img_st.shape[:2]

        return self.img_sts[i], self.img_nds[i], self.im_hw0[i], self.im_hw[i]

    def get_labels(self):
        """
        Load annotations from the COCO JSON file, drop degenerate boxes and normalize per image.

        Returns:
            (List[dict]): one dict per image with im_file, shape, cls, bboxes,
            normalized flag and bbox_format.
        """
        self.im_files = []
        labels = []
        LOGGER.info("Loading annotation file...")
        with open(self.json_file) as f:
            annotations = json.load(f)
        images = {f"{x['id']:d}": x for x in annotations["images"]}  # image id (as str) -> image record
        img_to_anns = defaultdict(list)  # image id -> list of its annotations
        for ann in annotations["annotations"]:
            img_to_anns[ann["image_id"]].append(ann)
        if self.isTest:
            # Test mode: swap the 'train' segment of the image path for 'test'.
            self.img_path = self.img_path.replace('train', 'test')
        for img_id, anns in TQDM(img_to_anns.items(), desc=f"Reading annotations {self.json_file}"):
            img = images[f"{img_id:d}"]
            h, w, f = img["height"], img["width"], img["file_name"]
            im_file = Path(self.img_path) / f
            if not im_file.exists():
                LOGGER.info(f"Image {im_file} not found, skipping...")
                continue
            self.im_files.append(str(im_file))
            bboxes = []
            for ann in anns:
                box = np.array(ann["bbox"], dtype=np.float32)  # COCO box: top-left x, top-left y, width, height
                # Convert the top-left corner to the box center (xywh convention).
                box[0] = box[0] + box[2] / 2
                box[1] = box[1] + box[3] / 2
                box[[0, 2]] /= float(w)  # normalize x coordinates to [0, 1]
                box[[1, 3]] /= float(h)  # normalize y coordinates to [0, 1]
                if box[2] <= 0 or box[3] <= 0:
                    continue  # skip zero/negative-area boxes
                cls = ann["category_id"]  # NOTE(review): COCO category ids used as-is (no remapping) — confirm 0-based
                bboxes.append([cls] + box.tolist())
            lb = np.array(bboxes, dtype=np.float32) if len(bboxes) else np.zeros(
                (0, 5), dtype=np.float32)  # (n, 5): [cls, x, y, w, h]
            labels.append(
                dict(
                    im_file=im_file,  # image file path
                    shape=(h, w),  # image height and width
                    cls=lb[:, 0:1],  # class ids, shape (n, 1)
                    bboxes=lb[:, 1:],  # boxes, shape (n, 4)
                    normalized=True,  # coordinates already in [0, 1]
                    bbox_format="xywh",  # center-x, center-y, width, height
                )
            )
        return labels


# Multimodal 2-channel low-light + infrared dataset loader
class llltDataset_2ch(llltDataset):
    """Handles object detection tasks by loading annotations from a specified JSON file, supporting YOLO format."""

    def load_image(self, i, rect_mode=True):
        """
        Load the 2-channel image (thermal + low-light, both grayscale) for dataset index `i`.

        Returns:
            (tuple): (im, (h0, w0), resized hw) — merged 2-channel image, original hw, resized hw.
        """
        im, ft, fn = self.ims[i], self.im_files[i], self.npy_files[i]
        # Derive the low-light companion path from the thermal image path.
        fll = Path(str(ft).replace('channel2', 'channel').replace('t.jpg', 'll.jpg'))
        if im is None:  # not cached in RAM
            img_st = cv2.imread(str(ft), cv2.IMREAD_GRAYSCALE)
            img_nd = cv2.imread(str(fll), cv2.IMREAD_GRAYSCALE)
            # Validate BEFORE merging: cv2.imread returns None on failure, and cv2.merge on a
            # None input raises an opaque cv2 error — the old post-merge `im is None` check
            # could never trigger for a missing file.
            if img_st is None or img_nd is None:
                raise FileNotFoundError(f'Image Not Found {ft}')
            im = cv2.merge((img_st, img_nd))  # stack the two grayscale images into a 2-channel image
            h0, w0 = im.shape[:2]  # orig hw
            if rect_mode:  # resize long side to imgsz while maintaining aspect ratio
                r = self.imgsz / max(h0, w0)  # ratio
                if r != 1:  # if sizes are not equal
                    w, h = (min(math.ceil(w0 * r), self.imgsz), min(math.ceil(h0 * r), self.imgsz))
                    im = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR)
            elif not (h0 == w0 == self.imgsz):  # resize by stretching image to square imgsz
                im = cv2.resize(im, (self.imgsz, self.imgsz), interpolation=cv2.INTER_LINEAR)

            # Add to buffer if training with augmentations
            if self.augment:
                self.ims[i], self.im_hw0[i], self.im_hw[i] = im, (h0, w0), im.shape[:2]  # im, hw_original, hw_resized
                self.buffer.append(i)
                if len(self.buffer) >= self.max_buffer_length:
                    j = self.buffer.pop(0)  # evict the oldest cached entry
                    self.ims[j], self.im_hw0[j], self.im_hw[j] = None, None, None

            return im, (h0, w0), im.shape[:2]

        return self.ims[i], self.im_hw0[i], self.im_hw[i]
