# Ultralytics YOLO 🚀, AGPL-3.0 license
import os
import random
from pathlib import Path

import numpy as np
import torch
from PIL import Image
from torch.utils.data import dataloader, distributed

from ultralytics.data.loaders import (LOADERS, LoadImages, LoadPilAndNumpy, LoadScreenshots, LoadStreams, LoadTensor,
                                      SourceTypes, autocast_list, LoadMultiImages)
from ultralytics.data.utils import IMG_FORMATS, VID_FORMATS
from ultralytics.utils import RANK, colorstr, LOGGER
from ultralytics.utils.checks import check_file


from .dataset import YOLODataset, llltDataset, llltDataset_6ch, llltDataset_single, MutliCOCODataset, llltDataset_lif
from .utils import PIN_MEMORY


class InfiniteDataLoader(dataloader.DataLoader):
    """
    Dataloader that reuses workers.

    Uses same syntax as vanilla DataLoader. Swapping the batch sampler for a
    `_RepeatSampler` keeps one persistent iterator (and its worker processes)
    alive across epochs instead of re-spawning workers every epoch.
    """

    def __init__(self, *args, **kwargs):
        """Dataloader that infinitely recycles workers, inherits from DataLoader."""
        super().__init__(*args, **kwargs)
        # DataLoader.__setattr__ forbids reassigning 'batch_sampler' after init,
        # so bypass it with object.__setattr__ to install the repeating sampler.
        object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
        # One persistent iterator, reused by every __iter__ call below.
        self.iterator = super().__iter__()

    def __len__(self):
        """Returns the length of the batch sampler's sampler."""
        return len(self.batch_sampler.sampler)

    def __iter__(self):
        """Creates a sampler that repeats indefinitely."""
        # Yield exactly one epoch's worth of batches from the persistent iterator.
        for _ in range(len(self)):
            yield next(self.iterator)

    def reset(self):
        """
        Reset iterator.

        This is useful when we want to modify settings of dataset while training.
        """
        # _get_iterator() is the DataLoader internal that builds a fresh iterator
        # (and fresh workers), picking up any changed dataset attributes.
        self.iterator = self._get_iterator()


class _RepeatSampler:
    """
    Sampler that repeats forever.

    Args:
        sampler (Dataset.sampler): The sampler to repeat.
    """

    def __init__(self, sampler):
        """Initializes an object that repeats a given sampler indefinitely."""
        self.sampler = sampler

    def __iter__(self):
        """Iterates over the 'sampler' and yields its contents."""
        while True:
            yield from iter(self.sampler)


def seed_worker(worker_id):  # noqa
    """Set dataloader worker seed https://pytorch.org/docs/stable/notes/randomness.html#dataloader."""
    # Derive a 32-bit seed from torch's per-worker initial seed and propagate it
    # to numpy and the stdlib RNG so all three libraries are seeded consistently.
    seed = torch.initial_seed() % 2 ** 32
    np.random.seed(seed)
    random.seed(seed)


def build_yolo_dataset(cfg, img_path, batch, data, mode='train', rect=False, stride=32):
    """Build YOLO Dataset."""
    training = mode == 'train'
    kwargs = dict(
        img_path=img_path,
        imgsz=cfg.imgsz,
        batch_size=batch,
        augment=training,  # augmentation only while training
        hyp=cfg,  # TODO: probably add a get_hyps_from_cfg function
        rect=cfg.rect or rect,  # rectangular batches
        cache=cfg.cache or None,
        single_cls=cfg.single_cls or False,
        stride=int(stride),
        pad=0.0 if training else 0.5,  # extra padding at eval time
        prefix=colorstr(f'{mode}: '),
        use_segments=cfg.task == 'segment',
        use_keypoints=cfg.task == 'pose',
        classes=cfg.classes,
        data=data,
        fraction=cfg.fraction if training else 1.0,
    )
    return YOLODataset(**kwargs)


def build_llltData(cfg, img_path, json_file, batch, mode="train", rect=False, stride=32, isTest=False):
    """
    Build the multi-channel COCO-style dataset (MutliCOCODataset) for lllt training/eval.

    Args:
        cfg: Config namespace providing imgsz, rect, cache, single_cls, classes, fraction.
        img_path (str): Directory containing the images.
        json_file (str): Path to the COCO-format annotation JSON.
        batch (int): Batch size.
        mode (str): 'train' enables augmentation and fractional sampling; any other
            value is treated as evaluation (pad=0.5, full fraction).
        rect (bool): Force rectangular batches (OR-ed with cfg.rect).
        stride (int): Model stride used for image-size rounding/padding.
        isTest (bool): Forwarded to the dataset; presumably toggles test-time
            behavior — confirm against MutliCOCODataset.

    Returns:
        MutliCOCODataset: The constructed dataset.
    """
    # Fix: the docstring was previously a no-op string expression placed after the
    # LOGGER call; dead commented-out llltDataset_single constructor removed.
    LOGGER.info(f"build attributes is {img_path}, {json_file}, {batch}, {mode}, {rect}, {stride}, {isTest}")

    return MutliCOCODataset(
        img_path=img_path,
        json_file=json_file,
        imgsz=cfg.imgsz,
        batch_size=batch,
        augment=mode == "train",  # augmentation only while training
        hyp=cfg,  # TODO: probably add a get_hyps_from_cfg function
        rect=cfg.rect or rect,  # rectangular batches
        cache=cfg.cache or None,
        single_cls=cfg.single_cls or False,
        stride=int(stride),
        pad=0.0 if mode == "train" else 0.5,
        prefix=colorstr(f"{mode}: "),
        classes=cfg.classes,
        fraction=cfg.fraction if mode == "train" else 1.0,
        isTest=isTest
    )


def build_dataloader(dataset, batch, workers, shuffle=True, rank=-1):
    """Return an InfiniteDataLoader or DataLoader for training or validation set."""
    batch = min(batch, len(dataset))  # never request more per batch than the dataset holds
    device_count = torch.cuda.device_count()  # number of CUDA devices
    # Cap workers by CPUs-per-device, by batch size (0 when batch <= 1), and by the request.
    num_workers = min(os.cpu_count() // max(device_count, 1), batch if batch > 1 else 0, workers)
    sampler = distributed.DistributedSampler(dataset, shuffle=shuffle) if rank != -1 else None
    rng = torch.Generator()
    rng.manual_seed(6148914691236517205 + RANK)  # fixed base seed offset by process rank
    return InfiniteDataLoader(
        dataset=dataset,
        batch_size=batch,
        shuffle=shuffle and sampler is None,  # DistributedSampler handles shuffling itself
        num_workers=num_workers,
        sampler=sampler,
        pin_memory=PIN_MEMORY,
        collate_fn=getattr(dataset, 'collate_fn', None),
        worker_init_fn=seed_worker,
        generator=rng,
    )


def check_source(source):
    """Classify the inference source and return it with per-type boolean flags."""
    # Flags: camera/stream, screen capture, image object(s), in-memory loader, tensor.
    webcam = screenshot = from_img = in_memory = tensor = False

    if isinstance(source, (str, int, Path)):  # int for a local USB camera index
        source = str(source)
        # File if the suffix is a known image/video extension.
        is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
        # URL if it starts with a supported protocol scheme.
        is_url = source.lower().startswith(('https://', 'http://', 'rtsp://', 'rtmp://', 'tcp://'))
        # Camera/stream: numeric index, a .streams list file, or a non-file URL.
        webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file)
        screenshot = source.lower() == 'screen'
        if is_url and is_file:
            source = check_file(source)  # download the remote file locally
    elif isinstance(source, LOADERS):
        in_memory = True  # already a loader object living in memory
    elif isinstance(source, (list, tuple)):
        source = autocast_list(source)  # convert every element to PIL Image or np array
        from_img = True
    elif isinstance(source, (Image.Image, np.ndarray)):
        from_img = True
    elif isinstance(source, torch.Tensor):
        tensor = True
    else:
        raise TypeError('不支持的图像类型。支持的类型请参考 https://docs.ultralytics.com/modes/predict')

    return source, webcam, screenshot, from_img, in_memory, tensor


def load_inference_source(source=None, imgsz=640, vid_stride=1, buffer=False, isMulti=False):
    """
    Load an inference source for object detection and apply necessary transformations.

    Args:
        source (str, Path, Tensor, PIL.Image, np.ndarray): The input source for inference.
        imgsz (int, optional): The size of the image for inference. Default is 640.
        vid_stride (int, optional): The frame interval for video sources. Default is 1.
        buffer (bool, optional): Determined whether stream frames will be buffered. Default is False.
        isMulti (bool, optional): Use LoadMultiImages instead of LoadImages for
            file/directory sources. Default is False.

    Returns:
        dataset (Dataset): A dataset object for the specified input source.
    """
    source, webcam, screenshot, from_img, in_memory, tensor = check_source(source)
    source_type = source.source_type if in_memory else SourceTypes(webcam, screenshot, from_img, tensor)

    # Select the loader matching the detected source kind.
    if tensor:
        dataset = LoadTensor(source)
    elif in_memory:
        dataset = source  # already a loader object; use as-is
    elif webcam:
        dataset = LoadStreams(source, imgsz=imgsz, vid_stride=vid_stride, buffer=buffer)
    elif screenshot:
        dataset = LoadScreenshots(source, imgsz=imgsz)
    elif from_img:
        dataset = LoadPilAndNumpy(source, imgsz=imgsz)
    elif isMulti:
        dataset = LoadMultiImages(source, imgsz=imgsz, vid_stride=vid_stride)
    else:
        dataset = LoadImages(source, imgsz=imgsz, vid_stride=vid_stride)

    # Attach source types to the dataset
    dataset.source_type = source_type

    return dataset


if __name__ == "__main__":
    # Smoke test: build a small lllt training dataset with CLI-overridable args.
    # NOTE(review): the image/annotation paths and device list below are
    # hard-coded to a specific developer machine — adjust before reuse.
    from ultralytics.cfg import get_cfg
    from ultralytics.utils import DEFAULT_CFG
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--data", type=str, default="lllt.yaml", help="data yaml file")
    parser.add_argument("--model", type=str, default="yolov8l.yaml", help="model yaml file")
    parser.add_argument("--weights", type=str, default="./weight/69M_best.pt", help="weights file")
    parser.add_argument("--epochs", type=int, default=100, help="epochs num")
    parser.add_argument("--batch", type=int, default=12, help="batch size") # NOTE(review): 24G 16 / 50G 24 / 16G 12 appears to map GPU memory to batch size — confirm
    parser.add_argument("--workers", type=int, default=16, help="workers num")
    args = parser.parse_args()
    train_args = dict(
        model=args.weights,
        data=args.data,
        epochs=args.epochs,
        batch=args.batch,
        device=[2,3,4],  # hard-coded GPU indices (machine-specific)
        # half=True,
        # amp=False,
        # single_cls=False,
        # optimizer='auto',
        project='runs/train',
        name='exp_llt_train9_t_pretrain',
        exist_ok=True,
        # resume=True,
    )
    # 'args' is rebound here: argparse Namespace -> merged ultralytics config.
    args = get_cfg(DEFAULT_CFG, train_args)
    dataset = build_llltData(cfg = args,
                    img_path="/data1/lkf24/data/NESR/train",
                    json_file="/data1/lkf24/data/NESR/annotations/train_div.json",
                    batch=2,
                    mode="train",
                    rect=False,
                    stride=32,
                    isTest=False)
    
    
