"""
    Title

    author: wxz
    date: 2021-12-10
    github: https://github.com/xinzwang
"""

import glob
import os
from multiprocessing import Pool
from pathlib import Path
from itertools import repeat

import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm

from yolov5.utils.augmentations import letterbox
from yolov5.utils.datasets import img2label_paths, get_hash, verify_image_label, load_image
from yolov5.utils.general import LOGGER, xywhn2xyxy, xyxy2xywhn

# Parameters
HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
IMG_FORMATS = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo']  # acceptable image suffixes
VID_FORMATS = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv']  # acceptable video suffixes
WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))  # DDP world size (number of distributed processes)
NUM_THREADS = min(8, max(1, os.cpu_count() - 1))  # number of multiprocessing workers for label caching


def ImageLoaderHelper(path, img_size=640, batch_size=16, workers=8):
    """Build an ImageSet from `path` and wrap it in a DataLoader.

    Args:
        path: image directory, txt list file, or list of either.
        img_size: square letterbox target size in pixels.
        batch_size: requested batch size (clamped to the dataset length).
        workers: upper bound on DataLoader worker processes.

    Returns:
        (loader, dataset) tuple; the loader keeps order (no shuffling) and
        uses ImageSet.collate_fn to batch items.
    """
    dataset = ImageSet(path, img_size, batch_size)
    batch_size = min(batch_size, len(dataset))
    # cap workers by the CPU share per DDP process, the batch size, and the request
    nw = min([os.cpu_count() // WORLD_SIZE,
              batch_size if batch_size > 1 else 0,
              workers])
    loader = DataLoader(dataset,
                        batch_size=batch_size,
                        shuffle=False,
                        num_workers=nw,
                        collate_fn=ImageSet.collate_fn)
    return loader, dataset


class ImageSet(Dataset):
    """Detection dataset: images plus YOLO-format labels with a disk cache.

    Image paths are gathered from a directory (recursively) or from a txt
    list file; labels are verified in parallel and cached to `<name>.cache`.
    Each item is returned letterboxed to `img_size` with normalized xywh
    labels in a (nl, 6) tensor whose first column is filled by collate_fn.
    """
    cache_version = 0.6  # bump to invalidate on-disk caches with an older layout

    def __init__(self, path='../datasets/coco/', img_size=640, batch_size=16):
        """
        Args:
            path (str | list): image directory, txt list file, or list of either.
            img_size (int): square letterbox target size in pixels.
            batch_size (int): only used to derive per-image batch indices.
        """
        self.img_size = img_size
        self.batch_size = batch_size

        # ---- collect candidate image paths --------------------------------
        img_file = []
        for p in path if isinstance(path, list) else [path]:
            p = Path(p)
            if p.is_dir():  # directory: take every file, recursively
                img_file += glob.glob(str(p / '**' / '*.*'), recursive=True)
            elif p.is_file():  # list file, e.g. train.txt or val.txt
                with open(p) as t:
                    lines = t.read().strip().splitlines()
                    parent = str(p.parent) + os.sep
                    for x in lines:
                        if x.startswith('./'):
                            # entries relative to the list file's directory
                            img_file.append(x.replace('./', parent))
                        else:
                            # BUGFIX: was `img_file += x`, which extended the
                            # list with the string's individual characters
                            img_file.append(x)
            else:
                raise Exception(f'{p} does not exist')
        # normalize separators; keep only recognized image suffixes
        self.img_files = sorted(x.replace('/', os.sep) for x in img_file
                                if x.split('.')[-1].lower() in IMG_FORMATS)
        assert self.img_files, 'No images found'

        # ---- load or rebuild the label cache ------------------------------
        self.label_files = img2label_paths(self.img_files)
        # cache lives next to the list file, or next to the first label directory
        cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache')
        try:
            cache, exists = np.load(cache_path, allow_pickle=True).item(), True
            assert cache['version'] == self.cache_version  # same cache layout
            assert cache['hash'] == get_hash(self.label_files + self.img_files)  # same file set
        except Exception:  # missing, stale, or unreadable cache -> rebuild
            print('\nCache does not exist or has expired.')
            print('Trying to create new cache file ......')
            cache, exists = self.cache_labels(cache_path), False

        # ---- report cache statistics --------------------------------------
        nf, nm, ne, nc, n = cache.pop('results')  # found, missing, empty, corrupted, total
        if exists:
            d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
            tqdm(None, desc=d, total=n, initial=n)  # display an already-complete bar
            if cache['msgs']:
                LOGGER.info('\n'.join(cache['msgs']))  # surface verification warnings

        # ---- unpack cache: remaining keys map path -> [labels, shape, segments]
        for k in ('hash', 'version', 'msgs'):  # drop summary keys first
            cache.pop(k)
        labels, shapes, segments = zip(*cache.values())
        self.labels = list(labels)
        # BUGFIX: keep segments as a list so the class filter below can assign;
        # the tuple from zip() would raise TypeError on item assignment
        self.segments = list(segments)
        self.shape = np.array(shapes, dtype=np.float64)  # original sizes per image
        self.img_files = list(cache.keys())  # only images that passed verification
        self.label_files = img2label_paths(list(cache.keys()))  # re-derive to match
        n = len(shapes)  # number of images
        self.batch = np.floor(np.arange(n) / batch_size).astype(np.int32)  # batch index of each image
        self.n = n
        self.indices = range(n)

        # ---- optional class filter (empty list keeps everything) ----------
        include_class = []  # class ids to keep, e.g. [0, 2]; empty disables filtering
        include_class_array = np.array(include_class).reshape(1, -1)
        for i, (label, segment) in enumerate(zip(self.labels, self.segments)):
            if include_class:
                j = (label[:, 0:1] == include_class_array).any(1)
                self.labels[i] = label[j]
                if segment:
                    self.segments[i] = segment[j]

        # TODO-wxz: rectangular inference (crop the grey letterbox borders to a
        # minimal rectangle to speed up training/inference) — not implemented yet

        # per-image caches; load_image may populate self.imgs lazily
        self.imgs, self.img_npy = [None] * n, [None] * n

    def __len__(self):
        """Number of verified images in the dataset."""
        return len(self.img_files)

    def __getitem__(self, index):
        """Return (CHW RGB tensor, labels (nl, 6), path, shapes, original BGR image)."""
        index = self.indices[index]

        # load image (possibly from cache) with its original and resized sizes
        img0, (h0, w0), (h, w) = load_image(self, index)

        # letterbox to a fixed square, never scaling up
        shape = self.img_size
        img, ratio, pad = letterbox(img0, shape, auto=False, scaleup=False)
        shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling

        labels = self.labels[index].copy()
        if labels.size:
            # normalized xywh -> pixel xyxy in the letterboxed frame
            labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])

        nl = len(labels)
        if nl:
            # back to normalized xywh, clipped to the padded image bounds
            labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3)

        # column 0 is reserved for the image index, filled in by collate_fn
        labels_out = torch.zeros((nl, 6))
        if nl:
            labels_out[:, 1:] = torch.from_numpy(labels)

        img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
        img = np.ascontiguousarray(img)

        return torch.from_numpy(img), labels_out, self.img_files[index], shapes, img0

    def cache_labels(self, path=Path('./labels.cache'), prefix=''):
        """Verify every image/label pair in parallel and persist the results.

        Returns:
            dict mapping image path -> [labels, shape, segments], plus
            'hash', 'results', 'msgs', 'version' summary keys.
        """
        x = {}  # cache dict
        nm, nf, ne, nc, msgs = 0, 0, 0, 0, []  # number missing, found, empty, corrupt, messages
        desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..."
        with Pool(NUM_THREADS) as pool:
            pbar = tqdm(pool.imap(verify_image_label, zip(self.img_files, self.label_files, repeat(prefix))),
                        desc=desc, total=len(self.img_files))
            for im_file, lb, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar:
                nm += nm_f
                nf += nf_f
                ne += ne_f
                nc += nc_f
                if im_file:  # empty im_file means the pair was rejected
                    x[im_file] = [lb, shape, segments]
                if msg:
                    msgs.append(msg)
                pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupted"

        pbar.close()
        if msgs:
            LOGGER.info('\n'.join(msgs))
        if nf == 0:
            LOGGER.warning(f'{prefix}WARNING: No labels found in {path}. See {HELP_URL}')
        x['hash'] = get_hash(self.label_files + self.img_files)
        x['results'] = nf, nm, ne, nc, len(self.img_files)
        x['msgs'] = msgs  # warnings
        x['version'] = self.cache_version  # cache version
        try:
            np.save(path, x)  # numpy appends .npy to the target
            path.with_suffix('.cache.npy').rename(path)  # strip the .npy suffix
            LOGGER.info(f'{prefix}New cache created: {path}')
        except Exception as e:
            LOGGER.warning(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}')  # not writeable
        return x

    @staticmethod
    def collate_fn(batch):
        """Stack images and concatenate labels, tagging each label row with its
        image index in column 0 (needed by build_targets())."""
        img, label, path, shapes, img0 = zip(*batch)  # transposed
        for i, lb in enumerate(label):
            lb[:, 0] = i  # add target image index for build_targets()
        return torch.stack(img, 0), torch.cat(label, 0), path, shapes, img0


if __name__ == '__main__':
    path = '../../datasets/coco/my_test.txt'
    # path = '../../datasets/coco/val2017.txt'

    dataloader, dataset = ImageLoaderHelper(path, img_size=640, batch_size=32)
    pbar = tqdm(dataloader)

    # BUGFIX: collate_fn yields 5 items (im, targets, paths, shapes, img0);
    # unpacking only 4 raised ValueError on the first batch.
    for batch_i, (im, targets, paths, shapes, im0s) in enumerate(pbar):
        print(batch_i)
