import glob
import math
import os
import random
from pathlib import Path

import cv2
import torch
from torch.utils.data import Dataset
from torchvision.transforms import transforms
from imutils import paths
import numpy as np
from PIL import Image
from imgaug import augmenters as iaa  # 引入数据增强的包
from utils.torch_utils import cv2ToTorch


class LoadImages:  # for inference
    """Iterate over every image found under *path*, yielding (tensor, path).

    Each image is resized to ``img_size`` (height, width) and converted with
    ``cv2ToTorch`` so the result can be fed straight to a model.
    """

    def __init__(self, path, img_size=640):
        # Accept a bare int for backward compatibility and normalise it to
        # (h, w): __next__ indexes img_size[0]/img_size[1], so the original
        # int default of 640 would raise TypeError there.
        if isinstance(img_size, int):
            img_size = (img_size, img_size)
        self.img_size = img_size
        self.img_files = list(paths.list_images(path))

    def __iter__(self):
        self.count = 0
        return self

    def __next__(self):
        if self.count == len(self.img_files):
            raise StopIteration
        path = self.img_files[self.count]
        img = cv2.imread(path)
        if img is None:  # unreadable/corrupt file: fail with a clear message
            raise FileNotFoundError(f"Image not found or unreadable: {path}")
        # Must match the cv2.resize in load_image (cv2 expects (width, height)).
        img = cv2.resize(img, (self.img_size[1], self.img_size[0]))

        img = cv2ToTorch(img)
        self.count += 1
        return img, path

    def __len__(self):
        return len(self.img_files)  # number of files


def create_dataloader(path, opt, hyp, data_dict, isTrain=True):
    """Build a DataLoader (and its Dataset) for the images under *path*."""
    dataset = LoadImagesAndLabels(path, opt, hyp, data_dict, isTrain)
    # NOTE(review): batch size is hard-capped at 2 here — presumably a debug
    # leftover; confirm before training at scale.
    batch_size = min(2, opt.batch_size, len(dataset))
    workers = getattr(opt, "workers", 8)
    nw = min(os.cpu_count() // 1, batch_size if batch_size > 1 else 0, workers)  # number of workers

    # pin_memory=True places batches in page-locked host memory, which makes
    # host-to-GPU copies faster; it costs RAM, so it is only enabled for
    # training (PyTorch defaults it to False).
    loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        num_workers=nw,
        pin_memory=isTrain,
        shuffle=isTrain,
        drop_last=True,
    )
    return loader, dataset


class LoadImagesAndLabels(Dataset):
    """Classification dataset: one sub-folder of images per class under *path*.

    ``data_dict["names"]`` lists the class folder names; the index of each
    folder becomes the integer label of every image found inside it.
    """

    def __init__(self, path, opt, hyp, data_dict, isTrain):
        super(LoadImagesAndLabels, self).__init__()
        self.img_files = []  # image file paths
        self.labels = []  # integer class labels, parallel to img_files

        self.opt = opt
        self.hyp = hyp  # augmentation hyper-parameters (probabilities / gains)
        self.isTrain = isTrain

        for i, name in enumerate(data_dict["names"]):
            namePath = os.path.join(path, name)
            nameImgFiles = list(paths.list_images(namePath))
            self.img_files.extend(nameImgFiles)
            self.labels.extend([i] * len(nameImgFiles))

        self.n = len(self.img_files)  # number of images

    def __getitem__(self, index):
        """Return (image tensor, label, path); training applies random augmentations."""
        img, path = load_image(self, index)
        if self.isTrain:
            img, ratio, pad = letterbox(img, self.opt.img_size, auto=False, scaleFill=True)

            if random.random() < self.hyp["cutout_r"]:
                img = cutout(img)
            if random.random() < self.hyp["iaa_r"]:
                img = imgaug_augmenters(img, self.hyp)
            if random.random() < self.hyp['perspective_r']:
                img = random_perspective(img,
                                         degrees=self.hyp['degrees'],
                                         translate=self.hyp['translate'],
                                         scale=self.hyp['scale'],
                                         shear=self.hyp['shear'],
                                         perspective=self.hyp['perspective'])
            # Augment colorspace (modifies img in place, no return value)
            if random.random() < self.hyp['hsv_r']:
                augment_hsv(img, hgain=self.hyp['hsv_h'], sgain=self.hyp['hsv_s'], vgain=self.hyp['hsv_v'])
            # flip up-down
            if random.random() < self.hyp['flipud']:
                img = np.flipud(img)
            # flip left-right
            if random.random() < self.hyp['fliplr']:
                img = np.fliplr(img)
            # np.flipud / np.fliplr return negative-stride views, which
            # torch.from_numpy rejects; force a contiguous copy before the
            # tensor conversion below.
            img = np.ascontiguousarray(img)

        # Convert HWC ndarray -> torch tensor
        img = cv2ToTorch(img)

        return img, self.labels[index], path

    def __len__(self):
        return self.n


def mixup_data(x, y, alpha=1.0, device="cpu"):
    '''Compute the mixup data. Return mixed inputs, pairs of targets, and lambda'''
    # lambda ~ Beta(alpha, alpha); alpha <= 0 disables mixing entirely.
    lam = np.random.beta(alpha, alpha) if alpha > 0. else 1.

    n = x.size()[0]
    index = torch.randperm(n)  # random partner for every sample in the batch
    if str(device) != "cpu":
        index = index.to(device)

    # Blend each sample with its randomly chosen partner.
    mixed_x = lam * x + (1 - lam) * x[index, :]
    return mixed_x, y, y[index], lam


def mixup_criterion(y_a, y_b, lam):
    """Return a closure computing the lam-weighted mixup loss of a prediction."""
    def mixed_loss(criterion, pred):
        return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)
    return mixed_loss

def cutout(img, num_holes=1):
    """Randomly mask out one or more rectangular patches from *img*, in place.

    Cutout encourages the network to rely on secondary features instead of a
    few dominant ones. Each hole is filled with a random grey-ish colour.
    Returns the (mutated) input array.
    """
    h, w, _ = img.shape

    # One random scale per call, shared by all holes; small scales dominate.
    scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16
    s = random.choice(scales)
    for _ in range(num_holes):
        y = np.random.randint(h)
        x = np.random.randint(w)

        # max(1, ...) guards tiny images where int(h * s) / int(w * s) is 0:
        # the original random.randint(1, 0) raised ValueError.
        mask_h = random.randint(1, max(1, int(h * s)))
        mask_w = random.randint(1, max(1, int(w * s)))

        # Clip the hole to the image; np.clip's lower bound of 0 already
        # covers the negative case.
        y1 = np.clip(y - mask_h // 2, 0, h)
        y2 = np.clip(y + mask_h // 2, 0, h)
        x1 = np.clip(x - mask_w // 2, 0, w)
        x2 = np.clip(x + mask_w // 2, 0, w)

        # apply random color mask
        img[y1: y2, x1: x2, :] = [random.randint(64, 191) for _ in range(3)]
    return img


# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, index):
    """Load image *index* from disk, resized toward ``self.opt.img_size``.

    Training mode rescales each axis by its own ratio (stretching to the
    target size); eval mode resizes directly to (img_size h, w).
    Returns (img, path).
    """
    path = self.img_files[index]
    img = cv2.imread(path)
    if img is None:  # unreadable/corrupt file: fail with a clear message
        raise FileNotFoundError(f"Image not found or unreadable: {path}")

    if self.isTrain:
        h0, w0 = img.shape[:2]  # original (height, width)
        rh0 = self.opt.img_size[0] / h0  # height resize ratio
        rw0 = self.opt.img_size[1] / w0  # width resize ratio
        # Resize whenever either axis differs from the target. The original
        # used `and`, which skipped the resize entirely when just one axis
        # already matched the target size.
        if rw0 != 1 or rh0 != 1:
            # cv2.INTER_NEAREST: nearest-neighbour
            # cv2.INTER_LINEAR: bilinear (default)
            # cv2.INTER_AREA: pixel-area relation, preferred for shrinking
            img = cv2.resize(img, (int(w0 * rw0), int(h0 * rh0)), interpolation=cv2.INTER_LINEAR)
    else:
        img = cv2.resize(img, (self.opt.img_size[1], self.opt.img_size[0]))
    return img, path


def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
    """Randomly jitter hue/saturation/value of a BGR uint8 image, in place.

    Each channel gets an independent multiplicative gain drawn from
    [1 - gain, 1 + gain]. The result is written back into *img* through the
    ``dst=`` argument of cv2.cvtColor, so nothing is returned.
    """
    r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # random gains
    hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
    dtype = img.dtype  # uint8

    # Build one 256-entry lookup table per channel so the gain is applied in
    # a single C-level LUT pass instead of per-pixel Python arithmetic.
    x = np.arange(0, 256, dtype=np.int16)
    lut_hue = ((x * r[0]) % 180).astype(dtype)  # OpenCV 8-bit hue range is [0, 180)
    lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
    lut_val = np.clip(x * r[2], 0, 255).astype(dtype)

    img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
    cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)  # no return needed

    # Histogram equalization
    # if random.random() < 0.2:
    #     for i in range(3):
    #         img[:, :, i] = cv2.equalizeHist(img[:, :, i])


def imgaug_augmenters(img, hyp):
    """Run the imgaug pipeline (currently a probabilistic Gaussian blur) on *img*."""
    blur = iaa.GaussianBlur(sigma=(hyp["iaa_Gaussian_sigma_1"], hyp["iaa_Gaussian_sigma_2"]))
    # iaa.Sometimes applies the wrapped augmenter with probability iaa_Gaussian_r.
    pipeline = iaa.Sequential(
        [iaa.Sometimes(hyp["iaa_Gaussian_r"], blur)],
        random_order=True,  # shuffle augmenter order per image (moot with one entry)
    )
    # augment_image handles a single image; augment_images would take a batch.
    return pipeline.augment_image(img)


def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
    # Resize *img* toward new_shape (h, w) keeping aspect ratio, then pad the
    # remainder with *color*. Returns (img, (w_ratio, h_ratio), (dw, dh)).
    # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
    shape = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old): the smaller of the two axis ratios, so the
    # resized image fits entirely inside new_shape.
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better test mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))  # (w, h) before padding
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle: pad only up to the next multiple of 64
        dw, dh = np.mod(dw, 64), np.mod(dh, 64)  # wh padding
    elif scaleFill:  # stretch to exactly new_shape with no padding at all
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

    dw /= 2  # divide padding into 2 sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    # The +/- 0.1 rounding splits an odd leftover pixel between the two sides.
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return img, ratio, (dw, dh)


def random_perspective(img, degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)):
    # Apply a random perspective/affine transform (rotation, scale, shear,
    # translation, optional perspective) to *img* and return the warped image.
    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
    # targets = [cls, xyxy]

    height = img.shape[0] + border[0] * 2  # shape(h,w,c)
    width = img.shape[1] + border[1] * 2

    # Center: move the image centre to the origin before the other transforms
    C = np.eye(3)
    C[0, 2] = -img.shape[1] / 2  # x translation (pixels)
    C[1, 2] = -img.shape[0] / 2  # y translation (pixels)

    # Perspective
    P = np.eye(3)
    P[2, 0] = random.uniform(-perspective, perspective)  # x perspective (about y)
    P[2, 1] = random.uniform(-perspective, perspective)  # y perspective (about x)

    # Rotation and Scale
    R = np.eye(3)
    a = random.uniform(-degrees, degrees)
    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations
    s = random.uniform(1 - scale, 1 + scale)
    # s = 2 ** random.uniform(-scale, scale)
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)

    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)

    # Translation (re-centres the image, jittered by +/- translate)
    T = np.eye(3)
    T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width  # x translation (pixels)
    T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height  # y translation (pixels)

    # Combined rotation matrix
    M = T @ S @ R @ P @ C  # order of operations (right to left) is IMPORTANT
    if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any():  # image changed
        if perspective:
            img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
        else:  # affine: the top two rows of M suffice when there is no perspective term
            img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))

    # Visualize
    # import matplotlib.pyplot as plt
    # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
    # ax[0].imshow(img[:, :, ::-1])  # base
    # ax[1].imshow(img2[:, :, ::-1])  # warped
    return img
