import glob
import random
import os
import numpy as np
from PIL import Image
import cv2
import math
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from torch.utils.data import Dataset
import torchvision.transforms as transforms

# Global on/off switches for the train-time augmentations used by YoloDataset.
augment_affine = True  # random affine warp (rotate / translate / scale / shear) via random_affine()
augment_fliplr = True  # random horizontal flip via fliplr()
augment_hsv = True     # random saturation / value jitter in HSV space


def showImage(img: np.ndarray, bboxes=None, mode=None, normalized=False):
    """
    Display an image with optional bounding-box overlays using matplotlib.

    Args:
        img: HxWxC image array.
        bboxes: array of boxes, one per row; the first four columns are drawn.
        mode: box layout — "xyxy" for corner pairs, "cxcywh" for
            center + size, None when rows are already [x, y, w, h].
        normalized: if True, coordinates are fractions of the image size and
            are scaled up by the image width/height before drawing.
    """
    figure, axis = plt.subplots(1)
    axis.axis("off")
    palette = [plt.get_cmap("tab20b")(t) for t in np.linspace(0, 1, 20)]
    axis.imshow(img)
    if bboxes is not None:
        boxes = np.copy(bboxes)
        if normalized:
            img_h, img_w, _ = img.shape
            boxes[:, 0] = boxes[:, 0] * img_w
            boxes[:, 1] = boxes[:, 1] * img_h
            boxes[:, 2] = boxes[:, 2] * img_w
            boxes[:, 3] = boxes[:, 3] * img_h
        if mode == "xyxy":
            # corner pair -> x, y, width, height
            boxes[:, 2] = boxes[:, 2] - boxes[:, 0]
            boxes[:, 3] = boxes[:, 3] - boxes[:, 1]
        elif mode == "cxcywh":
            # box center -> top-left corner
            boxes[:, 0] = boxes[:, 0] - boxes[:, 2] / 2
            boxes[:, 1] = boxes[:, 1] - boxes[:, 3] / 2
        for box in boxes:
            edge_color = random.sample(palette, 1)[0]
            rect = patches.Rectangle((box[0], box[1]), box[2], box[3],
                                     linewidth=1, facecolor="none", edgecolor=edge_color)
            axis.add_patch(rect)
    plt.show()


def pad_to_square(img: np.ndarray, pad_value=(127, 127, 127)):
    """
    Pad an image with a constant border so it becomes square.

    Fixed to accept grayscale (H, W) arrays as well as color (H, W, C) ones;
    the previous version unpacked exactly three shape values and crashed on
    2-D input. Implemented with NumPy directly (no OpenCV needed).

    Args:
        img: input image array, (H, W) or (H, W, C).
        pad_value: border fill — a per-channel tuple for color images, or a
            scalar (for grayscale input a tuple's first component is used).

    Returns:
        (padded_image, pad) where pad = (left, top, right, bottom) in pixels,
        the order the callers rely on when shifting box coordinates.
    """
    h, w = img.shape[:2]
    dim_diff = abs(h - w)
    lt = dim_diff // 2        # border before (top or left)
    rb = dim_diff - lt        # border after (bottom or right); absorbs odd diffs
    pad = (0, lt, 0, rb) if h <= w else (lt, 0, rb, 0)  # Order: [left, top, right, bottom]
    side = max(h, w)
    out = np.empty((side, side) + img.shape[2:], dtype=img.dtype)
    fill = np.asarray(pad_value, dtype=img.dtype)
    if img.ndim == 2 and fill.ndim > 0:
        fill = fill.flat[0]   # single-channel image: use the first component
    out[...] = fill           # broadcasts per channel for (H, W, C) images
    out[pad[1]:pad[1] + h, pad[0]:pad[0] + w] = img
    return out, pad


def random_affine(img: np.ndarray, bboxes=(), degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1),
                  shear=(-4, 4), border_value=(127, 127, 127)):
    """
    Apply one random affine transform (rotation + scale, translation, shear)
    to an image and warp its bounding boxes accordingly, using OpenCV.

    Similar to torchvision.transforms.RandomAffine() but also transforms the
    boxes and drops those that become degenerate after the warp.

    Args:
        img: HxWxC image array (callers pass square padded images).
        bboxes: (n, 5) array of [class, x1, y1, x2, y2] pixel boxes, or an
            empty sequence / None to warp the image only.
        degrees: (lo, hi) rotation angle range in degrees.
        translate: (fx, fy) maximum translation as a fraction of image size.
        scale: (lo, hi) uniform scale factor range.
        shear: (lo, hi) shear angle range in degrees.
        border_value: BGR fill for regions exposed by the warp.

    Returns:
        (warped_image, kept_bboxes) — boxes that end up too small, mostly
        outside the image, or too elongated are filtered out.
    """

    if bboxes is None:
        bboxes = []
    border = 0  # width of added border (optional)
    # Output is a square of this side length (equals input size when square).
    height = max(img.shape[0], img.shape[1]) + border * 2

    # Rotation and Scale
    R = np.eye(3)
    a = random.random() * (degrees[1] - degrees[0]) + degrees[0] # random rotation angle in degrees
    s = random.random() * (scale[1] - scale[0]) + scale[0] # random scale factor
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)

    # Translation
    T = np.eye(3)
    # NOTE(review): x translation scales with img.shape[0] (height) and y with
    # img.shape[1] (width) — looks swapped, though it is harmless for the
    # square images used here; confirm before applying to non-square input.
    T[0, 2] = (random.random() * 2 - 1) * translate[0] * img.shape[0] + border  # x translation (pixels)
    T[1, 2] = (random.random() * 2 - 1) * translate[1] * img.shape[1] + border  # y translation (pixels)

    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan((random.random() * (shear[1] - shear[0]) + shear[0]) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan((random.random() * (shear[1] - shear[0]) + shear[0]) * math.pi / 180)  # y shear (deg)

    M = S @ T @ R  # Combined rotation matrix. ORDER IS IMPORTANT HERE!!
    imw = cv2.warpPerspective(img, M, dsize=(height, height), flags=cv2.INTER_LINEAR,
                              borderValue=border_value)  # BGR order borderValue

    # Return warped points also
    if len(bboxes) > 0:
        n = bboxes.shape[0]
        points = bboxes[:, 1:5].copy()
        area0 = (points[:, 2] - points[:, 0]) * (points[:, 3] - points[:, 1])  # pre-warp box areas

        # warp all four corners of every box through M (homogeneous coords)
        xy = np.ones((n * 4, 3))
        xy[:, :2] = points[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(n * 4, 2)  # x1y1, x2y2, x1y2, x2y1
        xy = (xy @ M.T)[:, :2].reshape(n, 8)

        # create new axis-aligned boxes from the min/max of the warped corners
        x = xy[:, [0, 2, 4, 6]]
        y = xy[:, [1, 3, 5, 7]]
        xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T

        # apply angle-based reduction of bounding boxes: shrink each box toward
        # its center to offset the corner hull growing under rotation
        radians = a * math.pi / 180
        reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
        x = (xy[:, 2] + xy[:, 0]) / 2
        y = (xy[:, 3] + xy[:, 1]) / 2
        w = (xy[:, 2] - xy[:, 0]) * reduction
        h = (xy[:, 3] - xy[:, 1]) * reduction
        xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T

        # clip boxes to the image, then keep only plausible ones: sides > 4 px,
        # more than 10% of the original area surviving, aspect ratio below 10
        np.clip(xy, 0, height, out=xy)
        w = xy[:, 2] - xy[:, 0]
        h = xy[:, 3] - xy[:, 1]
        area = w * h
        ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16))
        i = (w > 4) & (h > 4) & (area / (area0 + 1e-16) > 0.1) & (ar < 10)

        bboxes = bboxes[i]
        bboxes[:, 1:5] = xy[i]

    return imw, bboxes


def fliplr(image: np.ndarray, bboxes: np.ndarray):
    """
    Mirror an image horizontally and remap its bounding boxes to match.

    Boxes are rows of [class, x1, y1, x2, y2] in pixel coordinates; only the
    x columns change.
    """
    flipped = cv2.flip(image, 1)
    width = flipped.shape[1]
    out = np.copy(bboxes)
    out[:, 1] = width - bboxes[:, 3]  # new x1 mirrors the old x2
    out[:, 3] = width - bboxes[:, 1]  # new x2 mirrors the old x1
    return flipped, out


class LoadImagesAndVideos(object):
    """
    Inference-time iterator over a folder (or a single file) of images and
    videos.

    Images yield one frame each; videos are decoded frame by frame. Every
    iteration returns (path, tensor, original_image, capture) where tensor is
    a 1x3xHxW float RGB tensor resized to img_size and original_image is the
    raw BGR array from OpenCV.
    """

    def __init__(self, path, img_size=416):
        # Side length of the square network input.
        self.img_size = img_size
        img_formats = ['.jpg', '.jpeg', '.png', '.tif']
        vid_formats = ['.mov', '.avi', '.mp4']
        files = []
        if os.path.isdir(path):
            files = sorted(glob.glob('%s/*.*' % path))
        elif os.path.isfile(path):
            files = [path]

        # Partition by extension; files of any other type are silently ignored.
        images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
        videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
        nI, nV = len(images), len(videos)

        self.files = images + videos
        self.nF = nI + nV  # number of files
        # Parallel flag list: False -> image entry, True -> video entry.
        self.video_flag = [False] * nI + [True] * nV
        self.mode = 'images'
        if any(videos):
            self.new_video(videos[0])  # open the first video up front
        else:
            self.cap = None
        assert self.nF > 0, 'No images or videos found in ' + path
        # Disable OpenCV threading/OpenCL to avoid interference with workers.
        cv2.setNumThreads(0)
        cv2.ocl.setUseOpenCL(False)

    def __iter__(self):
        # Restart iteration from the first file.
        self.count = 0
        return self

    def __len__(self):
        return self.nF  # number of files

    def __next__(self):
        if self.count == self.nF:
            raise StopIteration
        path = self.files[self.count]

        if self.video_flag[self.count]:
            # Read video
            self.mode = 'video'
            ret_val, image0 = self.cap.read()
            if not ret_val:
                # Current video is exhausted: advance to the next file.
                self.count += 1
                self.cap.release()
                if self.count == self.nF:  # last video
                    raise StopIteration
                else:
                    path = self.files[self.count]
                    self.new_video(path)
                    ret_val, image0 = self.cap.read() # BGR

            self.frame += 1
            print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nF, self.frame, self.nframes, path), end='')

        else:
            # Read image
            self.count += 1
            image0 = cv2.imread(path, cv2.IMREAD_COLOR)  # BGR
            print('image %g/%g %s: ' % (self.count, self.nF, path), end='')

        # Padding image to square, then resize to the network input size.
        # (pad and padded_h/padded_w are currently unused afterwards.)
        image, pad = pad_to_square(image0)
        padded_h, padded_w, _ = image.shape
        image = cv2.resize(image, dsize=(self.img_size, self.img_size), interpolation=cv2.INTER_LINEAR)
        # Convert to PIL image for transformation using torchvision
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        img = Image.fromarray(image)
        tf = transforms.Compose([transforms.ToTensor()])
        img = tf(img)
        img = img.unsqueeze(0)  # add batch dimension -> 1x3xHxW
        return path, img, image0, self.cap

    def new_video(self, path):
        # Open the video at *path* and reset per-video frame bookkeeping.
        self.frame = 0
        self.cap = cv2.VideoCapture(path)
        self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))


class YoloDataset(Dataset):
    """
    Training dataset for YOLO-style object detection.

    *list_path* is a text file with one image path per line; each label path
    is derived by replacing "images" with "labels" and the image extension
    with ".txt". Label files contain rows of [class, cx, cy, w, h] — with
    normalized_labels=True those coordinates are fractions of the image size.
    """

    def __init__(self, list_path, img_size=416, augment=True, multiscale=True, normalized_labels=True):
        with open(list_path, "r") as file:
            self.img_files = file.readlines()
        # reference label file name from image file name
        self.label_files = [path.replace("images", "labels").replace(".png", ".txt").replace(".jpg", ".txt").replace(".jpeg", ".txt")
                            for path in self.img_files]
        self.img_size = img_size
        # Multiscale bounds: +/- 3 steps of stride 32 around the nominal size.
        self.min_size = self.img_size - 3 * 32
        self.max_size = self.img_size + 3 * 32
        self.max_objects = 100
        self.augment = augment
        self.multiscale = multiscale
        self.normalized_labels = normalized_labels
        self.batch_count = 0
        # Disable OpenCV threading/OpenCL to avoid DataLoader-worker issues.
        cv2.setNumThreads(0)
        cv2.ocl.setUseOpenCL(False)

    def __getitem__(self, index):
        """
        Load one image and its annotations, applying augmentations.

        Returns (img, targets): img is a 3xHxW float tensor; targets is an
        (n, 6) tensor of [batch_idx, class, cx, cy, w, h] with batch_idx left
        as 0 (filled by collate_fn) and box coordinates normalized to [0, 1].
        """

        # Read original image
        img_path = self.img_files[index % len(self.img_files)].strip()
        image = cv2.imread(img_path, cv2.IMREAD_COLOR) # BGR
        h, w, c = image.shape
        # Normalized labels are fractions, so scale by image size; pixel
        # labels pass through with factor 1.
        h_factor, w_factor = (h, w) if self.normalized_labels else (1, 1)

        # Read labels
        label_path = self.label_files[index % len(self.img_files)].strip()
        boxes = np.loadtxt(label_path).reshape(-1, 5)
        # Extract corner coordinates for the unpadded + unscaled image
        x1 = w_factor * (boxes[:, 1] - boxes[:, 3] / 2)
        y1 = h_factor * (boxes[:, 2] - boxes[:, 4] / 2)
        x2 = w_factor * (boxes[:, 1] + boxes[:, 3] / 2)
        y2 = h_factor * (boxes[:, 2] + boxes[:, 4] / 2)

        # HSV augment: jitter saturation and value each by up to +/-50%
        if self.augment and augment_hsv:
            fraction = 0.50
            img_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)    # hue, sat, val
            S = img_hsv[:, :, 1].astype(np.float32)
            V = img_hsv[:, :, 2].astype(np.float32)
            a = (random.random() * 2 - 1) * fraction + 1
            b = (random.random() * 2 - 1) * fraction + 1
            S *= a
            V *= b
            # clip only needed when scaling up; write-back casts to uint8
            img_hsv[:, :, 1] = S if a < 1 else S.clip(None, 255)
            img_hsv[:, :, 2] = V if b < 1 else V.clip(None, 255)
            cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=image)  # back to BGR, in place

        # Padding image to square, then resize to the network input size
        image, pad = pad_to_square(image)
        padded_h, padded_w, _ = image.shape
        image = cv2.resize(image, dsize=(self.img_size, self.img_size), interpolation=cv2.INTER_LINEAR)
        # Shift corners by the pad offset and rescale into img_size pixels
        boxes[:, 1] = (x1 + pad[0]) * self.img_size / padded_w
        boxes[:, 2] = (y1 + pad[1]) * self.img_size / padded_h
        boxes[:, 3] = (x2 + pad[0]) * self.img_size / padded_w
        boxes[:, 4] = (y2 + pad[1]) * self.img_size / padded_h

        # Apply geometric augmentations (boxes are warped alongside the image)
        if self.augment and augment_affine:
            image, boxes = random_affine(image, bboxes=boxes, degrees=(-2, 2),
                                         translate=(0.05, 0.05), scale=(0.90, 1.10), shear=(-1, 1),
                                         border_value=(127, 127, 127))
        if self.augment and augment_fliplr:
            if np.random.random() < 0.5:
                image, boxes = fliplr(image, boxes)

        # Convert to PIL image for transformation using torchvision
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        img = Image.fromarray(image)

        tf = transforms.Compose([transforms.RandomGrayscale(0.1),
                                 transforms.ToTensor()])
        img = tf(img)

        # convert bboxes from pixel corners back to normalized [cx, cy, w, h]
        x1 = boxes[:, 1].copy()
        y1 = boxes[:, 2].copy()
        x2 = boxes[:, 3].copy()
        y2 = boxes[:, 4].copy()
        boxes[:, 1] = ((x1 + x2) / 2) / self.img_size
        boxes[:, 2] = ((y1 + y2) / 2) / self.img_size
        boxes[:, 3] = (x2 - x1 + 1) / self.img_size
        boxes[:, 4] = (y2 - y1 + 1) / self.img_size

        targets = torch.zeros((len(boxes), 6))
        targets[:, 1:] = torch.from_numpy(boxes) # targets = [batch_idx, class, cx, cy, w, h]

        return img, targets

    def __len__(self):
        return len(self.img_files)

    def collate_fn(self, batch):
        """
        Collate function, invoked automatically by the DataLoader.

        Stacks the images into one batch tensor and concatenates all targets,
        writing each sample's position within the batch into targets[:, 0].
        """
        imgs, targets = list(zip(*batch))
        for i, boxes in enumerate(targets):
            boxes[:, 0] = i # record which batch element each label belongs to
        targets = torch.cat(targets, 0) # stack all labels along dim 0
        # Every 10 batches pick a new input size (multiscale training: YOLO is
        # fully convolutional, so a different input size still fits the weights).
        if self.multiscale and self.batch_count % 10 == 0:
            self.img_size = random.choice(range(self.min_size, self.max_size + 1, 32))
        imgs = torch.stack(imgs, 0)
        self.batch_count += 1
        return imgs, targets


