import math
import os
import random
import shutil
import xml.etree.ElementTree as ET
from glob import glob

import cv2
import numpy as np
# Support running both as a flat script (plain import) and as a package
# module (relative import).
try:
    from xml_resolve import generateXML
except ImportError:  # was a bare `except:`, which also hid unrelated errors
    from .xml_resolve import generateXML

def loadXML(xmlpath):
    """Parse one Pascal-VOC annotation file.

    Returns a dict with keys: filename, width, height,
    bboxes (list of [xmin, ymin, xmax, ymax] ints), labels,
    and the parsed ElementTree handles (tree, root).
    """
    with open(xmlpath) as fp:
        tree = ET.parse(fp)
    root = tree.getroot()

    size_node = root.find('size')
    width = int(size_node.find('width').text)
    height = int(size_node.find('height').text)

    boxes = []
    names = []
    for obj in root.findall('object'):
        names.append(obj.find('name').text)
        bnd = obj.find('bndbox')
        boxes.append([int(bnd.find(tag).text)
                      for tag in ('xmin', 'ymin', 'xmax', 'ymax')])

    return {"filename": root.find('filename').text,
            "width": width, "height": height,
            "bboxes": boxes, "labels": names,
            "tree": tree, "root": root}


def avgWH(bboxes):
    """Return the mean (width, height) of a list of [x1, y1, x2, y2] boxes."""
    count = len(bboxes)
    total_w = sum(box[2] - box[0] for box in bboxes)
    total_h = sum(box[3] - box[1] for box in bboxes)
    return total_w / count, total_h / count


def load(xmlpath):
    """Load an annotation file and the image it names (resolved next to
    the xml). Returns (img, labels, bboxes)."""
    folder = os.path.dirname(xmlpath)
    info = loadXML(xmlpath)
    image = cv2.imread("%s/%s" % (folder, info["filename"]))
    return image, info["labels"], info["bboxes"]


def sample_similar_image(benchXml, referXmls, scalethr=1.2):
    """Pick 4 mosaic candidates: the bench image plus up to 3 references
    whose average SKU box width AND height are within `scalethr` (as a
    ratio) of the bench image's.

    BUG FIX: the original comparison was inverted (`> scalethr`), so it
    collected the *dissimilar* images, contradicting its own comment.
    Also, the short-fall fallback returned `samples4 * 4` (possibly 8 or
    12 entries); it is now truncated so callers always get exactly 4.

    Returns a list of 4 xml paths (bench image first, possibly repeated).
    """
    benchW, benchH = avgWH(load(benchXml)[-1])
    samples4 = [benchXml]
    for ref in referXmls:
        if ref == benchXml:  # bench is already slot 0; don't add it twice
            continue
        refW, refH = avgWH(load(ref)[-1])
        # Guard degenerate (zero-size) averages to avoid ZeroDivisionError.
        if min(refW, benchW) <= 0 or min(refH, benchH) <= 0:
            continue
        similar = (max(refH, benchH) / min(refH, benchH) <= scalethr and
                   max(refW, benchW) / min(refW, benchW) <= scalethr)
        if similar:
            samples4.append(ref)
        if len(samples4) >= 4:
            return samples4
    # Not enough similar images: repeat what we have, capped at 4 entries.
    return (samples4 * 4)[:4]

def createMosaicAffineData(srcpath, destpath, mosaic_size=800, scalethr=1.2, affine_prob=0.2, areathr=0.4,
                           rotate_degree=10):
    """Offline mosaic (+ optional random-affine) augmentation of a VOC dataset.

    Args:
        srcpath: folder containing the .xml annotations AND the images they
            reference (each xml's <filename> is resolved relative to srcpath).
        destpath: output folder; deleted and recreated, then filled with the
            stitched .jpg images and regenerated VOC .xml annotations.
        mosaic_size: half the output edge; the mosaic canvas is (2*s, 2*s).
        scalethr: only images whose average SKU width/height ratio against the
            base image is within this factor are stitched together.
        affine_prob: probability of applying random_affine to a mosaic.
        areathr: boxes whose area fell below areathr * original area (after
            clipping / affine warping) are discarded.
        rotate_degree: rotation range in degrees passed to random_affine.
    """
    if os.path.exists(destpath):
        shutil.rmtree(destpath)
    os.makedirs(destpath)

    s = mosaic_size
    mosaic_border = [-s // 2, -s // 2]

    xmlFiles = glob("%s/*.xml" % srcpath)

    tbar = enumerate(xmlFiles)

    for j, xml in tbar:
        print(f"[{j + 1}/{len(xmlFiles)}] {xml}")

        bboxes4 = []
        labels4 = []
        oribboxes = []
        orilabels = []
        yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in mosaic_border]  # mosaic center x, y

        # Sample 3 more images with similar average SKU size to stitch with.
        # BUG FIX: cap at 4 entries — the fallback path of
        # sample_similar_image can return more than 4, and the tile loop
        # below only handles indices 0..3 (extra iterations reused stale
        # paste coordinates and duplicated boxes).
        samples = sample_similar_image(xml, xmlFiles, scalethr)[:4]

        # Output name: base name plus the names of the 3 stitched partners.
        filename = os.path.basename(xml).replace(".xml", "")
        for path in samples[1:]:
            filename += "-%s" % os.path.basename(path).replace(".xml", "")

        for i, xml_s in enumerate(samples):
            img, labels, bboxes = load(xml_s)
            oribboxes.extend(bboxes)
            bboxes = np.array(bboxes)
            h, w = img.shape[:2]
            # place img in img4
            if i == 0:  # top left
                img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
                x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)
                x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)
            elif i == 1:  # top right
                x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
                x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
            elif i == 2:  # bottom left
                x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
                x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h)
            elif i == 3:  # bottom right
                x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
                x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)

            img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
            # Shift the tile's boxes into mosaic coordinates, then clip.
            padw = x1a - x1b
            padh = y1a - y1b
            bboxes_new = bboxes.copy()
            bboxes_new[:, 0] = bboxes[:, 0] + padw
            bboxes_new[:, 1] = bboxes[:, 1] + padh
            bboxes_new[:, 2] = bboxes[:, 2] + padw
            bboxes_new[:, 3] = bboxes[:, 3] + padh
            bboxes_new = np.clip(bboxes_new, 0, 2 * s)
            bboxes4.extend(bboxes_new.tolist())
            labels4.extend(labels)
            orilabels.extend(labels)

        if random.random() < affine_prob:
            img4, bboxes4, labels4, oribboxes = random_affine(img4, bboxes4, labels4, oribboxes,
                                                              degrees=rotate_degree, translate=.1, scale=.1, shear=10,
                                                              border=(0, 0))

        bboxes4, labels4 = filterLabelsByArea(oribboxes, bboxes4, labels4, areathr)

        imgName = "%s.jpg" % filename
        imgPath = "%s/%s" % (destpath, imgName)
        xmlPath = "%s/%s.xml" % (destpath, filename)
        cv2.imwrite(imgPath, img4)
        generateXML(xmlPath, imgName, s * 2, s * 2, bboxes4, labels4)

    # BUG FIX: this line printed the inner tile index `i` (always "[4/N]")
    # and raised NameError when xmlFiles was empty.
    print(f"[{len(xmlFiles)}/{len(xmlFiles)}] All done.")


def filterLabelsByArea(bboxes1, bboxes2, labels2, areathr=0.003):
    """Drop boxes whose area shrank below `areathr` of their original area.

    Args:
        bboxes1: original boxes, list or (N, 4) array of [x1, y1, x2, y2].
        bboxes2: transformed boxes, same length and order as bboxes1.
        labels2: labels aligned with bboxes2.
        areathr: keep box i only when
            area(bboxes2[i]) >= area(bboxes1[i]) * areathr.

    Returns:
        (kept_boxes, kept_labels) as plain lists; kept_boxes entries are the
        corresponding rows of bboxes2.
    """
    # Robustness fix: an empty list used to crash — np.array([]) is 1-D,
    # so the [:, 2] indexing below raised IndexError. This can happen when
    # random_affine filters out every box.
    if len(bboxes1) == 0 or len(bboxes2) == 0:
        return [], []
    if isinstance(bboxes1, list):
        bboxes1 = np.array(bboxes1)
    if isinstance(bboxes2, list):
        bboxes2 = np.array(bboxes2)
    area1 = (bboxes1[:, 2] - bboxes1[:, 0]) * (bboxes1[:, 3] - bboxes1[:, 1])
    area2 = (bboxes2[:, 2] - bboxes2[:, 0]) * (bboxes2[:, 3] - bboxes2[:, 1])
    keepIndices = area2 >= (area1 * areathr)
    bboxes = [bboxes2[i] for i, k in enumerate(keepIndices) if k]
    labels = [labels2[i] for i, k in enumerate(keepIndices) if k]
    return bboxes, labels


def random_affine(img, bboxes, labels, oribboxes, degrees=10, translate=.1, scale=.1, shear=10, border=(0, 0)):
    """Apply one random rotation + scale + translation + shear to `img` and
    transform `bboxes` accordingly, dropping boxes that become degenerate.

    Args:
        img: HxWxC image (numpy array, as produced by cv2.imread).
        bboxes: list or (N, 4) array of [x1, y1, x2, y2] in image coords.
        labels: labels aligned with bboxes; filtered in lockstep.
        oribboxes: companion box array kept index-aligned with bboxes — it is
            filtered with the same keep mask but NOT warped.
        degrees / translate / scale / shear: sampling ranges for the random
            transform components.
        border: extra border added to the output size (pixels per side).

    Returns:
        (img, bboxes, labels, oribboxes) with warped image/boxes and the
        surviving, index-aligned labels and oribboxes.
    """
    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
    # https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4

    if isinstance(bboxes, list):
        bboxes = np.array(bboxes)
    if isinstance(oribboxes, list):
        oribboxes = np.array(oribboxes)

    height = img.shape[0] + border[0] * 2  # shape(h,w,c)
    width = img.shape[1] + border[1] * 2

    # Rotation and Scale
    R = np.eye(3)
    a = random.uniform(-degrees, degrees)
    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations
    s = random.uniform(1 - scale, 1 + scale)
    # s = 2 ** random.uniform(-scale, scale)
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)

    # Translation
    T = np.eye(3)
    T[0, 2] = random.uniform(-translate, translate) * img.shape[1] + border[1]  # x translation (pixels)
    T[1, 2] = random.uniform(-translate, translate) * img.shape[0] + border[0]  # y translation (pixels)

    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)

    # Combined rotation matrix
    M = S @ T @ R  # ORDER IS IMPORTANT HERE!!
    # Skip the warp entirely when the combined matrix is the identity.
    if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any():  # image changed
        img = cv2.warpAffine(img, M[:2], dsize=(width, height), flags=cv2.INTER_LINEAR, borderValue=(0, 0, 0))

    # Transform label coordinates
    n = len(bboxes)
    if n:
        # warp points: all 4 corners of every box, as homogeneous coords
        xy = np.ones((n * 4, 3))
        xy[:, :2] = bboxes[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(n * 4, 2)  # x1y1, x2y2, x1y2, x2y1
        xy = (xy @ M.T)[:, :2].reshape(n, 8)

        # create new boxes: axis-aligned hull of the 4 warped corners
        x = xy[:, [0, 2, 4, 6]]
        y = xy[:, [1, 3, 5, 7]]
        xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T

        # # apply angle-based reduction of bounding boxes
        # radians = a * math.pi / 180
        # reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
        # x = (xy[:, 2] + xy[:, 0]) / 2
        # y = (xy[:, 3] + xy[:, 1]) / 2
        # w = (xy[:, 2] - xy[:, 0]) * reduction
        # h = (xy[:, 3] - xy[:, 1]) * reduction
        # xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T

        # reject warped points outside of image
        xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
        xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
        w = xy[:, 2] - xy[:, 0]
        h = xy[:, 3] - xy[:, 1]
        area = w * h
        area0 = (bboxes[:, 2] - bboxes[:, 0]) * (bboxes[:, 3] - bboxes[:, 1])
        ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16))  # aspect ratio
        # keep: wide/tall enough, kept >20% of (scaled) area, aspect < 20
        i = (w > 2) & (h > 2) & (area / (area0 * s + 1e-16) > 0.2) & (ar < 20)

        bboxes = bboxes[i]
        bboxes[:] = xy[i]  # overwrite surviving rows with the warped coords
        oribboxes = oribboxes[i]
        labels = [labels[ii] for ii, k in enumerate(i) if k]
    return img, bboxes, labels, oribboxes


if __name__ == "__main__":
    # Example run: build a mosaic-augmented copy of the dataset at the
    # second path from the VOC annotations + images at the first path.
    createMosaicAffineData("/mnt/f/Dataset/whh/train600LabelIntegration",
                           "/mnt/f/Dataset/whh/train600LabelIntegrationMosaicAug",
                           mosaic_size=800)
    # createMosaicAffineData(srcpath, destpath, mosaic_size=800, scalethr=1.2, affine_prob=0.2, areathr=0.4, rotate_degree=10)
