import os
import random

import cv2
import numpy as np
import albumentations as A
from glob import glob
from tqdm import tqdm
from shapely.geometry import Polygon

def get_img_path(img_dir, suffix=("jpg", "png")):
    """Collect paths of files in *img_dir* whose extension is in *suffix*.

    Args:
        img_dir: directory to scan (non-recursive).
        suffix: iterable of extensions (without the dot) to match.

    Returns:
        List of matching file paths, grouped by extension.
    """
    found = []
    for ext in suffix:
        found.extend(glob(os.path.join(img_dir, f"*.{ext}")))
    return found


def laplacian_pyramid_blending(A, B, mask, num_levels=6):
    """Blend images A and B via Laplacian pyramids, weighted by *mask*.

    Where *mask* is 1 the output takes B; where it is 0 it takes A, with
    smooth multi-scale transitions in between.

    Args:
        A: first image (takes over where mask == 0).
        B: second image (takes over where mask == 1).
        mask: blending weight image, same size as A/B.
        num_levels: number of pyramid downsampling steps.

    Returns:
        The blended image reconstructed from the combined pyramid.
    """
    # NOTE(review): parameter name ``A`` shadows the module alias from
    # ``import albumentations as A`` inside this function body.
    GA = A.copy()
    GB = B.copy()
    GM = mask.copy()
    # Gaussian pyramids; index 0 is full resolution, so after the loop each
    # list holds num_levels + 1 entries.
    gpA = [GA]
    gpB = [GB]
    gpM = [GM]
    for i in range(num_levels):
        GA = cv2.pyrDown(GA)
        GB = cv2.pyrDown(GB)
        GM = cv2.pyrDown(GM)
        gpA.append(np.float32(GA))
        gpB.append(np.float32(GB))
        gpM.append(np.float32(GM))
    # Laplacian pyramids, coarsest first. NOTE(review): the base is taken at
    # index num_levels - 1, not num_levels, so the deepest Gaussian level is
    # never used — confirm this off-by-one is intentional.
    lpA = [gpA[num_levels - 1]]
    lpB = [gpB[num_levels - 1]]
    gpMr = [gpM[num_levels - 1]]
    for i in range(num_levels - 1, 0, -1):
        size = (gpA[i - 1].shape[1], gpA[i - 1].shape[0])
        # Laplacian level = Gaussian level minus the upsampled coarser level.
        LA = np.subtract(gpA[i - 1], cv2.pyrUp(gpA[i], dstsize=size))
        LB = np.subtract(gpB[i - 1], cv2.pyrUp(gpB[i], dstsize=size))
        lpA.append(LA)
        lpB.append(LB)
        gpMr.append(gpM[i - 1])
    # Per-level linear blend: mask selects B, (1 - mask) selects A.
    LS = []
    for la, lb, gm in zip(lpA, lpB, gpMr):
        ls = la * (1.0 - gm) + lb * gm
        LS.append(ls)

    # Collapse the combined pyramid from coarsest to finest.
    blend = LS[0]
    for i in range(1, num_levels):
        size = (LS[i].shape[1], LS[i].shape[0])
        blend = cv2.pyrUp(blend, dstsize=size)
        # NOTE(review): dtype=24 is a raw OpenCV type code (magic number);
        # the intent is unclear — presumably a float accumulator such as
        # cv2.CV_32F was meant. Confirm before changing.
        blend = cv2.add(blend, LS[i], dtype=24)

    return blend


def poly_iou(poly1, poly2):
    """Intersection-over-union of two polygons given as (x, y) point sequences.

    Args:
        poly1: first polygon, sequence of 4+ (x, y) vertices.
        poly2: second polygon, same format.

    Returns:
        IoU in [0, 1]. Returns 0.0 when the union has zero area, which the
        original expression turned into a ZeroDivisionError.
    """
    p1 = Polygon(poly1)
    p2 = Polygon(poly2)
    union_area = p1.union(p2).area
    if union_area == 0:
        return 0.0
    return p1.intersection(p2).area / union_area

def normal_aug(img_path, txt_path, label_type, use_D4, use_D4_image_size=640):
    """Apply photometric or D4 augmentation to one image and its OBB labels.

    Args:
        img_path: image path (read via np.fromfile to support non-ASCII
            paths on Windows).
        txt_path: YOLO-OBB label file; each line is
            "cls x1 y1 x2 y2 x3 y3 x4 y4" with normalized coordinates.
        label_type: label format; only "obb" is supported.
        use_D4: if True, pad to a square and apply the D4 symmetry group;
            otherwise apply a photometric-only pipeline.
        use_D4_image_size: square side length used for D4 padding.

    Returns:
        Tuple of (augmented image,
        {group index -> list of 4 normalized [x, y] corners},
        list of class strings indexed by group index).

    Raises:
        ValueError: if label_type is not "obb". (The original code fell
        through to an UnboundLocalError at the return statement instead.)
    """
    image = cv2.imdecode(np.fromfile(img_path, dtype=np.uint8), -1)
    h, w = image.shape[:2]
    with open(txt_path, "r") as f:
        lines = f.readlines()

    if label_type != "obb":
        raise ValueError(f"unsupported label_type: {label_type!r}")

    # albumentations has no rotated-box target, so each OBB's 4 corners are
    # augmented as keypoints sharing a group label (one group == one rotated
    # box). This scheme assumes the dataset contains a single class.
    all_cls = []
    keypoints = []
    labels = []
    for j, line in enumerate(lines):
        cls, x1, y1, x2, y2, x3, y3, x4, y4 = line.strip().split(" ")
        x1, y1, x2, y2, x3, y3, x4, y4 = map(float, [x1, y1, x2, y2, x3, y3, x4, y4])
        all_cls.append(cls)

        keypoints.extend([
            (x1 * w, y1 * h, 1),  # x, y, visibility (1 visible / 0 hidden)
            (x2 * w, y2 * h, 1),
            (x3 * w, y3 * h, 1),
            (x4 * w, y4 * h, 1)
        ])
        labels.extend([j, j, j, j])  # group index of the box's 4 corners

    # Shared keypoint config (was duplicated in both branches).
    keypoint_params = A.KeypointParams(
        format="xy",              # keypoint layout
        label_fields=["labels"],  # field carrying the per-corner group index
        remove_invisible=False,   # keep corners pushed outside the image
    )
    if use_D4:
        transform = A.Compose([
            # Pad to a square so the D4 symmetries are shape-preserving.
            A.PadIfNeeded(min_height=use_D4_image_size, min_width=use_D4_image_size, p=1),
            A.D4(p=1)
        ], keypoint_params=keypoint_params)
    else:
        transform = A.Compose([
            A.RandomBrightnessContrast(brightness_limit=0.3, contrast_limit=0.3, p=0.5),
            A.GaussNoise(std_range=(0.01, 0.05), p=0.5),
            A.Blur(blur_limit=(3, 3), p=0.5),
            A.Illumination(p=0.5),
            A.ImageCompression(quality_range=(90, 100), p=0.5),
            A.RandomGamma(p=0.5),
            A.RingingOvershoot(blur_limit=(3, 5), p=0.5)
        ], keypoint_params=keypoint_params)

    # Apply the (random) augmentation.
    transformed = transform(
        image=image,
        keypoints=keypoints,
        labels=labels,
    )
    transformed_image = transformed["image"]
    transformed_keypoints = transformed["keypoints"]
    transformed_labels = transformed["labels"]  # group labels are preserved
    th, tw = transformed_image.shape[:2]

    # Regroup augmented corners into normalized rotated boxes by group index.
    transformed_obbs = {}
    for kp, idx in zip(transformed_keypoints, transformed_labels):
        transformed_obbs.setdefault(idx, []).append([kp[0] / tw, kp[1] / th])
    return transformed_image, transformed_obbs, all_cls


def fusion_aug(image, lines, material, material_lines, label_type):
    """Paste targets from a material image onto *image* via pyramid blending.

    Args:
        image: grayscale background image.
        lines: YOLO-OBB label lines of the background image.
        material: grayscale image supplying extra targets, same size as image.
        material_lines: YOLO-OBB label lines of the material image.
        label_type: unused; kept for signature symmetry with normal_aug.

    Returns:
        Tuple of (blended image, {str index -> normalized 4x2 polygon},
        class list aligned index-for-index with the polygons).
    """
    h, w = image.shape[:2]

    # Background targets: class labels, normalized polys, pixel-space polys.
    labels, polys, _polys = [], [], []
    for line in lines:
        line_data = line.replace('\n', '').split(' ')
        poly = np.array(list(map(float, line_data[1:9]))).reshape(4, 2)
        _poly = (poly * np.array([w, h])).astype(np.float32)
        labels.append(line_data[0])
        _polys.append(_poly)
        polys.append(poly)
    polys = np.array(polys)

    material_mask = np.zeros((h, w), dtype=np.uint8)
    material_labels, material_polys, _mpolys = [], [], []
    for mline in material_lines:
        mline_data = mline.replace('\n', '').split(' ')
        mpoly = np.array(list(map(float, mline_data[1:9]))).reshape(4, 2)
        _mpoly = (mpoly * np.array([w, h])).astype(np.float32)

        # Skip targets with corners outside the image on the negative side.
        if _mpoly.min() < 0:
            continue

        # Skip material targets that overlap background targets too much.
        drop = False
        for _p in _polys:
            if poly_iou(_p, _mpoly) > 0.4:
                drop = True
                break  # one heavy overlap is enough to drop
        if drop:
            continue

        # Build the blending mask: polygon fill, refined by a bright-region
        # threshold inside the target's bounding rect.
        try:
            material_mask = cv2.fillPoly(material_mask, [_mpoly.astype(np.int32)], 1)
            brx, bry, brw, brh = cv2.boundingRect(_mpoly)
            mpoly_br = material[bry:bry + brh, brx:brx + brw]
            ret, th = cv2.threshold(mpoly_br, 200, 255, cv2.THRESH_BINARY)
            kernel = np.ones((3, 3), np.uint8)
            th = cv2.morphologyEx(th, cv2.MORPH_CLOSE, kernel)
            material_mask[bry:bry + brh, brx:brx + brw] = th / 255
        except Exception:  # narrowed from bare except; still best-effort
            print('material_mask error')
            continue

        material_labels.append(mline_data[0])
        material_polys.append(mpoly)
        _mpolys.append(_mpoly)
    material_polys = np.array(material_polys)

    if material_mask.max() > 0 and len(material_polys) == 0:
        # A target failed mid-extraction after filling the mask
        # (replaces the original debug print("a")).
        print('fusion_aug: mask set but no material polys retained')
    transformed_image = laplacian_pyramid_blending(image, material, material_mask, num_levels=3)

    # Merge background and material polygons, background first.
    if len(polys) > 0 and len(material_polys) > 0:
        obbs = np.concatenate((polys, material_polys), axis=0)
    elif len(polys) > 0:
        obbs = polys
    elif len(material_polys) > 0:
        obbs = material_polys
    else:
        obbs = []
    transformed_obbs = {str(i): ply for i, ply in enumerate(obbs)}
    # BUG FIX: obbs lists background polys first, so the class list must be
    # ordered the same way (was material_labels + labels, which misaligned
    # classes and boxes whenever more than one class is present).
    transformed_labels = labels + material_labels
    return transformed_image, transformed_obbs, transformed_labels

# YOLO rotated-box (OBB) dataset augmentation driver.
def yolo_aug(img_dir, txt_dir, material_dir, aug_dir, label_type, repeat_num, use_D4,
             use_D4_image_size=640, fusion_prob=0, fusion_material_aug=False):
    """Augment every labelled image in *img_dir* and write results to *aug_dir*.

    For each image, *repeat_num* augmented copies are produced. With
    probability *fusion_prob* a copy is built by blending targets from a
    random material image (fusion_aug); otherwise a standard photometric /
    D4 augmentation is applied (normal_aug).

    Args:
        img_dir: directory of source images.
        txt_dir: directory of matching YOLO-OBB .txt label files.
        material_dir: directory of material images plus their labels.
        aug_dir: output directory (created if missing).
        label_type: label format forwarded to the augmenters ("obb").
        repeat_num: number of augmented copies per image.
        use_D4: forwarded to normal_aug (D4 symmetry augmentation).
        use_D4_image_size: forwarded to normal_aug (square pad size).
        fusion_prob: probability of fusion vs. normal augmentation.
        fusion_material_aug: if True, augment the material before fusing.
    """
    if not os.path.exists(aug_dir):
        os.makedirs(aug_dir)

    img_paths = get_img_path(img_dir)
    material_paths = get_img_path(material_dir)
    for img_path in tqdm(img_paths):
        img_name = os.path.basename(img_path)
        suffix = os.path.splitext(img_path)[1]
        txt_path = os.path.join(txt_dir, img_name.replace(suffix, ".txt"))

        for k in range(repeat_num):
            # BUG FIX: the original hard-coded 0.1 and `continue`d past an
            # unreachable normal_aug call, so fusion_prob was ignored and
            # most iterations produced nothing at all.
            if random.random() > fusion_prob:
                transformed_image, transformed_obbs, all_cls = normal_aug(
                    img_path, txt_path, label_type, use_D4, use_D4_image_size)
            else:
                # Background image, forced to grayscale.
                image = cv2.imdecode(np.fromfile(img_path, dtype=np.uint8), -1)
                if len(image.shape) > 2:
                    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                with open(txt_path, "r") as f:
                    lines = f.readlines()

                # Pick a material image different from the background.
                material_path = random.choice(material_paths)
                while material_path == img_path:
                    material_path = random.choice(material_paths)
                material_name = os.path.basename(material_path)
                material_suffix = os.path.splitext(material_name)[1]
                material_txt_path = os.path.join(
                    material_dir, material_name.replace(material_suffix, ".txt"))

                if fusion_material_aug:
                    # BUG FIX (was marked "todo:fix"): augment the chosen
                    # material, not the background image/labels.
                    material, material_obbs, material_cls = normal_aug(
                        material_path, material_txt_path, label_type, use_D4, use_D4_image_size)
                    # Re-serialize the augmented OBBs as YOLO label lines.
                    material_lines = []
                    for idx, obb in material_obbs.items():
                        x1, y1 = obb[0]
                        x2, y2 = obb[1]
                        x3, y3 = obb[2]
                        x4, y4 = obb[3]
                        material_lines.append(
                            f"{material_cls[int(idx)]} {x1} {y1} {x2} {y2} {x3} {y3} {x4} {y4}\n")
                else:
                    material = cv2.imdecode(np.fromfile(material_path, dtype=np.uint8), -1)
                    if len(material.shape) > 2:
                        material = cv2.cvtColor(material, cv2.COLOR_BGR2GRAY)
                    with open(material_txt_path, "r") as f:
                        material_lines = f.readlines()

                transformed_image, transformed_obbs, all_cls = fusion_aug(
                    image, lines, material, material_lines, label_type)

            # Write the augmented image (PNG) and its label file.
            aug_img_name = img_name.replace(suffix, f"_b{k}.png")
            aug_txt_name = img_name.replace(suffix, f"_b{k}.txt")
            cv2.imencode(".png", transformed_image)[1].tofile(os.path.join(aug_dir, aug_img_name))
            with open(os.path.join(aug_dir, aug_txt_name), "w") as ff:
                for idx, obb in transformed_obbs.items():
                    x1, y1 = obb[0]
                    x2, y2 = obb[1]
                    x3, y3 = obb[2]
                    x4, y4 = obb[3]
                    ff.write(f"{all_cls[int(idx)]} {x1} {y1} {x2} {y2} {x3} {y3} {x4} {y4}\n")


def main():
    """Run OBB data augmentation over the hard-coded dataset layout."""
    images_dir = r"D:\lliujian\DeepLearning\datasets\FPC_20250301_ori\images\train"
    labels_dir = r"D:\lliujian\DeepLearning\datasets\FPC_20250301_ori\labels\train"
    materials_dir = r"D:\lliujian\DeepLearning\datasets\aug_materials_yolo"
    output_dir = r"D:\lliujian\DeepLearning\datasets\FPC_20250301_ori\images_aug"

    yolo_aug(
        images_dir,
        labels_dir,
        materials_dir,
        output_dir,
        label_type="obb",        # OBB mode supports a single class only
        repeat_num=1,            # augmented copies per source image
        use_D4=False,
        use_D4_image_size=640,   # square pad size before D4 augmentation
        fusion_prob=0.3,         # normal | D4 | fusion selection probability
        fusion_material_aug=False,
    )


if __name__ == "__main__":
    main()

