import os.path
from torch.utils.data import Dataset, DataLoader
import random
import numpy as np
import cv2
import math
import torch


def random_affine(img, targets=(), degrees=(-10, 10), translate=(0.1, 0.1),
                  scale=(0.9, 1.1), shear=(-2, 2), border=(0, 0)):
    """
    Apply a random affine transform (rotation, scale, shear, translation) to an
    image and its bounding-box labels.

    Args:
        img: input image (H, W, C)
        targets: boxes in pixel coords, [n, 5] as [class, x1, y1, x2, y2] or
                 [n, 6] as [batch_idx, class, x1, y1, x2, y2]
        degrees: rotation angle range (min, max), in degrees
        translate: maximum translation fraction per axis (x, y)
        scale: scale factor range (min, max)
        shear: shear angle range (min, max), in degrees
        border: extra canvas border (y, x) in pixels

    Returns:
        img: transformed image
        targets: transformed and filtered boxes, same column layout as input
    """
    height = img.shape[0] + border[0] * 2
    width = img.shape[1] + border[1] * 2

    # Center matrix: move the image center to the origin so rotation/shear
    # are applied about the center rather than the top-left corner.
    C = np.eye(3)
    C[0, 2] = -img.shape[1] / 2  # x translation (pixels)
    C[1, 2] = -img.shape[0] / 2  # y translation (pixels)

    # Rotation and scale (about the origin, after centering)
    R = np.eye(3)
    a = random.uniform(degrees[0], degrees[1])
    s = random.uniform(scale[0], scale[1])
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)

    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(shear[0], shear[1]) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan(random.uniform(shear[0], shear[1]) * math.pi / 180)  # y shear (deg)

    # Translation. BUGFIX: the original omitted the +width/2 / +height/2
    # re-centering terms, so the content (moved to the origin by C) ended up
    # in the top-left corner and was mostly cropped away. Move the center back
    # to the canvas center plus a symmetric random jitter per axis.
    T = np.eye(3)
    T[0, 2] = width / 2 + random.uniform(-translate[0], translate[0]) * width    # x (pixels)
    T[1, 2] = height / 2 + random.uniform(-translate[1], translate[1]) * height  # y (pixels)

    # Combined matrix -- order matters (applied right-to-left: C, R, S, T)
    M = T @ S @ R @ C

    # Warp the image with the combined transform
    if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any():
        img = cv2.warpAffine(img, M[:2], dsize=(width, height),
                             flags=cv2.INTER_LINEAR, borderValue=(114, 114, 114))

    # Transform the boxes with the same matrix
    n = len(targets)
    if n:
        # Corner points in homogeneous coordinates, shape [n*4, 3]
        xy = np.ones((n * 4, 3))

        if targets.shape[1] == 5:  # [class, x1, y1, x2, y2]
            boxes = targets[:, 1:5]
        else:  # [batch_idx, class, x1, y1, x2, y2]
            boxes = targets[:, 2:6]

        # Four corners per box: (x1,y1), (x2,y2), (x1,y2), (x2,y1)
        xy[:, :2] = boxes[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(n * 4, 2)

        # Apply the affine transform to the corners
        xy = xy @ M.T

        # Back to one row of 8 coordinates per box
        xy = xy[:, :2].reshape(n, 8)
        x = xy[:, [0, 2, 4, 6]]
        y = xy[:, [1, 3, 5, 7]]

        # Axis-aligned box around the transformed corners: [x1, y1, x2, y2]
        xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T

        # BUGFIX: the original additionally shifted the boxes by a
        # rotated-canvas size delta (nw/nh). That offset is not part of M, so
        # it desynchronized boxes from pixels. Boxes are already in the warped
        # image's frame; only clip them to the canvas.
        xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
        xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)

        # Filter degenerate boxes: too small, too little area remaining,
        # or an extreme aspect ratio after the transform.
        w = xy[:, 2] - xy[:, 0]
        h = xy[:, 3] - xy[:, 1]
        area = w * h
        area0 = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
        ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16))
        i = (w > 2) & (h > 2) & (area / (area0 + 1e-16) > 0.2) & (ar < 20)

        targets = targets[i]
        targets[:, -4:] = xy[i]  # last 4 columns are the box coords in both layouts

    return img, targets


def random_affine_simple(img, targets=(), degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-2, 2)):
    """
    Simplified random affine augmentation: rotation + scale about the image
    center followed by a translation. Note: `shear` is accepted for interface
    parity but is not applied by this simplified version.
    """
    h_img, w_img = img.shape[:2]

    # Sample transform parameters (same sampling order as the full version).
    rot_deg = random.uniform(degrees[0], degrees[1])
    zoom = random.uniform(scale[0], scale[1])
    shift_x = random.uniform(translate[0], translate[1]) * w_img
    shift_y = random.uniform(translate[0], translate[1]) * h_img

    # 2x3 affine matrix: rotate/scale about the center, then translate.
    M = cv2.getRotationMatrix2D((w_img / 2, h_img / 2), rot_deg, zoom)
    M[0, 2] += shift_x
    M[1, 2] += shift_y

    # Warp the image; empty area is filled with gray (114).
    img = cv2.warpAffine(img, M, (w_img, h_img), flags=cv2.INTER_LINEAR, borderValue=(114, 114, 114))

    n_boxes = len(targets)
    if n_boxes:
        # Box columns start at index 1 ([class, x1, y1, x2, y2]) or
        # at index 2 ([batch_idx, class, x1, y1, x2, y2]).
        first = 1 if targets.shape[1] == 5 else 2
        boxes = targets[:, first:first + 4]

        # Expand each box into its four corners (homogeneous coordinates).
        pts = np.ones((n_boxes * 4, 3))
        pts[:, :2] = boxes[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(n_boxes * 4, 2)

        # Transform all corners, then re-box them axis-aligned.
        pts = (pts @ M.T)[:, :2].reshape(n_boxes, 8)
        xs = pts[:, 0::2]
        ys = pts[:, 1::2]
        out = np.concatenate((xs.min(1), ys.min(1), xs.max(1), ys.max(1))).reshape(4, n_boxes).T

        # Clip to the image and drop boxes that collapsed to < 2 px.
        out[:, [0, 2]] = out[:, [0, 2]].clip(0, w_img)
        out[:, [1, 3]] = out[:, [1, 3]].clip(0, h_img)
        keep = (out[:, 2] - out[:, 0] > 2) & (out[:, 3] - out[:, 1] > 2)

        targets = targets[keep]
        targets[:, first:first + 4] = out[keep]

    return img, targets


def random_affine_safe(img, targets=(), degrees=(-10, 10), translate=(0.1, 0.1),
                       scale=(0.9, 1.1), shear=(-2, 2), border_value=(114, 114, 114)):
    """
    Random affine transform on an enlarged canvas so rotated/translated content
    is not clipped, followed by a center crop back to the original size.

    `targets` boxes are in PIXEL coordinates, [n, 5] = [class, x1, y1, x2, y2]
    or [n, 6] = [batch_idx, class, x1, y1, x2, y2], matching random_affine_simple.

    BUGFIX: the original shifted the pixel-coordinate boxes by NORMALIZED
    offsets (x_offset / new_width, ...), un-mapped them by multiplying pixel
    values with new_width, and filtered with normalized criteria (cx <= 1) --
    so nearly every box was mis-placed and then discarded. All coordinate
    bookkeeping below is done consistently in pixels. Also guards the
    targets.copy() call, which crashed on the default tuple argument.
    """
    height, width = img.shape[:2]

    # Enlarge the canvas so the transformed content stays fully visible.
    expansion_factor = 1.5  # tune as needed
    new_height = int(height * expansion_factor)
    new_width = int(width * expansion_factor)

    # Gray canvas with the original image pasted in the center.
    expanded_img = np.full((new_height, new_width, 3), border_value, dtype=img.dtype)
    y_offset = (new_height - height) // 2
    x_offset = (new_width - width) // 2
    expanded_img[y_offset:y_offset + height, x_offset:x_offset + width] = img

    # Shift boxes by the paste offset (pixels). Only copy when there are
    # targets: the default `targets=()` is a tuple and has no .copy().
    adjusted_targets = targets
    if len(targets) > 0:
        adjusted_targets = targets.copy()
        first = 1 if adjusted_targets.shape[1] == 5 else 2
        adjusted_targets[:, [first, first + 2]] += x_offset      # x1, x2
        adjusted_targets[:, [first + 1, first + 3]] += y_offset  # y1, y2

    # Affine-transform the enlarged canvas and the shifted boxes.
    transformed_img, transformed_targets = random_affine_simple(
        expanded_img, adjusted_targets, degrees, translate, scale, shear
    )

    # Center-crop back to the original size.
    start_y = (transformed_img.shape[0] - height) // 2
    start_x = (transformed_img.shape[1] - width) // 2
    cropped_img = transformed_img[start_y:start_y + height, start_x:start_x + width]

    if len(transformed_targets) > 0:
        first = 1 if transformed_targets.shape[1] == 5 else 2
        # Map box coordinates into the cropped frame (pixels).
        transformed_targets[:, [first, first + 2]] -= start_x
        transformed_targets[:, [first + 1, first + 3]] -= start_y

        x1 = transformed_targets[:, first]
        y1 = transformed_targets[:, first + 1]
        x2 = transformed_targets[:, first + 2]
        y2 = transformed_targets[:, first + 3]
        cx = (x1 + x2) / 2
        cy = (y1 + y2) / 2
        w = x2 - x1
        h = y2 - y1

        # Keep boxes whose center lies inside the crop and that are not
        # degenerate (pixel thresholds, matching random_affine_simple).
        valid = (cx >= 0) & (cx <= width) & (cy >= 0) & (cy <= height) & (w > 2) & (h > 2)
        transformed_targets = transformed_targets[valid]

        # Clip the survivors to the crop bounds.
        if len(transformed_targets) > 0:
            transformed_targets[:, [first, first + 2]] = \
                transformed_targets[:, [first, first + 2]].clip(0, width)
            transformed_targets[:, [first + 1, first + 3]] = \
                transformed_targets[:, [first + 1, first + 3]].clip(0, height)

    return cropped_img, transformed_targets
# Visualize a YOLO label file on its image (label path + image path)
def draw_yolo(label_path, img_path):
    """
    Read a YOLO-format label file (each line: class xc yc w h, normalized) and
    draw the boxes on the corresponding image.

    BUGFIX: the original computed the corner coordinates but never drew them
    and returned nothing; now each box is drawn and the image is returned.

    Args:
        label_path: path to the .txt label file
        img_path: path to the image

    Returns:
        img: the image (BGR, as loaded by cv2) with boxes drawn on it
    """
    with open(label_path, 'r') as f:
        lines = f.readlines()
    img = cv2.imread(img_path)
    img_h, img_w, _ = img.shape  # invariant, hoisted out of the loop
    # Parse each label line and draw its bounding box
    for line in lines:
        class_id, x_center, y_center, box_width, box_height = map(float, line.strip().split())
        # Normalized -> absolute pixel coordinates
        x_center_abs = int(x_center * img_w)
        y_center_abs = int(y_center * img_h)
        box_width_abs = int(box_width * img_w)
        box_height_abs = int(box_height * img_h)
        # Center coordinates (debug output, kept from the original)
        print("center_abs", x_center_abs, y_center_abs, box_width_abs, box_height_abs)
        # Top-left and bottom-right corners of the box
        x_min = int(x_center_abs - box_width_abs / 2)
        y_min = int(y_center_abs - box_height_abs / 2)
        x_max = int(x_center_abs + box_width_abs / 2)
        y_max = int(y_center_abs + box_height_abs / 2)
        cv2.rectangle(img, (x_min, y_min), (x_max, y_max), (0, 255, 0), 2)
    return img

def xyxy2xywh(x):
    """Convert boxes from corner form [x1, y1, x2, y2] to center form
    [xc, yc, w, h]. Accepts a torch.Tensor or a numpy array and returns the
    same kind; columns beyond the first four are left as zeros."""
    is_tensor = isinstance(x, torch.Tensor)
    out = torch.zeros_like(x) if is_tensor else np.zeros_like(x)
    out[:, 0] = (x[:, 0] + x[:, 2]) * 0.5  # x center
    out[:, 1] = (x[:, 1] + x[:, 3]) * 0.5  # y center
    out[:, 2] = x[:, 2] - x[:, 0]          # width
    out[:, 3] = x[:, 3] - x[:, 1]          # height
    return out

def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
    """
    Resize `im` to fit inside `new_shape` while keeping its aspect ratio, then
    pad the borders with `color`. With auto=True the padding is reduced modulo
    `stride` (minimum rectangle); with scaleFill=True the image is stretched
    to exactly `new_shape` instead of padded.

    Returns:
        im: the resized + padded image
        ratio: (width_ratio, height_ratio) applied to the original image
        (dw, dh): padding added per side (half of the total per axis)
    """
    src_h, src_w = im.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)
    dst_h, dst_w = new_shape

    # Scale factor (new / old); never upscale when scaleup is False
    # (only scaling down gives better validation mAP).
    gain = min(dst_h / src_h, dst_w / src_w)
    if not scaleup:
        gain = min(gain, 1.0)

    ratio = gain, gain  # width, height ratios
    unpad_w, unpad_h = int(round(src_w * gain)), int(round(src_h * gain))
    pad_w, pad_h = dst_w - unpad_w, dst_h - unpad_h  # total padding per axis

    if auto:  # minimum rectangle: keep only the remainder modulo stride
        pad_w, pad_h = np.mod(pad_w, stride), np.mod(pad_h, stride)
    elif scaleFill:  # stretch to exactly new_shape, no padding
        pad_w, pad_h = 0.0, 0.0
        unpad_w, unpad_h = dst_w, dst_h
        ratio = dst_w / src_w, dst_h / src_h  # width, height ratios

    pad_w /= 2  # split padding between the two sides
    pad_h /= 2

    if (src_w, src_h) != (unpad_w, unpad_h):  # resize only when needed
        im = cv2.resize(im, (unpad_w, unpad_h), interpolation=cv2.INTER_LINEAR)
    # +-0.1 before rounding distributes an odd padding pixel to the far side
    top, bottom = int(round(pad_h - 0.1)), int(round(pad_h + 0.1))
    left, right = int(round(pad_w - 0.1)), int(round(pad_w + 0.1))
    im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
    return im, ratio, (pad_w, pad_h)

class LoadImageAndLabels(Dataset):
    """
    Dataset of images with YOLO-format labels (one txt per image, each line is
    `class xc yc w h` normalized to the original image).

    Loads and letterboxes each image, maps its labels into the letterboxed
    frame, and optionally applies HSV / affine / horizontal-flip augmentation.
    Yields (img_tensor CHW float [0,1], labels [n,6], img_path, (h0, w0)).
    """

    def __init__(self, path, batch_size, img_size=416, augment=True, multi_scale=False, root_path=os.path.curdir):
        '''
        :param path: txt file listing the image paths (one per line)
        :param batch_size: batch size; used so all images in one batch share one scale
        :param img_size: target training size in pixels
        :param augment: whether to apply data augmentation
        :param multi_scale: whether to vary img_size between batches
        :param root_path: dataset root directory the listed paths are relative to
        '''
        print('LoadImageAndLabels init: ', path)
        # Read the image list, e.g. wider_face_train_filelist.txt
        with open(path, 'r') as file:
            img_files = file.read().splitlines()
            img_files = list(filter(lambda x: len(x) > 0, img_files))  # drop empty lines
        # Shuffle once up front for extra training randomness
        np.random.shuffle(img_files)
        print("shuffle image...")
        self.img_files = img_files
        assert len(self.img_files) > 0, 'No images found in %s' % path
        self.img_size = img_size
        self.batch_size = batch_size
        self.multi_scale = multi_scale
        self.augment = augment
        self.scale_index = 0  # sample counter, used to detect batch boundaries
        self.root_path = root_path
        if self.multi_scale:
            self.img_size = img_size
            print("Multi scale images training,init img_size", self.img_size)
        else:
            print("Fixed scale images,img_size", self.img_size)
        # Label file path mirrors the image path: images/xxx.jpg -> labels/xxx.txt
        self.label_files = [
            x.replace('images', 'labels').replace('.jpg', '.txt')
            for x in self.img_files]

    def __len__(self):
        # Number of samples in the dataset
        return len(self.img_files)

    def __getitem__(self, index):
        # Step 1: multi-scale training -- pick a new size at each batch
        # boundary (except batch 0) so every image in a batch shares one scale.
        if self.multi_scale and (self.scale_index % self.batch_size == 0) and self.scale_index != 0:
            self.img_size = random.choice(range(11, 19)) * 32  # 352..576 px
        if self.multi_scale:
            self.scale_index += 1

        # Step 2: load the image.
        # Listed paths start with './', so strip the first two characters.
        img_path = os.path.join(self.root_path, self.img_files[index][2:])
        img = cv2.imread(img_path)

        # Step 3: HSV color augmentation with probability 0.5.
        if self.augment and random.random() < 0.5:
            self._augment_hsv(img)

        # Original shape BEFORE letterboxing -- needed to map the normalized
        # labels into the letterboxed frame, and reported to the caller.
        h0, w0 = img.shape[:2]

        # Step 4: resize + pad to the training size.
        img, ratio, (padw, padh) = letterbox(img, new_shape=self.img_size)
        h, w, _ = img.shape

        # Load labels and convert normalized xywh -> letterboxed pixel xyxy.
        label_path = os.path.join(self.root_path, self.label_files[index][2:])
        labels = []
        if os.path.isfile(label_path):
            with open(label_path, 'r') as file:
                lines = file.read().splitlines()
            x = np.array([x.split() for x in lines], dtype=np.float32)
            if x.size > 0:
                labels = x.copy()
                # BUGFIX: scale by the ORIGINAL image size (w0/h0). The old
                # code used the post-letterbox size, which double-applies the
                # resize ratio and misplaces every box.
                labels[:, 1] = ratio[0] * w0 * (x[:, 1] - x[:, 3] / 2) + padw  # x1
                labels[:, 2] = ratio[1] * h0 * (x[:, 2] - x[:, 4] / 2) + padh  # y1
                labels[:, 3] = ratio[0] * w0 * (x[:, 1] + x[:, 3] / 2) + padw  # x2
                labels[:, 4] = ratio[1] * h0 * (x[:, 2] + x[:, 4] / 2) + padh  # y2

        # Step 5: geometric augmentation (affine on an enlarged canvas so the
        # warped content is not partially cropped away).
        if self.augment:
            img, labels = random_affine_safe(img, labels, degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1))
        nL = len(labels)
        if nL:
            # Pixel xyxy -> normalized xywh.
            # NOTE(review): dividing both axes by img_size assumes a square
            # img_size canvas; with auto letterboxing one axis can be smaller
            # -- confirm downstream expectations.
            labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) / self.img_size

        # Horizontal flip with probability 0.5.
        if self.augment:
            if random.random() > 0.5:
                img = np.fliplr(img)
                # BUGFIX: mirror the labels only when the image was actually
                # flipped -- the old code flipped them unconditionally,
                # desynchronizing labels from pixels half of the time.
                if nL:
                    labels[:, 1] = 1 - labels[:, 1]

        # Step 6: pack results. Column 0 of labels_out is the in-batch image
        # index, filled in by the collate function.
        labels_out = torch.zeros(nL, 6)
        if nL:
            labels_out[:, 1:] = torch.from_numpy(labels)
        # BGR -> RGB and HWC -> CHW (3 x H x W)
        img = img[:, :, ::-1].transpose(2, 0, 1)
        # uint8 -> float32, then normalize 0-255 -> 0.0-1.0
        img = np.ascontiguousarray(img, dtype=np.float32)
        img /= 255.0
        return torch.from_numpy(img), labels_out, img_path, (h0, w0)

    @staticmethod
    def _augment_hsv(img, fraction=0.5):
        """Randomly scale the S and V channels of a BGR image in place.
        Each channel gets an independent gain in [1 - fraction, 1 + fraction]."""
        img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        S = img_hsv[:, :, 1].astype(np.float32)
        V = img_hsv[:, :, 2].astype(np.float32)
        a = (random.random() * 2 - 1) * fraction + 1
        S *= a
        if a > 1:  # clip only needed when the gain can push values above 255
            np.clip(S, 0, 255, out=S)
        a = (random.random() * 2 - 1) * fraction + 1
        V *= a
        if a > 1:
            np.clip(V, 0, 255, out=V)
        img_hsv[:, :, 1] = S
        img_hsv[:, :, 2] = V
        # Convert back to BGR, writing directly into the caller's array.
        cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, img)

    @staticmethod
    def collate_fn(batch):
        '''
        Custom batch assembly: stacks images and concatenates labels, writing
        each label's source-image index into column 0.
        '''
        img, label, path, hw = list(zip(*batch))
        for i, l in enumerate(label):
            l[:, 0] = i  # image index within the batch
        return torch.stack(img, 0), torch.cat(label, 0), path, hw

    @staticmethod
    def custom_collate_fn(batch):
        """
        Collate that pads differently-sized images to one common size
        (rounded up to a multiple of 32) so they can be stacked.
        """
        imgs, labels, paths, hw = zip(*batch)

        # Largest height/width in the batch, rounded up to a multiple of 32
        max_h = max(img.shape[1] for img in imgs)
        max_w = max(img.shape[2] for img in imgs)
        max_h = (max_h + 31) // 32 * 32
        max_w = (max_w + 31) // 32 * 32

        # Pad every image into the top-left corner of a gray canvas
        padded_imgs = []
        for img in imgs:
            c, h, w = img.shape
            padded_img = torch.full((c, max_h, max_w), 114 / 255.0, dtype=img.dtype)
            padded_img[:, :h, :w] = img
            padded_imgs.append(padded_img)

        # BUGFIX (consistency with collate_fn): fill in the in-batch image
        # index -- the old code left every label's column 0 at 0.
        for i, l in enumerate(labels):
            l[:, 0] = i

        return torch.stack(padded_imgs, 0), torch.cat(labels, 0), paths, hw

# Smoke test: iterate the dataset/dataloader and display each batch
if __name__ == '__main__':
    train_path = "/home/rk/11-AI/4-face/face/wider_face_train_filelist.txt"
    root_path = "/home/rk/11-AI/4-face/face/datasets"
    batch_size = 2
    img_size = 416
    num_workers = 2
    # Build the dataset
    dataset = LoadImageAndLabels(train_path,
                                 batch_size,
                                 img_size,
                                 augment=True,
                                 multi_scale=False,
                                 root_path=root_path)
    print(len(dataset))
    # Fetch batches through a DataLoader
    dataloader = DataLoader(dataset,
                            batch_size=batch_size,
                            num_workers=num_workers,
                            shuffle=True,
                            collate_fn=dataset.custom_collate_fn)
    # Iterate and display every image in each batch
    for i, (imgs, targets, img_path, _) in enumerate(dataloader):
        # targets: [n, 6] rows of [batch_idx, class, xc, yc, w, h]
        # BUGFIX: iterate the actual batch size -- the last batch can be
        # smaller than `batch_size`, which previously raised IndexError.
        for j in range(imgs.shape[0]):
            # De-normalize, CHW -> HWC, RGB -> BGR for OpenCV display
            img_tmp = np.uint8(imgs[j].permute(1, 2, 0) * 255.0)[:, :, ::-1]
            cv2.imshow('result', img_tmp)
            out_path = os.path.join("/home/rk/11-AI/4-face/face/train_data", os.path.basename(img_path[j]))
            # cv2.imwrite(out_path, img_tmp)
            cv2.waitKey(0)
        cv2.destroyAllWindows()
