import os
import random
import numpy as np
import torch
import torchvision.transforms as T
from torch.utils.data import Dataset
import cv2


# 2. Dataset loader: reads the annotation list, loads images, and applies
#    preprocessing / augmentation before encoding YOLOv1 targets.
class Yolo_Dataset(Dataset):
    """VOC-style dataset for YOLOv1.

    Each line of ``list_file`` is ``filename x1 y1 x2 y2 class ...`` — one
    group of five numbers per object, with pixel-space corner coordinates.
    Images are loaded with OpenCV, optionally augmented (training mode),
    normalised, resized to ``image_size`` and the annotations are encoded
    into the 7x7x30 target tensor YOLOv1 trains against.
    """

    # Default side length images are resized to before encoding.
    image_size = 448

    def __init__(self, root, list_file, train=True, transforms=None):
        '''
        :param root: directory containing the actual image files, e.g. ``./data/``
        :param list_file: annotation txt file produced by generate_txt_file
        :param train: True for the training set (enables augmentation), default True
        :param transforms: optional iterable of callables applied to the image
                           at the very end (e.g. ``[T.ToTensor()]``), default None
        '''
        self.root = root
        self.train = train
        self.transform = transforms
        self.fnames = []
        self.boxes = []
        self.labels = []
        self.mean = (123, 117, 104)  # RGB channel means subtracted during normalisation
        # Read the whole annotation file.
        with open(list_file) as f:
            lines = f.readlines()
        for line in lines:
            splited = line.strip().split()
            self.fnames.append(splited[0])  # image file name
            # Every 5 numbers after the file name describe one object:
            # x1 y1 x2 y2 class.
            num_boxes = (len(splited) - 1) // 5
            box = []
            label = []
            for i in range(num_boxes):
                x = float(splited[1 + 5 * i])
                y = float(splited[2 + 5 * i])
                x2 = float(splited[3 + 5 * i])
                y2 = float(splited[4 + 5 * i])
                c = splited[5 + 5 * i]  # class id
                box.append([x, y, x2, y2])
                label.append(int(c) + 1)  # +1 so stored class ids start at 1
            self.boxes.append(torch.Tensor(box))
            self.labels.append(torch.LongTensor(label))
        # Total number of samples.
        self.num_samples = len(self.boxes)

    def __len__(self):
        # Number of images in the dataset.
        return self.num_samples

    def __getitem__(self, idx):
        '''Load one image, augment it (train mode), normalise and encode its target.

        :param idx: sample index
        :return: ``(img, target)`` where ``target`` is the 7x7x30 tensor
        '''
        fname = self.fnames[idx]
        path = os.path.join(self.root, fname)
        img = cv2.imread(path)
        # cv2.imread returns None on failure; fail loudly here instead of
        # crashing later with an opaque AttributeError.
        if img is None:
            raise FileNotFoundError('could not read image: %s' % path)
        boxes = self.boxes[idx].clone()
        labels = self.labels[idx].clone()
        # Training-time augmentation — note that image and boxes (and where
        # objects can be dropped, labels) must be transformed together.
        if self.train:
            img, boxes = self.random_flip(img, boxes)  # random horizontal flip
            img, boxes = self.randomScale(img, boxes)  # random width scaling
            img = self.randomBlur(img)  # random blur
            img = self.RandomBrightness(img)  # random brightness
            img = self.RandomHue(img)  # random hue
            img = self.RandomSaturation(img)  # random saturation
            img, boxes, labels = self.randomShift(img, boxes, labels)  # random shift
            img, boxes, labels = self.randomCrop(img, boxes, labels)  # random crop
        h, w, _ = img.shape
        # Normalise box coordinates to [0, 1].
        boxes /= torch.Tensor([w, h, w, h]).expand_as(boxes)
        # OpenCV loads BGR; convert to RGB before subtracting the RGB means.
        img = self.BGR2RGB(img)
        img = self.subMean(img, self.mean)
        # Resize to the fixed network input size (448x448 by default).
        img = cv2.resize(img, (self.image_size, self.image_size))
        # Encode annotations into the 7x7x30 YOLOv1 target tensor.
        target = self.encoder(boxes, labels)
        # Bug fix: the documented default transforms=None used to crash here
        # (iterating None); only apply extra transforms when supplied.
        if self.transform is not None:
            for t in self.transform:
                img = t(img)

        return img, target

    def encoder(self, boxes, labels):
        '''
        Encode boxes and labels into the YOLOv1 target layout.

        Per grid cell the 30 channels are: box1 (cx, cy, w, h, conf),
        box2 (cx, cy, w, h, conf), then 20 class one-hot channels.

        :param boxes: tensor ``[[x1, y1, x2, y2], ...]`` normalised to [0, 1]
        :param labels: LongTensor of class ids (already shifted by +1 in __init__)
        :return: 7x7x30 target tensor
        '''
        grid_num = 7
        # Start from zeros and fill in the cells that contain object centres.
        target = torch.zeros((grid_num, grid_num, 30))
        cell_size = 1. / grid_num
        # Width/height and centre of each box (all normalised coordinates).
        wh = boxes[:, 2:] - boxes[:, :2]
        cxcy = (boxes[:, 2:] + boxes[:, :2]) / 2
        for i in range(cxcy.size()[0]):
            cxcy_sample = cxcy[i]
            # Grid cell (column, row) containing the centre.  Bug fix: clamp
            # so a centre exactly at 0 (ceil()-1 == -1) or at 1.0 cannot wrap
            # around / index outside the 7x7 grid.
            ij = ((cxcy_sample / cell_size).ceil() - 1).clamp(min=0, max=grid_num - 1)
            row, col = int(ij[1]), int(ij[0])
            target[row, col, 4] = 1  # box 1 confidence
            target[row, col, 9] = 1  # box 2 confidence
            target[row, col, int(labels[i]) + 9] = 1  # class one-hot (channels 10..29)
            # Top-left corner of the matched cell, in normalised coordinates.
            xy = ij * cell_size
            # Centre offset within the cell, in cell units.
            delta_xy = (cxcy_sample - xy) / cell_size
            target[row, col, 2:4] = wh[i]  # box 1 width/height
            target[row, col, :2] = delta_xy  # box 1 centre offset
            target[row, col, 7:9] = wh[i]  # box 2 width/height (duplicate)
            target[row, col, 5:7] = delta_xy  # box 2 centre offset (duplicate)
        return target

    # ---- augmentation helpers (adapted from a reference implementation) ----

    def BGR2RGB(self, img):
        # OpenCV colour-order conversion.
        return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    def BGR2HSV(self, img):
        return cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    def HSV2BGR(self, img):
        return cv2.cvtColor(img, cv2.COLOR_HSV2BGR)

    def RandomBrightness(self, bgr):
        # With probability 0.5, scale the V (value) channel by 0.5 or 1.5.
        if random.random() < 0.5:
            hsv = self.BGR2HSV(bgr)
            h, s, v = cv2.split(hsv)
            adjust = random.choice([0.5, 1.5])
            v = v * adjust
            v = np.clip(v, 0, 255).astype(hsv.dtype)
            hsv = cv2.merge((h, s, v))
            bgr = self.HSV2BGR(hsv)
        return bgr

    def RandomSaturation(self, bgr):
        # With probability 0.5, scale the S (saturation) channel by 0.5 or 1.5.
        if random.random() < 0.5:
            hsv = self.BGR2HSV(bgr)
            h, s, v = cv2.split(hsv)
            adjust = random.choice([0.5, 1.5])
            s = s * adjust
            s = np.clip(s, 0, 255).astype(hsv.dtype)
            hsv = cv2.merge((h, s, v))
            bgr = self.HSV2BGR(hsv)
        return bgr

    def RandomHue(self, bgr):
        # With probability 0.5, scale the H (hue) channel by 0.5 or 1.5.
        # NOTE(review): for uint8 images OpenCV's hue range is 0-179, so
        # clipping to 255 can produce out-of-range hues; kept as in the
        # reference implementation — confirm if exact colours matter.
        if random.random() < 0.5:
            hsv = self.BGR2HSV(bgr)
            h, s, v = cv2.split(hsv)
            adjust = random.choice([0.5, 1.5])
            h = h * adjust
            h = np.clip(h, 0, 255).astype(hsv.dtype)
            hsv = cv2.merge((h, s, v))
            bgr = self.HSV2BGR(hsv)
        return bgr

    def randomBlur(self, bgr):
        # With probability 0.5, apply a 5x5 box blur.
        if random.random() < 0.5:
            bgr = cv2.blur(bgr, (5, 5))
        return bgr

    def randomShift(self, bgr, boxes, labels):
        '''Randomly translate the image by up to 20% in x and y (p=0.5).

        Boxes whose centres leave the image are dropped together with their
        labels; if all boxes would be dropped, the input is returned unchanged.
        Boxes here are still in absolute pixel coordinates.
        '''
        center = (boxes[:, 2:] + boxes[:, :2]) / 2
        if random.random() < 0.5:
            height, width, c = bgr.shape
            # Canvas pre-filled with the (BGR-ordered) mean colour.
            after_shift_image = np.zeros((height, width, c), dtype=bgr.dtype)
            after_shift_image[:, :, :] = (104, 117, 123)  # bgr
            shift_x = random.uniform(-width * 0.2, width * 0.2)
            shift_y = random.uniform(-height * 0.2, height * 0.2)
            # Copy the overlapping region for each sign combination of the shift.
            if shift_x >= 0 and shift_y >= 0:
                after_shift_image[int(shift_y):, int(shift_x):, :] = bgr[:height - int(shift_y), :width - int(shift_x),
                                                                     :]
            elif shift_x >= 0 and shift_y < 0:
                after_shift_image[:height + int(shift_y), int(shift_x):, :] = bgr[-int(shift_y):, :width - int(shift_x),
                                                                              :]
            elif shift_x < 0 and shift_y >= 0:
                after_shift_image[int(shift_y):, :width + int(shift_x), :] = bgr[:height - int(shift_y), -int(shift_x):,
                                                                             :]
            elif shift_x < 0 and shift_y < 0:
                after_shift_image[:height + int(shift_y), :width + int(shift_x), :] = bgr[-int(shift_y):,
                                                                                      -int(shift_x):, :]

            # Shift box centres and keep only boxes whose centre stays inside.
            shift_xy = torch.FloatTensor([[int(shift_x), int(shift_y)]]).expand_as(center)
            center = center + shift_xy
            mask1 = (center[:, 0] > 0) & (center[:, 0] < width)
            mask2 = (center[:, 1] > 0) & (center[:, 1] < height)
            mask = (mask1 & mask2).view(-1, 1)
            boxes_in = boxes[mask.expand_as(boxes)].view(-1, 4)
            if len(boxes_in) == 0:
                # Every object would be shifted out — skip the augmentation.
                return bgr, boxes, labels
            box_shift = torch.FloatTensor([[int(shift_x), int(shift_y), int(shift_x), int(shift_y)]]).expand_as(
                boxes_in)
            boxes_in = boxes_in + box_shift
            labels_in = labels[mask.view(-1)]
            return after_shift_image, boxes_in, labels_in
        return bgr, boxes, labels

    def randomScale(self, bgr, boxes):
        # With probability 0.5, stretch the width by a factor in [0.8, 1.2]
        # while keeping the height fixed; x coordinates scale accordingly.
        if random.random() < 0.5:
            scale = random.uniform(0.8, 1.2)
            height, width, c = bgr.shape
            bgr = cv2.resize(bgr, (int(width * scale), height))
            scale_tensor = torch.FloatTensor([[scale, 1, scale, 1]]).expand_as(boxes)
            boxes = boxes * scale_tensor
            return bgr, boxes
        return bgr, boxes

    def randomCrop(self, bgr, boxes, labels):
        '''Randomly crop a window of 60-100% of each side (p=0.5).

        Boxes whose centre falls outside the crop are dropped; remaining
        boxes are translated into crop coordinates and clamped to its edges.
        '''
        if random.random() < 0.5:
            center = (boxes[:, 2:] + boxes[:, :2]) / 2
            height, width, c = bgr.shape
            h = random.uniform(0.6 * height, height)
            w = random.uniform(0.6 * width, width)
            x = random.uniform(0, width - w)
            y = random.uniform(0, height - h)
            x, y, h, w = int(x), int(y), int(h), int(w)

            # Keep only boxes whose centre lies inside the crop window.
            center = center - torch.FloatTensor([[x, y]]).expand_as(center)
            mask1 = (center[:, 0] > 0) & (center[:, 0] < w)
            mask2 = (center[:, 1] > 0) & (center[:, 1] < h)
            mask = (mask1 & mask2).view(-1, 1)

            boxes_in = boxes[mask.expand_as(boxes)].view(-1, 4)
            if (len(boxes_in) == 0):
                # Every object would be cropped away — skip the augmentation.
                return bgr, boxes, labels
            box_shift = torch.FloatTensor([[x, y, x, y]]).expand_as(boxes_in)

            # Translate into crop coordinates and clamp to the crop edges.
            boxes_in = boxes_in - box_shift
            boxes_in[:, 0] = boxes_in[:, 0].clamp_(min=0, max=w)
            boxes_in[:, 2] = boxes_in[:, 2].clamp_(min=0, max=w)
            boxes_in[:, 1] = boxes_in[:, 1].clamp_(min=0, max=h)
            boxes_in[:, 3] = boxes_in[:, 3].clamp_(min=0, max=h)

            labels_in = labels[mask.view(-1)]
            img_croped = bgr[y:y + h, x:x + w, :]
            return img_croped, boxes_in, labels_in
        return bgr, boxes, labels

    def subMean(self, bgr, mean):
        # Subtract the per-channel mean (standardisation); promotes to float.
        mean = np.array(mean, dtype=np.float32)
        bgr = bgr - mean
        return bgr

    def random_flip(self, im, boxes):
        # With probability 0.5, mirror the image horizontally and mirror the
        # box x coordinates (x1' = w - x2, x2' = w - x1).
        if random.random() < 0.5:
            im_lr = np.fliplr(im).copy()
            h, w, _ = im.shape
            xmin = w - boxes[:, 2]
            xmax = w - boxes[:, 0]
            boxes[:, 0] = xmin
            boxes[:, 2] = xmax
            return im_lr, boxes
        return im, boxes

    def random_bright(self, im, delta=16):
        # Random global brightness jitter (p=0.7).  NOTE: not called from
        # __getitem__'s augmentation pipeline; kept for external use.
        alpha = random.random()
        if alpha > 0.3:
            im = im * alpha + random.randrange(-delta, delta)
            im = im.clip(min=0, max=255).astype(np.uint8)
        return im


# 3. Debug / smoke-test code
def main():
    """Smoke test: build the training dataset and print one batch's shapes."""
    from torch.utils.data import DataLoader
    # NOTE: change this to your local VOC2012 JPEGImages directory.
    file_root = 'D:/Pycharm/YOLOv1_pytorch/dataset/VOCtrainval_11-May-2012/VOCdevkit/VOC2012/JPEGImages/'
    train_dataset = Yolo_Dataset(root=file_root, list_file='voctrain.txt', train=True, transforms=[T.ToTensor()])
    train_loader = DataLoader(train_dataset, batch_size=8, shuffle=False, num_workers=0)
    # Expected shapes: img -> [8, 3, 448, 448], target -> [8, 7, 7, 30]
    # (the target tensor is what the YOLOv1 loss is computed against).
    img, target = next(iter(train_loader))
    print(img.shape)
    print(target.shape)


if __name__ == '__main__':
    main()
