# -*- coding: utf-8 -*-
# !/usr/bin/env python
"""
-------------------------------------------------
   File Name：     datalist
   Description :   
   Author :       lth
   date：          2022/2/19
-------------------------------------------------
   Change Activity:
                   2022/2/19 6:13: create this script
-------------------------------------------------
"""
__author__ = 'lth'

import random

import numpy as np
import torch
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms

# used for train
# Applied to the final training canvas in CenterDataset.__getitem__:
# PIL image -> float tensor in [0, 1] -> per-channel normalization with
# the standard ImageNet mean/std.
data_transform = transforms.Compose([
    # transforms.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.3),
    # transforms.RandomGrayscale(p=0.3),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

# Deterministic horizontal mirror (p=1). The dataset decides randomly whether
# to apply it and mirrors the box coordinates itself to stay in sync.
data_horizontal_flip = transforms.Compose([
    transforms.RandomHorizontalFlip(p=1)
])

# used for predict
# Same tensor conversion + normalization as training, but with no augmentation.
predict_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])


class CenterDataset(Dataset):
    """Detection dataset for CenterNet-style training.

    Each element of ``lines`` is an annotation string of the form
    ``"<image-name>.jpg x1 y1 x2 y2 cls [x1 y1 x2 y2 cls ...]"`` with
    integer pixel coordinates.
    """

    def __init__(self, lines, image_size=None, root="E:/Datasets/123/posetive_img/"):
        """
        :param lines: annotation lines, one per image (see class docstring)
        :param image_size: output canvas size as [height, width]; defaults to [512, 512]
        :param root: directory prefix prepended to every image name; the default
                     preserves the previously hard-coded dataset location
        """
        super(CenterDataset, self).__init__()
        if image_size is None:
            image_size = [512, 512]
        self.lines = lines
        self.image_size = image_size
        self.root = root

    def __len__(self):
        return len(self.lines)

    def __getitem__(self, index):
        """Load and augment one sample.

        Augmentation: random aspect-ratio jitter + resize, a random
        horizontal flip of the resized image, paste onto a gray canvas,
        then a second independent random horizontal flip of the canvas.

        :returns
            img       -- normalized image tensor (not the raw PIL image)
            box       -- float array (n, 5): x1 y1 x2 y2 cls, in pixel
                         coordinates of the output canvas (not normalized)
            line_path -- full path of the source image
        """
        line = self.lines[index].split('.jpg')
        line_path = self.root + line[0] + '.jpg'
        line_label = line[-1].strip().split()

        image = Image.open(line_path).convert("RGB")

        # Parse the flat label list into (n, 5) boxes: x1 y1 x2 y2 cls.
        line_label = list(map(int, line_label))
        box = [line_label[i:i + 5] for i in range(0, len(line_label), 5)]
        # Sort by descending area so the smallest box comes last: when two
        # boxes map to the same center, the smallest one wins (per the paper).
        box = sorted(box, key=lambda x: -(x[2] - x[0]) * (x[3] - x[1]))
        # np.float was removed in NumPy 1.24; np.float64 is the equivalent.
        box = np.array(box, dtype=np.float64)

        w, h = image.size

        # Jitter the aspect ratio, then fit the image into the canvas so the
        # longer side fills the corresponding canvas dimension.
        ratio = w / h * random.uniform(0.75, 1.35)
        if ratio > 1:
            nw = self.image_size[1]
            nh = int(self.image_size[0] / ratio)
        else:
            nh = self.image_size[0]
            nw = int(self.image_size[1] * ratio)

        # Resize the image and scale the boxes with it.
        image = image.resize((nw, nh), Image.BICUBIC)
        box[:, [0, 2]] = box[:, [0, 2]] * (nw / w)
        box[:, [1, 3]] = box[:, [1, 3]] * (nh / h)

        # Random horizontal flip of the resized image; mirroring swaps the
        # roles of x1 and x2, hence the cross-assignment.
        if random.random() > 0.5:
            image = data_horizontal_flip(image)
            mirrored_x1 = nw - box[:, 2]
            mirrored_x2 = nw - box[:, 0]
            box[:, 0] = mirrored_x1
            box[:, 2] = mirrored_x2
        box[:, 0][box[:, 0] < 0] = 0
        box[:, 1][box[:, 1] < 0] = 0

        # Paste onto a gray canvas; 128 maps to roughly 0 after normalization.
        #                            width                height
        img = Image.new('RGB', (self.image_size[1], self.image_size[0]), (128, 128, 128))

        start_point_x, start_point_y = 0, 0
        img.paste(image, (start_point_x, start_point_y))
        box[:, [0, 2]] = box[:, [0, 2]] + start_point_x
        box[:, [1, 3]] = box[:, [1, 3]] + start_point_y

        # Clamp to the canvas after the paste offset.
        box[:, 0][box[:, 0] < 0] = 0
        box[:, 1][box[:, 1] < 0] = 0
        box[:, 2][box[:, 2] > self.image_size[1]] = self.image_size[1]
        box[:, 3][box[:, 3] > self.image_size[0]] = self.image_size[0]

        # Second, independent horizontal flip over the full canvas.
        if random.random() > 0.5:
            img = data_horizontal_flip(img)
            mirrored_x1 = self.image_size[1] - box[:, 2]
            mirrored_x2 = self.image_size[1] - box[:, 0]
            box[:, 0] = mirrored_x1
            box[:, 2] = mirrored_x2

        # Drop boxes whose top-left corner fell outside the canvas and clamp
        # the remaining bottom-right corners to the canvas bounds.
        box = box[box[:, 0] < self.image_size[1]]
        box = box[box[:, 1] < self.image_size[0]]
        box[:, 2][box[:, 2] > self.image_size[1]] = self.image_size[1]
        box[:, 3][box[:, 3] > self.image_size[0]] = self.image_size[0]

        img = data_transform(img)

        return img, box, line_path


def center_dataset_collate(batch):
    """Collate function for a DataLoader over CenterDataset.

    Images are stacked into one (B, C, H, W) tensor. Boxes keep their
    per-image grouping, because each image may contain a different number
    of objects.

    :param batch: list of (img_tensor, box_array, line_path) triples
    :returns: (stacked image tensor, object array of per-image box arrays,
               list of image paths)
    """
    images = []
    boxes = []
    lines = []
    for img, box, line in batch:
        images.append(img)
        boxes.append(box)
        lines.append(line)
    images = torch.stack(images, dim=0)
    # Box counts differ per image, so np.array(boxes) would try to build a
    # ragged array -- a hard ValueError on NumPy >= 1.24 (previously a
    # deprecation warning yielding an object array). Build it explicitly.
    boxes_out = np.empty(len(boxes), dtype=object)
    for i, b in enumerate(boxes):
        boxes_out[i] = b

    return images, boxes_out, lines
