# -*- coding: utf-8 -*-
# !/usr/bin/env python
"""
-------------------------------------------------
   File Name：     datalist
   Description :   
   Author :       lth
   date：          2022/6/24
-------------------------------------------------
   Change Activity:
                   2022/6/24 18:17: create this script
-------------------------------------------------
"""
__author__ = 'lth'

import numpy as np
import torch
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms

# ImageNet statistics shared by the train- and inference-time normalizers.
_IMAGENET_MEAN = [0.485, 0.456, 0.406]
_IMAGENET_STD = [0.229, 0.224, 0.225]

# Color-only augmentations applied to the PIL image during training;
# tensor conversion / normalization happens separately below.
data_transform = transforms.Compose([
    transforms.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.3),
    transforms.RandomGrayscale(p=0.3),
])

# PIL -> tensor + ImageNet normalization for training inputs.
# NOTE: the "transfrom" spelling is kept on purpose — callers use this name.
train_transfrom = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=_IMAGENET_MEAN, std=_IMAGENET_STD),
])

# Same deterministic pipeline for inference.
infer_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=_IMAGENET_MEAN, std=_IMAGENET_STD),
])


class RetinaFaceDataset(Dataset):
    """WIDER FACE dataset for RetinaFace training.

    Parses a retinaface-style annotation file in which each image entry
    starts with a line ``# relative/path.jpg`` followed by one face per
    line: ``x y w h`` plus five landmark triples (presumably
    ``lx ly visibility`` — only the x/y pairs are used here).
    """

    def __init__(self, label_path, images_root="E:/Datasets2/widerface/train/images/"):
        """
        :param label_path: path to the retinaface label file (label.txt)
        :param images_root: directory prefixed to every relative image path;
            the default keeps the previously hard-coded location so existing
            callers are unaffected.
        """
        self.label_path = label_path
        self.images_root = images_root

        self.imgs_paths, self.lines = self.process_labels(label_path)
        assert len(self.imgs_paths) == len(self.lines), " the length should be same"

        # letterbox target size as (height, width)
        self.input_shape = [840, 840]

    def __len__(self):
        return len(self.imgs_paths)

    def __getitem__(self, index):
        """Load one image and its face annotations.

        :return: (image_tensor, target, image_path). ``target`` has shape
            (n_faces, 15): x1 y1 x2 y2, five landmark (x, y) pairs, and an
            include_ldm flag (-1 when the face has no landmarks). Coordinates
            are pixels in the resized image — not normalized to [0, 1].
        """
        faces = self.lines[index]
        img_path = self.images_root + self.imgs_paths[index]
        image = Image.open(img_path).convert('RGB')

        rows = []
        for line in faces:
            annotation = np.zeros(15)
            # box is stored as x, y, w, h -> convert to corner coordinates
            annotation[0] = line[0]              # x1
            annotation[1] = line[1]              # y1
            annotation[2] = line[0] + line[2]    # x2
            annotation[3] = line[1] + line[3]    # y2
            # landmark (x, y) pairs; indices 6, 9, 12, 15, 18 are skipped
            # (presumably per-landmark visibility flags — TODO confirm)
            annotation[4:14] = [line[i] for i in (4, 5, 7, 8, 10, 11, 13, 14, 16, 17)]
            # labels use -1 coordinates to mark "no landmarks available"
            annotation[14] = -1 if annotation[4] < 0 else 1
            rows.append(annotation)

        # single allocation instead of np.append per face; empty -> (0, 15)
        target = np.array(rows, dtype=np.float64).reshape(-1, 15)

        height, width = image.height, image.width

        # resize keeping aspect ratio so the image fits the input shape
        ratio = width / height
        if ratio > 1:
            nw = self.input_shape[1]
            nh = int(self.input_shape[0] / ratio)
        else:
            nh = self.input_shape[0]
            nw = int(self.input_shape[1] * ratio)

        image = image.resize((nw, nh), Image.BICUBIC)
        image = data_transform(image)

        # letterbox: paste the resized image at the top-left of a gray canvas
        new_image = Image.new("RGB", self.input_shape, (128, 128, 128))
        new_image.paste(image, [0, 0])

        # rescale annotations from original-image pixels to resized pixels
        ratio_h, ratio_w = height / nh, width / nw
        target[:, [0, 2, 4, 6, 8, 10, 12]] = target[:, [0, 2, 4, 6, 8, 10, 12]] / ratio_w
        target[:, [1, 3, 5, 7, 9, 11, 13]] = target[:, [1, 3, 5, 7, 9, 11, 13]] / ratio_h

        return train_transfrom(new_image), target, img_path

    @staticmethod
    def process_labels(path):
        """Parse a retinaface label file.

        Lines starting with ``#`` open a new image entry (the rest of the
        line is the relative image path); every other line holds one face
        annotation as space-separated floats.

        :param path: path to the label file
        :return: (image_paths, annotations) — parallel lists with one entry
            per image; each annotation is a list of per-face float lists.
        """
        imgs_path = []
        words = []
        labels = []
        # context manager closes the file even if a parse error is raised
        with open(path, 'r') as f:
            for raw in f:
                line = raw.rstrip()
                if line.startswith('#'):
                    # a new image header: flush faces of the previous image
                    if imgs_path:
                        words.append(labels)
                        labels = []
                    imgs_path.append(line[2:])
                else:
                    labels.append([float(x) for x in line.split(' ')])
        # flush the faces collected for the last image
        words.append(labels)
        return imgs_path, words


def retinaface_dataset_collate(batch):
    """Collate a list of (image, target, path) samples into a batch.

    Images are stacked into one tensor; targets are packed with np.array;
    paths are returned as a plain list.
    """
    imgs = [sample[0] for sample in batch]
    targets = [sample[1] for sample in batch]
    paths = [sample[2] for sample in batch]

    # NOTE(review): np.array over per-image targets assumes either equal face
    # counts or legacy ragged-array behavior — verify against the numpy
    # version in use.
    return torch.stack(imgs, dim=0), np.array(targets), paths


if __name__ == "__main__":
    data = RetinaFaceDataset(label_path="G:/datasets/WIDER_FACE/train/label.txt")
