import os
import torch
from PIL import Image
from torch.utils import data
import numpy as np
from torch.utils.data import DataLoader
from torchvision.transforms import transforms


class FBDataset(data.Dataset):  # must inherit from data.Dataset
    """Dataset yielding an (optionally transformed) image and a Gaussian
    response-map label generated from a bounding-box annotation.

    Each line of *anno_file* is whitespace-separated:
        <image-path> <x1> <y1> <x2> <y2> [...extra fields ignored...]
    Only the first five fields are used.
    """

    def __init__(self, anno_file, root_dir, im_sz, train=True, transform=None):
        """
        anno_file: annotation file name, resolved relative to *root_dir*.
        root_dir:  directory containing the annotation file.
        im_sz:     target square size; labels are generated at (im_sz, im_sz).
        train:     stored as-is; not otherwise used inside this class.
        transform: optional callable applied to the PIL image in __getitem__.
        """
        self.root_dir = root_dir
        with open(os.path.join(self.root_dir, anno_file), 'r') as f:
            self.annos_frame = f.readlines()
        self.transform = transform
        self.im_sz = (im_sz, im_sz)
        self.train = train

    def __len__(self):
        return len(self.annos_frame)

    def __getitem__(self, idx):
        """Return {'image': transformed image or None, 'label': response map}."""
        fields = self.annos_frame[idx].strip().split(' ')
        img_name = fields[0]
        bbox_target = np.array(fields[1:5]).astype(float)

        # Context manager guarantees the image file handle is closed even if
        # a transform raises (a bare Image.open leaks the open file).
        with Image.open(img_name) as image:
            # Scale the (x1, y1, x2, y2) box from the original image size
            # to the target label size.
            bbox_target = resize_rect(image.size, self.im_sz, bbox_target)

            # NOTE(review): gen_label reads its rect as (x, y, w, h)
            # (centre = rect[0] + 0.5 * rect[2]) while bbox_target here is in
            # corner format (x1, y1, x2, y2) — confirm which convention is
            # actually intended before relying on the label centres.
            label = torch.Tensor(gen_label(self.im_sz, bbox_target))

            # NOTE(review): the image itself is NOT resized to im_sz here;
            # presumably a matching Resize lives inside `transform` — verify.
            sample = {'image': None, 'label': label}
            if self.transform:
                sample['image'] = self.transform(image)
        return sample

def gen_label(img_sz, rect):
    """Build a 2-D Gaussian ground-truth response map for *rect*.

    img_sz: (width, height) of the output map.
    rect:   box whose centre is taken as rect[0] + 0.5*rect[2],
            rect[1] + 0.5*rect[3] — i.e. an (x, y, w, h) reading; note the
            in-file caller passes corner coordinates, so confirm the
            convention before trusting the peak location.

    Returns a numpy array of shape (1, height, width) with peak value 1.0
    at the box centre, falling off as exp(-d^2 / (2*sigma)) where
    sigma = min(width, height) / 2.
    """
    w, h = img_sz
    sigma = min(w, h) / 2
    # Object centre from the rect fields.
    cx = rect[0] + 0.5 * rect[2]
    cy = rect[1] + 0.5 * rect[3]
    # Per-pixel squared distance to the centre on a (h, w) grid.
    cols, rows = np.meshgrid(np.arange(w), np.arange(h))
    sq_dist = np.square(cols - cx) + np.square(rows - cy)
    # Gaussian response, with a leading channel axis.
    return np.exp(-sq_dist / (2 * sigma))[np.newaxis, :, :]

# used for linear mapping...
def linear_mapping(images):
    max_value = 1
    min_value = 0

    parameter_a = 1 / (max_value - min_value)
    parameter_b = 1 - max_value * parameter_a

    image_after_mapping = parameter_a * images + parameter_b

    return image_after_mapping

def resize_rect(before_imsz, after_imsz, rect):
    """Scale a corner-format box (x1, y1, x2, y2) between image sizes.

    before_imsz: (width, height) the box currently refers to.
    after_imsz:  (width, height) to scale the box to.
    rect:        iterable of four corner coordinates.

    Returns [x1, y1, x2, y2] scaled per-axis by after/before ratios.
    """
    sx = after_imsz[0] / before_imsz[0]
    sy = after_imsz[1] / before_imsz[1]
    x1, y1, x2, y2 = rect
    return [x1 * sx, y1 * sy, x2 * sx, y2 * sy]
