from torch.utils.data import Dataset
from PIL import Image
import numpy as np
from skimage.transform import resize
import torch
import os
import glob


class ImageFolder(Dataset):
    """Dataset over every image file in a folder, for label-free inference.

    Each item is letterbox-padded to a square with mid-gray and resized to
    ``img_size`` x ``img_size``.

    ``__getitem__`` returns ``(img_path, input_img)`` where ``input_img`` is
    a float tensor of shape ``(3, img_size, img_size)`` with values in [0, 1].
    """

    def __init__(self, folder_path, img_size=416):
        # Sorted for a deterministic file order across runs.
        self.files = sorted(glob.glob('%s/*.*' % folder_path))
        self.img_shape = (img_size, img_size)

    def __getitem__(self, index):
        # Modulo lets an index wrap around instead of raising IndexError.
        img_path = self.files[index % len(self.files)]
        # BUGFIX: force 3 channels — grayscale (2-D) or RGBA (4-channel)
        # images previously broke the (h, w, c) unpack / 3-axis padding below.
        img = np.array(Image.open(img_path).convert('RGB'))
        h, w, _ = img.shape
        # Pad the short side on both ends to make the image square.
        dim_diff = np.abs(h - w)
        pad1, pad2 = dim_diff // 2, dim_diff - dim_diff // 2
        # h <= w: pad rows (top/bottom); otherwise pad columns (left/right).
        pad = ((pad1, pad2), (0, 0), (0, 0)) if h <= w else ((0, 0), (pad1, pad2), (0, 0))
        # Pad with mid-gray, then scale pixel values to [0, 1].
        input_img = np.pad(img, pad, 'constant', constant_values=127.5) / 255.
        # Resize to the network input size (*tuple unpacks the (H, W) pair).
        input_img = resize(input_img, (*self.img_shape, 3), mode='reflect')
        # Channels-first (C, H, W), as PyTorch expects.
        input_img = np.transpose(input_img, (2, 0, 1))
        input_img = torch.from_numpy(input_img).float()
        # Return both the path and the tensor so callers can map results back.
        return img_path, input_img

    def __len__(self):
        return len(self.files)


class ListDataset(Dataset):
    """Dataset driven by a text file listing one image path per line.

    Label file paths are derived from image paths by swapping ``images`` ->
    ``labels`` and ``.jpg`` -> ``.yaml``.

    ``__getitem__`` returns ``(img_path, input_img, labels)`` where
    ``input_img`` is a float tensor of shape ``(3, img_size, img_size)`` in
    [0, 1] and ``labels`` is a 2x4 tensor — row 0 the x coordinates, row 1
    the y coordinates of four corner points — or zeros when no label file
    exists for the image.
    """

    def __init__(self, list_path, img_size=416):
        with open(list_path, 'r') as file:
            self.img_files = file.readlines()
        self.label_files = [path.replace('images', 'labels').replace('.jpg', '.yaml') for path in self.img_files]
        self.img_shape = (img_size, img_size)

    def __getitem__(self, index):

        #--------
        #  Image
        #--------

        img_path = self.img_files[index % len(self.img_files)].split('\n')[0].rstrip()
        img = np.array(Image.open(img_path))
        # Skip non-3-channel (e.g. grayscale) images: advance the index until
        # a usable image is found. Assumes at least one such image exists in
        # the list, otherwise this loops forever — TODO confirm upstream data.
        while len(img.shape) != 3:
            index += 1
            img_path = self.img_files[index % len(self.img_files)].rstrip()
            img = np.array(Image.open(img_path))

        h, w, _ = img.shape
        # Pad the short side on both ends to make the image square.
        dim_diff = np.abs(h - w)
        pad1, pad2 = dim_diff // 2, dim_diff - dim_diff // 2
        # pad_flag True -> rows padded (top/bottom); False -> columns padded.
        # Remembered so the label coordinates below shift the same way.
        pad_flag = h <= w
        pad = ((pad1, pad2), (0, 0), (0, 0)) if pad_flag else ((0, 0), (pad1, pad2), (0, 0))
        # Pad with mid-gray, then scale pixel values to [0, 1].
        input_img = np.pad(img, pad, 'constant', constant_values=128) / 255.
        paded_h, paded_w, _ = input_img.shape
        input_img = resize(input_img, (*self.img_shape, 3), mode='reflect')
        # Channels-first (C, H, W), as PyTorch expects.
        input_img = np.transpose(input_img, (2, 0, 1))
        input_img = torch.from_numpy(input_img).float()

        #--------
        #  Label
        #--------

        label_path = self.label_files[index % len(self.img_files)].split('\n')[0].rstrip()
        # Fallback when no label file exists. BUGFIX: use a torch tensor so
        # the return type matches the labeled branch (was a NumPy array).
        labels = torch.zeros((2, 4))
        if os.path.exists(label_path):
            with open(label_path, 'r') as f:
                lines = f.readlines()
                # Corner coordinates are on the 4th line, after the colon:
                # eight whitespace-separated values x1 y1 x2 y2 x4 y4 x3 y3.
                points = lines[3].split(':')[-1].split()
            if pad_flag:
                # h <= w: rows were padded, so y coordinates shift by pad1.
                x1 = int(points[0])
                y1 = int(points[1]) + pad1
                x2 = int(points[2])
                y2 = int(points[3]) + pad1
                x4 = int(points[4])
                y4 = int(points[5]) + pad1
                x3 = int(points[6])
                y3 = int(points[7]) + pad1
            else:
                # h > w: columns were padded, so x coordinates shift by pad1.
                x1 = int(points[0]) + pad1
                y1 = int(points[1])
                x2 = int(points[2]) + pad1
                y2 = int(points[3])
                x4 = int(points[4]) + pad1
                y4 = int(points[5])
                x3 = int(points[6]) + pad1
                y3 = int(points[7])
            points = [x1, y1, x2, y2, x4, y4, x3, y3]
            if paded_h >= self.img_shape[0]:
                # Downscaling: rescale coordinates to the network input size.
                labels = list(map(int, [point / paded_h * self.img_shape[0] for point in points]))
            else:
                # NOTE(review): upscaling branch only offsets coordinates
                # instead of scaling them — looks asymmetric with the branch
                # above; preserved as-is, verify against training data sizes.
                diff = self.img_shape[0] - paded_h
                labels = [point + diff for point in points]
            # Regroup flat [x1, y1, ..., y3] into [[x...], [y...]].
            labels = [[labels[0], labels[2], labels[4], labels[6]], [labels[1], labels[3], labels[5], labels[7]]]
            labels = torch.from_numpy(np.array(labels))
        # BUGFIX: return unconditionally — previously this return sat inside
        # the os.path.exists branch, so unlabeled images yielded None and
        # crashed the default DataLoader collate.
        return img_path, input_img, labels

    def __len__(self):
        return len(self.img_files)






