import torchvision
import torchvision.transforms as transforms
from torch.utils.data.dataset import Dataset
import config
import numpy as np
import torch
from PIL import Image
from torch.utils.data.dataset import Dataset


def preprocess_input(image):
    """Scale pixel values from [0, 255] to [0, 1] in place.

    Mutates and returns the same float array (callers rely on the
    returned reference; the division happens in place).
    """
    np.divide(image, 255.0, out=image)
    return image


def to_rgb(image):
    """Return *image* as a PIL image in RGB mode.

    An image already in RGB mode is returned unchanged; any other mode
    (L, P, RGBA, YCbCr, ...) is converted.

    Fixes two issues with the previous channel-count check via
    ``np.shape(image)``: it decoded the whole pixel buffer into a numpy
    array just to count channels, and it let 3-channel non-RGB modes
    (e.g. YCbCr, HSV) pass through unconverted.
    """
    if image.mode == 'RGB':
        return image
    return image.convert('RGB')


# DataLoader中的collate函数
# Custom collate for the DataLoader: images are stacked into one tensor,
# while boxes/labels stay as per-image Python lists (images in a batch can
# carry different numbers of boxes, so they cannot be stacked).
def voc_dataset_collate(batch):
    images = [sample[0] for sample in batch]
    bboxes = [sample[1] for sample in batch]
    labels = [sample[2] for sample in batch]
    stacked = torch.from_numpy(np.array(images))
    return stacked, bboxes, labels


# 定义VOC数据集处理
# VOC-style detection dataset: each annotation line is
# "<image path> x1,y1,x2,y2,cls x1,y1,x2,y2,cls ...".
class VOCDataset(Dataset):
    def __init__(self, annotation_lines, input_shape=None):
        """Store annotation lines and the target (h, w) network input size."""
        self.annotation_lines = annotation_lines
        self.length = len(annotation_lines)
        # Default network input size is 600x600 (h, w).
        self.input_shape = [600, 600] if input_shape is None else input_shape

    def __len__(self):
        return self.length

    def __getitem__(self, index):
        """Return (image CHW float32 in [0,1], boxes (n,4), labels (n,))."""
        index %= self.length

        image, raw_boxes = self.get_random_data(self.annotation_lines[index])
        # HWC -> CHW so torch can consume the array directly.
        image = np.transpose(preprocess_input(np.array(image, dtype=np.float32)),
                             (2, 0, 1))

        padded = np.zeros((len(raw_boxes), 5))
        if len(raw_boxes) > 0:
            padded[:len(raw_boxes)] = raw_boxes

        return image, padded[:, :4], padded[:, -1]

    def get_random_data(self, annotation_line):
        """Load one sample: letterbox the image into input_shape and remap boxes.

        The image is resized with preserved aspect ratio, centered on a gray
        (128, 128, 128) canvas; box coordinates are rescaled and shifted to
        match, clamped to the canvas, and degenerate boxes (w or h <= 1 px)
        are dropped.
        """
        parts = annotation_line.split()  # path first, then one token per box

        image = to_rgb(Image.open(parts[0]))  # force a uniform RGB format

        src_w, src_h = image.size
        target_h, target_w = self.input_shape

        # Each remaining token is "x1,y1,x2,y2,cls"; zero or more boxes.
        boxes = np.array([np.array(list(map(int, token.split(','))))
                          for token in parts[1:]])

        # Aspect-preserving scale plus centering offsets (letterbox).
        ratio = min(target_w / src_w, target_h / src_h)
        new_w = int(src_w * ratio)
        new_h = int(src_h * ratio)
        pad_x = (target_w - new_w) // 2
        pad_y = (target_h - new_h) // 2

        # Paste the resized image onto a gray canvas of the target size.
        canvas = Image.new('RGB', (target_w, target_h), (128, 128, 128))
        canvas.paste(image.resize((new_w, new_h), Image.BICUBIC), (pad_x, pad_y))
        image_data = np.array(canvas, np.float32)

        if len(boxes) > 0:
            np.random.shuffle(boxes)  # shuffle box order for robustness
            # Rescale to the resized image, then shift by the padding.
            boxes[:, [0, 2]] = boxes[:, [0, 2]] * new_w / src_w + pad_x
            boxes[:, [1, 3]] = boxes[:, [1, 3]] * new_h / src_h + pad_y
            # Clamp to the canvas bounds.
            boxes[:, 0:2][boxes[:, 0:2] < 0] = 0
            boxes[:, 2][boxes[:, 2] > target_w] = target_w
            boxes[:, 3][boxes[:, 3] > target_h] = target_h
            # Discard boxes that collapsed to (almost) nothing.
            widths = boxes[:, 2] - boxes[:, 0]
            heights = boxes[:, 3] - boxes[:, 1]
            boxes = boxes[np.logical_and(widths > 1, heights > 1)]

        return image_data, boxes


def _read_lines(path):
    """Read all annotation lines from a UTF-8 text file."""
    with open(path, encoding='utf-8') as handle:
        return handle.readlines()


train_lines = _read_lines(config.TRAIN_DATA_PATH)
test_lines = _read_lines(config.TEST_DATA_PATH)

train_set = VOCDataset(train_lines)
# 4 worker processes load batches in parallel; the custom collate keeps
# per-image box/label lists instead of trying to stack ragged arrays.
train_data = torch.utils.data.DataLoader(train_set, batch_size=config.BATCH_SIZE,
                                         shuffle=True, num_workers=4,
                                         collate_fn=voc_dataset_collate)

test_set = VOCDataset(test_lines)
# Evaluation: no shuffling, and a larger batch since no backward pass is run.
test_data = torch.utils.data.DataLoader(test_set, batch_size=2 * config.BATCH_SIZE,
                                        shuffle=False, num_workers=4,
                                        collate_fn=voc_dataset_collate)
