from torch.utils.data import Dataset
import os
import cv2
import numpy as np
from torchvision import transforms
import matplotlib.pyplot as plt


class MyDataset(Dataset):
    """YOLO-v1-style dataset.

    Loads an image, zero-pads it to a square, resizes it to ``input_size``,
    rescales the YOLO-format bounding boxes accordingly, and converts them
    into a (7, 7, 5*num_bbox + num_classes) target grid.
    """

    def __init__(self, is_train=True, is_aug=True, dataset_path="", input_size=448, num_bbox=None, classes=None):
        """
        :param is_train: read ``train.txt`` when True, ``val.txt`` otherwise
        :param is_aug: whether to apply the transform pipeline (currently only ToTensor)
        :param dataset_path: root directory containing train.txt/val.txt, images/, labels/
        :param input_size: side length of the square the image is resized to
        :param num_bbox: number of predicted boxes per grid cell (B in YOLO)
        :param classes: list of class names
        """
        split_file = "train.txt" if is_train else "val.txt"
        with open(os.path.join(dataset_path, split_file), 'r') as f:
            self.filenames = [x.strip() for x in f]
        self.img_path = os.path.join(dataset_path, "images")
        self.label_path = os.path.join(dataset_path, 'labels')
        self.is_aug = is_aug
        self.input_size = input_size
        self.num_bbox = num_bbox
        self.classes = classes

    def __len__(self):
        return len(self.filenames)

    def __getitem__(self, item):
        # NOTE(review): cv2 loads BGR and no BGR->RGB conversion is done here —
        # confirm downstream model expects BGR channel order.
        img = cv2.imread(os.path.join(self.img_path, self.filenames[item] + ".jpg"))
        img_h, img_w = img.shape[:2]

        # Zero-pad the shorter side symmetrically so the image is square
        # before resizing (keeps the aspect ratio of the content).
        pad_w, pad_h = 0, 0
        if img_h > img_w:
            pad_w = (img_h - img_w) // 2
            img = np.pad(img, ((0, 0), (pad_w, pad_w), (0, 0)), 'constant', constant_values=0)
        elif img_w > img_h:
            pad_h = (img_w - img_h) // 2
            img = np.pad(img, ((pad_h, pad_h), (0, 0), (0, 0)), 'constant', constant_values=0)
        img = cv2.resize(img, (self.input_size, self.input_size), interpolation=cv2.INTER_AREA)

        if self.is_aug:
            aug = transforms.Compose([
                transforms.ToTensor()
            ])
            img = aug(img)

        # Label file: whitespace-separated runs of (cls, xc, yc, w, h), normalized to [0, 1].
        with open(os.path.join(self.label_path, self.filenames[item] + '.txt')) as f:
            bbox = [float(tok) for line in f for tok in line.split()]
        if len(bbox) % 5 != 0:
            # BUG FIX: original referenced the non-existent attribute `self.labelpath`
            # (AttributeError) and concatenated the path without a separator.
            raise ValueError("File:" + os.path.join(self.label_path, self.filenames[item] + ".txt")
                             + "——bbox Extraction Error!")

        # Re-normalize bbox coordinates/sizes to account for the square padding:
        # the normalization denominator changes from the original side to the
        # padded (longer) side, and centers shift by the padding offset.
        for i in range(len(bbox) // 5):
            if pad_w != 0:
                bbox[i * 5 + 1] = (bbox[i * 5 + 1] * img_w + pad_w) / img_h
                bbox[i * 5 + 3] = (bbox[i * 5 + 3] * img_w) / img_h
            elif pad_h != 0:
                bbox[i * 5 + 2] = (bbox[i * 5 + 2] * img_h + pad_h) / img_w
                bbox[i * 5 + 4] = (bbox[i * 5 + 4] * img_h) / img_w

        labels = convert_bbox2labels(bbox, num_bbox=self.num_bbox, classes=self.classes)
        labels = transforms.ToTensor()(labels)
        # BUG FIX: removed stray debug `plt.imshow(img)` — it ran on every item
        # and `img` is a CHW tensor here when is_aug is set, which imshow rejects.
        return img, labels


def convert_bbox2labels(bbox, num_bbox, classes):
    """Convert a flat bbox list into a YOLO-v1 target grid.

    :param bbox: flat list of (cls, xc, yc, w, h) tuples, coordinates normalized
        to [0, 1] in (xc, yc, w, h) center format
    :param num_bbox: number of box slots per grid cell (B in YOLO)
    :param classes: list of class names (determines the one-hot class width)
    :return: np.ndarray of shape (7, 7, 5*num_bbox + len(classes)); each
        responsible cell holds (px, py, w, h, conf=1) repeated for every box
        slot, followed by a one-hot class vector. (px, py) is the box center
        relative to the cell's top-left corner, in cell units.
    """
    grid_size = 1.0 / 7
    labels = np.zeros((7, 7, 5 * num_bbox + len(classes)))
    for i in range(len(bbox) // 5):
        cls, xc, yc, w, h = bbox[i * 5: i * 5 + 5]
        # Cell indices of the box center (column gridx, row gridy).
        # BUG FIX: clamp to 6 — a coordinate of exactly 1.0 floor-divides to 7
        # and would index past the 7x7 grid.
        gridx = min(int(xc // grid_size), 6)
        gridy = min(int(yc // grid_size), 6)
        # Center position relative to the cell's top-left corner, in cell units.
        gridpx = xc / grid_size - gridx
        gridpy = yc / grid_size - gridy
        # Mark this cell responsible for the ground truth: fill every box slot
        # with (px, py, w, h, conf=1).
        # BUG FIX: original hardcoded two slots (0:5, 5:10) and class offset 10,
        # ignoring num_bbox; generalized (identical result for num_bbox=2).
        cell_box = np.array([gridpx, gridpy, w, h, 1])
        for b in range(num_bbox):
            labels[gridy, gridx, b * 5: b * 5 + 5] = cell_box
        labels[gridy, gridx, 5 * num_bbox + int(cls)] = 1
    return labels
