#!/usr/bin/env python

from torch.utils.data import Dataset
from torchvision.io import read_image, torch
import torchvision.datasets as datasets
import xml.etree.ElementTree as ET

# Full 21-class Pascal VOC label set, kept for reference. This project
# restricts detection to the person class only, so the reduced list is
# used instead.
# VOC_CLASSES = [
#         '__background__',
#         'aeroplane' , 'bicycle'   , 'bird'        , 'boat'        ,
#         'bottle'    , 'bus'       , 'car'         , 'cat'         ,
#         'chair'     , 'cow'       , 'diningtable' , 'dog'         ,
#         'horse'     , 'motorbike' , 'person'      , 'pottedplant' ,
#         'sheep'     , 'sofa'      , 'train'       , 'tvmonitor'   ,
#     ]
# Reduced label set: background + person only.
VOC_CLASSES = ['__background__', 'person']

def class2int(name: str) -> int:
    """Map a VOC class name to its integer index in ``VOC_CLASSES``.

    Returns -1 for any name not in the (reduced) class list, which
    callers can use to mark/filter unwanted categories.
    """
    # EAFP: a single .index() lookup instead of an `in` membership test
    # followed by a second scan with .index().
    try:
        return VOC_CLASSES.index(name)
    except ValueError:
        return -1

class PascalVOCDataset(Dataset):
    """Pascal VOC detection dataset yielding ``(image, target)`` pairs.

    Wraps torchvision's ``VOCDetection`` but re-parses the annotation XML
    itself so the target is returned as tensors::

        target = {
            'labels': int64 tensor of shape (N,),   # via class2int; -1 for
                                                    # classes not in VOC_CLASSES
            'boxes':  float64 tensor of shape (N, 4),  # (xmin, ymin, xmax, ymax)
            'path':   str, path to the image file,
        }

    The image is returned as a float tensor scaled to [0, 1].
    """

    def __init__(self, root, year, image_set, transform=None, target_transform=None):
        # download=False: the dataset must already be present under `root`.
        self.voc_dataset = datasets.VOCDetection(
            root=root,
            year=year,
            image_set=image_set,
            download=False,
        )
        self.transform = transform
        self.target_transform = target_transform

    def __len__(self):
        return len(self.voc_dataset)

    def __getitem__(self, index):
        image_path = self.voc_dataset.images[index]
        annotation_file_path = self.voc_dataset.annotations[index]

        # ET.parse() opens and closes the file itself; the previous
        # open(...) call leaked the file handle.
        root = ET.parse(annotation_file_path).getroot()
        # .get(..., []) guards against annotations with no <object> entries.
        objs = self.voc_dataset.parse_voc_xml(node=root)['annotation'].get('object', [])

        labels = []
        boxes = []
        for item in objs:
            bbox = item['bndbox']
            # VOC stores pixel coordinates as strings in the XML.
            coords = [int(bbox[k]) for k in ('xmin', 'ymin', 'xmax', 'ymax')]
            boxes.append(torch.tensor(coords, dtype=torch.float64))
            labels.append(item['name'])

        # Convert the string class names to integer class indices.
        labels = torch.tensor([class2int(x) for x in labels], dtype=torch.int64)
        # torch.stack([]) raises on an empty list, so build an empty
        # (0, 4) box tensor for images with no annotated objects.
        if boxes:
            boxes = torch.stack(boxes)
        else:
            boxes = torch.zeros((0, 4), dtype=torch.float64)

        image = read_image(image_path)
        image = image / 255  # scale uint8 [0, 255] -> float [0, 1]

        target = {'labels': labels, 'boxes': boxes, 'path': image_path}
        if self.transform is not None:
            image = self.transform(image)
        if self.target_transform is not None:
            target = self.target_transform(target)

        return image, target
