"""
ultralytics dataset format
    This dataset is for yolo series, where num_classes in model definition
    is equal to the actual number of classes, i.e. no background label is needed.
"""
import os

import torch
import torch.utils.data as data


class UltralyticsDataset(data.Dataset):
    """Detection dataset stored in the ultralytics (YOLO) directory layout.

    Images live under an ``.../images/...`` directory; each image has a
    sibling ``.txt`` label file under ``.../labels/...`` containing one
    ``class cx cy w h`` row per object (center-format, typically normalized).
    Images without objects simply have no label file.
    """

    supported_formats = ['jpg', 'png', 'jpeg', ]

    def __init__(self, image_folder, transform=None):
        """Index every supported image in *image_folder* and derive label paths.

        Args:
            image_folder: directory containing the images (an ``images`` dir).
            transform: optional callable mapping ``(image, boxes, labels)``
                to transformed ``(image, boxes, labels)``.
        """
        self.image_paths = []
        self.label_paths = []
        # sorted() makes the sample order deterministic across platforms
        # (os.listdir order is arbitrary).
        for name in sorted(os.listdir(image_folder)):
            # rsplit + lower: tolerate dots in filenames and uppercase
            # extensions such as ".JPG".
            ext = name.rsplit('.', 1)[-1].lower()
            if ext not in self.supported_formats:
                continue
            image_path = os.path.join(image_folder, name)
            # Labels mirror the image tree with 'labels' in place of 'images'
            # and always carry the .txt extension (YOLO convention). The
            # previous code kept the image extension, so the derived path
            # could never match an actual label file.
            label_path = os.path.splitext(image_path.replace('images', 'labels'))[0] + '.txt'
            self.image_paths.append(image_path)
            self.label_paths.append(label_path)

        self.transform = transform

    def __getitem__(self, index):
        """Return ``(image, boxes, labels)`` for the sample at *index*."""
        # Local import: PIL is only required when images are actually loaded.
        from PIL import Image

        image_path = self.image_paths[index]
        label_path = self.label_paths[index]
        image = Image.open(image_path, mode='r')
        image = image.convert('RGB')  # normalize to 3-channel RGB

        boxes, labels = self.load_annotation(label_path)
        if self.transform is not None:
            image, boxes, labels = self.transform(image, boxes, labels)
        return image, boxes, labels

    def __len__(self):
        """Number of indexed images."""
        return len(self.image_paths)

    def load_annotation(self, label_path):
        """Parse a YOLO label file into corner-format boxes and class ids.

        Args:
            label_path: path to a ``.txt`` label file; may not exist
                (background image), in which case both lists are empty.

        Returns:
            ``(boxes, labels)`` where ``boxes`` is a list of
            ``[x1, y1, x2, y2]`` floats (same units as the file) and
            ``labels`` is a list of int class ids.
        """
        boxes = []
        labels = []
        # In the YOLO layout, images with no objects have no label file.
        if not os.path.isfile(label_path):
            return boxes, labels
        with open(label_path, 'r') as f:
            for line in f:
                vec = line.strip().split()
                if len(vec) != 5:
                    continue  # skip malformed rows
                label = int(vec[0])
                # BUG FIX: split() yields strings; the original did
                # arithmetic on them directly, raising TypeError.
                cx, cy, w, h = map(float, vec[1:])
                # center-format -> corner-format
                boxes.append([cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2])
                labels.append(label)
        return boxes, labels

    def collate_fn(self, batch):
        """Batch samples: stack images, keep per-image box/label lists ragged.

        Args:
            batch: list of ``(image_tensor, boxes, labels)`` triples.

        Returns:
            ``(images, boxes, labels)`` with ``images`` a stacked tensor of
            shape ``(B, ...)`` and ``boxes``/``labels`` plain lists of length B.
        """
        images = []
        boxes = []
        labels = []
        for image, box, label in batch:
            images.append(image)
            boxes.append(box)
            labels.append(label)
        images = torch.stack(images, dim=0)
        return images, boxes, labels
