"""
implementation of dataset format as keras-yolo3
    a single text file for annotations where each line for image path and box coordinates.
"""
import os
import cv2
import numpy as np
from PIL import Image
import torch.utils.data as data


class YoloV3Dataset(data.Dataset):
    """
    Dataset in the keras-yolo3 annotation format: a single text file where
    each line is `image_path x1,y1,x2,y2,class x1,y1,x2,y2,class ...`.

    TODO: add random image augmentation (the `mosaic` flag is stored but not
    used yet).
    """
    def __init__(self, image_text, image_size, mosaic=True, is_train=True):
        """
        Args:
            image_text: path to the annotation text file (keras-yolo3 format).
            image_size: (width, height) tuple images are letterboxed to.
            mosaic: reserved for mosaic augmentation; not implemented yet.
            is_train: reserved for train/eval behavior switching; not
                implemented yet.
        """
        self.data = self.read_data(image_text)
        self.image_size = image_size
        self.mosaic = mosaic
        self.is_train = is_train

    def __len__(self):
        # Required by torch.utils.data.Dataset for len()-based samplers/loaders.
        return len(self.data)

    def __getitem__(self, index):
        image_path, boxes = self.data[index]
        return self.get_data(image_path, boxes, self.image_size, augment=False)

    def letterbox_resize(self, image, boxes, input_size=(608, 608)):
        """Resize `image` preserving aspect ratio, padding with gray (128),
        and remap `boxes` into the padded frame.

        Args:
            image: PIL image.
            boxes: (n, 5) array of [x1, y1, x2, y2, class] in original pixels.
            input_size: (width, height) of the output canvas.

        Returns:
            (image_data, box_data): float32 HxWx3 array and a (n, 5) array of
            remapped boxes; rows of degenerate boxes are left zero-padded.
        """
        iw, ih = image.size
        w, h = input_size
        scale = min(w / iw, h / ih)
        nw = int(iw * scale)
        nh = int(ih * scale)
        dx = (w - nw) // 2
        dy = (h - nh) // 2
        image = image.resize((nw, nh), Image.BICUBIC)
        new_image = Image.new('RGB', (w, h), (128, 128, 128))
        new_image.paste(image, (dx, dy))
        image_data = np.array(new_image, np.float32)

        nboxes = len(boxes)
        # BUG FIX: np.zeros(nboxes, 5) passed 5 as the dtype argument; the
        # shape must be given as a tuple.
        box_data = np.zeros((nboxes, 5))
        if nboxes > 0:
            # BUG FIX: operate on a copy so the annotations cached in
            # self.data are not transformed again on every epoch.
            boxes = boxes.copy()
            np.random.shuffle(boxes)
            # scale + shift box corners into the letterboxed frame
            boxes[:, [0, 2]] = boxes[:, [0, 2]] * nw / iw + dx
            boxes[:, [1, 3]] = boxes[:, [1, 3]] * nh / ih + dy
            # BUG FIX: the following clipping lines referenced an undefined
            # name `box` instead of `boxes`.
            boxes[:, 0:2][boxes[:, 0:2] < 0] = 0
            boxes[:, 2][boxes[:, 2] >= w] = w - 1
            boxes[:, 3][boxes[:, 3] >= h] = h - 1
            box_w = boxes[:, 2] - boxes[:, 0]
            box_h = boxes[:, 3] - boxes[:, 1]
            # drop boxes that collapsed to <=1px after resizing/clipping
            boxes = boxes[np.logical_and(box_w > 1, box_h > 1)]
            box_data[:len(boxes)] = boxes
        return image_data, box_data

    def get_data(self, image_path, boxes, input_size, augment=True):
        """Load one image and letterbox it; `augment` is reserved (ignored)."""
        # Convert to RGB so grayscale/paletted files still yield HxWx3 arrays.
        image = Image.open(image_path).convert('RGB')
        return self.letterbox_resize(image, boxes, input_size)

    def read_data(self, text_path):
        """Parse a keras-yolo3 annotation file.

        Returns:
            list of [image_path, boxes] where boxes is an (n, 5) int array of
            [x1, y1, x2, y2, class]; lines whose image file does not exist on
            disk (and blank lines) are skipped.
        """
        data = []
        with open(text_path, 'r') as f:
            for line in f:
                vec = line.strip().split()
                # BUG FIX: blank lines used to raise IndexError on vec[0].
                if not vec:
                    continue
                image_path = vec[0]
                if not os.path.exists(image_path):
                    continue
                boxes = np.array([list(map(int, b.split(','))) for b in vec[1:]])
                data.append([image_path, boxes])
        return data
