# -*- coding: utf-8 -*-

import numpy as np
import torch
from torch.utils.data import DataLoader as torch_DataLoader

from voc_dataset import VOCDataset
from utils.rpn.generate_anchor_base import generate_anchor_base
from utils.rpn.enumerate_shifted_anchor import enumerate_shifted_anchor
from utils.rpn.anchor_target_creator import AnchorTargetCreator
from utils.rpn.backbone_network_output_size import get_backbone_network_output_size

from faster_rcnn_config import Config


class VOCCustomCollateFN(object):
    """Collate function that turns raw VOC samples into RPN training targets.

    For each (image, annotations) pair in a batch it:
      1. converts the normalized center-format annotations into absolute-pixel
         ``(y1, x1, y2, x2)`` boxes,
      2. enumerates all shifted anchors over the backbone feature map, and
      3. asks :class:`AnchorTargetCreator` for per-anchor labels and
         regression targets.
    """

    def __init__(self, feat_stride=16):
        # feat_stride: downsampling factor between the input image and the
        # backbone feature map. Previously hard-coded to 16 in __call__;
        # exposed here (default unchanged) so other backbones can reuse this.
        self.feat_stride = feat_stride
        self.anchor_base = generate_anchor_base()
        self.anchor_target_creator = AnchorTargetCreator()

    @staticmethod
    def get_bboxes_from_annotations(image_size, annotations):
        """Convert annotation rows to absolute ``(y1, x1, y2, x2)`` boxes.

        :param image_size: ``(height, width)`` of the input image in pixels.
        :param annotations: float array of shape ``(N, 5)``; columns ``1:5``
            are interpreted as normalized ``(cx, cy, w, h)`` — assumed
            YOLO-style center format based on the conversion below; column 0
            (presumably the class id) is ignored here.
        :return: ``float32`` array of shape ``(N, 4)`` in pixel coordinates,
            ordered ``(y1, x1, y2, x2)``.
        """
        h, w = image_size[0], image_size[1]
        centers = annotations[:, 1:3]         # normalized (cx, cy)
        half_sizes = annotations[:, 3:5] / 2  # normalized (w/2, h/2)
        # corners: (x1, y1, x2, y2), still normalized to [0, 1]
        corners = np.hstack([centers - half_sizes, centers + half_sizes])
        # np.empty instead of np.zeros: every element is assigned below.
        bbox = np.empty(shape=[annotations.shape[0], 4], dtype=np.float32)
        bbox[:, 0] = corners[:, 1] * h  # y1
        bbox[:, 1] = corners[:, 0] * w  # x1
        bbox[:, 2] = corners[:, 3] * h  # y2
        bbox[:, 3] = corners[:, 2] * w  # x2
        return bbox

    def __call__(self, batch_data):
        """Build one training batch of RPN targets.

        :param batch_data: iterable of ``(image, annotations)`` pairs, where
            ``image`` is a CHW tensor and ``annotations`` an ``(N, 5)`` tensor.
        :return: ``(images, labels, targets)`` — images kept as a plain list
            (sizes may differ per sample), labels and regression targets
            concatenated over all anchors of all samples.
        """
        batch_images, batch_labels, batch_targets = list(), list(), list()
        for image, annotations in batch_data:
            # image is CHW, so dims 1 and 2 are (height, width)
            input_image_size = (image.size(1), image.size(2))
            bboxes = self.get_bboxes_from_annotations(input_image_size, annotations.numpy())
            output_feature_map_size = get_backbone_network_output_size(input_image_size)
            anchors = enumerate_shifted_anchor(self.anchor_base,
                                               feat_stride=self.feat_stride,
                                               height=output_feature_map_size[0],
                                               width=output_feature_map_size[1])
            loc, label = self.anchor_target_creator(bboxes, anchors, input_image_size)
            batch_images.append(image)
            batch_labels.append(torch.from_numpy(label).long())
            batch_targets.append(torch.from_numpy(loc))
        batch_labels = torch.cat(batch_labels, 0)
        batch_targets = torch.cat(batch_targets, 0)
        return batch_images, batch_labels, batch_targets


def get_voc_data_loader(batch_size):
    """Create a VOC DataLoader wired to the RPN-target collate function.

    :param batch_size: number of samples per batch.
    :return: a ``torch.utils.data.DataLoader`` over ``VOCDataset`` rooted at
        ``Config.ROOT_FOLDER``.

    NOTE(review): ``shuffle=False`` means samples are always served in dataset
    order — confirm this is intentional for training.
    """
    dataset = VOCDataset(Config.ROOT_FOLDER)
    return torch_DataLoader(dataset=dataset,
                            batch_size=batch_size,
                            shuffle=False,
                            drop_last=False,
                            collate_fn=VOCCustomCollateFN())
