"""
Model Inference using images
"""
import os
from PIL import Image, ImageDraw, ImageFont
import torch
import numpy as np
from torchvision import transforms
from vortex.models import get_model
from vortex.utils.visualize import visualize_detections_pil

# Run on GPU when one is available, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# ___________________________________________________________
# TEMPORARY
from vortex.data.labels import VOC_LABELS

# Class name -> integer id. Ids start at 1; 0 is reserved for background.
label_map = {name: idx + 1 for idx, name in enumerate(VOC_LABELS)}
label_map['background'] = 0

# Integer id -> class name (inverse of label_map).
rev_label_map = {idx: name for name, idx in label_map.items()}

# Color map for bounding boxes of detected objects from https://sashat.me/2017/01/11/list-of-20-simple-distinct-colors/
distinct_colors = ['#e6194b', '#3cb44b', '#ffe119', '#0082c8', '#f58231', '#911eb4', '#46f0f0', '#f032e6',
                   '#d2f53c', '#fabebe', '#008080', '#000080', '#aa6e28', '#fffac8', '#800000', '#aaffc3', '#808000',
                   '#ffd8b1', '#e6beff', '#808080', '#FFFFFF']

# Assign one distinct color per class, keyed by class name (insertion order:
# the 20 VOC classes first, then 'background' takes the final color).
label_color_map = {name: distinct_colors[pos] for pos, name in enumerate(label_map)}


class ModelInference(object):
    """Single-image object detection with a trained model.

    Builds the network described by ``opts.MODEL``, optionally restores
    trained weights from ``opts.INFER.TRAINED_WEIGHTS``, and exposes
    :meth:`detect` for running inference on an image file.
    """

    def __init__(self, opts):
        # Build the network architecture from the config.
        self.model = get_model(opts.MODEL)

        if opts.INFER.TRAINED_WEIGHTS:
            # Load to CPU first so the checkpoint opens regardless of the
            # device it was saved on; .to(device) below moves it afterwards.
            state_dict = torch.load(opts.INFER.TRAINED_WEIGHTS, map_location='cpu')
            self.model.load_state_dict(state_dict)
        self.model.to(device)
        # BUGFIX: switch to inference mode so dropout is disabled and
        # batch-norm uses its running statistics. Without this the model
        # stays in training mode and detection quality is degraded.
        self.model.eval()

        # Pre-processing transforms are defined by the model itself.
        self.transforms = self.model.get_inference_transform()

        # BUGFIX: copy the class-name list. The original assigned the config
        # list directly, so inserting 'background' mutated
        # opts.DATA.CLASS_NAMES in place and grew it on every instantiation.
        self.labels = list(opts.DATA.CLASS_NAMES)
        if opts.DATA.BG0:
            # Index 0 is reserved for the background class.
            self.labels.insert(0, 'background')

    def detect(self, image_path):
        """Detect objects in the image at ``image_path``.

        Displays the annotated image as a side effect. Returns the original
        PIL image when only background is detected; otherwise returns
        ``(det_boxes, det_labels)`` with boxes in pixel coordinates and
        labels decoded to class-name strings.
        """
        original_image = Image.open(image_path, mode='r')
        original_image = original_image.convert('RGB')

        image = self.transforms(original_image)
        image = image.to(device)
        inputs = image.unsqueeze(0)  # add a batch dimension

        # Inference only: disabling autograd saves memory and time.
        with torch.no_grad():
            det_boxes, det_labels, det_scores = self.model.detect(inputs)

        # Convert fractional [0, 1] coordinates to pixel coordinates.
        original_dims = np.array([original_image.width, original_image.height,
                                  original_image.width, original_image.height])
        det_boxes = det_boxes * original_dims

        # Decode integer class labels into class-name strings.
        det_labels = [self.labels[l] for l in det_labels]

        # If no objects found, the detected labels will be set to ['0.'], i.e.
        # ['background'] in SSD300.detect_objects() in model.py.
        if det_labels == ['background']:
            # Nothing to draw; just return the original image.
            return original_image

        display_image = visualize_detections_pil(original_image, det_boxes, det_labels, label_color_map)
        display_image.show()

        return det_boxes, det_labels
