import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../'))
import torch
from torchvision import transforms
from PIL import Image, ImageDraw, ImageFont
from vortex.data.labels import VOC_LABELS
from vortex.models.ssd300 import SSD300


# Run on GPU when one is available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Map each VOC class name to an integer id (1..N); id 0 is reserved
# for the background class.
label_map = {name: idx + 1 for idx, name in enumerate(VOC_LABELS)}
label_map['background'] = 0

# Integer id -> class name, used to decode the model's label output.
rev_label_map = {idx: name for name, idx in label_map.items()}

# Color map for bounding boxes of detected objects from https://sashat.me/2017/01/11/list-of-20-simple-distinct-colors/
distinct_colors = ['#e6194b', '#3cb44b', '#ffe119', '#0082c8', '#f58231', '#911eb4', '#46f0f0', '#f032e6',
                   '#d2f53c', '#fabebe', '#008080', '#000080', '#aa6e28', '#fffac8', '#800000', '#aaffc3', '#808000',
                   '#ffd8b1', '#e6beff', '#808080', '#FFFFFF']
# One distinct color per class name (insertion order of label_map).
label_color_map = {name: distinct_colors[i] for i, name in enumerate(label_map)}


class SSDInference(object):
    """Single-image object detection with a trained SSD300 model.

    Loads a model from either a full training checkpoint (``.pth.tar``,
    containing the pickled model object) or a plain state-dict file, then
    exposes :meth:`detect`, which returns a PIL image annotated with
    labelled bounding boxes.
    """

    def __init__(self, checkpoint_path):
        """Load the model onto ``device`` and set up preprocessing.

        :param checkpoint_path: path to a ``.pth.tar`` training checkpoint
            (pickled model) or a ``.pth`` state-dict file.
        """
        if checkpoint_path.endswith('.pth.tar'):
            # Full training checkpoint: the entire model object was pickled.
            # NOTE(review): torch.load unpickles arbitrary code — only load
            # checkpoints from trusted sources.
            checkpoint = torch.load(checkpoint_path, map_location='cpu')
            model = checkpoint['model']
        else:
            # Plain state_dict: rebuild the architecture, then load weights.
            model = SSD300(n_classes=len(VOC_LABELS) + 1)
            state_dict = torch.load(checkpoint_path, map_location='cpu')
            model.load_state_dict(state_dict)

        self.model = model.to(device)
        self.model.eval()

        # Preprocessing: SSD300 expects a 300x300, ImageNet-normalized input.
        self.resize = transforms.Resize((300, 300))
        self.to_tensor = transforms.ToTensor()
        self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                              std=[0.229, 0.224, 0.225])

        # Thresholds forwarded to SSD300.detect_objects().
        self.min_score = 0.2     # minimum class confidence to keep a box
        self.max_overlap = 0.5   # NMS IoU threshold
        self.top_k = 200         # keep at most this many detections

    @staticmethod
    def _text_size(font, text):
        """Return (width, height) of ``text`` in ``font``.

        ``font.getsize`` was removed in Pillow 10; fall back to ``getbbox``
        on modern Pillow versions.
        """
        try:
            return font.getsize(text)
        except AttributeError:
            left, top, right, bottom = font.getbbox(text)
            return right - left, bottom - top

    def detect(self, image_path, suppress=None):
        """Detect objects in an image and return an annotated image.

        :param image_path: path to the input image file.
        :param suppress: optional collection of class names to skip
            when drawing annotations.
        :return: a PIL.Image with boxes and labels drawn, or the original
            image unchanged if nothing was detected.
        """
        original_image = Image.open(image_path, mode='r').convert('RGB')
        image = self.normalize(self.to_tensor(self.resize(original_image)))
        image = image.to(device).unsqueeze(0)

        # Forward prop. — no_grad avoids building the autograd graph during
        # inference (saves memory and time).
        with torch.no_grad():
            predicted_locs, predicted_scores = self.model(image)

            # Decode SSD output into per-image boxes/labels/scores.
            det_boxes, det_labels, det_scores = self.model.detect_objects(
                predicted_locs, predicted_scores, min_score=self.min_score,
                max_overlap=self.max_overlap, top_k=self.top_k)

        # Move detections to the CPU
        det_boxes = det_boxes[0].to('cpu')

        # Boxes come out in fractional coordinates; scale to pixel space.
        original_dims = torch.FloatTensor(
            [original_image.width, original_image.height,
             original_image.width, original_image.height]).unsqueeze(0)
        det_boxes = det_boxes * original_dims

        # Decode class integer labels
        det_labels = [rev_label_map[l] for l in det_labels[0].to('cpu').tolist()]

        # If no objects found, the detected labels will be set to ['0.'], i.e.
        # ['background'] in SSD300.detect_objects() — return the original image.
        if det_labels == ['background']:
            return original_image

        # Annotate
        annotated_image = original_image
        draw = ImageDraw.Draw(annotated_image)
        try:
            font = ImageFont.truetype("./calibril.ttf", 15)
        except OSError:
            # The bundled TTF may be absent; fall back to PIL's default font.
            font = ImageFont.load_default()

        for i in range(det_boxes.size(0)):
            # Suppress specific classes, if needed
            if suppress is not None and det_labels[i] in suppress:
                continue

            # Boxes: draw twice at a 1-pixel offset to thicken the outline.
            box_location = det_boxes[i].tolist()
            color = label_color_map[det_labels[i]]
            draw.rectangle(xy=box_location, outline=color)
            draw.rectangle(xy=[l + 1. for l in box_location], outline=color)

            # Text: class name on a filled rectangle above the box.
            label_text = det_labels[i].upper()
            text_w, text_h = self._text_size(font, label_text)
            text_location = [box_location[0] + 2., box_location[1] - text_h]
            textbox_location = [box_location[0], box_location[1] - text_h,
                                box_location[0] + text_w + 4., box_location[1]]
            draw.rectangle(xy=textbox_location, fill=color)
            draw.text(xy=text_location, text=label_text, fill='white',
                      font=font)
        del draw

        return annotated_image


if __name__ == '__main__':
    # Demo entry point: run detection on a sample image and show the result.
    image_file = 'assets/000015.jpg'
    checkpoint_file = 'checkpoints/ssd300_voc.pth'
    # Alternative checkpoints:
    # checkpoint_file = 'checkpoints/model_epoch0350_new.pth'
    # checkpoint_file = 'checkpoints/SSD_epoch0350.pth'
    annotated = SSDInference(checkpoint_file).detect(image_file)
    annotated.show()
