import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../'))
import cv2
import numpy as np
import torch
import time
from PIL import Image
from torchvision import transforms as FT
from vortex.utils.bbox import non_max_suppression
from vortex.network.layers.yolo_body import YoloBody
from vortex.network.head.yolov3 import YoloDetect


class Yolov3Inference(object):
    """Single-image YOLOv3 inference: loads pretrained weights, runs the
    backbone + three detection heads, applies NMS, and optionally draws boxes.
    """

    def __init__(self, weights_path, num_classes=80):
        """Build the model and preprocessing pipeline.

        Args:
            weights_path (str): path to a state_dict checkpoint loadable by
                ``torch.load``.
            num_classes (int): number of object classes (80 for COCO).
        """
        # Anchor boxes per detection scale, ordered large -> small objects
        # (matching the three YOLO output strides).
        anchors = [[[116, 90], [156, 198], [373, 326]],
                   [[30, 61], [62, 45], [59, 119]],
                   [[10, 13], [16, 30], [33, 23]]]
        image_size = (416, 416)

        # construct model and load pretrained weights
        self.model = YoloBody(anchors, num_classes)
        state_dict = torch.load(weights_path, map_location='cpu')
        self.model.load_state_dict(state_dict)
        # FIX: switch to eval mode so BatchNorm uses running statistics and
        # Dropout is disabled during inference. Without this, detections are
        # computed with training-mode behavior.
        self.model.eval()

        # One decoding head per output scale.
        self.heads = [YoloDetect(anchors[i], num_classes, image_size)
                      for i in range(3)]

        self.image_size = image_size
        self.num_classes = num_classes

        # Image transformations for the PIL path (``method != 'cv'``).
        # NOTE(review): this path applies ImageNet mean/std normalization,
        # while preprocess() (the 'cv' path) only scales to [0, 1] — the two
        # paths feed the model different distributions. Confirm which matches
        # the checkpoint's training preprocessing.
        resize = FT.Resize(self.image_size)
        to_tensor = FT.ToTensor()
        normalize = FT.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        self.transforms = FT.Compose([resize, to_tensor, normalize])

    def preprocess(self, image):
        """Convert a BGR uint8 image (H, W, 3) into a (1, 3, H', W') float
        tensor scaled to [0, 1] and resized to ``self.image_size``.

        Args:
            image (np.ndarray): BGR image as read by ``cv2.imread``.

        Returns:
            torch.Tensor: batched CHW float32 tensor.
        """
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = cv2.resize(image, (self.image_size[0], self.image_size[1]),
                           interpolation=cv2.INTER_LINEAR)
        image = image.astype(np.float32) / 255.0
        image = np.transpose(image, (2, 0, 1))  # HWC -> CHW
        return torch.from_numpy(image).unsqueeze(0)

    def detect(self, image_path, method='cv', display=True):
        """Run detection on one image file.

        Args:
            image_path (str): path to the input image.
            method (str): 'cv' to load/preprocess with OpenCV, anything else
                to load with PIL and use ``self.transforms``.
            display (bool): if True, draw boxes and show the image (blocks on
                ``cv2.waitKey``).

        Returns:
            np.ndarray: (N, 7) array of detections
            [x1, y1, x2, y2, conf, cls_conf, cls_pred] in model input
            coordinates; empty (0, 7) when nothing is detected.
        """
        if method == 'cv':
            image = cv2.imread(image_path)
            inputs = self.preprocess(image)
        else:
            image = Image.open(image_path, mode='r')
            image = image.convert('RGB')
            inputs = self.transforms(image)
            inputs = inputs.unsqueeze(0)
            # Keep a BGR copy for drawing with OpenCV.
            image = np.array(image)
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

        original_height, original_width = image.shape[:2]

        start = time.time()
        with torch.no_grad():
            outputs = self.model(inputs)

        # Decode each scale's raw output, then concatenate along the box axis.
        output_list = [self.heads[i](outputs[i]) for i in range(3)]
        output = torch.cat(output_list, 1)
        batch_detections = non_max_suppression(output, self.num_classes,
            conf_thres=0.5, nms_thres=0.45)
        detections = batch_detections[0]
        if detections is None:
            # FIX: NMS implementations of this style commonly return None for
            # an image with no surviving boxes; guard so we return an empty
            # array instead of crashing on None.cpu().
            outputs = np.zeros((0, 7), dtype=np.float32)
        else:
            outputs = detections.cpu().numpy()

        end = time.time()
        print('detection time: {:.04f}'.format(end - start))

        if display:
            for x1, y1, x2, y2, conf, cls_conf, cls_pred in outputs:
                # Rescale from model input coordinates back to the original
                # image resolution before drawing.
                x1 = int(x1 / self.image_size[0] * original_width)
                y1 = int(y1 / self.image_size[1] * original_height)
                x2 = int(x2 / self.image_size[0] * original_width)
                y2 = int(y2 / self.image_size[1] * original_height)
                cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
            cv2.imshow('image', image)
            cv2.waitKey(0)
        return outputs


def _main():
    """Demo: run detection on a sample image with the pretrained checkpoint."""
    detector = Yolov3Inference('./checkpoints/yolov3_pretrain.pth',
                               num_classes=80)
    detector.detect('./assets/000015.jpg', method='pil')


if __name__ == '__main__':
    _main()
