# -*- encoding:utf-8 -*-
import os

import cv2
import numpy as np

import torch
from torch.autograd import Variable
import torchvision.transforms as transforms

from backbones.resnet import resnet50

# Restrict this process to GPU index 2 (as enumerated by the CUDA driver).
os.environ['CUDA_VISIBLE_DEVICES']='2'

# The 20 PASCAL VOC object categories, in the index order the network predicts.
VOC_CLASSES = (
    'aeroplane', 'bicycle', 'bird', 'boat',
    'bottle', 'bus', 'car', 'cat', 'chair',
    'cow', 'diningtable', 'dog', 'horse',
    'motorbike', 'person', 'pottedplant',
    'sheep', 'sofa', 'train', 'tvmonitor'
    )

# Per-class drawing colors, indexed by position in VOC_CLASSES (21 entries;
# only the first 20 are reachable via VOC_CLASSES.index). Passed straight to
# cv2 drawing calls, which interpret them as BGR on the BGR image loaded by
# cv2.imread -- presumably chosen to match the VOC palette; verify if exact
# colors matter.
Color = [
            [0, 0, 0],
            [128, 0, 0],
            [0, 128, 0],
            [128, 128, 0],
            [0, 0, 128],
            [128, 0, 128],
            [0, 128, 128],
            [128, 128, 128],
            [64, 0, 0],
            [192, 0, 0],
            [64, 128, 0],
            [192, 128, 0],
            [64, 0, 128],
            [192, 0, 128],
            [64, 128, 128],
            [192, 128, 128],
            [0, 64, 0],
            [128, 64, 0],
            [0, 192, 0],
            [128, 192, 0],
            [0, 64, 128]
         ]


def decoder(pred, conf_thresh, prob_thresh):
    """ Decode tensor into box coordinates, box_conf and cls_label
    Args:
        pred (tensor): tensor to decode sized [1, S, S, 5xB + C], 5=(x,y,w,h,conf)
        conf_thresh (float): objectness threshold. Kept for interface
            compatibility but currently unused -- see NOTE below.
        prob_thresh (float): minimum class score for a candidate box to be kept.
    Returns:
        boxes: (tensor) [[x1, y1, x2, y2]_obj1, ...] Normalized from 0.0 to 1.0. image width/height, sized [n_boxes, 4]
        labels: (tensor) class index of the most likely class per box, sized [n_boxes, ]
        confidences: (tensor) objectness confidences for each detected box, sized [n_boxes, ]
        cls_scores: (tensor) scores for most likely cls for each detected box, sized [n_boxes, ]
    """
    S, B, C = 7, 2, 20
    boxes, labels, confidences, cls_scores = [], [], [], []

    cell_size = 1.0 / float(S)
    pred = pred.data
    pred = pred.squeeze(0)  # 7x7x30

    # NOTE(review): a conf_mask (conf > conf_thresh) used to be built here but
    # was never applied, so it has been removed as dead code; boxes are
    # filtered on class score alone. Many YOLOv1 implementations threshold on
    # conf * cls_score instead -- confirm against training code before changing.

    # TBM, further optimization may be possible by replacing
    # the following for-loops with tensor operations.
    for i in range(S):
        for j in range(S):
            # Most likely class for cell (row j, column i).
            cls_score, cls_label = torch.max(pred[j, i, 5*B:], 0)
            for b in range(B):
                conf = pred[j, i, 5*b + 4]
                prob = cls_score
                if float(prob) < prob_thresh:
                    continue

                # compute box corner (x1, y1, x2, y2) from tensor
                box = pred[j, i, 5*b:5*b+4]
                # cell left_top corner. normalized from 0.0 to 1.0
                x0y0_normalized = torch.FloatTensor([i, j]) * cell_size
                # box center. normalized from 0.0 to 1.0
                xy_normalized = box[:2] * cell_size + x0y0_normalized
                wh_normalized = box[2:]
                box_xyxy = torch.FloatTensor(4)
                box_xyxy[:2] = xy_normalized - 0.5 * wh_normalized
                box_xyxy[2:] = xy_normalized + 0.5 * wh_normalized

                # Append result to the lists
                boxes.append(box_xyxy)
                labels.append(cls_label)
                confidences.append(conf)
                cls_scores.append(cls_score)

    # BUG FIX: this block was indented inside the outer loop, so the function
    # returned after scanning only grid column i == 0 and missed every
    # detection elsewhere. It now runs once, after the full S x S grid.
    # (Debug print statements were also removed from the loop body.)
    if len(boxes) > 0:
        boxes = torch.stack(boxes, 0)  # [n_boxes, 4]
        labels = torch.stack(labels, 0)  # [n_boxes, ]
        confidences = torch.stack(confidences, 0)  # [n_boxes, ]
        cls_scores = torch.stack(cls_scores, 0)  # [n_boxes, ]
    else:
        # If no box found, return empty tensors.
        boxes = torch.FloatTensor(0, 4)
        labels = torch.LongTensor(0)
        confidences = torch.FloatTensor(0)
        cls_scores = torch.FloatTensor(0)

    return boxes, labels, confidences, cls_scores



def nms(bboxes, scores, threshold=0.5):
    """Greedy non-maximum suppression.

    :param bboxes: (tensor) [N, 4] boxes as (x1, y1, x2, y2)
    :param scores: (tensor) [N,] score per box
    :param threshold: iou_thresh above which a lower-scored box is suppressed
    :return: retain: (tensor) [n,] indices of kept boxes, by descending score
    """
    x1 = bboxes[:, 0]
    y1 = bboxes[:, 1]
    x2 = bboxes[:, 2]
    y2 = bboxes[:, 3]
    boxes_area = (x2 - x1) * (y2 - y1)

    _, order = scores.sort(0, descending=True)
    retain = []
    while order.numel() > 0:
        idx = order[0]
        retain.append(int(idx))  # store plain ints so torch.LongTensor(retain) is safe

        if 1 == order.numel():
            break

        # Intersection rectangle of the current best box with each remaining box.
        overlap_x_lt = x1[order[1:]].clamp(min=x1[idx])
        overlap_y_lt = y1[order[1:]].clamp(min=y1[idx])
        overlap_x_rb = x2[order[1:]].clamp(max=x2[idx])
        overlap_y_rb = y2[order[1:]].clamp(max=y2[idx])

        overlap_w = (overlap_x_rb - overlap_x_lt).clamp(min=0)
        overlap_h = (overlap_y_rb - overlap_y_lt).clamp(min=0)

        overlap_area = overlap_w * overlap_h

        # IoU of the best box against every remaining candidate.
        overlap_ratio = overlap_area / (boxes_area[idx] + boxes_area[order[1:]] - overlap_area)

        # BUG FIX: .squeeze() collapsed this to a 0-dim tensor whenever exactly
        # one box survived, making order[0] raise IndexError on the next
        # iteration; .view(-1) always yields a 1-D index tensor.
        remain_ids = (overlap_ratio <= threshold).nonzero().view(-1)

        if 0 == remain_ids.numel():
            break

        # remain_ids indexes order[1:], hence the +1 shift.
        order = order[remain_ids + 1]

    return torch.LongTensor(retain)


def detect_gpu(model, img_name, root_path):
    """Run the detector on one image and return detections in original-image pixels.

    Args:
        model: YOLOv1 network, expected to already be on the GPU and in eval mode.
        img_name (str): image file name; echoed into each result entry.
        root_path (str): directory containing the image.

    Returns:
        list: one entry per detection:
            [(x1, y1), (x2, y2), class_name, img_name, prob]

    Raises:
        FileNotFoundError: if the image cannot be read.
    """
    result = []
    # os.path.join works with or without a trailing slash on root_path.
    img_path = os.path.join(root_path, img_name)
    img = cv2.imread(img_path)
    if img is None:
        # cv2.imread returns None instead of raising; fail loudly and early.
        raise FileNotFoundError('cannot read image: %s' % img_path)
    h, w, _ = img.shape
    img = cv2.resize(img, (224, 224))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    mean = (123, 117, 104)  # RGB; presumably the training-set channel means -- TODO confirm
    img = img - np.array(mean, dtype=np.float32)

    transform = transforms.Compose([transforms.ToTensor(), ])
    img = transform(img)
    # Variable() is deprecated; plain tensors work directly under no_grad.
    img = img.unsqueeze(0).cuda()
    with torch.no_grad():
        pred = model(img)
    pred = pred.cpu()
    boxes, cls_indexs, confs, probs = decoder(pred, 0.1, 0.1)

    # Scale normalized box corners back to the original image resolution.
    for i, box in enumerate(boxes):
        x_1 = int(box[0] * w)
        x_2 = int(box[2] * w)
        y_1 = int(box[1] * h)
        y_2 = int(box[3] * h)
        cls_index = cls_indexs[i]
        cls_index = int(cls_index)  # convert LongTensor to int
        prob = float(probs[i])
        result.append([(x_1, y_1), (x_2, y_2), VOC_CLASSES[cls_index], img_name, prob])
    return result



if __name__ == '__main__':
    # Load the trained backbone, run detection on one image and draw the
    # results to detect_result.jpg.
    model = resnet50()
    print("==== load model ====")
    model.load_state_dict(torch.load('yolov1_0712.pth'))
    model.eval()
    model.cuda()
    img_name = 'dog.jpg'
    img = cv2.imread(img_name)
    detect_result = detect_gpu(model, img_name, './')
    for lt, rb, cls_name, _, prob in detect_result:
        color = Color[VOC_CLASSES.index(cls_name)]
        cv2.rectangle(img, lt, rb, color, 2)
        label = cls_name + str(round(prob, 2))
        text_size, baseline = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.4, 1)
        # BUG FIX: the label's y position used lt[0] (the x coordinate);
        # it must be lt[1] so the text sits just above the box's top edge.
        pos = (lt[0], lt[1] - text_size[1])
        # Filled background rectangle behind the label text.
        cv2.rectangle(img, (pos[0] - 1, pos[1] - 2 - baseline),
                      (pos[0] + text_size[0], pos[1] + text_size[1]), color, -1)
        cv2.putText(img, label, (pos[0], pos[1] + baseline),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1, 8)

    cv2.imwrite('detect_result.jpg', img)