import sys
sys.path.append("../acllite")
import os
import numpy as np
import acl
import cv2 as cv
import cv2
from PIL import Image
import constants as const
from acllite_model import AclLiteModel
from acllite_resource import AclLiteResource
# from collections import defaultdict
import math
import random


# COCO class names, indexed by the class id produced by the model (80 classes).
labels =["person",  "bicycle", "car", "motorbike", "aeroplane",
        "bus", "train", "truck", "boat", "traffic light",
        "fire hydrant", "stop sign", "parking meter", "bench","bird", 
        "cat", "dog", "horse", "sheep", "cow", 
        "elephant",  "bear", "zebra", "giraffe", "backpack",
        "umbrella", "handbag",  "tie", "suitcase", "frisbee",
        "skis", "snowboard", "sports ball",  "kite", "baseball bat",
        "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle",
        "wine glass", "cup", "fork", "knife", "spoon",
        "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog",
        "pizza", "donut", "cake", "chair", "sofa", "potted plant", "bed", "dining table",
        "toilet", "TV monitor", "laptop", "mouse", "remote", "keyboard", "cell phone",
        "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase",
        "scissors", "teddy bear", "hair drier", "toothbrush"]

# Square model input resolution; detections are scaled back to the original
# image size in main().
img_size = 640
# Minimum confidence for a detection to be kept (used by detect()).
conf_thres = 0.2
# IoU threshold for non-maximum suppression (used by detect()).
iou_thres = 0.3


def detect(prediction, conf_thres=0.25, iou_thres=0.45):
    """Filter raw YOLO output by confidence and run per-class NMS.

    Args:
        prediction: array of shape (batch, num_boxes, 5 + num_classes) whose
            rows are [cx, cy, w, h, objectness, cls0, cls1, ...].
        conf_thres: minimum objectness / class confidence to keep a box.
        iou_thres: IoU threshold used by NMS.

    Returns:
        A list of length ``batch``; each entry is an (n, 6) array of
        detections [x1, y1, x2, y2, conf, cls] (possibly empty).
    """
    nc = prediction.shape[2] - 5          # number of classes
    xc = prediction[..., 4] > conf_thres  # objectness candidates per box
    max_wh = 4096    # max box width/height (pixels); also the class offset for batched NMS
    max_det = 300    # maximum number of detections kept per image
    max_nms = 30000  # maximum number of boxes fed into NMS

    output = [np.zeros((0, 6))] * prediction.shape[0]
    for xi, x in enumerate(prediction):  # image index, image inference
        x = x[xc[xi]]  # keep rows passing the objectness threshold

        # If none remain, process next image.
        if not x.shape[0]:
            continue

        # Scale class scores by objectness.
        if nc == 1:
            x[:, 5:5+nc] = x[:, 4:5]  # single class: no multiplication needed
        else:
            x[:, 5:5+nc] *= x[:, 4:5]  # conf = obj_conf * cls_conf

        # Box (center x, center y, width, height) to (x1, y1, x2, y2).
        box = xywh2xyxy(x[:, :4])

        # Detections matrix nx6 (xyxy, conf, cls).
        i, j = (x[:, 5:5+nc] > conf_thres).nonzero()
        x = np.concatenate((box[i], x[i, j + 5, None], j[:, None].astype(np.float32)), 1)

        # Check shape.
        n = x.shape[0]  # number of boxes
        if not n:  # no boxes
            continue
        elif n > max_nms:  # excess boxes: keep the max_nms highest-confidence ones
            x = x[x[:, 4].argsort()[-max_nms:]]

        # Batched NMS: offset boxes by class * max_wh so boxes of different
        # classes never overlap, making one NMS call behave per-class.
        c = x[:, 5:6] * max_wh
        boxes, scores = x[:, :4] + c, x[:, 4]

        i = nms(boxes, scores, iou_thres)

        if i.shape[0] > max_det:  # limit detections
            i = i[:max_det]
        output[xi] = x[i]
    return output


def nms(xyxys, scores, threshold):
    """Greedy non-maximum suppression.

    Args:
        xyxys: (n, 4) array of boxes as [x1, y1, x2, y2].
        scores: (n,) array of confidence scores.
        threshold: a box whose IoU with an already-kept box exceeds this
            value is suppressed.

    Returns:
        1-D array of indices of the kept boxes, highest score first.
    """
    x1 = xyxys[:, 0]
    y1 = xyxys[:, 1]
    x2 = xyxys[:, 2]
    y2 = xyxys[:, 3]
    areas = (x2 - x1) * (y2 - y1)
    order = scores.argsort()[::-1]  # candidate indices, descending score
    reserved_boxes = []
    while order.size > 0:
        i = order[0]  # highest-scoring remaining box is always kept
        reserved_boxes.append(i)
        # Intersection rectangle of box i with every other remaining box.
        max_x1 = np.maximum(x1[i], x1[order[1:]])
        max_y1 = np.maximum(y1[i], y1[order[1:]])
        min_x2 = np.minimum(x2[i], x2[order[1:]])
        min_y2 = np.minimum(y2[i], y2[order[1:]])

        intersect_w = np.maximum(0.0, min_x2 - max_x1)
        intersect_h = np.maximum(0.0, min_y2 - max_y1)
        intersect_area = intersect_w * intersect_h

        # Epsilon guards against division by zero for degenerate boxes.
        ovr = intersect_area / (areas[i] + areas[order[1:]] - intersect_area + 1e-6)
        # Keep only boxes whose overlap with box i is at or below threshold.
        indexes = np.where(ovr <= threshold)[0]
        order = order[indexes + 1]
    return np.array(reserved_boxes)


def box_iou(box1, box2):
    """Pairwise IoU between two sets of boxes in xyxy format.

    Args:
        box1: (N, 4) array of boxes [x1, y1, x2, y2].
        box2: (M, 4) array of boxes [x1, y1, x2, y2].

    Returns:
        (N, M) array where entry [i, j] is IoU(box1[i], box2[j]).
    """
    wh1 = box1[:, 2:4] - box1[:, 0:2]
    wh2 = box2[:, 2:4] - box2[:, 0:2]
    area1 = wh1[:, 0] * wh1[:, 1]
    area2 = wh2[:, 0] * wh2[:, 1]

    # Overlap rectangle per pair: max of top-left corners, min of
    # bottom-right corners; negative extents clip to zero area.
    top_left = np.maximum(box1[:, None, :2], box2[:, :2])
    bottom_right = np.minimum(box1[:, None, 2:], box2[:, 2:])
    inter = (bottom_right - top_left).clip(0, None).prod(2)

    # iou = inter / (area1 + area2 - inter), broadcast to (N, M).
    return inter / (area1[:, None] + area2 - inter)

def xyxy2xywh(x):
    """Convert nx4 boxes from [x1, y1, x2, y2] (corners) to [cx, cy, w, h]."""
    y = np.copy(x)  # preserve the input; only columns 0-3 are rewritten
    x1, y1, x2, y2 = x[:, 0], x[:, 1], x[:, 2], x[:, 3]
    y[:, 0] = (x1 + x2) / 2  # x center
    y[:, 1] = (y1 + y2) / 2  # y center
    y[:, 2] = x2 - x1        # width
    y[:, 3] = y2 - y1        # height
    return y

def xywh2xyxy(x):
    """Convert nx4 boxes from [cx, cy, w, h] to corner form [x1, y1, x2, y2]."""
    y = np.copy(x)  # preserve the input; only columns 0-3 are rewritten
    half_w = x[:, 2] / 2
    half_h = x[:, 3] / 2
    y[:, 0] = x[:, 0] - half_w  # top-left x
    y[:, 1] = x[:, 1] - half_h  # top-left y
    y[:, 2] = x[:, 0] + half_w  # bottom-right x
    y[:, 3] = x[:, 1] + half_h  # bottom-right y
    return y

def main():
    """Run YOLOv7 inference on a test image and write an annotated copy.

    Reads ../data/test.jpg, resizes it to the model input size, runs the
    Ascend .om model, post-processes the output with detect()/NMS, then
    draws labelled boxes scaled back to the original resolution and saves
    the result to ../out/out.jpg.
    """
    image_path = "../data/test.jpg"
    img = cv2.imread(image_path)
    ori_img = img.copy()  # keep the original-resolution image for drawing

    h_ori, w_ori = img.shape[:2]
    img = cv2.resize(img, (img_size, img_size), interpolation=cv2.INTER_AREA)

    # HWC uint8 -> CHW float32 in [0, 1].
    n_img = img.astype(np.float32).copy()
    n_img = n_img.transpose(2, 0, 1) / 255.0

    acl_resource = AclLiteResource()
    acl_resource.init()
    model = AclLiteModel("../model/yolov7.om")

    # NOTE(review): n_img is CHW with no batch dimension; assumes the .om
    # model accepts that layout -- confirm against the model's input spec.
    result = model.execute([n_img,])
    out = detect(result[0], conf_thres=conf_thres, iou_thres=iou_thres)

    total_category_ids, total_bboxes, total_scores = [], [], []

    for si, pred in enumerate(out):
        if len(pred) == 0:
            continue
        predn = np.copy(pred)
        box = xyxy2xywh(predn[:, :4])  # xyxy -> center-based xywh
        box[:, :2] -= box[:, 2:] / 2   # xy center to top-left corner
        for p, b in zip(pred.tolist(), box.tolist()):
            total_category_ids.append(int(p[5]))
            total_bboxes.append([round(v, 3) for v in b])
            total_scores.append(round(p[4], 5))

    # Draw the post-processed results; this is the hand-off point for
    # downstream systems (the detections could be emitted as JSON instead).
    for i in range(len(total_bboxes)):
        x_l, y_t, w, h = total_bboxes[i][:]
        # Scale from model input size back to the original image resolution.
        x_l, w = x_l * w_ori / img_size, w * w_ori / img_size
        y_t, h = y_t * h_ori / img_size, h * h_ori / img_size

        x_r, y_b = x_l + w, y_t + h
        x_l, y_t, x_r, y_b = int(x_l), int(y_t), int(x_r), int(y_b)
        _color = [random.randint(0, 255) for _ in range(3)]
        cv2.rectangle(ori_img, (x_l, y_t), (x_r, y_b), tuple(_color), 2)

        class_name = labels[total_category_ids[i]]
        text = f"{class_name}: {total_scores[i]}"
        (text_w, text_h), baseline = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 2)
        # Filled background rectangle keeps the label readable on any image.
        cv2.rectangle(ori_img, (x_l, y_t - text_h - baseline), (x_l + text_w, y_t), tuple(_color), -1)
        cv2.putText(ori_img, text, (x_l, y_t - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2)

    # Create the output directory if it does not exist yet, so imwrite
    # does not silently fail.
    os.makedirs("../out", exist_ok=True)
    cv2.imwrite("../out/" + "out.jpg", ori_img)

if __name__ == '__main__':
    main()