# -*- coding: utf-8 -*-
"""
@author  : 秦丹峰
@contact : 1083610012@qq.com
@license : (C) Copyright, 广州海格星航信息科技有限公司
@time    : 2023/4/6 19:55
@file    : onnx_infer.py
@desc    :
"""
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# NOTE(review): the two lines above are inert here — a shebang only takes
# effect on the first line of the file, and the encoding declaration is
# already given on line 1.
import argparse
import os
import sys
import os.path as osp
import cv2
import torch
import numpy as np
import onnxruntime as ort
from math import exp
import torchvision.transforms as transforms
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from glob import glob
import json
import copy
import io
import contextlib
import itertools
from tabulate import tabulate
from tqdm import tqdm

# Make the current working directory importable so project-local modules
# resolve when this file is run as a script.
ROOT = os.getcwd()
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))


# Class names in model output order; a box's classId indexes into this list.
CLASSES = ['face', 'hand', 'cigarette', 'cellphone']

# Flattened [x0, y0, x1, y1, ...] anchor-center grid; filled by GenerateMeshgrid().
meshgrid = []

class_num = len(CLASSES)
headNum = 3                   # number of detection heads
strides = [8, 16, 32]         # downsample stride of each head
# mapSize = [[80, 80], [40, 40], [20, 20]]  # [h, w]
mapSize = [[24, 40], [12, 20], [6, 10]]  # [h, w]
nmsThresh = 0.6               # IoU threshold for non-maximum suppression
objectThresh = 0.05           # minimum class confidence to keep a box

# Network input resolution (height x width).
# input_imgH = 640
# input_imgW = 640
input_imgH = 192
input_imgW = 320


class DetectBox:
    """Plain container for a single detection: class index, confidence
    score, and corner coordinates (xmin, ymin, xmax, ymax)."""

    def __init__(self, classId, score, xmin, ymin, xmax, ymax):
        # Attributes mirror the constructor parameters one-to-one.
        (self.classId, self.score,
         self.xmin, self.ymin,
         self.xmax, self.ymax) = (classId, score, xmin, ymin, xmax, ymax)


def GenerateMeshgrid():
    """Fill the module-level ``meshgrid`` with anchor-center coordinates.

    Entries are interleaved as [x0, y0, x1, y1, ...] over every cell of every
    detection head, with a +0.5 offset so each entry is the cell center, in
    the same order ``postprocess`` walks the heads.
    """
    # Rebuild from scratch: previously this function only appended, so
    # calling it more than once (e.g. constructing several ONNXDetector
    # instances) duplicated the grid and corrupted box decoding.
    meshgrid.clear()
    for index in range(headNum):
        for i in range(mapSize[index][0]):
            for j in range(mapSize[index][1]):
                meshgrid.append(j + 0.5)
                meshgrid.append(i + 0.5)


def IOU(xmin1, ymin1, xmax1, ymax1, xmin2, ymin2, xmax2, ymax2):
    """Return the intersection-over-union of two axis-aligned boxes.

    Each box is given as (xmin, ymin, xmax, ymax). Returns a float in
    [0, 1]; degenerate (zero-area) box pairs yield 0 instead of raising
    ZeroDivisionError.
    """
    xmin = max(xmin1, xmin2)
    ymin = max(ymin1, ymin2)
    xmax = min(xmax1, xmax2)
    ymax = min(ymax1, ymax2)

    # Clamp to zero when the boxes do not overlap.
    innerWidth = xmax - xmin
    innerHeight = ymax - ymin

    innerWidth = innerWidth if innerWidth > 0 else 0
    innerHeight = innerHeight if innerHeight > 0 else 0

    innerArea = innerWidth * innerHeight

    area1 = (xmax1 - xmin1) * (ymax1 - ymin1)
    area2 = (xmax2 - xmin2) * (ymax2 - ymin2)

    total = area1 + area2 - innerArea

    # Guard: two zero-area boxes would otherwise divide by zero.
    if total <= 0:
        return 0

    return innerArea / total


def NMS(detectResult):
    """Greedy per-class non-maximum suppression.

    Boxes are visited in descending score order; any lower-scored box of the
    same class whose IoU with a kept box exceeds ``nmsThresh`` is suppressed
    by setting its ``classId`` to -1 (note: this mutates the input boxes).
    Returns the list of kept boxes.
    """
    ranked = sorted(detectResult, key=lambda b: b.score, reverse=True)
    kept = []

    for idx, box in enumerate(ranked):
        if box.classId == -1:
            continue  # already suppressed by a higher-scored box
        kept.append(box)
        for other in ranked[idx + 1:]:
            if other.classId != box.classId:
                continue
            overlap = IOU(box.xmin, box.ymin, box.xmax, box.ymax,
                          other.xmin, other.ymin, other.xmax, other.ymax)
            if overlap > nmsThresh:
                other.classId = -1

    return kept


def sigmoid(x):
    """Numerically stable logistic function 1 / (1 + e^-x).

    The naive form overflows in ``exp`` for x below roughly -709; branching
    on the sign keeps the exponent non-positive, so ``exp`` never overflows.
    """
    if x >= 0:
        return 1 / (1 + exp(-x))
    z = exp(x)  # x < 0 here, so exp(x) is in (0, 1) and cannot overflow
    return z / (1 + z)


def postprocess(out, img_h, img_w):
    """Decode raw head outputs into final detections.

    :param out: list of 6 arrays, alternating (reg, cls) per detection head.
    :param img_h: original image height; boxes are scaled back to it.
    :param img_w: original image width.
    :return: list of DetectBox after NMS.
    """
    print('postprocess ... ')

    candidates = []
    flat = []
    for tensor in out:
        print(tensor.shape)
        flat.append(tensor.reshape((-1)))

    # Scale factors from network-input space back to the original image.
    scale_h = img_h / input_imgH
    scale_w = img_w / input_imgW

    gridIndex = -2

    for head in range(headNum):
        reg = flat[head * 2]
        cls = flat[head * 2 + 1]
        rows, cols = mapSize[head]
        area = rows * cols
        stride = strides[head]

        for h in range(rows):
            for w in range(cols):
                gridIndex += 2
                cell = h * cols + w
                cx = meshgrid[gridIndex]
                cy = meshgrid[gridIndex + 1]

                for cl in range(class_num):
                    cls_val = sigmoid(cls[cl * area + cell])
                    if cls_val <= objectThresh:
                        continue

                    # reg holds per-cell distances (left, top, right, bottom)
                    # from the anchor center, in grid units.
                    x1 = (cx - reg[0 * area + cell]) * stride
                    y1 = (cy - reg[1 * area + cell]) * stride
                    x2 = (cx + reg[2 * area + cell]) * stride
                    y2 = (cy + reg[3 * area + cell]) * stride

                    # Rescale to the original image and clamp to its bounds.
                    xmin = max(x1 * scale_w, 0)
                    ymin = max(y1 * scale_h, 0)
                    xmax = min(x2 * scale_w, img_w)
                    ymax = min(y2 * scale_h, img_h)

                    box = DetectBox(cl, cls_val, xmin, ymin, xmax, ymax)
                    print(cl, cls_val, xmin, ymin, xmax, ymax)
                    candidates.append(box)
    # NMS
    print('detectResult:', len(candidates))
    return NMS(candidates)


def precess_image(img_src, resize_w, resize_h):
    """Resize to (resize_w, resize_h), convert BGR->RGB, and normalize to
    float32 in [0, 1]."""
    resized = cv2.resize(img_src, (resize_w, resize_h), interpolation=cv2.INTER_LINEAR)
    rgb = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
    return rgb.astype(np.float32) / 255


def get_resize_matrix(raw_shape, dst_shape, keep_ratio):
    """
    Build the 3x3 homogeneous matrix mapping raw-image coordinates to
    network-input coordinates.
    :param raw_shape: (width, height) of raw image
    :param dst_shape: (width, height) of input image
    :param keep_ratio: whether keep original ratio
    :return: 3x3 Matrix
    """
    raw_w, raw_h = raw_shape
    dst_w, dst_h = dst_shape

    scale = np.eye(3)
    if not keep_ratio:
        # Independent axis scaling: stretch to fill the destination exactly.
        scale[0, 0] = dst_w / raw_w
        scale[1, 1] = dst_h / raw_h
        return scale

    # Center the raw image at the origin, scale uniformly so it fits inside
    # the destination, then translate to the destination center.
    center = np.eye(3)
    center[0, 2] = -raw_w / 2
    center[1, 2] = -raw_h / 2

    if raw_w / raw_h < dst_w / dst_h:
        ratio = dst_h / raw_h  # height is the limiting dimension
    else:
        ratio = dst_w / raw_w  # width is the limiting dimension
    scale[0, 0] = ratio
    scale[1, 1] = ratio

    shift = np.eye(3)
    shift[0, 2] = 0.5 * dst_w
    shift[1, 2] = 0.5 * dst_h
    return shift @ scale @ center


def to_numpy(tensor):
    """Convert a torch tensor to a numpy array, detaching it from the
    autograd graph first when it tracks gradients."""
    if tensor.requires_grad:
        tensor = tensor.detach()
    return tensor.cpu().numpy()

# MAPE and SMAPE are implemented by hand (not provided by sklearn here).
def MAPE(y_true, y_pred):
    """Mean absolute percentage error, in percent; a small epsilon keeps the
    denominator non-zero."""
    eps = 0.00000001
    return 100 * np.mean(np.abs((y_pred - y_true) / (y_true + eps)))

def SMAPE(y_true, y_pred):
    """Symmetric mean absolute percentage error, in percent.

    Computed as mean(2 * |pred - true| / (|pred| + |true| + eps)) * 100,
    where the small epsilon keeps the denominator non-zero. (Fixes a stray
    doubled '+' before the epsilon — it parsed as a unary plus, so the value
    is unchanged.)
    """
    eps = 0.00000001
    return 2.0 * np.mean(np.abs(y_pred - y_true) / (np.abs(y_pred) + np.abs(y_true) + eps)) * 100


def detect(img_path):
    """Debug entry point: run the ONNX model on one image, compare each raw
    output tensor against reference dumps in ./runs/*.txt (MSE/MAPE/SMAPE),
    then decode, NMS, and draw boxes to ./test_onnx_result.jpg.

    NOTE(review): requires GenerateMeshgrid() to have been called first,
    otherwise postprocess will index into an empty meshgrid — confirm the
    caller does this (it is commented out in __main__).
    """
    # sklearn is only needed here, so the import is function-local.
    from sklearn.metrics import mean_squared_error
    img = cv2.imread(img_path)
    ori_img = img.copy()
    img_height, img_width, _ = img.shape
    # Aspect-ratio-preserving letterbox into the 320x192 network input,
    # applied via a perspective warp.
    ResizeM = get_resize_matrix((img_width, img_height), (320,192), True)
    img = cv2.warpPerspective(img, ResizeM, dsize=(320,192))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    print("ResizeM=", ResizeM)
    # cv2.imwrite("input.jpg", img)
    to_tensor = transforms.ToTensor()
    img = to_tensor(img)
    img = img.unsqueeze_(0)  # add batch dimension: (1, 3, 192, 320)
    image = to_numpy(img)

    ort_session = ort.InferenceSession('./weights/yolov8n_ZQ.onnx')
    pred_results = (ort_session.run(None, {'data': image}))

    # Compare each raw output against pre-dumped reference feature files.
    out_txt = ["boxes1", "cls1", "boxes2", "cls2", "boxes3", "cls3"]
    for i, res in enumerate(out_txt):
        with open(f"./runs/{res}.txt") as f:
            fea = f.read().splitlines()
            fea = [float(i) for i in fea]
            fea = np.array(fea)
            # print(fea)
            error = mean_squared_error(pred_results[i].flatten(), fea)
            mape = MAPE(pred_results[i].flatten(), fea)
            smape = SMAPE(pred_results[i].flatten(), fea)
            print(f"out{i} shape:{pred_results[i].shape}\t->\terror:{error:3f}\tmape:{mape:3f}\tsmape:{smape:3f}")

    out = []
    for i in range(len(pred_results)):
        out.append(pred_results[i])
    # NOTE(review): postprocess assumes a plain (non-letterboxed) resize when
    # mapping boxes back to the original image, but the input above was
    # letterboxed via get_resize_matrix — box coordinates may be offset when
    # the aspect ratios differ. Verify against ONNXDetector.post, which does
    # account for the padding.
    predbox = postprocess(out, img_height, img_width)

    print('obj num is :', len(predbox))

    # Draw each kept detection and its label on the original image.
    for i in range(len(predbox)):
        xmin = int(predbox[i].xmin)
        ymin = int(predbox[i].ymin)
        xmax = int(predbox[i].xmax)
        ymax = int(predbox[i].ymax)
        classId = predbox[i].classId
        score = predbox[i].score

        cv2.rectangle(ori_img, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)
        ptext = (xmin, ymin)
        title = CLASSES[classId] + "%.2f" % score
        cv2.putText(ori_img, title, ptext, cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2, cv2.LINE_AA)

    cv2.imwrite('./test_onnx_result.jpg', ori_img)


def get_effect(src_w, src_h, dst_w, dst_h, is_pad):
    """Compute the letterbox rectangle: where the aspect-preserved source
    image sits inside the dst_w x dst_h canvas.

    Returns a dict with keys "x", "y", "w", "h", "isPad". When ``is_pad`` is
    False, or the aspect ratios already match, the rect covers the whole
    destination and isPad stays False. Padded side lengths are rounded down
    to an even number.
    """
    rect = {"x": 0, "y": 0, "w": dst_w, "h": dst_h, "isPad": False}
    src_ratio = float(src_w) / float(src_h)
    dst_ratio = float(dst_w) / float(dst_h)

    if not is_pad or src_ratio == dst_ratio:
        return rect

    if src_ratio > dst_ratio:
        # Source is wider: keep full width, center vertically (pad top/bottom).
        fitted_h = int(float(dst_w) / float(src_w) * src_h)
        fitted_h -= fitted_h % 2  # keep the height even
        rect.update(w=dst_w, h=fitted_h, x=0, y=int((dst_h - fitted_h) / 2.0), isPad=True)
    else:
        # Source is taller: keep full height, center horizontally (pad sides).
        fitted_w = int(float(dst_h) / float(src_h) * src_w)
        fitted_w -= fitted_w % 2  # keep the width even
        rect.update(h=dst_h, w=fitted_w, y=0, x=int((dst_w - fitted_w) / 2.0), isPad=True)

    return rect

def get_mAP(pre_anno_path, gt_anno_path, classes=('face', 'hand', 'cigarette', 'cellphone')):
    """Run COCO bbox evaluation of predictions against ground truth, print
    the standard COCO summary and a per-class AP50 / mAP table.

    :param pre_anno_path: path to a COCO-format detection results JSON.
    :param gt_anno_path: path to the ground-truth COCO annotation JSON.
    :param classes: class names in category-id order.
    """
    # Build the ground-truth and detection datasets and evaluate bboxes.
    gt = COCO(gt_anno_path)
    dets = gt.loadRes(pre_anno_path)
    evaluator = COCOeval(copy.deepcopy(gt), copy.deepcopy(dets), "bbox")
    evaluator.evaluate()
    evaluator.accumulate()

    # Capture the COCO summary text instead of letting it hit stdout directly.
    summary_buf = io.StringIO()
    with contextlib.redirect_stdout(summary_buf):
        evaluator.summarize()
    print("\n" + summary_buf.getvalue())

    # Per-class AP table.
    headers = ["class", "AP50", "mAP"]
    max_columns = 6
    precisions = evaluator.eval["precision"]
    # precision dims: (iou, recall, cls, area range, max dets)
    assert len(classes) == precisions.shape[2]

    ap50_list = []
    map_list = []
    for idx, name in enumerate(classes):
        # IoU=0.5 slice, all-area range (index 0), largest max-dets (-1).
        p50 = precisions[0, :, idx, 0, -1]
        p50 = p50[p50 > -1]
        ap50 = np.mean(p50) if p50.size else float("nan")
        ap50_list.append(float(ap50 * 100))

        # Averaged over all IoU thresholds for the COCO-style mAP.
        p_all = precisions[:, :, idx, 0, -1]
        p_all = p_all[p_all > -1]
        ap = np.mean(p_all) if p_all.size else float("nan")
        map_list.append(float(ap * 100))

    # Flatten (name, ap50, map) triples, then fold into rows of num_cols.
    num_cols = min(max_columns, len(classes) * len(headers))
    cells = []
    for name, ap50, ap in zip(classes, ap50_list, map_list):
        cells += [name, ap50, ap]

    rows = itertools.zip_longest(
        *[cells[i::num_cols] for i in range(num_cols)]
    )
    table = tabulate(
        rows,
        tablefmt="pipe",
        floatfmt=".1f",
        headers=headers * (num_cols // len(headers)),
        numalign="left",
    )
    print("\n" + table)

class ONNXDetector():
    """YOLOv8-style detector backed by an ONNX Runtime session.

    Pipeline: letterbox the input to ``input_shape`` (width, height), run the
    model, decode the per-head (reg, cls) outputs, apply NMS, and draw the
    surviving boxes on the original image.
    """

    def __init__(self, model_path='./runs/detect/train/weights/best_dict.onnx', input_shape=(320, 192)):
        self.ort_session = ort.InferenceSession(model_path)
        self.input_shape = input_shape  # (width, height) fed to the network
        # Populate the shared anchor grid only once: GenerateMeshgrid appends
        # to the module-level list, so calling it on every instantiation
        # would duplicate entries and corrupt box decoding in post().
        if not meshgrid:
            GenerateMeshgrid()

    def __call__(self, img, is_path=False):
        """Run the full detect pipeline; returns (boxes, annotated image)."""
        ori_img, eff_rect, pred_results = self.infer(img, is_path)
        predBox, ori_img = self.post(ori_img, eff_rect, pred_results)
        return predBox, ori_img

    def infer(self, img, is_path=False):
        """Preprocess (letterbox, BGR->RGB, ToTensor) and run the model.

        :param img: BGR image array, or a file path when ``is_path`` is True.
        :return: (original image, effective-rect dict, raw model outputs)
        """
        if is_path:
            img = cv2.imread(img)

        ori_img = img.copy()
        img_height, img_width, _ = img.shape
        eff_rect = get_effect(img_width, img_height, self.input_shape[0], self.input_shape[1], True)
        img = cv2.resize(img, (eff_rect["w"], eff_rect["h"]))
        # Pad symmetrically up to the full network input size.
        img = cv2.copyMakeBorder(img, eff_rect["y"], eff_rect["y"], eff_rect["x"], eff_rect["x"], cv2.BORDER_CONSTANT, value=(0, 0, 0))

        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        cv2.imwrite("input.jpg", img)  # debug dump of the exact network input
        to_tensor = transforms.ToTensor()
        img = to_tensor(img)
        img = img.unsqueeze_(0)  # add batch dimension
        image = to_numpy(img)
        # print(image.shape)

        pred_results = (self.ort_session.run(None, {'data': image}))

        return ori_img, eff_rect, pred_results

    def post(self, ori_img, eff_rect, out):
        """Decode raw head outputs, undo the letterbox transform, run NMS,
        and draw the boxes on ``ori_img``.

        :param ori_img: original (unletterboxed) BGR image; drawn on in place.
        :param eff_rect: letterbox rect from get_effect (keys x, y, w, h).
        :param out: list of 6 arrays alternating (reg, cls) per head.
        :return: (kept boxes, annotated image)
        """
        detectResult = []
        output = []
        for i in range(len(out)):
            output.append(out[i].reshape((-1)))

        img_h, img_w, _ = ori_img.shape
        # Scale from the effective (unpadded) region back to the original.
        scale_h = img_h / eff_rect["h"]
        scale_w = img_w / eff_rect["w"]

        gridIndex = -2

        for index in range(headNum):
            reg = output[index * 2 + 0]
            cls = output[index * 2 + 1]

            for h in range(mapSize[index][0]):
                for w in range(mapSize[index][1]):
                    gridIndex += 2

                    for cl in range(class_num):

                        cls_val = sigmoid(cls[cl * mapSize[index][0] * mapSize[index][1] + h * mapSize[index][1] + w])
                        if cls_val > objectThresh:
                            # reg holds (left, top, right, bottom) distances
                            # from the anchor center, in grid units.
                            x1 = (meshgrid[gridIndex + 0] - reg[
                                0 * mapSize[index][0] * mapSize[index][1] + h * mapSize[index][1] + w]) * strides[index]
                            y1 = (meshgrid[gridIndex + 1] - reg[
                                1 * mapSize[index][0] * mapSize[index][1] + h * mapSize[index][1] + w]) * strides[index]
                            x2 = (meshgrid[gridIndex + 0] + reg[
                                2 * mapSize[index][0] * mapSize[index][1] + h * mapSize[index][1] + w]) * strides[index]
                            y2 = (meshgrid[gridIndex + 1] + reg[
                                3 * mapSize[index][0] * mapSize[index][1] + h * mapSize[index][1] + w]) * strides[index]

                            # Remove the letterbox offset, then rescale to the
                            # original image and clamp to its bounds.
                            xmin = (x1 - eff_rect["x"]) * scale_w
                            ymin = (y1 - eff_rect["y"]) * scale_h
                            xmax = (x2 - eff_rect["x"]) * scale_w
                            ymax = (y2 - eff_rect["y"]) * scale_h

                            xmin = xmin if xmin > 0 else 0
                            ymin = ymin if ymin > 0 else 0
                            xmax = xmax if xmax < img_w else img_w
                            ymax = ymax if ymax < img_h else img_h

                            box = DetectBox(cl, cls_val, xmin, ymin, xmax, ymax)
                            detectResult.append(box)
        # NMS
        predbox = NMS(detectResult)

        # Draw each kept detection and its label on the original image.
        for i in range(len(predbox)):
            xmin = int(predbox[i].xmin)
            ymin = int(predbox[i].ymin)
            xmax = int(predbox[i].xmax)
            ymax = int(predbox[i].ymax)
            classId = predbox[i].classId
            score = predbox[i].score

            cv2.rectangle(ori_img, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)
            ptext = (xmin, ymin)
            title = CLASSES[classId] + "%.2f" % score
            cv2.putText(ori_img, title, ptext, cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2, cv2.LINE_AA)

        return predbox, ori_img

    def eval(self):
        """Run the detector over a COCO-format validation set, dump the
        predictions as a COCO results JSON, and print mAP via get_mAP.

        NOTE(review): the dataset and output paths below are hard-coded to a
        specific machine — adjust before running elsewhere.
        """
        imgs_dir = '/home/chenpengfei/dataset/DSM_Dataset_class4_20220211_fukang/val/image'
        json_dir = '/home/chenpengfei/dataset/DSM_Dataset_class4_20220211_fukang/val/val.json'
        pred_dir = "./runs/detect/train/cocoval/pred_result_onnx_danfeng.json"

        annotation_id = 0
        anno = []
        with open(json_dir, 'r') as f:
            new_dict = json.load(f)
            print(new_dict.keys())
            for tt in tqdm(new_dict['images']):
                filename = tt['file_name']
                image_id = tt['id']
                img_dir = os.path.join(imgs_dir, filename)
                img = cv2.imread(img_dir)
                ori_img, eff_rect, pred_results = self.infer(img)
                predBox, ori_img = self.post(ori_img, eff_rect, pred_results)
                for i in range(len(predBox)):
                    xmin = int(predBox[i].xmin)
                    ymin = int(predBox[i].ymin)
                    xmax = int(predBox[i].xmax)
                    ymax = int(predBox[i].ymax)
                    classId = predBox[i].classId
                    score = predBox[i].score
                    # COCO uses 1-based category ids and xywh boxes.
                    annotation_info = {'image_id': image_id,
                                       'category_id': classId + 1,
                                       'bbox': [xmin, ymin, xmax - xmin, ymax - ymin],
                                       'score': score,
                                       'iscrowd': 0
                                       }
                    anno.append(annotation_info)
                    annotation_id += 1
        with open(pred_dir, 'w', encoding='utf-8') as f:
            json.dump(anno, f, indent=4)

        get_mAP(pred_dir, json_dir)


if __name__ == '__main__':
    # Single-image debug run, kept for reference:
    # img_path = "/home/qindanfeng/datasets/227_data/wheel_phone_hand_data/test_data/jjc_30.jpg"
    # GenerateMeshgrid()
    # detect(img_path)

    # Evaluate the exported model over the validation set and print mAP.
    img_path = "/home/qindanfeng/datasets/227_data/wheel_phone_hand_data/test_data/jjc_30.jpg"
    onnx_detector = ONNXDetector()
    onnx_detector.eval()