# -*- coding: utf-8 -*-
# @Time : 2023/4/7 15:43
# @Author : 陈鹏飞
# @Email : 2578925789@qq.com
# @File : detect_replaceDFL
# @Description : 

#!/usr/bin/env python3
# -*- coding:utf-8 -*-
import argparse
import os
import sys
import os.path as osp
import cv2
import torch
import numpy as np
import onnxruntime as ort
from math import exp
import json
from tqdm import tqdm
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
import torch
import torchvision
import torch.nn as nn

ROOT = os.getcwd()
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))

# Class labels the detector was trained on; list index == classId in outputs.
CLASSES = ['face', 'hand', 'cigarette', 'cellphone']

# Flat [x0, y0, x1, y1, ...] list of grid-cell centers, filled by GenerateMeshgrid().
meshgrid = []

class_num = len(CLASSES)
headNum = 4                # number of detection heads
strides = [4, 8, 16, 32]   # downsampling factor of each head
nmsThresh = 0.8            # IoU threshold used by NMS
objectThresh = 0.15        # minimum class confidence to keep a box

input_imgH = 192           # network input height
input_imgW = 320           # network input width
# Feature-map size [rows, cols] of each head: input size divided by its stride.
mapSize = [[input_imgH // s, input_imgW // s] for s in strides]

REG_MAX_V1 = 16            # DFL distribution bins per box side

class DFL(nn.Module):
    """Integral module of Distribution Focal Loss (DFL).

    Proposed in Generalized Focal Loss,
    https://ieeexplore.ieee.org/document/9792391

    Decodes a discrete distribution over ``c1`` bins per box side into a
    single expected distance, implemented as a frozen 1x1 convolution whose
    weights are the bin indices [0, 1, ..., c1 - 1].
    """

    def __init__(self, c1=16):
        super().__init__()
        self.c1 = c1
        # Non-trainable expectation operator over the c1 bins.
        self.conv = nn.Conv2d(c1, 1, 1, bias=False).requires_grad_(False)
        bins = torch.arange(c1, dtype=torch.float)
        self.conv.weight.data.copy_(bins.view(1, c1, 1, 1))

    def forward(self, x):
        """(batch, 4*c1, anchors) -> (batch, 4, anchors) expected distances."""
        b, _, a = x.shape  # batch, channels, anchors
        probs = x.view(b, 4, self.c1, a).transpose(2, 1).softmax(1)
        return self.conv(probs).view(b, 4, a)

def comput_mAP(json_pr, json_gt):
    """COCO bbox evaluation: overall metrics first, then one pass per category.

    json_pr: path/handle of predictions in COCO results format.
    json_gt: path of the ground-truth COCO annotation file.
    """
    def _run(evaluator):
        # One full evaluate / accumulate / summarize pass.
        evaluator.evaluate()
        evaluator.accumulate()
        evaluator.summarize()

    coco_gt = COCO(json_gt)
    coco_dt = coco_gt.loadRes(json_pr)
    cocoEval = COCOeval(coco_gt, coco_dt, 'bbox')

    _run(cocoEval)  # all categories together

    # Evaluate each category on its own (category ids are 1-based).
    for classID in [1, 2, 3, 4]:
        print('-------------------{}-------------------\n'.format(str(classID)))
        cocoEval.params.catIds = [classID]
        _run(cocoEval)

def evaluate(pred_dir):
    """Evaluate the ONNX detector on the validation set.

    Runs inference over every image listed in the validation JSON, writes the
    detections to ``pred_dir`` in COCO results format, computes mAP via
    pycocotools, and prints the mean detection confidence per class (used to
    track confidence drift between model versions).

    Args:
        pred_dir: path of the JSON file the predictions are written to.

    Fixes vs. the original: a ZeroDivisionError when a class produced no
    detections, a stray empty string literal, and an unused annotation_id
    counter; the eight parallel per-class counters are collapsed into lists.
    """
    imgs_dir = '/home/chenpengfei/dataset/DSM_Dataset_class4_20220211_fukang/val/image'
    json_dir = '/home/chenpengfei/dataset/DSM_Dataset_class4_20220211_fukang/val/val.json'

    # Per-class confidence accumulators; index == classId.
    conf_sum = [0.0] * class_num
    conf_cnt = [0] * class_num

    ort_session = ort.InferenceSession('./runs/detect/train35/weights/best_dict.onnx')

    anns = []
    with open(json_dir, 'r') as f:
        new_dict = json.load(f)
    print(new_dict.keys())

    for tt in tqdm(new_dict['images']):
        filename = tt['file_name']
        image_id = tt['id']
        orig = cv2.imread(os.path.join(imgs_dir, filename))
        img_h, img_w = orig.shape[:2]

        image, top_pad, left_pad = precess_image_letter(orig, input_imgW, input_imgH)
        image = np.expand_dims(image, axis=0)
        image = image[:, :1, :, :]  # model takes a single (grayscale) channel

        pred_results = ort_session.run(None, {'data': image})
        predbox = postprocess(list(pred_results), img_h, img_w, top_pad, left_pad)

        for box in predbox:
            xmin, ymin = int(box.xmin), int(box.ymin)
            xmax, ymax = int(box.xmax), int(box.ymax)

            conf_sum[box.classId] += box.score
            conf_cnt[box.classId] += 1

            # COCO results format: bbox is [x, y, width, height] and
            # category ids are 1-based.
            anns.append({'image_id': image_id,
                         'category_id': box.classId + 1,
                         'bbox': [xmin, ymin, xmax - xmin, ymax - ymin],
                         'score': box.score,
                         'iscrowd': 0
                         })

    with open(pred_dir, 'w', encoding='utf-8') as f:
        json.dump(anns, f, indent=4)

    comput_mAP(pred_dir, json_dir)

    # Mean confidence per class; guard classes with zero detections.
    for cls_id, cls_name in enumerate(CLASSES):
        ave = conf_sum[cls_id] / conf_cnt[cls_id] if conf_cnt[cls_id] else 0.0
        print(cls_name + "_confs_ave: " + str(ave))

class LetterBox:
    """Resize image and padding for detection, instance segmentation, pose.

    Resizes while preserving aspect ratio, then pads with gray (114) up to
    `new_shape`. With auto=True the padding is reduced to the minimum that
    keeps both sides a multiple of `stride` (ultralytics-style letterbox).
    """

    def __init__(self, new_shape=(640, 640), auto=False, scaleFill=False, scaleup=True, stride=32):
        # new_shape: target (height, width); auto: pad only to a stride
        # multiple; scaleFill: stretch without padding; scaleup: allow
        # enlarging small images; stride: model stride used when auto=True.
        self.new_shape = new_shape
        self.auto = auto
        self.scaleFill = scaleFill
        self.scaleup = scaleup
        self.stride = stride

    def __call__(self, labels=None, image=None):
        """Letterbox `image` (or labels['img'] when `image` is None).

        Returns the updated `labels` dict when a non-empty labels dict was
        supplied, otherwise the tuple (padded_image, top_pad, left_pad).
        """
        if labels is None:
            labels = {}
        img = labels.get('img') if image is None else image
        shape = img.shape[:2]  # current shape [height, width]
        new_shape = labels.pop('rect_shape', self.new_shape)
        if isinstance(new_shape, int):
            new_shape = (new_shape, new_shape)

        # Scale ratio (new / old)
        r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
        if not self.scaleup:  # only scale down, do not scale up (for better val mAP)
            r = min(r, 1.0)

        # Compute padding
        ratio = r, r  # width, height ratios
        new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
        dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
        if self.auto:  # minimum rectangle
            dw, dh = np.mod(dw, self.stride), np.mod(dh, self.stride)  # wh padding
        elif self.scaleFill:  # stretch
            dw, dh = 0.0, 0.0
            new_unpad = (new_shape[1], new_shape[0])
            ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

        dw /= 2  # divide padding into 2 sides
        dh /= 2
        if labels.get('ratio_pad'):
            labels['ratio_pad'] = (labels['ratio_pad'], (dw, dh))  # for evaluation

        if shape[::-1] != new_unpad:  # resize
            img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
        # round(x -/+ 0.1) splits an odd total padding as evenly as possible.
        top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
        left, right = int(round(dw - 0.1)), int(round(dw + 0.1))

        img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT,
                                 value=(114, 114, 114))  # add border

        if len(labels):
            labels = self._update_labels(labels, ratio, dw, dh)
            labels['img'] = img
            labels['resized_shape'] = new_shape
            return labels
        else:
            return img, top, left

    def _update_labels(self, labels, ratio, padw, padh):
        """Scale and shift labels['instances'] to match the letterboxed image."""
        labels['instances'].convert_bbox(format='xyxy')
        labels['instances'].denormalize(*labels['img'].shape[:2][::-1])
        labels['instances'].scale(*ratio)
        labels['instances'].add_padding(padw, padh)
        return labels

class DetectBox:
    """Plain container for one detection: class id, confidence score, and
    the xyxy box corners in original-image coordinates."""

    def __init__(self, classId, score, xmin, ymin, xmax, ymax):
        self.score = score
        self.classId = classId
        self.ymin = ymin
        self.xmin = xmin
        self.ymax = ymax
        self.xmax = xmax


def GenerateMeshgrid():
    """(Re)build the flat list of grid-cell centers for all detection heads.

    For each head appends the (x + 0.5, y + 0.5) center of every feature-map
    cell, interleaved as [x0, y0, x1, y1, ...] — the layout the postprocess
    functions index with their running cursor.

    Fix: the global list is cleared first, so calling this more than once no
    longer accumulates duplicate entries.
    """
    meshgrid.clear()
    for rows, cols in mapSize[:headNum]:
        for i in range(rows):
            for j in range(cols):
                meshgrid.append(j + 0.5)
                meshgrid.append(i + 0.5)


def IOU(xmin1, ymin1, xmax1, ymax1, xmin2, ymin2, xmax2, ymax2):
    """Intersection-over-union of two axis-aligned boxes in xyxy form.

    Returns a float in [0, 1]. Fix: degenerate input (union area <= 0,
    e.g. two zero-area boxes) now returns 0.0 instead of raising
    ZeroDivisionError.
    """
    # Intersection rectangle, clamped to zero when the boxes do not overlap.
    inner_w = max(0, min(xmax1, xmax2) - max(xmin1, xmin2))
    inner_h = max(0, min(ymax1, ymax2) - max(ymin1, ymin2))
    inner = inner_w * inner_h

    area1 = (xmax1 - xmin1) * (ymax1 - ymin1)
    area2 = (xmax2 - xmin2) * (ymax2 - ymin2)
    union = area1 + area2 - inner

    # Degenerate boxes contribute no area -> define overlap as zero.
    if union <= 0:
        return 0.0
    return inner / union


def NMS(detectResult):
    """Greedy class-wise non-maximum suppression.

    Boxes are visited in descending score order; each kept box suppresses
    every lower-scored box of the same class whose IoU exceeds nmsThresh.
    Suppressed entries are marked in place by setting classId = -1.
    """
    ranked = sorted(detectResult, key=lambda b: b.score, reverse=True)
    kept = []

    for i, cand in enumerate(ranked):
        if cand.classId == -1:
            continue  # already suppressed by a stronger box
        kept.append(cand)
        for other in ranked[i + 1:]:
            if other.classId != cand.classId:
                continue
            overlap = IOU(cand.xmin, cand.ymin, cand.xmax, cand.ymax,
                          other.xmin, other.ymin, other.xmax, other.ymax)
            if overlap > nmsThresh:
                other.classId = -1

    return kept


def sigmoid(x):
    """Numerically stable logistic function 1 / (1 + e^-x).

    Fix: the original always computed exp(-x), which raises OverflowError
    for large negative x; splitting on the sign keeps the exp() argument
    non-positive so it can never overflow.
    """
    if x >= 0:
        return 1 / (1 + exp(-x))
    z = exp(x)
    return z / (1 + z)


def postprocess(out, img_h, img_w, top, left):
    """Decode raw head outputs into an NMS-filtered list of DetectBox.

    out: list of 2 * headNum arrays, alternating regression (ltrb distances,
    already decoded inside the model) and classification logits per head.
    top/left: letterbox padding subtracted before rescaling coordinates
    back to the original (img_h, img_w) image.
    """
    flat = [o.reshape(-1) for o in out]

    # Undo the letterbox: remove padding, then scale to the source image.
    scale_h = img_h / (input_imgH - top * 2)
    scale_w = img_w / (input_imgW - left * 2)

    candidates = []
    grid = 0  # running cursor into meshgrid, shared across all heads

    for head in range(headNum):
        reg = flat[head * 2 + 0]
        cls = flat[head * 2 + 1]
        rows, cols = mapSize[head]
        plane = rows * cols  # elements per channel in this head's output
        stride = strides[head]

        for h in range(rows):
            for w in range(cols):
                cell = h * cols + w
                cx = meshgrid[grid + 0]
                cy = meshgrid[grid + 1]
                grid += 2

                for cl in range(class_num):
                    score = sigmoid(cls[cl * plane + cell])
                    if score <= objectThresh:
                        continue

                    # reg holds left/top/right/bottom distances from the
                    # cell center, in grid units.
                    x1 = (cx - reg[0 * plane + cell]) * stride - left
                    y1 = (cy - reg[1 * plane + cell]) * stride - top
                    x2 = (cx + reg[2 * plane + cell]) * stride - left
                    y2 = (cy + reg[3 * plane + cell]) * stride - top

                    # Scale to source-image coordinates and clamp to bounds.
                    xmin = max(x1 * scale_w, 0)
                    ymin = max(y1 * scale_h, 0)
                    xmax = min(x2 * scale_w, img_w)
                    ymax = min(y2 * scale_h, img_h)

                    candidates.append(DetectBox(cl, score, xmin, ymin, xmax, ymax))

    return NMS(candidates)

def postprocess_without_DFL(out, img_h, img_w, top, left):
    """Decode raw head outputs into NMS-filtered boxes, applying DFL here.

    NOTE(review): despite the name, this variant runs the DFL decode in
    Python — presumably for an ONNX export with the DFL layer stripped
    (used with 'best_no_dfl.onnx' in detect()); confirm against the export.

    out: alternating regression/classification arrays per head; each
    regression tensor holds 4 * 16 distribution bins per cell, collapsed
    below into 4 ltrb distances by the DFL module.
    top/left: letterbox padding removed before rescaling to (img_h, img_w).
    """
    # print('postprocess ... ')

    detectResult = []
    output = []
    for i in range(len(out)):
        # print(out[i].shape)
        output.append(out[i].reshape((-1)))

    # Undo the letterbox: remove padding, then scale to the source image.
    scale_h = img_h / (input_imgH - top * 2)
    scale_w = img_w / (input_imgW - left * 2)

    # Running cursor into the flat meshgrid list, shared across all heads;
    # starts at -2 because it is advanced before first use.
    gridIndex = -2
    dfl = DFL(16)
    for index in range(headNum):
        reg = torch.from_numpy(output[index * 2 + 0])
        # Collapse the 16-bin distribution per side into expected distances.
        reg = dfl(reg.view(1, 16 * 4, -1)).view(-1)
        cls = output[index * 2 + 1]

        for h in range(mapSize[index][0]):
            for w in range(mapSize[index][1]):
                gridIndex += 2

                for cl in range(class_num):
                    cls_val = sigmoid(cls[cl * mapSize[index][0] * mapSize[index][1] + h * mapSize[index][1] + w])

                    if cls_val > objectThresh:
                        # ltrb distances from the cell center, in grid units.
                        x1 = (meshgrid[gridIndex + 0] - reg[0 * mapSize[index][0] * mapSize[index][1] + h * mapSize[index][1] + w]) * strides[index] - left
                        y1 = (meshgrid[gridIndex + 1] - reg[1 * mapSize[index][0] * mapSize[index][1] + h * mapSize[index][1] + w]) * strides[index] - top
                        x2 = (meshgrid[gridIndex + 0] + reg[2 * mapSize[index][0] * mapSize[index][1] + h * mapSize[index][1] + w]) * strides[index] - left
                        y2 = (meshgrid[gridIndex + 1] + reg[3 * mapSize[index][0] * mapSize[index][1] + h * mapSize[index][1] + w]) * strides[index] - top

                        # Scale to source-image coordinates and clamp to bounds.
                        xmin = x1 * scale_w
                        ymin = y1 * scale_h
                        xmax = x2 * scale_w
                        ymax = y2 * scale_h

                        xmin = xmin if xmin > 0 else 0
                        ymin = ymin if ymin > 0 else 0
                        xmax = xmax if xmax < img_w else img_w
                        ymax = ymax if ymax < img_h else img_h

                        box = DetectBox(cl, cls_val, xmin, ymin, xmax, ymax)
                        detectResult.append(box)
    # NMS
    # print('detectResult:', len(detectResult))
    predBox = NMS(detectResult)

    return predBox

def precess_image(img_src, resize_w, resize_h):
    """Resize a BGR image to (resize_w, resize_h) and return a normalized
    float32 CHW RGB array in [0, 1]."""
    resized = cv2.resize(img_src, (resize_w, resize_h), interpolation=cv2.INTER_LINEAR)
    rgb = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
    chw = rgb.astype(np.float32).transpose((2, 0, 1))
    return chw / 255

def precess_image_letter(img_src, resize_w, resize_h):
    """Letterbox-preprocess a BGR image for the network.

    Returns (float32 CHW RGB array in [0, 1], top padding, left padding).

    NOTE(review): `resize_w`/`resize_h` are accepted but ignored — the
    LetterBox target is hard-coded to [320, 320] with auto=True; confirm
    this still matches the ONNX model's expected input shape.
    """
    im, top_pad, left_pad = LetterBox([320, 320], auto=True, stride=32)(image=img_src)  # fixed-size resize, padded to a multiple of 32
    im = im.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
    im = np.ascontiguousarray(im)  # contiguous
    image = im.astype(np.float32)
    image /= 255
    return image, top_pad, left_pad


def detect(img_path):
    """Run the no-DFL ONNX model on one image, draw the detections, and
    save the annotated copy into the test output directory."""
    frame = cv2.imread(img_path)
    src_h, src_w = frame.shape[:2]

    tensor, pad_top, pad_left = precess_image_letter(frame, input_imgW, input_imgH)
    tensor = np.expand_dims(tensor, axis=0)
    tensor = tensor[:, :1, :, :]  # model takes a single channel

    session = ort.InferenceSession('./runs/detect/train35/weights/best_no_dfl.onnx')
    outputs = list(session.run(None, {'images': tensor}))

    boxes = postprocess_without_DFL(outputs, src_h, src_w, pad_top, pad_left)
    print('obj num is :', len(boxes))

    # Draw every surviving detection with its class label and score.
    for box in boxes:
        corner1 = (int(box.xmin), int(box.ymin))
        corner2 = (int(box.xmax), int(box.ymax))
        cv2.rectangle(frame, corner1, corner2, (0, 255, 0), 2)
        label = CLASSES[box.classId] + "%.2f" % box.score
        cv2.putText(frame, label, corner1, cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2, cv2.LINE_AA)

    out_path = "./runs/detect/train35/test/" + img_path.split("/")[-1][:-4] + "_onnx.jpg"
    cv2.imwrite(out_path, frame)


if __name__ == '__main__':
    print('This is main ....')
    # Precompute the grid-cell centers used by the postprocess functions.
    GenerateMeshgrid()

    img_path = '/home/chenpengfei/ultralytics/runs/detect/train35/test/20211231_xinB15500_1357_96.jpg'
    detect(img_path)

    # # Run inference over the whole validation set
    # imgs_dir = '/home/chenpengfei/dataset/DSM_Dataset_class4_20220211_fukang/val/image'
    # json_dir = '/home/chenpengfei/dataset/DSM_Dataset_class4_20220211_fukang/val/val.json'
    # with open(json_dir, 'r') as f:
    #     new_dict = json.load(f)
    #     print(new_dict.keys())
    #     for tt in tqdm(new_dict['images']):
    #         name = tt['file_name']
    #         img_dir = os.path.join(imgs_dir, name)
    #         detect(img_dir)

    # pred_dir = "./runs/detect/train/cocoval/pred_result.json"
    # evaluate(pred_dir)
