# -*- coding: utf-8 -*-
"""
# --------------------------------------------------------
# @Author :
# @E-mail :
# @Date   : 2018-04-03 18:38:34
# --------------------------------------------------------
"""
import os, sys

sys.path.insert(0, os.path.dirname(__file__))

import cv2
import torch
import argparse
import numpy as np
from detection import ssd_inference, yolo_Inference
from detection.anchor_utils import anchor_utils
from detection.anchor_utils.nms import py_cpu_nms
from pybaseutils import image_utils

root = os.path.dirname(__file__)


class Detector(ssd_inference.SSDInference):
    """SSD face detector with 5-point landmark prediction, built on SSDInference."""

    def __init__(self, cfg, padding=True, freeze_header=False, device="cpu"):
        """
        :param cfg: parsed configuration namespace (model_file, net_type, priors_type,
                    input_size, prob_threshold, iou_threshold, ... — consumed by the base class)
        :param padding: if True, inputs were resize-padded and outputs are mapped back
                        to the original image coordinates
        :param freeze_header: forwarded to the base class (controls header decoding)
        :param device: torch device string, e.g. "cpu" or "cuda:0"
        """
        super(Detector, self).__init__(cfg, padding=padding, freeze_header=freeze_header, device=device)

    def build_model(self, net_type, priors_type, version="v2"):
        """Build the detection network (delegates to the base class)."""
        return super().build_model(net_type, priors_type, version)

    def pose_process(self, output, image_size):
        """
        Post-process raw network output into per-image detections and landmarks.
        (NOTE(review): the name keeps the original "pose_process" spelling — likely a
        typo for "post_process" — for backward compatibility with callers.)

        :param output: (bboxes, scores, landms) batch tensors:
                       bboxes torch.Size([B, num_anchors, 4]),
                       scores torch.Size([B, num_anchors, 2]),
                       landms torch.Size([B, num_anchors, 10])
        :param image_size: [width, height] of the original image
        :return: (batch_dets, batch_landms); per image, dets is (N, 6) =
                 (xmin, ymin, xmax, ymax, score, label) and landms is (N, 5, 2)
        """
        bboxes, scores, landms = output
        if self.padding:
            bboxes_scale = np.asarray(self.input_size * 2)
            landms_scale = np.asarray(self.input_size * 5)
        else:
            bboxes_scale = np.asarray(image_size * 2)
            landms_scale = np.asarray(image_size * 5)
        if not self.prior_boxes.freeze_header:
            # decode anchor-relative offsets into absolute (normalized) coordinates
            variances = [self.prior_boxes.center_variance, self.prior_boxes.size_variance]
            bboxes = anchor_utils.decode(bboxes, self.priors, variances)
            landms = anchor_utils.decode_landm(landms, self.priors, variances)

        batch_dets = []
        batch_landms = []
        for i in range(len(bboxes)):
            # BUGFIX: the original reassigned bboxes/scores/landms inside the loop,
            # clobbering the batch tensors and breaking any batch size > 1.
            # Use per-item locals instead.
            boxes_i = bboxes[i].cpu().numpy() * bboxes_scale
            landms_i = landms[i].cpu().numpy() * landms_scale
            scores_i = scores[i].cpu().numpy()[:, 1:]  # column 0 is background; excluded from NMS
            dets, labels, landms_i = py_cpu_nms.bboxes_landm_nms(boxes_i, scores_i, landms_i,
                                                                 prob_threshold=self.prob_threshold,
                                                                 iou_threshold=self.iou_threshold,
                                                                 top_k=self.top_k,
                                                                 keep_top_k=self.keep_top_k)
            labels = labels + 1  # shift index back to account for the dropped background column
            landms_i = landms_i.reshape(len(landms_i), 5, 2)
            dets = dets.reshape(len(dets), 5)
            if self.padding and len(dets) > 0:
                # Map boxes/landmarks from the padded input space back to the original
                # image. NOTE(review): the return value is ignored; this presumably
                # mutates dets[:, 0:4] (a view) and landms_i in place — TODO confirm.
                image_utils.image_boxes_resize_padding_inverse(image_size,
                                                               self.input_size,
                                                               dets[:, 0:4],
                                                               landms_i)
            # (xmin, ymin, xmax, ymax, score, label)
            if len(dets) > 0:
                dets = np.concatenate([dets, labels.reshape(len(labels), 1)], axis=1)
            batch_dets.append(dets)
            batch_landms.append(landms_i)
        return batch_dets, batch_landms

    def detect(self, rgb_image, vis=False):
        """
        Detect faces and landmarks in a single RGB image.

        :param rgb_image: HxWxC RGB image (numpy array)
        :param vis: if True, display the detection results
        :return: dets <np.ndarray>: (num_boxes, 6) = (xmin, ymin, xmax, ymax, score, label)
                 landms <np.ndarray>: (num_boxes, 5, 2)
        """
        shape = rgb_image.shape
        input_tensor = self.pre_process(rgb_image, input_size=self.input_size)
        output = self.inference(input_tensor)
        dets, landms = self.pose_process(output, image_size=[shape[1], shape[0]])
        dets, landms = dets[0], landms[0]
        if vis:
            self.show_image(rgb_image, dets, landms)
        return dets, landms


def get_parser():
    """
    Build and parse command-line arguments for the detection demo.

    :return: argparse.Namespace with model and detection settings
    """
    # Effective defaults (the original code contained dead reassignments and
    # commented-out model paths; they have been removed).
    input_size = [416, 416]  # network input size [W, H]
    image_dir = "/media/PKing/新加卷1/SDK/face-recognition/Face-Recognition/Face-Recognition-Python/data/test_image"
    model_file = "/home/PKing/nasdata/release/infrastructure/DMDetection/work_space/VOCLandm/SSD_rfb_landm_1.0_face_416_416_padding_20231219_091128_6086/model/best_model_097_6.7321.pth"
    net_type = "rfb_landm"
    priors_type = "face"

    parser = argparse.ArgumentParser(description='Detection Test')
    parser.add_argument('-m', '--model_file', type=str, default=model_file, help='model file path')
    parser.add_argument('--net_type', type=str, default=net_type,
                        help='Backbone network mobile0.25 or slim or RFB')
    # help strings below were copy-pasted from --net_type in the original; fixed
    parser.add_argument('--width_mult', type=float, default=1.0, help='backbone width multiplier')
    parser.add_argument('--priors_type', type=str, default=priors_type,
                        help='prior (anchor) boxes type, e.g. "face"')
    parser.add_argument('--prob_threshold', type=float, default=0.5, help='confidence_threshold')
    parser.add_argument('--iou_threshold', type=float, default=0.3, help='iou_threshold')
    parser.add_argument('--image_dir', type=str, default=image_dir, help='directory or image path')
    parser.add_argument('--input_size', nargs='+', type=int, default=input_size,
                        help="--input size [600(W),600(H)]")
    parser.add_argument('--num_classes', help="num_classes", type=int, default=2)
    parser.add_argument('--device', type=str, default="cuda:0", help='device')
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    # Parse CLI options, build the detector, and run it over the image directory
    # with visualization enabled.
    cfg = get_parser()
    detector = Detector(cfg, device=cfg.device)
    detector.detect_image_dir(cfg.image_dir, vis=True)
