#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# @Author: renjin@bit.edu.cn
# @Date  : 2024-08-14


"""
【节点名称】：
    YOLOv5v7DetNode
【依赖项安装】：
    git clone https://gitee.com/jario-jin/yolov5-v7.git
    pip install -r requirements.txt
    注意要把yolov5-v7的路径填写到本脚本的【48】行以及赋值给 yolov5v7_path 参数
【订阅类型】：
    sensor_msgs::CompressedImage （输入图像）
【发布类型】：
    spirecv_msgs::2DTargets （检测结果）
    sensor_msgs::CompressedImage （可视化结果，需借助可视化工具）
    std_msgs::Boolean （如果输入节点是数据集，则检测完成发布该话题让输入节点继续工作）
【构造参数说明】：
    parameter_file (str): 全局参数文件
    sms_shutdown (bool): 是否接收全局关闭信号，如果需要长期后台执行，建议设置为False
    specified_input_topic (str): 指定输入的话题地址
    specified_output_topic (str): 指定输出的话题地址
    realtime_det (bool): 是否是实时检测器，设置为True可以降低延迟，但可能会产生丢帧
【节点参数】：
    confidence (float): 目标得分阈值
    nms_thresh (float): NMS后处理参数
    dataset_name (str): 数据集名称
    pt_model (str): 加载模型名称
    yolov5v7_path (str): yolov5-v7的安装路径
    yolov5v7_data (str): 模型数据的yaml文件
【备注】：
    无
"""

import threading
import sys
import time
from queue import Queue
from typing import Union
import cv2
import numpy as np
import torch
from spirems import Subscriber, Publisher, cvimg2sms, sms2cvimg, def_msg, QoS
from spirecv.base.BaseNode import BaseNode

sys.path.append("/home/jario/deep/yolov5-v7")
from models.common import DetectMultiBackend
from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr,
                           cv2, increment_path, non_max_suppression, print_args, scale_boxes, scale_segments,
                           strip_optimizer, xyxy2xywh)
from utils.plots import Annotator, colors, save_one_box
from utils.segment.general import masks2segments, process_mask
from utils.torch_utils import select_device, smart_inference_mode
from utils.augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms,
                                 copy_paste,
                                 letterbox, mixup, random_perspective)
import argparse


def trans_det_results(det_results, h, w, names):
    """Convert raw YOLOv5 detections into a spirecv_msgs::2DTargets message.

    Args:
        det_results: (N, 6) array-like of [x1, y1, x2, y2, score, class_id]
            with boxes already in original-image pixel coordinates.
        h, w: original image height and width.
        names: class-id -> class-name mapping from the loaded model.

    Returns:
        dict: a 2DTargets message whose "targets" list holds one dict per
        detection with COCO-style xywh bboxes (top-left corner + size).
    """
    sms_results = def_msg('spirecv_msgs::2DTargets')
    sms_results["file_name"] = ""
    sms_results["height"] = int(h)
    sms_results["width"] = int(w)
    sms_results["targets"] = []

    det_results = np.asarray(det_results, dtype=np.float64)
    for row in det_results:
        x1, y1, x2, y2, score, cls = (float(v) for v in row[:6])
        # Cast numpy scalars to builtin float/int so the message stays
        # serializable (round() on np.float64 returns np.float64, which
        # would otherwise leak into the published dict).
        ann = {
            "category_name": names[int(cls)].strip().replace(' ', '_').lower(),
            "category_id": int(cls),
            "score": round(score, 3),
            # xyxy -> xywh (COCO convention)
            "bbox": [round(x1, 3), round(y1, 3),
                     round(x2 - x1, 3), round(y2 - y1, 3)],
        }
        sms_results["targets"].append(ann)

    return sms_results


def init_yolov5_detector(
    yolov5v7_path,
    weights,
    img_size=640,
    confidence=0.05,
    nms_thresh=0.6,
    device='',
    dnn=False,
    data='coco128.yaml',
    half=False,  # use FP16 half-precision inference
):
    """Load a YOLOv5 DetectMultiBackend model and warm it up.

    Args:
        yolov5v7_path: root of the yolov5-v7 checkout (used to locate the
            dataset yaml under its data/ directory).
        weights: path to the .pt model file.
        img_size: square inference resolution (validated against the
            model stride before warmup).
        confidence, nms_thresh: thresholds stored for later NMS.
        device: device selector string passed to select_device.
        dnn: use OpenCV DNN backend.
        data: dataset yaml file name (relative to <yolov5v7_path>/data/).
        half: run FP16 inference.

    Returns:
        dict bundling the loaded model with its inference parameters.
    """
    dataset_yaml = yolov5v7_path + '/data/' + data
    dev = select_device(device)

    model = DetectMultiBackend(weights, device=dev, dnn=dnn, data=dataset_yaml, fp16=half)

    # Validate the requested resolution against the model stride.
    checked_sz = check_img_size((img_size, img_size), s=model.stride)

    # Warm up with a single dummy batch so the first real frame is fast.
    batch_size = 1
    warmup_batch = 1 if model.pt or model.triton else batch_size
    model.warmup(imgsz=(warmup_batch, 3, *checked_sz))

    return {
        'model': model,
        'device': dev,
        'half': half,
        'img_size': img_size,
        'stride': model.stride,
        'confidence': confidence,
        'nms_thresh': nms_thresh,
    }


def inference_yolov5_detector(model, img0):
    """Run one YOLOv5 inference pass on a single BGR image.

    Args:
        model: dict produced by init_yolov5_detector ('model', 'device',
            'half', 'img_size', 'stride', 'confidence', 'nms_thresh').
        img0: original image as an HxWx3 BGR numpy array.

    Returns:
        numpy array of shape (N, 6): [x1, y1, x2, y2, score, class],
        with boxes rescaled back to img0 pixel coordinates.
    """
    # Padded resize (letterbox keeps aspect ratio, pads to stride multiple)
    img = letterbox(img0, model['img_size'], stride=model['stride'])[0]

    # Convert
    img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
    img = np.ascontiguousarray(img)

    img = torch.from_numpy(img).to(model['device'])
    img = img.half() if model['half'] else img.float()  # uint8 to fp16/32
    img /= 255  # 0 - 255 to 0.0 - 1.0
    if len(img.shape) == 3:
        img = img[None]  # expand for batch dim

    # Inference
    # NOTE(review): the [:2] slice mirrors yolov5's segmentation predict
    # script; non_max_suppression takes element 0 of a tuple itself, so
    # this works for detection models too — confirm if upgrading yolov5.
    pred = model['model'](img, augment=False, visualize=False)[:2]

    # Apply NMS
    max_det = 1000
    classes = None  # filter by class: --class 0, or --class 0 2 3
    agnostic_nms = False  # class-agnostic NMS
    pred = non_max_suppression(pred, model['confidence'], model['nms_thresh'], classes, agnostic_nms, max_det=max_det)

    det = pred[0]
    if len(det):
        # Rescale boxes from img_size to im0 size
        det[:, :4] = scale_boxes(img.shape[2:], det[:, :4], img0.shape).round()

    return det.cpu().detach().numpy()


class YOLOv5v7DetNode(threading.Thread, BaseNode):
    """YOLOv5/v7 object-detection node.

    Subscribes to sensor_msgs::CompressedImage frames, runs YOLOv5
    inference on each one in a worker thread, and publishes:
      * spirecv_msgs::2DTargets detection results,
      * the input image echoed with results attached (for visualization),
      * optionally a std_msgs::Boolean "launch next" signal so a dataset
        input node can feed the next image.
    """

    def __init__(
        self,
        job_name: str,
        ip: str = '127.0.0.1',
        port: int = 9094,
        param_dict_or_file: Union[dict, str] = None,
        sms_shutdown: bool = True,
        launch_next_emit: bool = True,
        specified_input_topic: str = None,
        specified_output_topic: str = None,
        realtime_det: bool = False,
        remote_ip: str = '127.0.0.1',
        remote_port: int = 9094
    ):
        threading.Thread.__init__(self)
        BaseNode.__init__(
            self,
            self.__class__.__name__,
            job_name,
            ip=ip,
            port=port,
            param_dict_or_file=param_dict_or_file,
            sms_shutdown=sms_shutdown
        )
        # Resolve input/output topic names, allowing explicit overrides.
        input_url = '/' + job_name + '/sensor/image_raw'
        if specified_input_topic is not None:
            input_url = specified_input_topic

        output_url = '/' + job_name + '/detector/results'
        if specified_output_topic is not None:
            output_url = specified_output_topic

        self.realtime_det = realtime_det
        self.job_queue = Queue()
        self.queue_pool.append(self.job_queue)

        self._image_reader = Subscriber(
            input_url, 'sensor_msgs::CompressedImage', self.image_callback,
            ip=ip, port=port, qos=QoS.Reliability
        )
        self._result_writer = Publisher(
            output_url, 'spirecv_msgs::2DTargets',
            ip=remote_ip, port=remote_port, qos=QoS.Reliability
        )
        self._show_writer = Publisher(
            '/' + job_name + '/detector/image_results', 'sensor_msgs::CompressedImage',
            ip=ip, port=port
        )
        self.launch_next_emit = launch_next_emit
        # Created only on demand; release() must tolerate its absence
        # (the original code crashed here when launch_next_emit=False).
        self._next_writer = None
        if self.launch_next_emit:
            self._next_writer = Publisher(
                '/' + job_name + '/launch_next', 'std_msgs::Boolean',
                ip=ip, port=port, qos=QoS.Reliability
            )

        # Node parameters (defaults overridable via the parameter file).
        self.confidence = self.get_param("confidence", 0.001)
        self.nms_thresh = self.get_param("nms_thresh", 0.60)
        self.imgsz = self.get_param("imgsz", 1280)
        self.dataset_name = self.get_param("dataset_name", "visdrone2019_det")
        self.pt_model = self.get_param("pt_model", "G:/dataset/spirecv2_models/yolov5v7/yolov5l6-v7_1280_100e_b4_visdrone_240813_2.pt")
        self.yolov5v7_path = self.get_param("yolov5v7_path", "G:/deep/yolov5-v7")
        self.yolov5v7_data = self.get_param("yolov5v7_data", "VisDrone.yaml")

        # Make the configured yolov5-v7 checkout importable.
        sys.path.append(self.yolov5v7_path)

        self.model = init_yolov5_detector(
            self.yolov5v7_path, self.pt_model,
            img_size=self.imgsz, confidence=self.confidence, nms_thresh=self.nms_thresh,
            data=self.yolov5v7_data
        )
        # Rolling accumulators for average per-stage timing diagnostics.
        self.infer_n_ims = 0
        self.infer_time3 = 0.0
        self.infer_time2 = 0.0
        self.infer_time1 = 0.0
        self.start()

    def release(self):
        """Shut down all SpireMS endpoints owned by this node.

        Bugfix: the original version unconditionally killed/joined
        self._next_writer, raising AttributeError whenever the node was
        constructed with launch_next_emit=False (as the CLI entry does).
        """
        BaseNode.release(self)
        endpoints = [self._image_reader, self._result_writer, self._show_writer]
        if self._next_writer is not None:
            endpoints.append(self._next_writer)
        for ep in endpoints:
            ep.kill()
        for ep in endpoints:
            ep.join()

    def image_callback(self, msg):
        """Queue an incoming image; in realtime mode drop any backlog first."""
        if self.realtime_det:
            if not self.job_queue.empty():
                # Keep only the freshest frame to minimize latency
                # (accepting frame drops, as documented for realtime_det).
                self.job_queue.queue.clear()
        self.job_queue.put(msg)

    def run(self):
        """Worker loop: pull frames, detect, publish until shutdown."""
        while self.is_running():
            msg = self.job_queue.get(block=True)
            if msg is None:  # sentinel value: shut down
                break

            # Stage 1: decode the image out of the message.
            t1 = time.time()
            file_name = msg['file_name'] if 'file_name' in msg else ''
            img = sms2cvimg(msg)
            self.infer_n_ims += 1
            self.infer_time1 += time.time() - t1
            print('avg-infer time1: {}'.format(self.infer_time1 / self.infer_n_ims))

            # Stage 2: inference + result-message assembly.
            t1 = time.time()
            results = inference_yolov5_detector(self.model, img)
            res_msg = trans_det_results(results, img.shape[0], img.shape[1], self.model['model'].names)
            res_msg['file_name'] = file_name
            res_msg['dataset'] = self.dataset_name
            # Forward bookkeeping fields from the input message when present.
            for key in ('client_id', 'file_name', 'img_id', 'img_total'):
                if key in msg:
                    res_msg[key] = msg[key]

            res_msg['time_used'] = time.time() - t1
            self.infer_time2 += time.time() - t1
            print('avg-infer time2: {}'.format(self.infer_time2 / self.infer_n_ims))

            # Stage 3: publish results; optionally trigger the next input.
            t1 = time.time()
            self._result_writer.publish(res_msg)

            if 'img_total' in msg and self.launch_next_emit:
                next_msg = def_msg('std_msgs::Boolean')
                next_msg['data'] = True
                self._next_writer.publish(next_msg)

            # Attach results to the original image message so visualization
            # tools can draw them over the frame.
            msg['spirecv_msgs::2DTargets'] = res_msg
            self._show_writer.publish(msg)

            self.infer_time3 += time.time() - t1
            print('avg-infer time3: {}'.format(self.infer_time3 / self.infer_n_ims))

        self.release()
        print('{} quit!'.format(self.__class__.__name__))


if __name__ == '__main__':
    # CLI entry point: parse arguments and start the detection node.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'config',
        help='SpireCV2 Config (.json)')
    parser.add_argument(
        '--job-name',
        type=str,
        default='live',
        help='SpireCV Job Name')
    parser.add_argument(
        '--input-topic',
        type=str,
        default='',
        help='Specified Input Topic')
    parser.add_argument(
        '--output-topic',
        type=str,
        default='',
        help='Specified Output Topic')
    parser.add_argument(
        '--ip',
        type=str,
        default='127.0.0.1',
        help='SpireMS Core IP')
    parser.add_argument(
        '--port',
        type=int,
        default=9094,
        help='SpireMS Core Port')
    args = parser.parse_args()
    print("config:", args.config)
    print("job-name:", args.job_name)

    # Empty string means "use the node's default topic".
    specified_input_topic = args.input_topic if len(args.input_topic) > 0 else None
    specified_output_topic = args.output_topic if len(args.output_topic) > 0 else None

    print("input-topic:", specified_input_topic)
    print("output-topic:", specified_output_topic)

    node = YOLOv5v7DetNode(
        args.job_name,
        # Bugfix: --ip/--port were parsed but never forwarded to the node,
        # so non-default broker addresses were silently ignored.
        ip=args.ip,
        port=args.port,
        launch_next_emit=False,
        realtime_det=True,
        param_dict_or_file=args.config,
        specified_input_topic=specified_input_topic,
        specified_output_topic=specified_output_topic
    )