# -*- coding: utf-8 -*-
"""
@Time ： 2023/10/24 10:01
@Auth ： 张发伟

"""
import base64
import datetime
import json
import os
import time
import redis
from HKCam import HKCam
from log.log import log, cfg
import cv2
from PIL import Image, ImageDraw, ImageFont
import numpy as np
# from utils import *


# Lower-left gate pair
# Camera: 10.24.131.234
# NOTE(review): plaintext credentials were committed here (admin hacz123456) — move them to secure config and rotate.
def put_chinese_string(img, x, y, string):
    """Draw text (including CJK characters) onto a BGR image via Pillow.

    OpenCV's ``putText`` cannot render Chinese glyphs, so the image is round-
    tripped through PIL: BGR -> RGB -> draw -> RGB -> BGR.

    Args:
        img: BGR image as returned by ``cv2.imread`` / camera capture.
        x, y: top-left position where the text starts, in pixels.
        string: text to render.

    Returns:
        A new BGR image with the text drawn in red.
    """
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = Image.fromarray(img)
    draw = ImageDraw.Draw(img)
    # simhei.ttf must be available on the font path; download it if missing.
    font = ImageFont.truetype("simhei.ttf", 40, encoding="utf-8")
    # BUGFIX: the image is in RGB space here, so red is (255, 0, 0).  The old
    # (0, 0, 255) was a BGR red copied from cv2 code and rendered blue,
    # mismatching the red cv2.rectangle drawn around the same detection.
    draw.text((x, y), string, (255, 0, 0), font=font)
    img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
    return img


class DetectPerson:
    """Detect pedestrians near the upper gate and publish alerts to Redis.

    All runtime settings (model weights and thresholds, camera credentials,
    detection ROIs, Redis connection, save directories) are read from the
    module-level ``cfg`` provided by ``log.log``.
    """

    def __init__(self):
        # --- person-detection model parameters ---
        self.person_conf_thres = cfg['ModelPersonDetection']['person_conf_thres']
        self.person_iou_thres = cfg['ModelPersonDetection']['person_iou_thres']
        self.onnx_path = cfg['ModelPersonDetection']['person_weights_path']

        # --- Hikvision camera connection ---
        self.camIP = cfg['HKCam']['camIP']
        self.camUsername = cfg['HKCam']['username']
        self.camPassword = cfg['HKCam']['password']
        self.camDevport = cfg['HKCam']['devport']

        # Detection region above the upper gate (pixel coordinates).
        self.up_x1 = cfg['UpRoi']['up_x1']
        self.up_x2 = cfg['UpRoi']['up_x2']
        self.up_y1 = cfg['UpRoi']['up_y1']
        self.up_y2 = cfg['UpRoi']['up_y2']

        # Detection region below the upper gate (pixel coordinates).
        self.down_x1 = cfg['UpRoi']['down_x1']
        self.down_x2 = cfg['UpRoi']['down_x2']
        self.down_y1 = cfg['UpRoi']['down_y1']
        self.down_y2 = cfg['UpRoi']['down_y2']

        # --- Redis connection used for both alerts and frame streaming ---
        self.host = cfg['Redis']['host']
        self.port = cfg['Redis']['port']
        self.db = cfg['Redis']['db']
        self.password = cfg['Redis']['password']
        self.redis = redis.Redis(host=self.host, port=self.port, db=self.db, password=self.password,
                                 decode_responses=True)
        self.hkcam = HKCam(self.camIP, self.camUsername, self.camPassword, self.camDevport)
        self.current_path = os.getcwd()
        # Directory for annotated detection snapshots.
        self.SaveDir = os.path.join(self.current_path, cfg['Basic']['SaveDirDown'])
        # Directory for the corresponding raw (unannotated) frames.
        self.SaveRaw = os.path.join(self.current_path, cfg['Basic']['SaveRaw'])
        self.res_task_key_name_frame = cfg["Redis"]["res_task_key"]
        # Cache of loaded DNN networks keyed by ONNX path, so the model is
        # read from disk once instead of on every frame (see _get_net).
        self._net_cache = {}

    def _get_net(self, onnx_path):
        """Return a cached cv2 DNN network for *onnx_path*, loading it once.

        The previous implementation re-read the ONNX file on every frame,
        which dominated per-frame latency.
        """
        net = self._net_cache.get(onnx_path)
        if net is None:
            net = cv2.dnn.readNetFromONNX(onnx_path)
            net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
            net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
            self._net_cache[onnx_path] = net
        return net

    def _push_frame(self, image):
        """JPEG+base64 encode *image* and push it to the internal Redis hash.

        No-op unless ``cfg['frame_box_put_activate']`` is 1.  Replaces three
        identical copy-pasted stanzas in inf_from_frame.
        """
        if int(cfg['frame_box_put_activate']) == 1:
            retval, buffer = cv2.imencode('.jpg', image)
            jpg_as_text = base64.b64encode(buffer).decode()
            result = {
                "frame_detect_box": {},
                "jpg_as_text": jpg_as_text
            }
            self.redis.hset(self.res_task_key_name_frame, str(1), json.dumps(result))

    def letterbox(self, im, new_shape, color=(114, 114, 114), ):
        """Resize and pad *im* to *new_shape* preserving aspect ratio.

        Args:
            im: BGR image (H, W, C).
            new_shape: target size, either an int (square) or (h, w).
            color: padding color for the letterbox borders.

        Returns:
            (img, ratio, (dw, dh)) where ``img`` is a normalized float32
            NCHW RGB tensor ready for ``net.setInput``, ``ratio`` is the
            (w, h) scale factors, and (dw, dh) the per-side padding.
        """
        shape = im.shape[:2]  # current shape [height, width]
        if isinstance(new_shape, int):
            new_shape = (new_shape, new_shape)
        # Scale ratio (new / old)
        r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
        # Compute padding
        ratio = r, r  # width, height ratios
        new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
        dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
        dw /= 2  # divide padding into 2 sides
        dh /= 2

        if shape[::-1] != new_unpad:  # resize
            im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
        top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
        left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
        img = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
        img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
        img = np.ascontiguousarray(img)
        img = img.astype(np.float32, order='C')
        img = img / 255
        if len(img.shape) == 3:
            img = img[None]  # expand for batch dim

        return img, ratio, (dw, dh)

    def infer(self, frame, onnx_path, conf_thres, iou_thres):
        """Run the ONNX person detector on *frame* and apply NMS.

        Args:
            frame: BGR image as read from the camera.
            onnx_path: path to the YOLO-style ONNX weights.
            conf_thres: confidence threshold passed to NMSBoxes.
            iou_thres: IoU threshold passed to NMSBoxes.

        Returns:
            (class_ids, confidences, boxes) for detections surviving NMS;
            each box is [x1, y1, x2, y2] in original-frame pixel coordinates.
        """
        sub_image = frame.copy()
        net = self._get_net(onnx_path)  # was re-loaded from disk every call
        img, ratio, shapes = self.letterbox(sub_image, [640, 640])  # padded resize
        net.setInput(img)
        pred = net.forward()

        confidences = []
        boxes = []
        class_ids = []
        boxes_num = pred.shape[1]
        data = pred[0]
        for i in range(boxes_num):
            da = data[i]  # [cx, cy, w, h, objectness, class scores...]
            confidence = da[4]
            # 0.1 is a hard-coded pre-filter; final filtering is NMS below.
            if confidence > 0.1:
                score = da[5:] * confidence
                _, _, _, max_score_index = cv2.minMaxLoc(score)
                max_cls_id = max_score_index[1]
                if score[max_cls_id] > 0.1:
                    confidences.append(confidence)
                    class_ids.append(max_cls_id)
                    x, y, w, h = da[0].item(), da[1].item(), da[2].item(), da[3].item()
                    gain = min(ratio)
                    # Center/size -> corner coordinates, then undo the
                    # letterbox padding (shapes) and scale (gain).
                    nx = int(x - w / 2.0)
                    ny = int(y - h / 2.0)
                    nw = int(x + w / 2.0)
                    nh = int(y + h / 2.0)
                    nx = int((nx - shapes[0]) / gain)
                    nw = int((nw - shapes[0]) / gain)
                    ny = int((ny - shapes[1]) / gain)
                    nh = int((nh - shapes[1]) / gain)
                    boxes.append(np.array([nx, ny, nw, nh]))

        # NOTE(review): cv2.dnn.NMSBoxes documents boxes as (x, y, w, h), but
        # these are corner-format (x1, y1, x2, y2) — behavior preserved from
        # the original; verify NMS quality against the model in use.
        indexes = cv2.dnn.NMSBoxes(boxes, confidences, conf_thres, iou_thres)

        res_ids = []
        res_confs = []
        res_boxes = []
        for i in indexes:
            res_ids.append(class_ids[i])
            res_confs.append(confidences[i])
            res_boxes.append(boxes[i])
        return res_ids, res_confs, res_boxes

    def inf_from_frame(self, frame, onnx_path, person_conf_thres, person_iou_thres):
        """Detect persons in *frame*; save, alert, and stream any ROI hits.

        For each person (class id 0) whose box falls inside the upper or
        lower gate ROI: draw it on a copy of the frame, save the annotated
        and raw images, push an alert JSON to Redis, and (optionally) push
        the current frame to the internal Redis stream.
        """
        flag = 0
        img = frame.copy()
        ids, confs, boxes = self.infer(img, onnx_path, person_conf_thres, person_iou_thres)
        if 0 in ids:  # class 0 == person
            write_current_time = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
            current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")

            if not os.path.exists(self.SaveDir):
                os.makedirs(self.SaveDir)
                log.logger.info("新建文件目录：{}".format(self.SaveDir))
            if not os.path.exists(self.SaveRaw):
                os.makedirs(self.SaveRaw)
                log.logger.info("新建文件目录：{}".format(self.SaveRaw))

            save_img_name = os.path.join(self.SaveDir, '行人检测-{}.jpg'.format(write_current_time))
            save_raw_name = os.path.join(self.SaveRaw, '行人检测-{}.jpg'.format(write_current_time))
            det_boxes = []
            for i in range(len(ids)):
                if ids[i] == 0:
                    # Box is [x1, y1, x2, y2]; test the left edge (x1) and
                    # bottom edge (y2) against each gate ROI.
                    in_up = (self.up_x1 <= boxes[i][0] <= self.up_x2
                             and self.up_y1 <= boxes[i][3] <= self.up_y2)
                    in_down = (self.down_x1 <= boxes[i][0] <= self.down_x2
                               and self.down_y1 <= boxes[i][3] <= self.down_y2)
                    if in_up or in_down:
                        x1, y1, x2, y2 = boxes[i][0], boxes[i][1], boxes[i][2], boxes[i][3]
                        cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
                        img = put_chinese_string(img, x2 + 3, y1 + 3, 'person')
                        # BUGFIX: the original did `det_boxes = det_boxes.append(...)`,
                        # which rebinds det_boxes to None and crashes on the
                        # second in-ROI detection.
                        det_boxes.append(boxes[i])
                        flag = 1
                        log.logger.info("检测到人")

            if flag == 1:
                # BUGFIX: write to the full save paths.  The old
                # `.split('/')[-1]` stripped the directory on POSIX, so files
                # landed in the CWD while the Redis alert reported SaveDir.
                cv2.imencode('.jpg', img)[1].tofile(save_img_name)
                cv2.imencode('.jpg', frame)[1].tofile(save_raw_name)
                json_obj_1 = {
                    "device": "上闸门",  # device name
                    "warnType": "闸门运行区检测",  # alert type
                    "warnName": "闸门上有行人",  # alert name
                    "warnTime": "{}".format(current_time),  # alert time
                    "imgUrl": "{}".format(save_img_name)  # image path
                }
                self.redis.set(cfg['Redis']['key'], json.dumps(json_obj_1, ensure_ascii=False))
                log.logger.info("redis,推送成功")
                self._push_frame(img)  # stream the annotated frame
            else:
                self._push_frame(frame)  # no ROI hit: stream the raw frame
        else:
            log.logger.info("未检测到行人")
            self._push_frame(frame)  # nothing detected: stream the raw frame
def main():
    """Continuously pull frames from the camera and run person detection."""
    detector = DetectPerson()
    while True:
        _stamp, frame = detector.hkcam.read()
        now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        log.logger.info("当前时间： {}".format(now))
        detector.inf_from_frame(frame,
                                detector.onnx_path,
                                detector.person_conf_thres,
                                detector.person_iou_thres)


# Script entry point: runs the detection loop indefinitely.
if __name__ == '__main__':
    main()
