#! /usr/bin/python3
# -*- coding: utf-8 -*-
"""
Created on 2019/12/31 14:29 星期二

@author: jyz
"""
import multiprocessing as mp
import os
import time

import numpy as np
import tensorflow as tf

from utils.polygon_judge import *

srp = SecondRecordPosition()
"""
脱岗检测
1 利用已有模型检测出人框
2 如果摄像头里的人框个数少于设定的阈值，即开始计时
3 一旦超过时间阈值t，则提示脱岗
人框个数的阈值和计时的时间阈值，由客户传入。
在视频帧里画出待检测区域的框，根据框来检测
"""

# class offsiteDetection(object):
    # 数据输入
# def queue_img_put(q_put, video_path):
#     cap = cv2.VideoCapture(video_path)
#     if cap.isOpened():
#         print('摄像头读取成功')
#     while not cap.isOpened():
#         print('摄像头读取失败，重新获取摄像头...')
#         cap = cv2.VideoCapture(video_path)
#     fps_count = 0
#     while cap.isOpened():
#         fps_count = fps_count + 1
#         if fps_count % 10 == 0:
#             ret, frame = cap.read()
#             if ret:
#                 q_put.put(frame)
#                 q_put.get() if q_put.qsize() > 1 else None
#     # 释放cap
#     cap.release()

# 模型加载和推理
# def tf_model(origin_img_q, result_img_q, model_path, gpu_memory_limit):
#     # gpu_limit_rate = gpu_memory_limit / int(os.popen('nvidia-smi').readlines()[8].split('/')[2].split('MiB')[0])
#
#     # 加载模型
#     timer = time.time()
#     '''loading the TensorFlow model'''
#     ckpt_pwd = os.path.join(model_path, 'frozen_inference_graph.pb')
#     detection_graph = tf.Graph()
#     with detection_graph.as_default():
#         od_graph_def = tf.GraphDef()
#         with tf.gfile.GFile(ckpt_pwd, 'rb') as fid:
#             serialized_graph = fid.read()
#             od_graph_def.ParseFromString(serialized_graph)
#             tf.import_graph_def(od_graph_def, name='')
#
#     '''run the TensorFlow model'''
#     with detection_graph.as_default():
#         '''limit the GPU Memory'''
#         # tf_config = tf.ConfigProto(gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=gpu_limit_rate))
#         tf_config = tf.ConfigProto(allow_soft_placement=True)
#         with tf.Session(graph=detection_graph, config=tf_config) as sess:
#             image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
#             num_detections = detection_graph.get_tensor_by_name('num_detections:0')
#             detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
#             detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
#             detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
#             print("||| Loading TF model time:", time.time() - timer)
#             while True:
#                 if origin_img_q.qsize() == 0:
#                     time.sleep(0.1)
#                 image = origin_img_q.get()
#                 if image is not None:
#                     img = image.copy()
#                     img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
#                     (boxes, scores, label_id, nums) = sess.run(
#                         [detection_boxes, detection_scores, detection_classes, num_detections],
#                         feed_dict={image_tensor: img[np.newaxis, :, :, :]},
#                     )
#                     result_img_q.put((image, boxes, scores, label_id, nums))

# 处理输出数据


def deal_with_tf_output_data(boxes, scores, label_id, label_dict,
                             score_threshold=0.7, target_label='person'):
    """Filter raw TF detection output down to confident boxes of one class.

    Parameters
    ----------
    boxes, scores, label_id : array-like
        Raw tensors from the detection graph; a leading batch axis of 1
        is squeezed away.
    label_dict : dict
        Maps integer class id -> display name (parsed from the .pbtxt
        label map).
    score_threshold : float, optional
        Minimum confidence for a detection to be kept (default 0.7,
        matching the previous hard-coded value).
    target_label : str, optional
        Class name to keep (default 'person').

    Returns
    -------
    numpy.ndarray (dtype=object) of [box, score, label] rows sorted by
    ascending score, or an empty list when nothing passes the filter.
    """
    boxes = np.squeeze(boxes)
    scores = np.squeeze(scores)
    label_id = np.squeeze(label_id).astype(np.int32)
    # .get() instead of [] so a class id missing from the label map is
    # skipped rather than raising KeyError (and the label is only looked
    # up, never assumed present).
    box_info = [
        [box, score, target_label]
        for box, score, idx in zip(boxes, scores, label_id)
        if score > score_threshold and label_dict.get(idx) == target_label
    ]
    # dtype=object is required: each row mixes an ndarray, a float and a
    # string — a ragged sequence that recent NumPy (>=1.24) refuses to
    # auto-box into a plain array.
    box_info_np = np.array(box_info, dtype=object)
    if len(box_info_np.shape) > 1:
        # Sort rows by the score column, ascending (original behavior).
        return box_info_np[np.argsort(box_info_np[:, 1], axis=0)]
    return []


def _load_label_dict(model_path):
    """Parse the first .pbtxt label map found in *model_path* into {id: name}.

    Relies on the label-map text layout: indented 'id: N' and
    'display_name: "..."' lines (the index > 0 test on find() implies the
    key is indented); the slice [2:-2] strips the leading space/quote and
    the trailing quote/newline.
    """
    pbtxt_name = [f for f in os.listdir(model_path) if f.find('.pbtxt') >= 0][0]
    with open(os.path.join(model_path, pbtxt_name), 'r') as f:
        lines = f.readlines()
    ids = [int(line.split(':')[-1]) for line in lines if line.find('id:') > 0]
    names = [line.split(':')[-1][2:-2] for line in lines if line.find('display_name:') > 0]
    return dict(zip(ids, names))


def queue_img_get(q_get, model_path, camera_ip, personNum, time_threshold):
    """Consume detection results and warn when the post appears deserted.

    Pulls (frame, boxes, scores, label_id, nums) tuples from *q_get*,
    keeps only the confident person boxes, and asks the module-level
    SecondRecordPosition instance whether fewer than *personNum* people
    are visible for *camera_ip*. Once the shortage has persisted longer
    than time_threshold * 5 seconds a warning is printed.
    NOTE(review): the *5 factor is unexplained — confirm with the
    requirement owner. Runs forever; intended as a multiprocessing
    worker body and returns nothing.
    """
    time_wait = False          # True while a head-count shortage is being timed
    time_threshold_start = 0   # epoch second at which the shortage began
    label_dict = _load_label_dict(model_path)
    while True:
        # A plain blocking get() replaces the old qsize()/sleep(0.1)
        # busy-wait: multiprocessing.Queue.qsize() raises
        # NotImplementedError on macOS, and get() already blocks until an
        # item arrives.
        img, boxes, scores, label_id, nums = q_get.get()
        box_info_np = deal_with_tf_output_data(boxes, scores, label_id, label_dict)
        # NOTE(review): when no person box passes the filter the timer
        # state is deliberately left untouched, matching the original
        # behavior — confirm this is intended for a fully empty scene.
        if len(box_info_np) >= 1:
            # show_person_box presumably returns True when the visible
            # head-count inside the monitored region dropped below
            # personNum — verify against utils.polygon_judge.
            t1 = time.time()
            calculate_res = srp.show_person_box(box_info_np, img, camera_ip, personNum)
            print('t' * 9, time.time() - t1)
            if calculate_res:
                if time_wait:
                    time.sleep(1)  # throttle the warning check to ~1 Hz
                    if time_threshold_start + time_threshold * 5 < int(time.time()):
                        print('疑似出现脱岗行为，请注意。')
                else:
                    # Shortage just started: arm the timer.
                    time_wait = True
                    time_threshold_start = int(time.time())
            else:
                # Head-count recovered: disarm the timer.
                time_wait = False

# def detection(camara_ip, pps, personNum, time_threshold):
#         cwd = os.path.dirname(os.getcwd())
#         model_path = os.path.join(cwd, 'model', 'offsite_model')
#         gpu_memory_limit = 2048
#         original_queue = [mp.Queue(maxsize=10) for _ in range(len(camara_ip))]
#         result_queue = [mp.Queue(maxsize=10) for _ in range(len(camara_ip))]
#         processes = []
#         for ori_q, res_q, ip, pp in zip(original_queue, result_queue, camara_ip, pps):
#             processes.append(mp.Process(target=queue_img_put, args=(ori_q, ip)))
#             processes.append(mp.Process(target=tf_model, args=(ori_q, res_q, model_path, gpu_memory_limit)))
#             processes.append(
#                 mp.Process(target=queue_img_get, args=(res_q, model_path, ip, pp, personNum, time_threshold)))
#         for process in processes:
#             process.daemon = True
#             process.start()
#         for process in processes:
#             process.join()


# if __name__ == '__main__':
#     pass
