#! /usr/bin/python3
# -*- coding: utf-8 -*-
"""
Created on 2020/1/14 8:49 星期二

@author: jyz
"""
import os

import time
import requests

import numpy as np

from config.config_setting import MODEL_PATH, GPU_MEMORY_LIMIT, logger, STREAM_SIZE, BITCH_SIZE
from tf_pose.networks import model_wh
from tf_pose.estimator import TfPoseEstimator
import tensorflow as tf
"""该文件内主要存放启动各模型需要用到的方法"""


# Model loading and inference.
# Algorithm id -> feature map: {hard hat: 10, reflective vest: 20, boundary crossing: 30,
# crowd gathering: 40, fall detection: 50, staff absence: 60, loitering: 70,
# bare-soil coverage: 80, flame detection: 90}
def tf_model(origin_img_q, result_img_q, algo_id, reqs):
    """Load a frozen TF1 object-detection graph and run batched inference forever.

    Round-robins over the per-device input queues, fills a batch of
    BITCH_SIZE frames, runs the detector, and pushes
    ``(image, (boxes, scores, classes, nums))`` onto each device's result
    queue. Never returns.

    Args:
        origin_img_q: dict mapping device id -> input Queue of BGR frames
            (assumed already resized to STREAM_SIZE — TODO confirm with producer).
        result_img_q: dict mapping device id -> output Queue.
        algo_id: key into MODEL_PATH selecting the frozen model directory.
        reqs: dict mapping device id -> per-algorithm config; an optional
            ``reqs[devid][algo_id]["timeList"][0]`` holds a schedule of
            ``{"startTime": "HH:MM", "endTime": "HH:MM"}`` entries.

    Raises:
        ValueError: if ``reqs`` is empty (the original code would die with an
            opaque IndexError instead).
    """
    load_start = time.time()

    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    # Cap per-process GPU memory so several model processes can share one card.
    tf_config = tf.ConfigProto(
        gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=GPU_MEMORY_LIMIT))

    # --- Load the frozen inference graph -------------------------------------
    ckpt_pwd = os.path.join(MODEL_PATH[algo_id], 'frozen_inference_graph.pb')
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(ckpt_pwd, 'rb') as fid:
            od_graph_def.ParseFromString(fid.read())
            tf.import_graph_def(od_graph_def, name='')

    # --- Run inference in an endless loop ------------------------------------
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph, config=tf_config) as sess:
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            num_detections = detection_graph.get_tensor_by_name('num_detections:0')
            detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
            detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
            detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
            logger.info("||| Loading TF model time:{}".format(time.time() - load_start))

            # Pre-allocated batch buffer: (batch, height, width, channels).
            shape = (BITCH_SIZE, STREAM_SIZE[1], STREAM_SIZE[0], 3)
            batch = np.ndarray(shape)

            dev_ids = list(reqs.keys())
            if not dev_ids:
                raise ValueError("tf_model started with no devices in `reqs`")
            dcnt = len(dev_ids)
            dev_idx = 0

            while True:
                bid = 0
                img_list = []
                # Fill one full batch, cycling through the devices round-robin.
                while bid < BITCH_SIZE:
                    devid = dev_ids[dev_idx]
                    dev_idx = (dev_idx + 1) % dcnt
                    if origin_img_q[devid].empty():
                        # Avoid a 100% CPU busy-spin when no camera has frames.
                        time.sleep(0.001)
                        continue
                    image = origin_img_q[devid].get()

                    # Optional schedule: only run this algorithm inside its
                    # configured time window; frames outside it are dropped.
                    algo_cfg = reqs[devid][algo_id]
                    timer = algo_cfg["timeList"][0] if "timeList" in algo_cfg else []
                    if timer:
                        timer_s, timer_e = timer[0]["startTime"], timer[0]["endTime"]
                        if not (timer_s <= time.strftime('%H:%M') <= timer_e):
                            # NOTE(review): out-of-window frames are dropped here,
                            # while fall_model forwards them raw — confirm intended.
                            continue

                    # Validate the frame (shared by scheduled and unscheduled paths).
                    if image is None:
                        continue
                    if min(image.shape[:2]) < 100:
                        # Suspiciously tiny frame — camera likely returned garbage.
                        logger.error("May the image of camera is None.")
                        continue

                    img_list.append((image, devid))
                    batch[bid, :, :, :] = image
                    bid += 1

                boxes, scores, classes, nums = sess.run(
                    [detection_boxes, detection_scores, detection_classes, num_detections],
                    feed_dict={image_tensor: batch})

                # img_list is exactly BITCH_SIZE long (bid only advances on append),
                # so index i lines up with batch row i.
                for i, (image, devid) in enumerate(img_list):
                    result_img_q[devid].put(
                        (image, (boxes[i], scores[i], classes[i], nums[i])))


# 摔倒检测的模型加载和推理
# Fall-detection model loading and inference.
def fall_model(origin_img_q, result_img_q, algo_id, timer):
    """Load the tf-pose estimator and run fall-detection inference forever.

    Downloads the frozen pose graph on first use, sizes the network input from
    the first frame, then loops: frames inside the configured time window (or
    all frames when no timer is set) are run through pose inference and pushed
    as ``(image, humans, elapsed_seconds)``; frames outside the window are
    forwarded raw.

    Args:
        origin_img_q: input Queue of BGR frames.
        result_img_q: output Queue.
        algo_id: key into MODEL_PATH selecting the model directory.
        timer: empty/None, or a 2-sequence ``("HH:MM", "HH:MM")`` start/end window.
    """
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"

    from tensorflow import ConfigProto, GPUOptions
    tf_config = ConfigProto(
        gpu_options=GPUOptions(per_process_gpu_memory_fraction=GPU_MEMORY_LIMIT))

    # Default input resolution for fall detection. Resize images before they are
    # processed; recommended: 432x368, 656x368 or 1312x736.
    resize = '640x368'
    w, h = model_wh(resize)

    # First frame is consumed only to size the network input (blocking get).
    image = origin_img_q.get()

    pb_path = os.path.join(MODEL_PATH[algo_id], 'graph_opt.pb')
    if not os.path.exists(pb_path):
        # Model missing locally — download it. Build the URL with '/' explicitly:
        # os.path.join would insert backslashes on Windows and discard the base
        # URL entirely if pb_path were absolute.
        model_url = ("https://whfciot.oss-cn-hangzhou.aliyuncs.com/ai_camera/"
                     + pb_path.replace(os.sep, '/').lstrip('/'))
        model_req = requests.get(model_url)
        # Don't save an HTTP error page to disk as if it were the model.
        model_req.raise_for_status()
        model_dir = os.path.dirname(pb_path)
        if model_dir:
            os.makedirs(model_dir, exist_ok=True)
        with open(pb_path, 'wb') as cod:
            cod.write(model_req.content)

    # Shrink the network input to the camera resolution when the camera is
    # smaller than the default (dimensions snapped down to multiples of 16).
    if image.shape[0] * image.shape[1] < w * h:
        w = image.shape[1] // 16 * 16
        h = image.shape[0] // 16 * 16

    e = TfPoseEstimator(pb_path, target_size=(w, h), trt_bool=False, tf_config=tf_config)

    while True:
        fps_time = time.time()
        if origin_img_q.qsize() == 0:
            continue
        image = origin_img_q.get()

        # Decide whether inference should run right now (no timer = always on).
        in_window = True
        if timer:
            timer_s, timer_e = timer[0], timer[1]  # extract start/end times
            in_window = timer_s <= time.strftime('%H:%M') <= timer_e

        if in_window:
            humans = e.inference(image, resize_to_default=(w > 0 and h > 0),
                                 upsample_size=4.0)
            result_img_q.put((image, humans, time.time() - fps_time))
        else:
            # Outside the scheduled window: pass the frame through untouched.
            result_img_q.put(image)
