
import time
import multiprocessing as mp
import base64
import numpy
from PIL import Image, ImageDraw, ImageFont
import datetime
import threading
from PIL import Image
import cv2
import traceback
import numpy as np
import requests
import json
import os
import copy
import cv2
import onnx
import vision.utils.box_utils_numpy as box_utils
from caffe2.python.onnx import backend
import dlib
# onnx runtime
import onnxruntime as ort
from imutils import face_utils
from decimal import Decimal



# ---- Service endpoints ----
#url = 'http://60.205.219.151:25879'
#url = 'http://10.142.156.167:17107'
url = 'http://127.0.0.1:17104'  # face-recognition backend base URL
url_api = 'http://127.0.0.1:17106'  # check-in / log-collection API base URL
#url_api = 'http://10.142.156.167:17106'
#url = 'http://127.0.0.1:5000/'

font_path = "./font/simsun.ttc"  # CJK-capable font used for on-screen text
label_path = "models/voc-model-labels.txt"  # class labels for the detector

onnx_path = "models/onnx/version-RFB-640.onnx"  # RFB-640 face detection model
class_names = [name.strip() for name in open(label_path).readlines()]

# Load and sanity-check the ONNX detector, then prepare a caffe2 backend.
# NOTE(review): inference below actually uses onnxruntime (`ort_session`);
# the caffe2 `predictor` appears unused at runtime — confirm before removing.
predictor = onnx.load(onnx_path)
onnx.checker.check_model(predictor)
onnx.helper.printable_graph(predictor.graph)
predictor = backend.prepare(predictor, device="CPU")  # default CPU

# onnxruntime session actually used for detection in image_put().
ort_session = ort.InferenceSession(onnx_path)
input_name = ort_session.get_inputs()[0].name


#shape_predictor = dlib.shape_predictor('shape_predictor_5_face_landmarks.dat')
#fa = face_utils.facealigner.FaceAligner(shape_predictor, desiredFaceWidth=112, desiredLeftEye=(0.3, 0.3))

is_default = 0
threshold = 0.7  # detection confidence threshold

min_headsize = 80  # minimum face crop size (pixels)
rate_head = 1.2 # face crop aspect-ratio limit (only referenced in a comment below)
sum_count  =0  # running total of detected boxes

def predict(width, height, confidences, boxes, prob_threshold, iou_threshold=0.3, top_k=-1):
    """Post-process raw detector outputs into final detections.

    Takes batched `confidences` / `boxes` (batch of 1, normalized coords),
    filters each non-background class by `prob_threshold`, applies hard NMS,
    and scales boxes back to pixel coordinates.

    Returns (boxes_int32[N,4], labels[N], probs[N]); three empty arrays
    when nothing survives the threshold.
    """
    boxes = boxes[0]
    confidences = confidences[0]
    kept_box_probs = []
    kept_labels = []
    # Class 0 is background, so start from 1.
    for cls in range(1, confidences.shape[1]):
        cls_probs = confidences[:, cls]
        keep = cls_probs > prob_threshold
        cls_probs = cls_probs[keep]
        if cls_probs.shape[0] == 0:
            continue
        candidates = np.concatenate([boxes[keep, :], cls_probs.reshape(-1, 1)], axis=1)
        candidates = box_utils.hard_nms(candidates,
                                        iou_threshold=iou_threshold,
                                        top_k=top_k,
                                        )
        kept_box_probs.append(candidates)
        kept_labels.extend([cls] * candidates.shape[0])
    if not kept_box_probs:
        return np.array([]), np.array([]), np.array([])
    all_box_probs = np.concatenate(kept_box_probs)
    # Scale normalized x coords by width, y coords by height.
    all_box_probs[:, [0, 2]] *= width
    all_box_probs[:, [1, 3]] *= height
    return all_box_probs[:, :4].astype(np.int32), np.array(kept_labels), all_box_probs[:, 4]
#
#

head_size = 90 # width/height of each head thumbnail (pixels)
is_running = True # run flag shared by the capture and recognition loops

# Draw (possibly CJK) text onto an OpenCV image via PIL.
def cv2ImgAddText(img, text, left, top, textColor=(0, 255, 0), textSize=20):
    """Render `text` at (left, top) on `img` and return a BGR numpy image.

    Accepts either an OpenCV BGR ndarray or a PIL image; uses the module
    `font_path` font so Chinese glyphs render correctly.
    """
    if isinstance(img, numpy.ndarray):  # OpenCV image -> PIL (BGR to RGB)
        img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    drawer = ImageDraw.Draw(img)
    style = ImageFont.truetype(
        font_path, textSize, encoding="utf-8")
    drawer.text((left, top), text, textColor, font=style)
    # Hand back an OpenCV-compatible BGR array.
    return cv2.cvtColor(numpy.asarray(img), cv2.COLOR_RGB2BGR)

# Queue class that keeps the most recent recognized heads for display.
class naoke:
    """Fixed-size FIFO of head crops plus parallel metadata lists.

    Parallel lists (`names`, `user_ids`, `heads`, `times`) are kept in
    sync, newest entry at index 0. Also tracks per-user throttling for
    on-screen refresh and check-in submission.
    """

    def __init__(self):
        self.names = []  # display names, newest first
        self.user_ids = []  # user ids, parallel to names
        self.image_features = []  # face feature vectors, used for de-duplication

        self.heads = []  # head-crop images (head_size x head_size)
        self.times = []  # timestamp strings shown next to each head
        self.heads_max = 8  # max entries kept for display
        self.count = 0  # detected-people counter shown on the dashboard
        self.image_feature_max = 10  # max stored feature vectors

        self.user_in_time_dict = {}  # user_id -> last time shown on screen
        self.user_clockin_time_dict = {}  # user_id -> last time a check-in was submitted

    def add_head(self, frame, name='unknow', time_s=None, image_feature=None, user_id=None, pinyin_name=None):
        """Insert a new head at the front of the queue and optionally
        submit a check-in to the log API.

        frame          image to resize into a thumbnail (also posted on check-in)
        time_s         'no' yields the '--:--:--' placeholder, otherwise now()
        image_feature  guest feature vector; near-duplicates (< 0.7 L2) are dropped
        user_id        known users are throttled (5 s display, 5 min check-in)
        pinyin_name    non-None triggers the check-in POST ('' for guests)
        """
        need_add_face_array = True
        if user_id is not None:
            # Throttle: skip users already shown within the last 5 seconds.
            user_in_time = self.user_in_time_dict.get(user_id)
            if user_in_time is not None:
                if (time.time() - user_in_time) < 5:
                    return
            self.user_in_time_dict[user_id] = time.time()

            # If already queued, move the user's entry to the front instead
            # of adding a duplicate.
            if user_id in self.user_ids:
                idx = self.user_ids.index(user_id)  # look up the index once
                del self.names[idx]
                del self.heads[idx]
                # image_features intentionally untouched here (guest-only data)
                del self.times[idx]
                del self.user_ids[idx]
                self.heads.insert(0, cv2.resize(frame, (head_size, head_size)))
                self.names.insert(0, name)
                self.times.insert(0, time_s)
                self.user_ids.insert(0, user_id)
                need_add_face_array = False

        if image_feature is not None:
            # Drop features too close to a stored one (likely the same person).
            min_dist = 100
            for feat in self.image_features:
                if feat is not None:
                    min_dist = min(min_dist, (sum((np.array(feat) - np.array(image_feature)) ** 2)) ** 0.5)
            if not (min_dist > 0.7 or min_dist == 100):
                return

        if need_add_face_array:
            # Push the new entry at the front; trim every parallel list to heads_max.
            self.heads.insert(0, cv2.resize(frame, (head_size, head_size)))
            if len(self.heads) > self.heads_max:
                self.heads.pop()

            if time_s == 'no':
                time_s = '--:--:--'  # placeholder entries show no time
            else:
                time_s = datetime.datetime.now().strftime('%H:%M:%S')
            self.times.insert(0, time_s)
            if len(self.times) > self.heads_max:
                self.times.pop()

            self.names.insert(0, name)
            if len(self.names) > self.heads_max:
                self.names.pop()

            self.user_ids.insert(0, user_id)
            if len(self.user_ids) > self.heads_max:
                self.user_ids.pop()

            # Guests: remember the feature vector for future de-duplication.
            if image_feature is not None:
                self.image_features.insert(0, image_feature)
                if len(self.image_features) > self.image_feature_max:
                    self.image_features.pop()

        if pinyin_name is not None:  # guests pass '' here, which still submits
            need_submit = True
            if user_id is not None:  # known user: only re-submit after 5 minutes
                user_in_time = self.user_clockin_time_dict.get(user_id)
                if user_in_time is not None:
                    if (time.time() - user_in_time) < 5 * 60:
                        need_submit = False

            if need_submit:
                if user_id is not None:
                    self.user_clockin_time_dict[user_id] = time.time()
                try:
                    ### Call the log-collection (check-in) API
                    url_path = url_api + '/checkin'

                    file = {"file": ("file_name.jpg", cv2.imencode(".jpg", frame)[1].tobytes(), "image/jpg")}
                    requests.post(url_path, files=file, data={'pinyinName': pinyin_name})
                except Exception:
                    pass  # best-effort: network failure must not break the UI loop

    def get_head(self):
        """Remove and return the oldest head image."""
        return self.heads.pop()

    def clear(self):
        """Reset the queue contents (counter and throttling dicts are kept)."""
        self.names = []
        self.user_ids = []  # fix: was not reset, leaving parallel lists out of sync
        self.image_features = []

        self.heads = []
        self.times = []

    def counts(self):
        """Increment the detected-people counter."""
        self.count += 1

    def setCounts(self, count):
        """Overwrite the detected-people counter."""
        self.count = count

def write_pil(name):
    """Render `name` as white text on a black strip and cache it as
    ./faces_db/names/<name>.jpg (consumed by draw_name_img).

    Fix: removed leftover debug timing code (`t1` / `print`) that spammed
    stdout on every cache miss.
    """
    write_height = 35
    write_width = len(name) * 35  # roughly 35 px per character
    b, g, r, a = 255, 255, 255, 0  # white text
    im = np.zeros((write_height, write_width, 3), 'uint8')

    # simsun.ttc supplies the CJK glyphs.
    font = ImageFont.truetype(font_path, 32)

    img_pil = Image.fromarray(im)
    draw = ImageDraw.Draw(img_pil)
    draw.text((1, 1), name, font=font, fill=(b, g, r, a))
    img = np.array(img_pil)

    # imencode + tofile instead of imwrite so non-ASCII file names work.
    cv2.imencode('.jpg', img)[1].tofile('./faces_db/names/' + name + ".jpg")

# Draw a pre-rendered name image onto the background.
def draw_name_img(back, name, x, y, need_ajust = False, x_padding=0, y_padding=0):
    """Composite the cached name image (./faces_db/names/<name>.jpg) onto
    `back` at (x, y), rendering and caching it first if missing.

    need_ajust: shift wide (3+ character) names 10 px left.
    Returns `back` (also mutated in place).
    """
    # name = '???????'
    # print('x', x, 'y', y, 'back', back.shape)
    t1 = time.time()
    face_path = './faces_db/names/' + name + '.jpg'
    try:
        if not os.path.exists(face_path):
            write_pil(name)  # render and cache the name image on first use
    except Exception as e:
        pass  # best-effort; imdecode below will fail if the file is still missing
        # return cv2.putText(back, name, (x, y), font, 1, (255, 255, 255), 2)
    x += x_padding
    y += y_padding
    # print('draw_name_img', face_path)
    # front = cv2.imread(face_path)
    # imdecode(np.fromfile(...)) instead of imread so non-ASCII paths load
    front = cv2.imdecode(np.fromfile(str(face_path), dtype=np.uint8), -1)
    front = cv2.cvtColor(front, cv2.COLOR_BGR2GRAY)
    front_tem = front / 255  # normalized text brightness (0..1)
    front_tem = 1 - front_tem  # inverted: ~0 on text pixels, ~1 on background
    # print(front.shape)
    h, w = front.shape
    if need_ajust and w > 70 : # three characters or wider: nudge left
        x -= 10
    tem = back[y: y + h, x: x + w]  # background region to draw into
    tem_h, tem_w = tem.shape[0], tem.shape[1]
    if h > tem_h or w > tem_w:  # clip when the name overhangs the background
        front_tem = front_tem[:tem_h, :tem_w]
    # Per channel: darken where text pixels sit, then add the text brightness.
    # NOTE(review): the += is uint8 arithmetic, so very bright overlaps wrap
    # around instead of saturating — appears tolerated by the UI.
    for i in range(3):
        tem[:, :, i] = front_tem * tem[:, :, i]
        tem[:, :, i] += front[:tem_h, :tem_w]
    back[y: y + h, x: x + w] = tem
    # print(time.time()-t1)
    return back

# Draw one head entry (thumbnail + time + name) onto the dashboard.
def head_plot_now(head, background, date_time='2200', name_='unknow0', phase=0, startpoint=(0, 0), inter=80, vertical=True):
    """Paste one head thumbnail plus its timestamp and name at slot `phase`.

    vertical=True  -> right-hand column layout: slots 0-3 in the first
                     column, slots 4-7 shifted 266 px right.
    vertical=False -> bottom-row layout: head resized to 125x125 and
                     alpha-blended with the round `mask_2`.
    `inter` is the pixel distance between consecutive slots.
    Returns the modified background (also mutated in place).
    """
    w, h = head.shape[0], head.shape[1]  # NOTE: in this file w = rows, h = cols
    if vertical == True:

        if phase <= 3:
            # background[
            # startpoint[0] - 23 + inter * phase:startpoint[0] - 23 + head_under_right.shape[0] + inter * phase,
            # startpoint[1] - 16:startpoint[1] - 16 + head_under_right.shape[1]] = head_under_right
            # tem = background[(startpoint[0] + inter * phase):(startpoint[0] + head.shape[0] + inter * phase),
            #       startpoint[1]:startpoint[1] + head.shape[0]]
            # for i in range(tem.shape[2]):
            #     tem[:, :, i] = mask * tem[:, :, i]
            # for i in range(head.shape[-1]):
            #     head[:, :, i] = (1 - mask) * head[:, :, i]
            # tem = cv2.add(head, tem)
            # First column: paste the thumbnail, then the time and name beside it.
            background[startpoint[0] + inter * phase:startpoint[0] + w + inter * phase,
            startpoint[1]:startpoint[1] + w] = head
            background = cv2.putText(background, date_time,
                                     (startpoint[1] + int(h * 1.2),startpoint[0] + phase * inter + int(h / 3 * 1)),font,0.7, (255, 255, 255),2)
            name_x = startpoint[1] + int(h * 1.2)
            name_y = startpoint[0] + phase * inter + int(h / 3 * 2)
            background = draw_name_img(background, name_, name_x, name_y)
            # background = cv2.putText(background, name_,
            #                          (startpoint[1] + int(h * 1.2),
            #                            startpoint[0] + phase * inter + int(h / 3 * 2)),font,0.8, (255, 255, 255),2)
        else:
            # background[
            # startpoint[0] - 23 + inter * (phase - 4):startpoint[0] - 23 + head_under_right.shape[0] + inter * (
            # phase - 4),
            # startpoint[1] + 250:startpoint[1] + 250 + head_under_right.shape[1]] = head_under_right
            # tem = background[(startpoint[0] + inter * (phase - 4)):(startpoint[0] + head.shape[0] + inter * (phase - 4)),
            #       startpoint[1] + 266:startpoint[1] + head.shape[0] + 266]
            # for i in range(tem.shape[2]):
            #     tem[:, :, i] = mask * tem[:, :, i]
            # for i in range(head.shape[-1]):
            #     head[:, :, i] = ( 1 - mask) * head[:, :, i]
            # tem = cv2.add(head, tem)
            # Second column: same layout shifted 266 px right, slots 4-7.
            background[startpoint[0] + inter * (phase - 4):startpoint[0] + w + inter * (phase - 4),
            startpoint[1] + 266:startpoint[1] + w + 266] = head
            background = cv2.putText(background, date_time,
                                       (startpoint[1] + int(h * 1.2) + 266,startpoint[0] + (phase - 4) * inter + int(h / 3 * 1)),font,0.7,(255, 255, 255),2)
            # background = cv2ImgAddText(background, name_,
            #                            startpoint[1] + h * 1.2 + 266,
            #                            startpoint[0] + (phase - 4) * inter + int(h / 3 * 2),
            #                            (255, 255, 255), int(w / 4))
            name_x = startpoint[1] + int(h * 1.2)+266
            name_y = startpoint[0] + (phase - 4) * inter + int(h / 3 * 2)
            background = draw_name_img(background, name_, name_x, name_y)
            # background = cv2.putText(background, name_,
            #                          (startpoint[1] + int(h * 1.2)+266,
            #                           startpoint[0] + (phase - 4) * inter + int(h / 3 * 2)), font, 0.8, (255, 255, 255),2)
    else:
        # background[startpoint[0] - 16:startpoint[0] - 16 + head_under_bot.shape[0],
        # startpoint[1] - 16 + inter * phase:startpoint[1] - 16 + head_under_bot.shape[
        #     1] + inter * phase] = head_under_bot

        # Bottom row: fixed 125x125 thumbnails blended with the round mask.
        head = cv2.resize(head, (125, 125))
        w ,h =125,125

        tem = background[(startpoint[0]):(startpoint[0] + head.shape[1]),
              startpoint[1] + inter * phase:startpoint[1] + head.shape[1] + inter * phase]
        # mask_2 keeps the background outside the circle, (1 - mask_2) keeps the head inside.
        for i in range(tem.shape[2]):
            tem[:, :, i] = mask_2 * tem[:, :, i]
        for i in range(head.shape[-1]):
            head[:, :, i] = (1 - mask_2) * head[:, :, i]
        tem = cv2.add(head, tem)
        background[(startpoint[0]):(startpoint[0] + h),
        startpoint[1] + inter * phase:startpoint[1] + h + inter * phase] = tem
        # background = cv2ImgAddText(background, date_time,
        #                            startpoint[1] + phase * inter + int(w / 10),
        #                            startpoint[0] + int(w / 7 * 5), (255, 255, 255), int(w / 5))
        # background = cv2ImgAddText(background, name_,
        #                            startpoint[1] + phase * inter + int(w / 10),
        #                            startpoint[0] + int(w), (255, 255, 255), int(w / 4))
        # background[startpoint[0] + inter * phase:startpoint[0] + w + inter * phase,
        # startpoint[1]:startpoint[1] + w] = head
        background = cv2.putText(background, date_time,
                                 (startpoint[1] + phase * inter + int(w / 10*0), startpoint[0] + int(w*9/10)), font,
                                 0.7, (255, 255, 255),2)
        x_padding = 20
        y_padding = -20
        name_x = startpoint[1] + phase * inter
        name_y = startpoint[0] + int(w*6/5)
        background = draw_name_img(background, name_, name_x, name_y, need_ajust=True, x_padding=x_padding, y_padding=y_padding)
        # background = cv2.putText(background, name_,
        #                          (startpoint[1] + phase * inter,
        #                           startpoint[0] + int(w*6/5)), font, 1, (255, 255, 255),2)
    return background

# Draw the visitor-count widget onto the dashboard background.
def count_plot(background, front, startpoint=(30, 1660), nums=0):
    """Stamp `nums` onto the counter widget `front`, then paste the widget
    onto `background` at `startpoint` (row, col). Returns the background.
    """
    slot = 0
    # front = cv2ImgAddText(front, str(nums[i]), 140 + 180 * i, 20, (255, 255, 255), 30)
    front = cv2.putText(front, str(nums), (140 + 180 * slot, 40),
                        font, 1, (255, 255, 255), 2)
    row0, col0 = startpoint
    background[row0:row0 + front.shape[0],
               col0:col0 + front.shape[1]] = front

    return background


font = cv2.FONT_HERSHEY_SIMPLEX
# NOTE: throughout this file `w` is the vertical (row) extent, `h` the horizontal one
mask_2 = cv2.imread('imgs/mask_2.png')  # round mask for the bottom-row heads
mask = cv2.imread('imgs/mask_1.png')  # round mask (currently only used by commented-out code)
mask = cv2.resize(mask, (head_size, head_size))
mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
ret, mask = cv2.threshold(mask, 150, 255, cv2.THRESH_BINARY)

mask_2 = cv2.cvtColor(mask_2, cv2.COLOR_BGR2GRAY)
mask = mask / 255  # normalize both masks to 0..1 blending weights
mask_2 = cv2.resize(mask_2, (125, 125))
mask_2 = mask_2 / 255
naoke1 = cv2.imread('imgs/naoke1.jpg')  # default placeholder head image
queue_mask = naoke() # staff queue
queue_nomask = naoke() # all-people (guest) queue
# queue_post = naoke()

back = cv2.imread('imgs/background.png')
font = cv2.FONT_HERSHEY_SIMPLEX
w,h =back.shape[0],back.shape[1]# w is vertical, h is horizontal
head_under_right = cv2.imread('imgs/head1.png')
head_under_right = cv2.resize(head_under_right,
                              (int(head_under_right.shape[1]/2),int(head_under_right.shape[0]/2)))
# print(head_under_right.shape)

head_under_bot = cv2.imread('imgs/head2.png')
head_under_bot = cv2.resize(head_under_bot,
                              (int(head_under_bot.shape[1]/2),int(head_under_bot.shape[0]/2)))
# The video-input while loop lives in image_put() below
front= cv2.imread('imgs/f.jpg')

front= cv2.resize(front,(1150,640))

# Pre-fill both display queues with placeholder heads
for i in range(8):
    queue_mask.add_head(naoke1, name='unknow', time_s='no',image_feature=None, user_id=None);

    queue_nomask.add_head(naoke1, name='unknow', time_s='no',image_feature=None, user_id=None);


# Camera capture + UI rendering loop (runs in its own process)
def image_put(queue_head, queue_result,queue_mask, queue_nomask,is_default=0):
    """Grab webcam frames, run the ONNX face detector, render the dashboard
    (video feed + staff/guest head queues + counter), push frames with
    faces to `queue_head` for the recognition worker, and consume
    recognition results arriving on `queue_result`.
    """
    if is_default == 1:
        print('--------------------------------------------------------------')
        # queue_post.clear()
        is_default =0


    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
    t1 = time.time()
    people_in_time = time.time() # time somebody was last present
    result_nums = [0, 0, 0]
    # screen_rate = 1.75  # screen scaling factor
    screen_rate = 2.25
    refresh_detect_people_num_gap = 5 # seconds between detected-people count refreshes
    refresh_detect_people_num_time = time.time()
    global is_running
    while is_running:
        global sum_count
        start_fps = time.time()
        # Drain recognition results and update the display queues
        try:
            if not queue_result.empty():
                while not queue_result.empty():
                    type_name, result_data = queue_result.get()
                    if type_name == 'face_head':
                        face_i, name, user_id, pinyin_name = result_data
                        queue_mask.add_head(face_i, name, datetime.datetime.now().strftime('%H:%M:%S'), user_id=user_id, pinyin_name=pinyin_name)
                    elif type_name == 'guest_head':
                        face_i, name, image_feature = result_data
                        queue_nomask.add_head(face_i, name, datetime.datetime.now().strftime('%H:%M:%S'), image_feature=image_feature, pinyin_name='')
                    # elif type_name == 'people_count':
                    #     queue_mask.setCounts(result_data[0])

            # img_anniu = cv2.imread('imgs/anniu.png')      ## staff count widget
            img_anniu = cv2.imread('imgs/anniu1.png')       ## live detected-people widget
            img_anniu = cv2.resize(img_anniu, (int(img_anniu.shape[1] / 2), int(img_anniu.shape[0] / 2)))

            back = cv2.imread('imgs/background.png')
            back = count_plot(back, img_anniu, nums = queue_mask.count)
            # print(queue_mask.count)
            ret, orig_image = cap.read()
            if orig_image is None:
                print("no img")
                break
            # Preprocess for the detector: RGB, 640x480, mean/128 normalize, NCHW float32
            image = cv2.cvtColor(orig_image, cv2.COLOR_BGR2RGB)
            # print(image.shape)
            # image = cv2.resize(image, (320, 240))
            image = cv2.resize(image, (640, 480))
            image_mean = np.array([127, 127, 127])
            image = (image - image_mean) / 128
            image = np.transpose(image, [2, 0, 1])
            image = np.expand_dims(image, axis=0)
            image = image.astype(np.float32)
            # confidences, boxes = predictor.run(image)
            time_time = time.time()
            confidences, boxes = ort_session.run(None, {input_name: image})
            # print("cost time:{}".format(time.time() - time_time))
            boxes, labels, probs = predict(orig_image.shape[1], orig_image.shape[0], confidences, boxes, threshold)

            # Draw both head queues (right column: staff; bottom row: guests)
            for i in range(8):
                back = head_plot_now(queue_mask.heads[i], back, date_time=queue_mask.times[i],
                                     name_=queue_mask.names[i],
                                     phase=i, startpoint=(170, 1350), inter=160)

                back = head_plot_now(queue_nomask.heads[i], back, date_time=queue_nomask.times[i],
                                     name_=queue_nomask.names[i],
                                     phase=i, startpoint=(860, 300), inter=200, vertical=False)
            # Keep only the newest frame in the recognition queue
            try:
                if not queue_head.empty() and queue_head.full():
                    queue_head.get_nowait()
            except Exception as e1:
                pass
            orig_image_no_retangle = copy.copy(orig_image)
            img_list = []
            #gray = cv2.cvtColor(orig_image, cv2.COLOR_BGR2GRAY)
            for i in range(boxes.shape[0]):

                box = boxes[i, :]
                x1, y1, x2, y2 = box
                # print("box1:")
                # print(box)
                box_w, box_h= min(1280,box[2])-max(0,box[0]), min(720,box[3])-max(0,box[1])
                if box_h>min_headsize and box_w>min_headsize : # size limit; aspect check disabled: and (1/rate_head) <(box_w/box_h) <rate_head
                    #aligned_face = fa.align(orig_image, gray, dlib.rectangle(left = max(0,box[0]), top=max(0,box[1]), right=min(1280,box[2]), bottom=min(720,box[3])))
                    #head_img = orig_image_no_retangle[max(0,box[1]):min(720,box[3]),max(0,box[0]):min(1280,box[2])]
                    #img_list.append(head_img)
                    img_list.append(orig_image_no_retangle)
                    #img_list.append(aligned_face)
                # print('box3',box[3])
                # print('box2',box[2])
                # print('heads numbers:',len(queue_post.heads))
                # label = f"{class_names[labels[i]]}: {probs[i]:.2f}"

                cv2.rectangle(orig_image, (box[0], box[1]), (box[2], box[3]), (255, 255, 0), 4)

                # cv2.putText(orig_image, label,
                #             (box[0] + 20, box[1] + 40),
                #             cv2.FONT_HERSHEY_SIMPLEX,
                #             1,  # font scale
                #             (255, 0, 255),
                #             2)  # line type
            # Push face images to the queue; the other process does the recognition
            print('img_list len', len(img_list))
            if time.time() - refresh_detect_people_num_time > refresh_detect_people_num_gap:
                refresh_detect_people_num_time = time.time()
                queue_mask.setCounts(len(img_list))
            try:
                if len(img_list) > 0:
                    # The whole frame is sent to the backend now, so the first entry suffices
                    queue_head.put_nowait([img_list[0]])
            except Exception as e1:
                pass
            sum_count += boxes.shape[0]
            orig_image = cv2.resize(orig_image, (1152,648))
            back[130:130 + orig_image.shape[0], 104:104 + orig_image.shape[1]] = orig_image

            # fps_time = 1 / (time.time() - start_fps)
            # back = cv2.putText(back, 'fps:' + str(Decimal(fps_time).quantize(Decimal("0.00"))),
            #                    (100, 850), font, 1, (255, 255, 255), 2)  # show fps

            # cv2.namedWindow('annotated', cv2.WINDOW_NORMAL)
            #    # full-screen display
            # cv2.setWindowProperty('annotated', cv2.WND_PROP_FULLSCREEN,
            #                          cv2.WINDOW_FULLSCREEN)
            back = cv2.resize(back,(int(2840 / screen_rate), int(1160 / screen_rate))) # compensate Windows 175% display scaling
            cv2.imshow('annotated', back)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        except Exception as e:
            # traceback.print_exc()
            print(e)

    is_running = False


face_mask_icon_img = cv2.imread('imgs/face_mask_icon.png')  # mask badge overlaid on masked faces

def put_mask_to_face(face_img):
    """Overlay the mask icon in the bottom-right corner of a face crop.

    The icon is scaled to 30% of the face width and alpha-blended per
    channel; returns `face_img` (also mutated in place).
    """
    face_h, face_w = face_img.shape[0], face_img.shape[1]

    icon_size = int(face_w * 0.3)
    icon = cv2.resize(face_mask_icon_img, (icon_size, icon_size))

    # Anchor the icon at the bottom-right corner.
    top = face_h - icon_size
    left = face_w - icon_size

    # face_img[y: y + face_mask_icon_size, x: x + face_mask_icon_size] = face_mask_icon

    # Channel 2 of the icon acts as the blending weight (0..1 after /255).
    weight = icon[0:icon_size, 0:icon_size, 2] / 255
    inv_weight = 1 - weight
    # Blend each of the 3 channels to handle the icon transparency.
    for c in range(0, 3):
        region = face_img[top: top + icon_size, left: left + icon_size, c]
        face_img[top: top + icon_size, left: left + icon_size, c] = (
                    weight * icon[0:icon_size, 0:icon_size, c]
                    + inv_weight * region)

    return face_img

# Image processing logic (recognition worker process)
def deal_postimage(queue_head, queue_result):
    """Recognition worker: take frames from `queue_head`, POST each to the
    face-recognition backend, and push per-face results onto `queue_result`
    as ('face_head', ...) for known staff or ('guest_head', ...) for
    unknown visitors.
    """
    global is_default
    global is_running
    t1 = time.time()
    while is_running:
        try:
            img_list = queue_head.get()
            if len(img_list)>0:
                img_list_copy = img_list

                for i in range(len(img_list_copy)):
                    frame = img_list_copy.pop()

                    # Send the whole frame to the recognition service
                    file = {"file": ("file_name.jpg", cv2.imencode(".jpg", frame)[1].tobytes(), "image/jpg")}
                    post_url = url + '/face_recog_head'

                    res = requests.post(url=post_url, files=file, data=None)

                    # print(res)
                    html_str = res.content.decode()
                    dict_jsons = json.loads(html_str)
                    # print(dict_jsons)
                    if dict_jsons is not None:
                        # One result dict per detected face
                        for dict_json in dict_jsons:
                            name = dict_json.get('name')
                            box = dict_json.get('bbox')
                            user_id = dict_json.get('user_id')
                            pinyin_name = dict_json.get('pinyin_name')
                            is_mask = dict_json.get('is_mask')
                            image_feature = dict_json.get('image_feature')
                            # image_feature
                            print('name', name)
                            # print(user_id)
                            # print("box2:")
                            # print(box)
                            # print(image_feature)
                            # NOTE(review): bbox looks like [x1, y1, x2, y2]; the
                            # "x" variables below are actually row indices — confirm
                            face_x_start = max(0,int(box[1]))
                            face_x_end = min(720,int(box[3]))
                            face_y_start = max(0,int(box[0]))
                            face_y_end = min(1280,int(box[2]))
                            if name is not None:
                                if name.lower() != 'unknown':
                                    face_i = frame
                                    # face_path = './faces_db/faces/' + name + '.jpg'
                                    # if os.path.exists(face_path):
                                    #     # face_path = (os.getcwd() + '/' + face_path).replace("\\", '/')
                                    #     print(face_path)
                                    #     face_i = cv2.imread(face_path)
                                    # else:
                                    #     print('找不到图片路径：' + face_path)
                                    face_i = face_i[face_x_start:face_x_end, face_y_start:face_y_end] # use the video crop as the head image
                                    if is_mask:
                                        face_i = put_mask_to_face(face_i)
                                    #---------
                                    # url_path = url + '/userinfo'
                                    # # print('---------------user_id', user_id)
                                    # response = requests.get(url_path, params={'user_id': user_id})  ### data要写成字典，有个key，才能搜索到
                                    # html_str = response.content.decode()
                                    # dict_json = json.loads(html_str)
                                    # user_obj = dict_json.get('data')
                                    # if user_obj is not None:
                                    #     face_path = user_obj.get('image')
                                    #     if os.path.exists(face_path):
                                    #         face_path = os.getcwd() + '/faces_db/faces/' +name+'.jpg'
                                    #         print(face_path)
                                    #         # face_i = cv2.imread(face_path)
                                    #         face_i = cv2.imdecode(np.fromfile(str(face_path), dtype=np.uint8), -1)
                                    #     else:
                                    #         print('找不到图片路径：' + face_path)
                                    # ----------

                                    queue_result.put(('face_head', (face_i, name, user_id, pinyin_name)))

                                # queue_mask.add_head(face_i,name, datetime.datetime.now().strftime('%H:%M:%S'))
                                # queue_mask.counts()
                                else:
                                    # guest / unknown face
                                    name = 'guest'
                                    face_i = frame[face_x_start:face_x_end, face_y_start:face_y_end]
                                    if is_mask:
                                        face_i = put_mask_to_face(face_i)
                                    if image_feature is not None and len(image_feature) == 1:
                                        image_feature = image_feature[0]
                                    queue_result.put(('guest_head', (face_i, name, image_feature)))
        except Exception as e1:
            print(e1)
            # traceback.print_exc()
        # t2 = time.time()
        # if t2 - t1 > 1:
        #     try:
        #         url_path = url_api + '/stats'
        #         response = requests.get(url_path, timeout=2)
        #         t1 = t2
        #         html_str1 = response.content.decode()
        #         dict_json1 = json.loads(html_str1)
        #         staff_result = dict_json1.get('data')
        #         if staff_result is not None:
        #             staff_yes = staff_result.get('staff_yes', 0)
        #             # staff_no = staff_result.get('staff_no', 0)
        #             # result_num = staff_yes + staff_no
        #             # result_nums = [result_num, staff_yes, staff_no]
        #             queue_result.put(('people_count', (staff_yes,)))
        #     except Exception as e1:
        #         print(e1)
        #
        #     is_default = 1


def main():
    """Wire up the pipeline: a bounded frame queue feeding the recognition
    worker and a result queue feeding the UI, then run both as processes.
    """
    mp.set_start_method(method='spawn')  # init
    queue_head = mp.Queue(maxsize=10)    # frames: capture -> recognition worker
    queue_result = mp.Queue(maxsize=16)  # results: recognition worker -> UI

    workers = [
        mp.Process(target=image_put, args=(queue_head, queue_result, queue_mask, queue_nomask)),
        mp.Process(target=deal_postimage, args=(queue_head, queue_result)),
    ]

    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()


if __name__ == '__main__':
    main()
