
import time
import multiprocessing as mp
import base64
import numpy
from PIL import Image, ImageDraw, ImageFont
import datetime
import threading
from PIL import Image
import cv2
import numpy as np
import requests
import json
import os
import copy
import cv2
import onnx
import vision.utils.box_utils_numpy as box_utils
from caffe2.python.onnx import backend

# onnx runtime
import onnxruntime as ort

# Backend HTTP service used for face recognition, stats and check-in logging.
url = 'http://127.0.0.1:17103'
label_path = "models/voc-model-labels.txt"

# Ultra-light RFB-640 face detector exported to ONNX.
onnx_path = "models/onnx/version-RFB-640.onnx"
class_names = [name.strip() for name in open(label_path).readlines()]

# Load and sanity-check the model graph. The caffe2 backend is prepared but
# unused at runtime — actual inference goes through onnxruntime below.
predictor = onnx.load(onnx_path)
onnx.checker.check_model(predictor)
onnx.helper.printable_graph(predictor.graph)
predictor = backend.prepare(predictor, device="CPU")  # default CPU

ort_session = ort.InferenceSession(onnx_path)
input_name = ort_session.get_inputs()[0].name


# Flag telling the capture loop to reset queue_post (set by the worker thread).
is_default = 0
# Minimum detection confidence for the face detector.
threshold = 0.7


# NOTE(review): this shadows the builtin `sum`. It works as a plain detection
# counter in image_put, but any later call to sum(...) in this module will
# raise TypeError — consider renaming.
sum  =0

def predict(width, height, confidences, boxes, prob_threshold, iou_threshold=0.3, top_k=-1):
    """Turn raw detector outputs into final, pixel-space detections.

    Performs per-class confidence filtering, hard non-maximum suppression,
    and finally scales the normalized box coordinates to the original image
    size.

    Args:
        width, height: original image dimensions used to de-normalize boxes.
        confidences: (1, num_priors, num_classes) class-score tensor.
        boxes: (1, num_priors, 4) normalized [x1, y1, x2, y2] boxes.
        prob_threshold: minimum class score for a candidate to survive.
        iou_threshold: IoU above which overlapping boxes are suppressed.
        top_k: keep at most this many boxes per class (-1 = unlimited).

    Returns:
        (boxes_int32, labels, scores); three empty arrays when nothing passes.
    """
    prior_boxes = boxes[0]
    class_scores = confidences[0]
    kept_box_probs = []
    kept_labels = []
    # Class 0 is background, so classes start at index 1.
    for cls in range(1, class_scores.shape[1]):
        scores = class_scores[:, cls]
        keep = scores > prob_threshold
        scores = scores[keep]
        if scores.shape[0] == 0:
            continue
        candidates = np.concatenate(
            [prior_boxes[keep, :], scores.reshape(-1, 1)], axis=1)
        candidates = box_utils.hard_nms(candidates,
                                        iou_threshold=iou_threshold,
                                        top_k=top_k,
                                        )
        kept_box_probs.append(candidates)
        kept_labels.extend([cls] * candidates.shape[0])
    if not kept_box_probs:
        return np.array([]), np.array([]), np.array([])
    all_box_probs = np.concatenate(kept_box_probs)
    # De-normalize: x coordinates scale by width, y coordinates by height.
    all_box_probs[:, 0] *= width
    all_box_probs[:, 1] *= height
    all_box_probs[:, 2] *= width
    all_box_probs[:, 3] *= height
    return (all_box_probs[:, :4].astype(np.int32),
            np.array(kept_labels),
            all_box_probs[:, 4])
#

head_size = 90 # avatar tile width/height in pixels
is_running = True # shared flag that keeps all worker loops alive

# opencv 页面写字
# Draw text on an OpenCV image via PIL (cv2.putText cannot render CJK glyphs).
def cv2ImgAddText(img, text, left, top, textColor=(0, 255, 0), textSize=20):
    """Render `text` at (left, top) and return the result as a BGR array.

    Accepts either an OpenCV BGR ndarray or a PIL image; the drawing itself
    is done with a TrueType font through PIL so Chinese characters display
    correctly.
    """
    if isinstance(img, numpy.ndarray):
        # Convert the OpenCV BGR array into a PIL RGB image first.
        img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    canvas = ImageDraw.Draw(img)
    style = ImageFont.truetype(
        "font/simsun.ttc", textSize, encoding="utf-8")
    canvas.text((left, top), text, textColor, font=style)
    # Hand the result back to OpenCV as a BGR array.
    return cv2.cvtColor(numpy.asarray(img), cv2.COLOR_RGB2BGR)

# 保存头像的队列类
class naoke:
    def __init__(self):
        self.names = [] # 姓名
        self.image_features=[] # 图片特征，做判断是否重复用

        self.heads = [] # 头像图片
        self.times = [] # 保存时间
        self.heads_max = 8 # 最多保存几个
        self.count =0

    # 添加新头像
    def add_head(self, frame, name='unkonw', time_s=None,image_feature=None):
        # print(frame, len(frame), len(frame.shape))
        # if len(frame) == 0:
        #     return
        self.heads.insert(0, cv2.resize(frame, (head_size, head_size)))
        if len(self.heads) > self.heads_max:
            self.heads.pop()

        if time_s == 'no':
            time_s = '--:--:--'
        else:
            time_s = datetime.datetime.now().strftime('%H:%M:%S')
        self.times.insert(0, time_s)
        if len(self.times) > self.heads_max:
            self.times.pop()

        self.names.insert(0, name)
        if len(self.names) > self.heads_max:
            self.names.pop()

        self.image_features.insert(0,image_feature)
        if len(self.image_features) >self.heads_max:
            self.image_features.pop()
    def get_head(self):
        return self.heads.pop()
    def clear(self):
        self.names = []  # 姓名
        self.image_features = []  # 图片特征，做判断是否重复用

        self.heads = []  # 头像图片
        self.times = []  # 保存时间
    def counts(self):
        self.count+=1

    def setCounts(self, count):
        self.count = count

# 画头像到界面
# Composite one avatar tile plus its captions onto the dashboard background.
def head_plot_now(head, background, date_time='2200', name_='unknow0', phase=0, startpoint=(0, 0), inter=80, vertical=True):
    """Paste an avatar and its time/name captions into slot `phase`.

    Args:
        head: BGR avatar image (square in practice — see w/h note below).
        background: dashboard canvas; written in place and returned.
        date_time: time caption drawn next to the avatar.
        name_: name caption drawn next to the avatar.
        phase: slot index. In vertical mode slots 0-3 form the left column
            and slots 4-7 a second column shifted 266 px to the right.
        startpoint: (row, col) of slot 0's top-left corner.
        inter: pixel pitch between consecutive slots.
        vertical: True for the side columns; False for a bottom row where the
            avatar is resized to 125x125 and alpha-blended with the global
            mask_2 before being pasted.

    Returns:
        The modified background.

    Uses module-level globals `font` and `mask_2`.
    """
    # NOTE(review): shape[0] is height and shape[1] is width, so w and h are
    # swapped; harmless here because avatars are square — confirm if that
    # assumption ever changes.
    w, h = head.shape[0], head.shape[1]
    if vertical == True:

        # Left column: slots 0-3.
        if phase <= 3:
            # background[
            # startpoint[0] - 23 + inter * phase:startpoint[0] - 23 + head_under_right.shape[0] + inter * phase,
            # startpoint[1] - 16:startpoint[1] - 16 + head_under_right.shape[1]] = head_under_right
            # tem = background[(startpoint[0] + inter * phase):(startpoint[0] + head.shape[0] + inter * phase),
            #       startpoint[1]:startpoint[1] + head.shape[0]]
            # for i in range(tem.shape[2]):
            #     tem[:, :, i] = mask * tem[:, :, i]
            # for i in range(head.shape[-1]):
            #     head[:, :, i] = (1 - mask) * head[:, :, i]
            # tem = cv2.add(head, tem)
            background[startpoint[0] + inter * phase:startpoint[0] + w + inter * phase,
            startpoint[1]:startpoint[1] + w] = head
            # Time on the upper third, name on the lower third, to the right
            # of the avatar.
            background = cv2.putText(background, date_time,
                                     (startpoint[1] + int(h * 1.2),startpoint[0] + phase * inter + int(h / 3 * 1)),font,0.7, (255, 255, 255),2)
            background = cv2.putText(background, name_,
                                     (startpoint[1] + int(h * 1.2),
                                       startpoint[0] + phase * inter + int(h / 3 * 2)),font,0.8, (255, 255, 255),2)
        # Right column: slots 4-7, shifted 266 px right.
        else:
            # background[
            # startpoint[0] - 23 + inter * (phase - 4):startpoint[0] - 23 + head_under_right.shape[0] + inter * (
            # phase - 4),
            # startpoint[1] + 250:startpoint[1] + 250 + head_under_right.shape[1]] = head_under_right
            # tem = background[(startpoint[0] + inter * (phase - 4)):(startpoint[0] + head.shape[0] + inter * (phase - 4)),
            #       startpoint[1] + 266:startpoint[1] + head.shape[0] + 266]
            # for i in range(tem.shape[2]):
            #     tem[:, :, i] = mask * tem[:, :, i]
            # for i in range(head.shape[-1]):
            #     head[:, :, i] = ( 1 - mask) * head[:, :, i]
            # tem = cv2.add(head, tem)
            background[startpoint[0] + inter * (phase - 4):startpoint[0] + w + inter * (phase - 4),
            startpoint[1] + 266:startpoint[1] + w + 266] = head
            background = cv2.putText(background, date_time,
                                       (startpoint[1] + int(h * 1.2) + 266,startpoint[0] + (phase - 4) * inter + int(h / 3 * 1)),font,0.7,(255, 255, 255),2)
            # background = cv2ImgAddText(background, name_,
            #                            startpoint[1] + h * 1.2 + 266,
            #                            startpoint[0] + (phase - 4) * inter + int(h / 3 * 2),
            #                            (255, 255, 255), int(w / 4))
            background = cv2.putText(background, name_,
                                     (startpoint[1] + int(h * 1.2)+266,
                                      startpoint[0] + (phase - 4) * inter + int(h / 3 * 2)), font, 0.8, (255, 255, 255),2)
    else:
        # background[startpoint[0] - 16:startpoint[0] - 16 + head_under_bot.shape[0],
        # startpoint[1] - 16 + inter * phase:startpoint[1] - 16 + head_under_bot.shape[
        #     1] + inter * phase] = head_under_bot

        # Bottom row: resize to a fixed 125x125 tile and alpha-blend the
        # avatar with the global mask_2 (mask_2 is normalized to [0, 1]).
        head = cv2.resize(head, (125, 125))
        w ,h =125,125

        tem = background[(startpoint[0]):(startpoint[0] + head.shape[1]),
              startpoint[1] + inter * phase:startpoint[1] + head.shape[1] + inter * phase]
        # Keep the masked region of the background, the inverse-masked region
        # of the avatar, then add them to composite.
        for i in range(tem.shape[2]):
            tem[:, :, i] = mask_2 * tem[:, :, i]
        for i in range(head.shape[-1]):
            head[:, :, i] = (1 - mask_2) * head[:, :, i]
        tem = cv2.add(head, tem)
        background[(startpoint[0]):(startpoint[0] + h),
        startpoint[1] + inter * phase:startpoint[1] + h + inter * phase] = tem
        # background = cv2ImgAddText(background, date_time,
        #                            startpoint[1] + phase * inter + int(w / 10),
        #                            startpoint[0] + int(w / 7 * 5), (255, 255, 255), int(w / 5))
        # background = cv2ImgAddText(background, name_,
        #                            startpoint[1] + phase * inter + int(w / 10),
        #                            startpoint[0] + int(w), (255, 255, 255), int(w / 4))
        # background[startpoint[0] + inter * phase:startpoint[0] + w + inter * phase,
        # startpoint[1]:startpoint[1] + w] = head
        background = cv2.putText(background, date_time,
                                 (startpoint[1] + phase * inter + int(w / 10*0), startpoint[0] + int(w*9/10)), font,
                                 0.7, (255, 255, 255),2)
        background = cv2.putText(background, name_,
                                 (startpoint[1] + phase * inter,
                                  startpoint[0] + int(w*6/5)), font, 1, (255, 255, 255),2)
    return background

# 画到访人数到页面
# Render the visitor count banner onto the dashboard.
def count_plot(background, front, startpoint=(30, 1660), nums=0):
    """Draw str(nums) onto the banner `front` and blit it into `background`.

    The banner is pasted with its top-left corner at `startpoint` (row, col).
    Returns the modified background; `front` is also modified in place by
    cv2.putText.
    """
    # Only one counter slot is drawn; the historical layout placed several
    # counters at a 180 px pitch (index 0 here).
    front = cv2.putText(front, str(nums),
                        (140, 40), font, 1, (255, 255, 255), 2)
    row, col = startpoint
    background[row:row + front.shape[0],
               col:col + front.shape[1]] = front

    return background


font = cv2.FONT_HERSHEY_SIMPLEX
# Note: in this file w is the vertical axis and h the horizontal one.
# Alpha masks used to blend avatars into the dashboard (normalized to [0,1]).
mask_2 = cv2.imread('imgs/mask_2.png')
mask = cv2.imread('imgs/mask_1.png')
mask = cv2.resize(mask, (head_size, head_size))
mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
ret, mask = cv2.threshold(mask, 150, 255, cv2.THRESH_BINARY)

mask_2 = cv2.cvtColor(mask_2, cv2.COLOR_BGR2GRAY)
mask = mask / 255
mask_2 = cv2.resize(mask_2, (125, 125))
mask_2 = mask_2 / 255
# Default placeholder avatar shown before any face is captured.
naoke1 = cv2.imread('imgs/naoke1.jpg')
queue_mask = naoke() # staff queue (recognized employees)
queue_nomask = naoke() # all-visitors queue
queue_post = naoke() # crops waiting to be sent to the recognition backend

back = cv2.imread('imgs/background.png')
# NOTE(review): font is already assigned above; this line is redundant.
font = cv2.FONT_HERSHEY_SIMPLEX
w,h =back.shape[0],back.shape[1]# w is vertical, h is horizontal
head_under_right = cv2.imread('imgs/head1.png')
head_under_right = cv2.resize(head_under_right,
                              (int(head_under_right.shape[1]/2),int(head_under_right.shape[0]/2)))
# print(head_under_right.shape)

head_under_bot = cv2.imread('imgs/head2.png')
head_under_bot = cv2.resize(head_under_bot,
                              (int(head_under_bot.shape[1]/2),int(head_under_bot.shape[0]/2)))
# The video signal feeds in below (see image_put's while loop).
front= cv2.imread('imgs/f.jpg')

front= cv2.resize(front,(1150,640))



# Pre-fill every queue with placeholder avatars so the panel renders fully
# before any real face has been captured.
for i in range(8):
    queue_mask.add_head(naoke1, name='unknow', time_s='no',image_feature=None);
    queue_post.add_head(naoke1, name='unknow', time_s='no', image_feature=None);

    queue_nomask.add_head(naoke1, name='unknow', time_s='no',image_feature=None);


# 对摄像头线程
# Camera thread: grabs frames, runs face detection, composes the dashboard.
def image_put(q, queue_mask, queue_nomask, is_default=0):
    """Capture/display main loop.

    Reads frames from camera 0, detects faces with the ONNX detector, queues
    face crops on queue_post for the recognition worker, draws the staff
    panel and visitor counter onto the dashboard and shows it. Polls the
    backend /stats endpoint at most once per second to refresh the staff
    counter. Exits when 'q' is pressed or the camera stops delivering
    frames, clearing the global is_running flag.

    Fixes vs. original: the /stats request is guarded so a backend outage no
    longer kills the display loop; the return flag of cap.read() is checked;
    static artwork is loaded once instead of re-read from disk every frame;
    degenerate (empty) face crops are skipped before add_head resizes them.
    """
    global is_running, sum

    if is_default == 1:
        print('--------------------------------------------------------------')
        queue_post.clear()
        is_default = 0

    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)

    # Load static artwork once; per-frame copies are taken below because
    # count_plot and the blits modify their inputs in place.
    anniu_src = cv2.imread('imgs/anniu.png')
    anniu_src = cv2.resize(anniu_src, (anniu_src.shape[1] // 2, anniu_src.shape[0] // 2))
    back_src = cv2.imread('imgs/background.png')

    last_poll = time.time()
    image_mean = np.array([127, 127, 127])

    while is_running:
        now = time.time()
        if now - last_poll > 1:
            last_poll = now
            # A backend hiccup must not kill the display loop.
            try:
                response = requests.get(url + '/stats')
                staff_result = json.loads(response.content.decode()).get('data')
                if staff_result is not None:
                    queue_mask.setCounts(staff_result.get('staff_yes', 0))
            except (requests.RequestException, ValueError) as exc:
                print('stats poll failed:', exc)

        back = back_src.copy()
        back = count_plot(back, anniu_src.copy(), nums=queue_mask.count)

        ret, orig_image = cap.read()
        if not ret or orig_image is None:
            print("no img")
            break

        # Preprocess into the normalized 1x3x480x640 float tensor the model expects.
        image = cv2.cvtColor(orig_image, cv2.COLOR_BGR2RGB)
        image = cv2.resize(image, (640, 480))
        image = (image - image_mean) / 128
        image = np.transpose(image, [2, 0, 1])
        image = np.expand_dims(image, axis=0).astype(np.float32)

        confidences, boxes = ort_session.run(None, {input_name: image})
        boxes, labels, probs = predict(orig_image.shape[1], orig_image.shape[0],
                                       confidences, boxes, threshold)

        # Staff panel: two columns of the most recent recognized faces.
        for i in range(8):
            back = head_plot_now(queue_mask.heads[i], back,
                                 date_time=queue_mask.times[i],
                                 name_=queue_mask.names[i],
                                 phase=i, startpoint=(170, 1350), inter=160)

        for i in range(boxes.shape[0]):
            box = boxes[i, :]
            crop = orig_image[max(0, box[1]):min(720, box[3]),
                              max(0, box[0]):min(1280, box[2])]
            # Degenerate boxes can yield empty crops, which would make
            # cv2.resize fail inside add_head — skip them.
            if crop.size:
                queue_post.add_head(crop)
            cv2.rectangle(orig_image, (box[0], box[1]), (box[2], box[3]),
                          (255, 255, 0), 4)
        sum += boxes.shape[0]

        orig_image = cv2.resize(orig_image, (1152, 648))
        back[130:130 + orig_image.shape[0], 104:104 + orig_image.shape[1]] = orig_image
        cv2.imshow('annotated', back)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    is_running = False



# 图片处理逻辑
# Recognition worker: posts queued face crops to the backend for identification.
def deal_postimage(img_list):
    """Drain face crops from img_list and send each to /face_recog_head.

    For every recognized, previously-unseen staff name, the matching profile
    photo from faces_db is pushed onto the staff panel queue and a /checkin
    log entry is posted. After each processed batch the global is_default
    flag is set so the capture thread resets queue_post. Runs until the
    global is_running flag is cleared.

    Fixes vs. original: sleeps briefly when the list is empty instead of
    busy-spinning at 100% CPU, and guards the HTTP/JSON calls so one bad
    request no longer kills the worker thread. (The original also aliased
    img_list to "img_list_copy" without copying — the pop-based drain below
    makes the aliasing explicit.)
    """
    global is_default
    global is_running
    while is_running:
        if not img_list:
            # Avoid a 100% CPU busy-wait while there is nothing to process.
            time.sleep(0.05)
            continue

        # Drain however many crops are queued right now (newest first).
        for _ in range(len(img_list)):
            frame = img_list.pop()

            file = {"file": ("file_name.jpg",
                             cv2.imencode(".jpg", frame)[1].tobytes(),
                             "image/jpg")}
            data = {"type_code": "type_code",
                    "area_id": "area_id"}

            # One failed request must not kill the worker thread.
            try:
                res = requests.post(url=url + '/face_recog_head', files=file, data=data)
                print(res)
                dict_json = json.loads(res.content.decode())
            except (requests.RequestException, ValueError) as exc:
                print('face_recog_head failed:', exc)
                continue

            if dict_json is None:
                continue
            name = dict_json.get('name')
            user_id = dict_json.get('user_id')
            print(name)
            print(user_id)
            if name not in queue_mask.names and name != 'Unknown':
                # Prefer the stored profile photo over the raw camera crop.
                face_i = frame
                face_path = './faces_db/faces/' + name + '.jpg'
                if os.path.exists(face_path):
                    face_path = (os.getcwd() + '/' + face_path).replace("\\", '/')
                    print(face_path)
                    face_i = cv2.imread(face_path)
                else:
                    print('找不到图片路径：' + face_path)

                queue_mask.add_head(face_i, name,
                                    datetime.datetime.now().strftime('%H:%M:%S'))

                # Log the visit with the check-in API (best-effort).
                try:
                    requests.post(url + '/checkin', data={'user_id': user_id})
                except requests.RequestException as exc:
                    print('checkin failed:', exc)

        is_default = 1
        print('done,default==1')












def _annotate_face(canvas, bbox, name, color, h, w):
    """Draw a face box, a filled banner strip and the name text on canvas.

    BUG FIX: the original rectangle clamped x to 650 and y to 1150 — the
    axes were swapped relative to the crop bounds (x<=1150, y<=650).
    """
    x1, y1 = max(0, int(bbox[0])), max(0, int(bbox[1]))
    x2, y2 = min(1150, int(bbox[2])), min(650, int(bbox[3]))
    canvas = cv2.rectangle(canvas, (x1, y1), (x2, y2), color, 2)
    # Filled banner strip across the top of the box, behind the name text.
    canvas[y1:y1 + int(h / 10), x1:x1 + int(w / 2)] = color
    canvas = cv2.putText(canvas, name, (x1 + int(w / 20), y1 + int(h / 20)),
                         font, 0.5, (255, 255, 255), 1)
    return canvas


# Recognize all faces in one frame via the backend and annotate them.
def deal_image(frame, queue_mask, queue_nomask):
    """Send `frame` to the /face_recog service and process every detection.

    Unseen faces (feature distance > 0.7 from everything cached) go onto the
    all-visitors queue; recognized staff additionally get their profile photo
    pushed onto the staff queue and a /checkin entry logged.

    Returns:
        (people_len, annotated_frame): the number of detected faces and a
        copy of the frame with boxes/names drawn on it.

    Fixes vs. original: the recognition URL was built from the undefined
    name `url_path` (which also shadowed the global `url`); the check-in
    call used the misspelled `url_paht`; the feature distance called
    `sum(...)`, which is shadowed by the module-level int counter and raised
    TypeError — it now uses np.sum.
    """
    people_len = 0
    tem_frame = frame                 # clean source for head crops
    tem_frame_2 = copy.copy(frame)    # annotated copy so boxes don't pollute crops

    # JPEG-encode and base64 the frame for the HTTP API.
    img_str = cv2.imencode('.jpg', frame)[1].tobytes()
    b64_code = base64.b64encode(img_str)

    ### Call the face recognition API (backend service).
    t4 = time.time()
    response = requests.post(url + '/face_recog', data={'img': b64_code})
    print('t4', (time.time()) - t4)

    dict_json = json.loads(response.content.decode())
    img_result = dict_json.get('data')

    if img_result is not None:
        people_len = len(img_result)
        for face in img_result:
            is_need_checkin = False
            # Per-face payload: identity, display name, feature vector, box.
            user_id = face.get('user_id')
            name = face.get('name', '')
            image_feature = face.get('image_feature')
            bbox = face.get('bbox')
            print('信息最后结果：', name, bbox)

            # Distance to the closest previously-seen feature (100 = none yet).
            min_dist = 100
            for feat in queue_nomask.image_features:
                if feat is not None:
                    dist = float(np.sqrt(np.sum(
                        (np.array(feat) - np.array(image_feature)) ** 2)))
                    min_dist = min(min_dist, dist)
            print('----------------------juli')
            print(min_dist)

            h = int(bbox[3] - bbox[1])
            w = int(bbox[2] - bbox[0])
            # Crop clamped to the frame (assumed <= 1150x650 — TODO confirm).
            face_image = tem_frame[max(0, int(bbox[1])):min(int(bbox[3]), 650),
                                   max(0, int(bbox[0])):min(1150, int(bbox[2]))]
            # Blue (BGR 255,0,0) for unknown faces, red for recognized staff.
            color = (255, 0, 0) if user_id is None else (0, 0, 255)
            tem_frame_2 = _annotate_face(tem_frame_2, bbox, name, color, h, w)

            # A face far from everything cached counts as a new visitor.
            if (min_dist > 0.7 or min_dist == 100) and user_id is None:
                print('get a new face')
                queue_nomask.add_head(face_image, name,
                                      datetime.datetime.now().strftime('%H:%M:%S'),
                                      image_feature)
                is_need_checkin = True

            # Recognized staff member not yet on the panel.
            if user_id is not None and name not in queue_mask.names:
                is_need_checkin = True
                face_i = face_image

                ### Fetch the staff profile to show the stored photo instead.
                url_path = url + '/userinfo'
                response = requests.get(url_path, params={'user_id': user_id})
                user_obj = json.loads(response.content.decode()).get('data')

                if user_obj is not None:
                    face_path = user_obj.get('image')
                    if os.path.exists(face_path):
                        face_path = (os.getcwd() + '/' + face_path).replace("\\", '/')
                        print(face_path)
                        face_i = cv2.imread(face_path)
                    else:
                        print('找不到图片路径：' + face_path)

                # Staff panel queue.
                queue_mask.add_head(face_i, name,
                                    datetime.datetime.now().strftime('%H:%M:%S'),
                                    image_feature)
                # All-visitors queue.
                queue_nomask.add_head(face_i, name,
                                      datetime.datetime.now().strftime('%H:%M:%S'),
                                      image_feature)

            if is_need_checkin:
                ### Call the check-in logging API.
                url_path = url + '/checkin'
                response = requests.post(url_path, data={'user_id': user_id})

    return people_len, tem_frame_2

# 处理图片线程
# Frame-processing thread: feeds queued frames through the recognizer.
def image_get(q, queue_mask, queue_nomask):
    """Consume frames from `q` and run deal_image on each until shutdown.

    BUG FIX: the original called deal_image() with no arguments (TypeError
    on the first iteration) and never consumed the queue; it now blocks on
    q.get() for the next frame and passes the two display queues through.
    """
    global is_default
    global is_running
    while is_running:
        frame = q.get()  # blocks until the capture side enqueues a frame
        _, _ = deal_image(frame, queue_mask, queue_nomask)

    print('thread2 done')

def main():
    """Entry point: runs the capture/display loop on the main thread.

    NOTE(review): the worker thread list below is created but never started
    (start/join are commented out), so image_get does not actually run;
    image_put blocks here until the display window is closed.
    """

    mp.set_start_method(method='spawn')  # init
    queue = mp.Queue(maxsize=2)
    # my_thread = threading.Thread(target=deal_postimage, args=(queue_post.heads,), name='thread_1')
    # my_thread.start()
    image_put(queue, queue_mask, queue_nomask,is_default)

    threads = [threading.Thread(target=image_get, args=(queue,queue_mask,queue_nomask))]
    #
    # [t.start() for t in threads]


    # [t.join] for t in threads below was commented out in the original:
    # [t.join() for t in threads]


if __name__ == '__main__':
    main()
