import threading
from gevent.pywsgi import WSGIServer
from flask import Flask, Response, jsonify
from gevent import pywsgi
import copy
import csv
import multiprocessing
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
import random
# import Watch_V1108
# from Pad_2024 import main as pad_main
# from Pad_2024 import ser, port_close
import sys
import time
import traceback
from datetime import datetime
import subprocess
from scipy import io
import cv2
import matplotlib.pyplot as plt
import mediapipe as mp
import warnings
import numpy as np
from PySide6 import QtWidgets
from PySide6.QtCore import Qt, QTimer, QFile
from PySide6.QtGui import QPixmap, QImage
from PySide6.QtUiTools import QUiLoader
from PySide6.QtWidgets import QApplication
from pyqtgraph import PlotWidget
from pyqtgraph.Qt import QtCore

app = Flask(__name__)
warnings.filterwarnings("ignore", category=UserWarning, module='google.protobuf.symbol_database')  # suppress the mediapipe/protobuf version warning

# Directory on the HIKVISION drive where captured video is archived.
folder_videosave = '/media/multimodal/HIKVISION/Data_save/Video'
# folder_videosave = r'C:\Users\a\Desktop\UI开发\data'  # windows test path

# FIX: raw string — backslashes in a Windows path must not be parsed as
# escape sequences (invalid escapes like '\A' raise SyntaxWarning on
# modern Python and are slated to become errors).
file_gui = r'f:\疲劳监测\AGX\GUI_AGX.ui'

# Camera indices probed in order by cam_capture.
cameraid = [0, 1, 2, 3, 4, 5, 6, 7, 8]

fps = 30               # nominal capture frame rate
cap, out = None, None  # cv2.VideoCapture / cv2.VideoWriter handles, set in cam_capture
imgp = None            # placeholder for a processed frame (currently unused)

# 函数cam_capture：相机视频获取及存储
def cam_capture(q_imgp, q_exit):  # imgp是image processing的缩写；imgs是image show的缩写

    global fps, cap, out, imgp
    stop_outer_loop = False
    ns = 0

    # 设置mediapipe参数
    mpDraw = mp.solutions.drawing_utils  # 调用人脸绘画模型工具
    mpFaceMesh = mp.solutions.face_mesh  # 设置模型的参数
    # faceMesh = mpFaceMesh.FaceMesh(max_num_faces=1)  # 将人脸检测数最大设置为1
    # faceMesh = mpFaceMesh.FaceMesh(max_num_faces=1, min_detection_confidence=0.1)  # 将人脸检测数最大设置为1
    # drawSpec = mpDraw.DrawingSpec((0, 244, 56), thickness=1, circle_radius=1)  # 这时设置人脸绘画的参数
    faceMesh = mpFaceMesh.FaceMesh(max_num_faces=1,
                                   refine_landmarks=True,
                                   min_detection_confidence=0.4,
                                   min_tracking_confidence=0.4)  # 将人脸检测数最大设置为1
    drawSpec = mpDraw.DrawingSpec((0, 244, 56), thickness=1, circle_radius=1)  # 这时设置人脸绘画的参数

    # 设置视频保存参数
    current_datetime = datetime.now()  # 获取时间
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # 视频编码方式有：XVID, DIVX, MJPG, mp4v
    fd0 = current_datetime.strftime("%Y_%m_%d_%H_%M_%S_%f")  # 设置时间格式, 精确到微秒，fd: formatted_datetime
    save_path = os.path.join(folder_videosave, f'Cam_{fd0}.mp4')  # 设置保存的视频名称
    fps_save = 30
    out = cv2.VideoWriter(save_path, fourcc, fps_save, (640, 480))  # 创建写入视频对象out，其中fps是保存视频的帧率
    save_interval = 2 * 60 * 60  # 视频保存时间间隔
    # save_interval = 60  # test interval
    startsave_time = time.time()
    start_time = time.time()

    while not stop_outer_loop:

        for cam_id in cameraid:

            ns += 1  # 更新相机搜索次数，ns: number of searches
            print(f'当前相机id：{cam_id}, 第{ns}次搜索')

            try:
                # 测试代码
                # path = r'H:\RL-rPPG\Camera\28-2.mp4'  # 现有视频用于测试
                # cap = cv2.VideoCapture(path)
                # 获取相机实时数据
                cap = cv2.VideoCapture(0)  # 实时读取相机数据则将path改为cam_id
                # width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                # height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                # fps = int(cap.get(cv2.CAP_PROP_FPS))

                # 确保cap打开了
                if not cap.isOpened():
                    print("camera is not opened, please open the camera!")
                    # cap.open()
                else:
                    print('camera is opened, read the video stream...')

                frame_num = 0
                count_false = 0
                wt = 0  # 初始化程序等待时间 wt: waiting time
                pf = 0  # 统计正对摄像头的帧数 pf: positive face
                # 使用一个While循环不间断地对usb摄像头进行读取，一直到遇到键盘终止事件时break掉
                while cap.isOpened():
                    ret, frame = cap.read()  # 使用cap.read()从摄像头读取一帧
                    # time.sleep(1/50)

                    current_datetime = datetime.now()  # 获取时间
                    fd = current_datetime.strftime("%Y_%m_%d_%H_%M_%S_%f")  # 设置时间格式, 精确到微秒，fd: formatted_datetime
                    # cam_fd = f'Cam0{cam_id}_' + fd
                    # cv2.putText(frame, cam_fd, (10, 20), cv2.FONT_HERSHEY_PLAIN,
                    #             1.2, (0, 0, 255), 2)

                    if frame is None:
                        continue
                    if not ret:
                        print(' cannot receive frames(stream end?). Exiting...')
                        break
                    # else:
                    #     print('获取视频流成功！')

                    out.write(frame)  # 保存视频帧
                    currentsave_time = time.time()
                    if currentsave_time - startsave_time > save_interval:
                        out.release()  # 关闭视频编写器
                        fd0 = current_datetime.strftime("%Y_%m_%d_%H_%M_%S_%f")  # 设置时间格式, 精确到微秒，fd: formatted_datetime
                        save_path = os.path.join(folder_videosave, f'Cam_{fd0}.mp4')  # 设置保存的视频名称
                        out = cv2.VideoWriter(save_path, fourcc, fps_save, (640, 480))
                        startsave_time = currentsave_time


                    img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

                    # 将视频帧放入队列用于后续处理和GUI显示
                    # imgp = copy.deepcopy(frame)
                    q_imgp.put(frame)
                    if q_imgp.qsize() > 1:
                        q_imgp.get()

                    frame_num = frame_num + 1  # 统计当前帧数

                    end_time = time.time()
                    elapsed_time = end_time - start_time
                    if elapsed_time >= 1.0:
                        rt_fps = frame_num / elapsed_time
                        # print(f"fps: {rt_fps}")

                    if ns < 6:
                        kpe = False
                        results = faceMesh.process(img)  # 将图像传至到面网模型中
                        if results.multi_face_landmarks:
                            # print('识别到人脸')

                            for faceLms in results.multi_face_landmarks:
                                # 因为版本问题，下面这行代码如果运行出错就将FACEMESH_CONTOURS更换为FACE_CONNECTIONS
                                # mpDraw.draw_landmarks(frame, faceLms, mpFaceMesh.FACEMESH_CONTOURS, drawSpec, drawSpec)

                                ids_to_check = [4, 8, 50, 108, 280, 337]
                                for id, lm in enumerate(faceLms.landmark):

                                    # 选取关键点
                                    if id in ids_to_check:
                                        kpe = True

                                if kpe:
                                    pf += 1  # 统计人脸连续出现在视频中的帧数
                                    if pf > 30:
                                        count_false = 0
                                        ns = 0
                                else:
                                    pf = 0
                                    count_false += 1  # 计算人脸未正对摄像头的帧数

                        else:
                            pf = 0
                            count_false += 1

                        if count_false > 20 * 1800 and cam_id == 0:
                            cap.release()
                            while cap.isOpened():
                                time.sleep(1)
                            time.sleep(3)
                            break
                        elif count_false > 10 * 1800 and cam_id != 0:
                            cap.release()
                            while cap.isOpened():
                                time.sleep(1)
                            time.sleep(3)
                            break

                    else:
                        # time.sleep(1)
                        wt += 1  # 统计等待帧数
                        print(f"长时间未识别到人脸，等待返回…… 当前等待帧数{wt}")
                        wait_frame = 10 * 1800

                        if wt > wait_frame:
                            print("结束等待，继续执行程序！")
                            ns = 0

                    # 将绘制了人脸网格的图像放入队列用于GUI显示
                    # q_imgs.put(frame)

                    if not q_exit.empty():
                        stop_outer_loop = True

                        cap.release()
                        out.release()
                        print("\n退出循环，相机已关闭！！！")
                        q_exit.get()
                        time.sleep(1)  # 等待以确保队列中的元素被取出
                        break

            except Exception as e:
                print(f'Error in cam_capture function: {e}')


def generate_frames(q_imgp):
    """Yield multipart MJPEG chunks built from frames in ``q_imgp``.

    Each yielded chunk is one ``--frame`` part (JPEG payload with
    Content-Type and Content-Length headers), suitable for a
    ``multipart/x-mixed-replace`` streaming response.

    Args:
        q_imgp: multiprocessing.Queue of BGR frames (numpy arrays).
    """
    while True:
        # FIX: the original spun at 100% CPU while the queue was empty;
        # sleep briefly before polling again.
        if q_imgp.empty():
            time.sleep(0.005)
            continue

        imgp = q_imgp.get()

        if imgp is None or imgp.size == 0:
            print('Received an empty frame !')
            continue

        ret, buffer = cv2.imencode('.jpg', imgp)
        if not ret:
            print('Frame encoding failed !')
            continue

        frame_bytes = buffer.tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n'
               b'Content-Length: ' + str(len(frame_bytes)).encode() + b'\r\n'
               b'\r\n' + frame_bytes + b'\r\n')


def start_process():
    """Launch the camera-capture child process.

    The watch (process2) and pad (process3) processes are currently disabled.
    """
    process1.start()


def tp():
    """Shutdown handler: quit the Qt GUI, close the pad serial port (if
    present) and stop the camera-capture process cleanly."""
    # Close the GUI event loop first.
    QApplication.quit()

    # FIX: `ser` / `port_close` come from the Pad_2024 import that is
    # commented out at the top of the file; without a guard this handler
    # always died with NameError.  Guarded so shutdown proceeds whether or
    # not the pad module is enabled.
    try:
        if ser.isOpen():
            ser.write(bytes.fromhex('02'))  # reset command for the pad MCU — TODO confirm protocol
            port_close()
            print('坐垫退出')
    except NameError:
        pass  # pad module not loaded — nothing to clean up

    if process1.is_alive():
        # Ask cam_capture to release the camera, then wait until it has
        # consumed the signal before terminating the process.
        q_exit.put(1)
        time.sleep(1)  # let the signal land in the queue

        while not q_exit.empty():
            time.sleep(1)

        process1.terminate()
    else:
        print(">>>进程未开始，直接退出！<<<")


@app.route('/video')
def video():
    """Flask endpoint streaming the camera feed as multipart MJPEG."""
    stream = generate_frames(q_imgp)
    return Response(stream, mimetype='multipart/x-mixed-replace; boundary=frame')


pad_data = np.zeros((32, 32), dtype=float).tolist()  # last-known 32x32 seat-pad frame (zeros until data arrives)
@app.route('/pad')
def send_array():
    """Flask endpoint returning the latest 32x32 seat-pad array as JSON."""
    global pad_data
    # FIX: `q_pad` is only created by code currently commented out in
    # __main__; guard so the endpoint serves the last-known data instead of
    # crashing with NameError while the pad process is disabled.
    try:
        if not q_pad.empty():
            pad_data = q_pad.get()
            if q_pad.qsize() > 10:  # shed backlog so responses stay fresh
                q_pad.get()
    except NameError:
        pass
    return jsonify(pad_data)


watch_data = [84, 96, 142, 36.2]  # last-known watch reading (defaults until data arrives) — field meanings: TODO confirm against watch module
@app.route('/watch')
def send_list():
    """Flask endpoint returning the latest smartwatch reading as JSON."""
    global watch_data
    # FIX: `q_watch` is only created by code currently commented out in
    # __main__; guard so the endpoint serves the last-known data instead of
    # crashing with NameError while the watch process is disabled.
    try:
        if not q_watch.empty():
            watch_data = q_watch.get()
            if q_watch.qsize() > 10:  # shed backlog so responses stay fresh
                q_watch.get()
            print(f'手表数据: ', watch_data)
    except NameError:
        pass
    return jsonify(watch_data)

def run_flask():
    """Start the Flask server (blocking) on all interfaces, default port."""
    host = '0.0.0.0'
    app.run(host=host, debug=False)


if __name__ == "__main__":

    q_exit = multiprocessing.Queue()
    q_imgp = multiprocessing.Queue()
    process1 = multiprocessing.Process(target=cam_capture, args=(q_imgp, q_exit,))

    # q_watch = multiprocessing.Queue()
    # process2 = multiprocessing.Process(target=Watch_V1108.run_watch, args=(q_watch,))

    # q_pad = multiprocessing.Queue()
    # process3 = multiprocessing.Process(target=pad_main, args=(q_pad,))

    # process3 = subprocess.Popen(['python', 'Pad_qt2024_展览.py'])

    flask_thread = threading.Thread(target=run_flask)
    flask_thread.daemon = True
    flask_thread.start()

    # http_server = WSGIServer(('0.0.0.0', 5000), app)
    # http_server.serve_forever()

    QApplication.setAttribute(Qt.AA_ShareOpenGLContexts)
    app = QApplication([])
    qfile_FM = QFile(file_gui)
    qfile_FM.open(QFile.ReadOnly)
    qfile_FM.close()
    QUiLoader().registerCustomWidget(PlotWidget)
    gui = QUiLoader().load(qfile_FM)

    gui.ButtonCapture.clicked.connect(start_process)
    gui.ButtonCapture_2.clicked.connect(tp)
    # app.aboutToQuit.connect(tp)

    gui.show()  # 将窗口控件显示在屏幕上
    sys.exit(app.exec())  # PySide6应该将exec_()改为exec()

