import base64
import copy
import csv
import json
import logging
import multiprocessing
import threading
from collections import deque
import requests
import websockets, asyncio
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import random
import pygame
import sys
import time
import traceback
from datetime import datetime
# import subprocess
# from scipy import io
import cv2
# import matplotlib.pyplot as plt
import mediapipe as mp
import numpy as np
# from PySide6 import QtWidgets
from PySide6.QtCore import Qt, QTimer, QFile
# from PySide6.QtGui import QPixmap, QImage
from PySide6.QtUiTools import QUiLoader
from PySide6.QtWidgets import QApplication
from pyqtgraph import PlotWidget
# from pyqtgraph.Qt import QtCore
from scipy import signal
from scipy.fftpack import fft
from scipy.signal import find_peaks
import warnings

import pymysql
import init_DataBase as initDB
import init_Files as initF

from FMM import main_multimodal

# --- Module-level state and configuration -----------------------------------
# GUI timers (assigned later by the Qt layer).
v_timer1 = None
v_timer2 = None
v_timer3 = None
current_time = datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
# Suppress the protobuf warning triggered by some mediapipe versions.
warnings.filterwarnings("ignore", category=UserWarning, module='google.protobuf.symbol_database')

# Video storage path (legacy option: save directly on the AGX device).
# folder_videosave = '/home/multimodal/PycharmProjects/Data_save'

url = 'http://10.6.200.50:5000'  # AGX IP address
# url = 'http://10.6.98.55:5000'  # alternative AGX IP address
######################################################yzx
FileFolder_Root = 'D:/Data_zgh'  # root folder receiving database files when they are moved
Flag_txt = 'D:/Data_zgh/path.txt'
text_all = '   '
TIME_File = 0
# First run: create the root folder and seed the flag file so the path lookup
# below always has something to read.
if not os.path.isfile(Flag_txt):
    # exist_ok avoids a crash when the folder exists but only the txt is missing
    os.makedirs(FileFolder_Root, exist_ok=True)
    with open(Flag_txt, 'w') as f:
        f.write('test')
conn = pymysql.connect(
            host='localhost',  # host
            port=3306,
            user='root',
            password='1234',
            charset='utf8'
        )
current_time_H = datetime.now().strftime('%Y_%m_%d_%H')
current_time_f = datetime.now().strftime("%Y_%m_%d_%H_%M_%S_%f")  # timestamp with microseconds, fd: formatted_datetime
# Read "<db_name>&<video folder>" from the flag file; the context manager closes
# the handle promptly (the original left it open for the process lifetime).
with open(Flag_txt, 'r', encoding='utf8') as f_read:
    folder_save = f_read.readline()
folder_videosave = folder_save.split('&')[-1]
db_name = folder_save.split('&')[0]
# print(f'txt:{folder_videosave}')
# Data storage paths
file_rPPG = os.path.join(folder_videosave, f'rPPG_data_{current_time}.csv').replace('\\', '/')  # pulse-wave (rPPG) CSV
file_RESP = os.path.join(folder_videosave, f'RESP_data_{current_time}.csv').replace('\\', '/')  # respiration CSV
file_HR_HRV_RR = os.path.join(folder_videosave, f'HR_HRV_RR_data_{current_time}.csv').replace('\\',
                                                                                             '/')  # HR / HRV / RR CSV
file_watch = os.path.join(folder_videosave, f'watch_data_{current_time}.csv').replace('\\', '/')  # smartwatch data CSV
file_HP = os.path.join(folder_videosave, f'HP_data_{current_time}.csv').replace('\\', '/')  # AU + head-pose CSV
fatigue_log_file = os.path.join(r'D:\Fatigue_time', f'Fatigue_timelog_{current_time}.csv')
file_video = os.path.join(folder_videosave, f'Cam_{current_time}.mp4')  # saved video file name
# watch data path (Linux variant)
# file_watch = os.path.join('./RT_data/', f'watch_data_{current_time}.csv')  # linux path
count_watch_s = 0
######################################################################yzx

# Initialisation: update counters, CSV column labels for watch data, baseline vitals
count, cw, counter_fatigue, fatigue_status = 0, 0, 0, 0
fieldnames = ['Times', 'HR', 'BOS', 'HRV', 'TEMP']  # column names
hr_video, hr_watch, bos_watch, hrv_watch, temp_watch = 84, 84, 96, 35, 36.25

# Connected camera ids. NOTE: on Linux each physical camera exposes two device
# nodes (e.g. camera 1 -> [0, 1]); use the first of each pair.
cameraid = [0, 2, 4, 6, 8]
# cameraid = [0, 1, 2, 3, 4, 5]  # windows

# Music file path
# music_file = "/home/multimodal/PycharmProjects/pythonProject/Canon.mp3"
music_file = r"D:\PycharmProjects\pythonProject\Canon.mp3"

# Path of the GUI layout (.ui) file
# file_gui = r'E:\Mental_Fatigue\codes\Multi_Modal BACKUP\Server_GUI.ui'
file_gui = r'f:\疲劳监测\Multi_Modal_Fatigue_Monitoring_System\Server_GUI.ui'

# Pulse-wave (BVP) band-pass filter parameters
lowcut = 1  # low cut-off frequency (Hz)
highcut = 2.1  # high cut-off frequency (Hz)
nyquist_frequency = 15  # Nyquist frequency, i.e. half the sampling rate
low = lowcut / nyquist_frequency
high = highcut / nyquist_frequency
b, a = signal.butter(4, [low, high], btype='band', analog=False)[:2]  # 4th-order Butterworth band-pass

# Respiration-signal band-pass filter parameters
lowcut2 = 0.11  # low cut-off frequency (Hz)
highcut2 = 0.4  # high cut-off frequency (Hz)
nyquist_frequency2 = 15  # Nyquist frequency, i.e. half the sampling rate
low2 = lowcut2 / nyquist_frequency2
high2 = highcut2 / nyquist_frequency2
b2, a2 = signal.butter(4, [low2, high2], btype='band', analog=False)[:2]  # 4th-order Butterworth band-pass

# Initial heart-rate related values
HR_rPPG, HRV_rPPG, HRV_rPPG_show, RR_rPPG_Show, RR_rPPG = 75, 140, 140, 12, 12
HR_rPPG_AVE, HRV_rPPG_AVE, RR_rPPG_AVE = [], [], []
rppg_len = 900  # display length of the rPPG trace
cap_ready = 0
fps = 30
cap = None
out = None


# Function cam_capture: camera video capture and storage
def cam_capture(q_imgp, q_imgweb, q_imghp, q_imgs, q_exit):  # imgp: image processing; imgs: image show
    """Capture frames from the network camera stream and fan them out to queues.

    Runs until ``q_exit`` receives an item.  For each valid frame: stamps a
    camera-id/timestamp overlay, pushes copies onto the processing queues
    (``q_imgp`` for rPPG, ``q_imghp`` for head pose) and the display queue
    (``q_imgweb``, kept at most ~2 deep), and tracks whether a face is visible
    so the loop can rotate to the next camera / wait when no face is seen.

    Args:
        q_imgp:   queue of raw frames for rPPG/respiration processing.
        q_imgweb: queue of annotated frames for GUI/web display.
        q_imghp:  queue of raw frames for head-pose/AU processing.
        q_imgs:   display queue (currently unused; kept for interface compatibility).
        q_exit:   non-empty signals shutdown; one item is consumed on exit.
    """
    global fps, cap, out
    stop_outer_loop = False
    ns = 0  # number of camera searches so far

    # mediapipe face-mesh setup: at most one face, relaxed confidences.
    mpDraw = mp.solutions.drawing_utils
    mpFaceMesh = mp.solutions.face_mesh
    faceMesh = mpFaceMesh.FaceMesh(max_num_faces=1,
                                   refine_landmarks=True,
                                   min_detection_confidence=0.4,
                                   min_tracking_confidence=0.4)
    drawSpec = mpDraw.DrawingSpec((0, 244, 56), thickness=1, circle_radius=1)  # landmark drawing style

    # Video-save parameters (the writer itself is currently disabled).
    current_datetime = datetime.now()
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # codec options: XVID, DIVX, MJPG, mp4v
    fd0 = current_datetime.strftime("%Y_%m_%d_%H_%M_%S_%f")
    save_path = os.path.join(folder_videosave, f'Cam_{fd0}.mp4')
    fps_save = 30
    # out = cv2.VideoWriter(save_path, fourcc, fps_save, (640, 480))

    while not stop_outer_loop:

        for cam_id in cameraid:

            ns += 1  # update search count, ns: number of searches
            print(f'当前相机id：{cam_id}, 第{ns}次搜索')

            try:
                # Open the network stream (switch the argument to cam_id for a
                # local camera, or to a file path for offline testing).
                cap = cv2.VideoCapture(url + '/video')

                if not cap.isOpened():
                    print("camera is not opened, please open the camera!")
                else:
                    print('camera is opened, read the video stream...')

                start_time = time.time()
                frame_num = 0
                count_false = 0  # consecutive frames without a (frontal) face
                wt = 0  # waiting time in frames, wt: waiting time
                pf = 0  # consecutive frames with a frontal face, pf: positive face

                # Read frames until the stream ends or an exit is requested.
                while cap.isOpened():
                    ret, frame = cap.read()

                    # BUGFIX: validate the frame *before* using it — the
                    # original drew the timestamp first, so a None frame raised
                    # inside cv2.putText and aborted the whole camera loop
                    # instead of being skipped.
                    if frame is None:
                        continue
                    if not ret:
                        print(' cannot receive frames(stream end?). Exiting...')
                        cap.release()
                        break

                    end_time = time.time()
                    elapsed_time = end_time - start_time
                    if elapsed_time >= 1.0:
                        rt_fps = frame_num / elapsed_time  # real-time fps (diagnostic only)
                        # print(f"fps: {rt_fps}")

                    # Overlay "Cam0<id>_<timestamp>" on the frame.
                    current_datetime = datetime.now()
                    fd = current_datetime.strftime("%Y_%m_%d_%H_%M_%S_%f")
                    cam_fd = f'Cam0{cam_id}_' + fd
                    cv2.putText(frame, cam_fd, (10, 20), cv2.FONT_HERSHEY_PLAIN,
                                1.2, (0, 0, 255), 2)

                    # out.write(frame)  # save the frame (disabled)
                    img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

                    # Push copies of the raw frame for downstream processing.
                    imgp = copy.deepcopy(frame)
                    q_imgp.put(imgp)

                    imghp = copy.deepcopy(frame)
                    q_imghp.put(imghp)

                    frame_num = frame_num + 1  # current frame count

                    if ns < 6:
                        # Still searching: require a face with the chosen key
                        # landmarks to confirm this camera sees the subject.
                        kpe = False
                        results = faceMesh.process(img)
                        if results.multi_face_landmarks:

                            for faceLms in results.multi_face_landmarks:
                                # On older mediapipe versions replace
                                # FACEMESH_CONTOURS with FACE_CONNECTIONS.
                                mpDraw.draw_landmarks(frame, faceLms, mpFaceMesh.FACEMESH_CONTOURS, drawSpec, drawSpec)

                                ids_to_check = [4, 8, 50, 108, 280, 337]
                                for id, lm in enumerate(faceLms.landmark):
                                    # Any of the selected key points confirms a face.
                                    if id in ids_to_check:
                                        kpe = True

                                if kpe:
                                    pf += 1  # face present in consecutive frames
                                    if pf > 30:
                                        count_false = 0
                                        ns = 0
                                else:
                                    pf = 0
                                    count_false += 1  # frames without a frontal face

                        else:
                            pf = 0
                            count_false += 1

                        # Too long without a face: release and move to the next
                        # camera.  (The original had two byte-identical branches
                        # for cam_id == 0 and cam_id != 0; merged here.)
                        if count_false > 6000 * 1800:
                            cap.release()
                            while cap.isOpened():
                                time.sleep(1)
                            time.sleep(3)
                            break

                    else:
                        wt += 1  # count waiting frames
                        print(f"长时间未识别到人脸，等待返回…… 当前等待帧数{wt}")
                        wait_frame = 6000 * 1800

                        if wt > wait_frame:
                            print("结束等待，继续执行程序！")
                            ns = 0

                    # Annotated frame for the GUI; keep the queue shallow so the
                    # display never lags far behind the camera.
                    imgweb = copy.deepcopy(frame)
                    q_imgweb.put(imgweb)
                    if q_imgweb.qsize() > 2:
                        q_imgweb.get()

                    if not q_exit.empty():
                        stop_outer_loop = True

                        cap.release()
                        # out.release()
                        print("\n退出循环，相机已关闭！！！")
                        q_exit.get()
                        time.sleep(1)  # give the queue time to drain
                        break

            except Exception as e:
                print(f'Error in cam_capture function: {e}')


# Function video_processing: process the video stream, extract the rPPG signal, compute physiological parameters
def video_processing(q_rPPG_RESP, q_rPPG_BVP, q_rPPG_HR, q_rPPG_RR, q_imgp, q_eye, q_mouth):
    """Extract rPPG (pulse) and respiration signals from facial ROIs.

    Pulls raw frames from ``q_imgp``, locates facial landmarks with mediapipe,
    averages pixel values over forehead/cheek/chest ROIs, band-pass filters the
    resulting traces (module-level coefficients ``b, a`` / ``b2, a2``), and
    estimates HR (dominant FFT frequency), HRV (peak-interval spread) and RR
    (dominant FFT frequency of the chest trace).  Waveforms and derived values
    are pushed onto the output queues and appended to the module-level CSV
    files.  Runs forever; all errors are caught and printed.

    Args:
        q_rPPG_RESP: queue receiving the normalised respiration waveform.
        q_rPPG_BVP:  queue receiving the normalised pulse waveform.
        q_rPPG_HR:   queue receiving averaged heart-rate values.
        q_rPPG_RR:   queue receiving [HR, HRV, RR] triples for display.
        q_imgp:      input queue of BGR frames from the capture process.
        q_eye, q_mouth: eye/mouth image queues (puts are currently disabled).
    """
    global HR_rPPG, HRV_rPPG, HRV_rPPG_show, RR_rPPG, RR_rPPG_Show, HR_rPPG_AVE, HRV_rPPG_AVE, RR_rPPG_AVE
    # pix*/AVE* accumulate per-frame ROI means; the AVE lists are seeded with a
    # single 0 so the "[-1]" fallbacks below never index an empty list.
    pix1, pix5, pix13, AVE10, AVE11, AVE12, AVE70, AVE71, AVE72, AVE110, AVE111, AVE112, BVP, RESP = [], [], [], [0], [0], [0], [0], [0], [0], [0], [0], [0], [], []
    start_time = time.time()
    fieldnames_rppg = ['Times', 'R', 'G', 'B', 'R7', 'G7', 'B7', 'R11', 'G11', 'B11']
    fieldnames_resp = ['Times', 'R']
    fieldnames_HR_HRV_RR = ['Times', 'HR', 'HRV', 'RR']

    # Append one row of raw rPPG ROI data (3 ROIs x RGB) to the CSV file.
    def write_to_csv(datatocsv, csv_file_path):
        try:
            # Open the CSV file in append mode.
            with open(csv_file_path, mode='a', newline='') as file:
                writer = csv.DictWriter(file, fieldnames=fieldnames_rppg)
                curr_time = datetime.now().strftime('%Y_%m_%d_%H_%M_%S_%f')
                writer.writerow(
                    {'Times': curr_time, 'R': datatocsv[0], 'G': datatocsv[1], 'B': datatocsv[2], 'R7': datatocsv[3],
                     'G7': datatocsv[4], 'B7': datatocsv[5], 'R11': datatocsv[6], 'G11': datatocsv[7],
                     'B11': datatocsv[8]})
        except Exception as e:
            print(f"An error occurred: {e}")

    # Write the header row of the rPPG CSV file.
    with open(file_rPPG, mode='a', newline='') as rppg_file:
        writer_header = csv.DictWriter(rppg_file, fieldnames=fieldnames_rppg)
        writer_header.writeheader()

    # Append one respiration (RESP) sample to the CSV file.
    def write_to_csv_resp(respdata, resp_file_path):
        try:
            # Open the CSV file in append mode.
            with open(resp_file_path, mode='a', newline='') as file:
                writer = csv.DictWriter(file, fieldnames=fieldnames_resp)
                curr_time = datetime.now().strftime('%Y_%m_%d_%H_%M_%S_%f')
                writer.writerow({'Times': curr_time, 'R': respdata})
        except Exception as e:
            print(f"An error occurred: {e}")

    # Write the header row of the RESP CSV file.
    with open(file_RESP, mode='a', newline='') as resp_file:
        writer_header_resp = csv.DictWriter(resp_file, fieldnames=fieldnames_resp)
        writer_header_resp.writeheader()

    # Append one [HR, HRV, RR] row to the CSV file.
    def write_to_csv_HR_HRV_RR(HR_HRV_RR_data, HR_HRV_RR_file_path):
        try:
            # Open the CSV file in append mode.
            with open(HR_HRV_RR_file_path, mode='a', newline='') as file:
                writer = csv.DictWriter(file, fieldnames=fieldnames_HR_HRV_RR)
                curr_time = datetime.now().strftime('%Y_%m_%d_%H_%M_%S_%f')
                writer.writerow({'Times': curr_time, 'HR': HR_HRV_RR_data[0], 'HRV': HR_HRV_RR_data[1], 'RR': HR_HRV_RR_data[2]})
        except Exception as e:
            print(f"An error occurred: {e}")

    # Write the header row of the HR_HRV_RR CSV file.
    with open(file_HR_HRV_RR, mode='a', newline='') as HR_HRV_RR_file:
        writer_header_HR_HRV_RR = csv.DictWriter(HR_HRV_RR_file, fieldnames=fieldnames_HR_HRV_RR)
        writer_header_HR_HRV_RR.writeheader()

    # mpDraw = mp.solutions.drawing_utils  # face-drawing utility (disabled)
    mpFaceMesh = mp.solutions.face_mesh  # face-mesh model settings
    faceMesh = mpFaceMesh.FaceMesh(max_num_faces=1,
                                   refine_landmarks=True,
                                   min_detection_confidence=0.4,
                                   min_tracking_confidence=0.4)  # detect at most one face
                                # drawSpec = mpDraw.DrawingSpec((0, 244, 56), thickness=1, circle_radius=1)

    frame_num, count_false, count_HR, count_RR = 0, 0, 0, 0

    while True:
        # time.sleep(1/60)
        try:
            if not q_imgp.empty():

                frame = q_imgp.get()
                # Drop a frame when the producer runs ahead, keeping latency low.
                if q_imgp.qsize() > 1:
                    q_imgp.get()
                # print(f'imgp: {q_imgp.qsize()}')

                frame_num = frame_num + 1
                end_time = time.time()
                elapsed_time = end_time - start_time
                if elapsed_time >= 1.0:
                    rt_fps = frame_num / elapsed_time

                if frame is None:
                    continue
                img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                results = faceMesh.process(img)  # run the face-mesh model on the frame

                if results.multi_face_landmarks:

                    for faceLms in results.multi_face_landmarks:
                        # On older mediapipe versions replace FACEMESH_CONTOURS
                        # with FACE_CONNECTIONS if the call below errors out.
                        # mpDraw.draw_landmarks(frame, faceLms, mpFaceMesh.FACEMESH_CONTOURS, drawSpec, drawSpec)

                        for id, lm in enumerate(faceLms.landmark):
                            ih, iw, ic = img.shape
                            # Scale normalised landmark coordinates to pixels.
                            x1, y1, z1 = lm.x * iw, lm.y * ih, lm.z  # coordinates of landmark `id`
                            x, y = int(x1), int(y1)  # cast to int

                            # ROI7: left-cheek ROI
                            if id == 36:
                                x36, y36 = x, y
                            elif id == 118:
                                x118, y118 = x, y
                            elif id == 187:
                                x187, y187 = x, y
                                ROI70 = img[y118: y187, x187: x36, 0]
                                if not np.isnan(ROI70).all():
                                    ROI71 = img[y118: y187, x187: x36, 1]
                                    ROI72 = img[y118: y187, x187: x36, 2]
                                    ave70, ave71, ave72 = [np.nanmean(roi7) for roi7 in [ROI70, ROI71, ROI72]]
                                else:
                                    # All-NaN ROI: reuse the previous sample.
                                    # print("出现了NAN")
                                    # traceback.print_exc()
                                    ave70, ave71, ave72 = AVE70[-1], AVE71[-1], AVE72[-1]

                                AVE70.append(ave70)
                                AVE71.append(ave71)
                                AVE72.append(ave72)

                                ROI7_data = [ave70, ave71, ave72]

                            # ROI1: forehead ROI
                            if id == 67:
                                x67, y67 = x, y
                            elif id == 334:
                                x334, y334 = x, y
                                ROI10 = img[y67 - 30: y334 - 20, x67: x334, 0]
                                if not np.isnan(ROI10).all():
                                    ROI11 = img[y67 - 30: y334 - 20, x67: x334, 1]
                                    ROI12 = img[y67 - 30: y334 - 20, x67: x334, 2]
                                    ave10, ave11, ave12 = [np.nanmean(roi) for roi in [ROI10, ROI11, ROI12]]
                                else:
                                    # All-NaN ROI: reuse the previous sample.
                                    # print("出现了NAN")
                                    # traceback.print_exc()
                                    ave10, ave11, ave12 = AVE10[-1], AVE11[-1], AVE12[-1]

                                AVE10.append(ave10)
                                AVE11.append(ave11)
                                AVE12.append(ave12)

                                pix1.append(ave11)  # per-frame mean of channel 1 of this ROI

                                ROI1_data = [ave10, ave11, ave12]  # 10, 11, 12 are the three colour channels

                            # ROI11: right-cheek ROI
                            if id == 349:
                                x349, y349 = x, y
                            elif id == 411:
                                x411, y411 = x, y
                                ROI110 = img[y349: y411, x349: x411, 0]
                                if not np.isnan(ROI110).all():
                                    ROI111 = img[y349: y411, x349: x411, 1]
                                    ROI112 = img[y349: y411, x349: x411, 2]
                                    ave110, ave111, ave112 = [np.nanmean(roi11) for roi11 in [ROI110, ROI111, ROI112]]
                                else:
                                    # All-NaN ROI: reuse the previous sample.
                                    # print("出现了NAN")
                                    # traceback.print_exc()
                                    ave110, ave111, ave112 = AVE110[-1], AVE111[-1], AVE112[-1]

                                AVE110.append(ave110)
                                AVE111.append(ave111)
                                AVE112.append(ave112)

                                ROI11_data = [ave110, ave111, ave112]

                                ROI_data = ROI1_data + ROI7_data + ROI11_data
                                write_to_csv(ROI_data, file_rPPG)  # log all three ROIs' RGB means

                            # ROI5: eye-region ROI
                            if id == 225:
                                x225, y225 = x, y
                            elif id == 233:
                                x233, y233 = x, y
                                ROI5 = img[y225: y233, x225: x233, :]
                                # Put the eye-region image on the queue for GUI display (disabled)
                                # q_eye.put(ROI5)


                                # if not np.isnan(ROI5).all():
                                #     ave5 = np.nanmean(ROI5)
                                # else:
                                #     # print("出现了NAN")
                                #     ave5 = pix5[-1]
                                # pix5.append(ave5)

                            # ROI6: mouth-region ROI
                            if id == 216:
                                x216, y216 = x, y
                            elif id == 422:
                                x422, y422 = x, y
                                ROI6 = img[y216: y422, x216: x422, :]
                                # Put the mouth-region image on the queue for GUI display (disabled)
                                # q_mouth.put(ROI6)


                            # ROI13: chest-region ROI (respiration)
                            if id == 150:
                                x150, y150 = x, y
                                ROI13 = img[y150 + 20:y150 + 100, x150 - 60: x150 + 60, 0]
                                if not np.isnan(ROI13).all():
                                    ave13 = np.nanmean(ROI13)
                                else:
                                    # print("出现了NAN")
                                    ave13 = pix13[-1]
                                pix13.append(ave13)
                                write_to_csv_resp(ave13, file_RESP)

                        # Keep only the most recent rppg_len samples of each trace.
                        if len(AVE71) > rppg_len:
                            AVE70, AVE71, AVE72 = AVE70[-rppg_len:], AVE71[-rppg_len:], AVE72[-rppg_len:]
                        if len(AVE111) > rppg_len:
                            AVE110, AVE111, AVE112 = AVE110[-rppg_len:], AVE111[-rppg_len:], AVE112[-rppg_len:]
                        if len(pix1) > rppg_len:
                            pix1 = pix1[-rppg_len:]
                            AVE10, AVE11, AVE12 = AVE10[-rppg_len:], AVE11[-rppg_len:], AVE12[-rppg_len:]
                            A10, A11, A12 = np.array(AVE10), np.array(AVE11), np.array(AVE12)

                            # Project into the CHROM colour space to get the raw rPPG signal BVP_O.
                            Xs = A10 - A11
                            # NOTE(review): AVE11 is a plain list here (NumPy broadcasts it);
                            # after the trim above it equals A11.tolist(), so the result is the
                            # same, but A11 was presumably intended — confirm.
                            Ys = 1.5 * A10 + AVE11 - 1.5 * A12
                            alpha = np.std(Xs) / np.std(Ys)
                            BVP_O = Xs - alpha * Ys
                            BVP_O = BVP_O - np.mean(BVP_O)  # detrending (mean removal) is essential

                            # To use the measured real-time fps instead, run this:
                            # lowcut = 0.8  # low cut-off frequency
                            # highcut = 2.5  # high cut-off frequency
                            # nyquist_frequency = fps /2  # Nyquist frequency, half the sampling rate
                            # low = lowcut / nyquist_frequency
                            # high = highcut / nyquist_frequency
                            # b, a = signal.butter(4, [low, high], btype='band', analog=False)[:2]  # 4th-order Butterworth band-pass

                            BVP = signal.lfilter(b, a, BVP_O)  # band-pass filter the raw rPPG signal

                            # Heart rate from the dominant FFT frequency.
                            fft_result = fft(BVP)
                            N = len(fft_result)
                            frequencies = np.fft.fftfreq(N, 1 / fps)  # frequency axis
                            main_frequency_index = np.argmax(np.abs(fft_result[:N // 2]))  # dominant peak, positive frequencies only
                            main_frequency = frequencies[main_frequency_index]
                            HR_rPPG = main_frequency * 60
                            HR_rPPG_AVE.append(HR_rPPG)

                            # HRV from the variability of peak-to-peak intervals.
                            peaks, _ = find_peaks(BVP, distance=15)  # peaks at least 15 samples apart
                            rr_intervals_samples = np.diff(peaks)  # samples between adjacent peaks
                            rr_intervals_ms = rr_intervals_samples * (1000 / fps)  # convert intervals to milliseconds
                            hrv_intervals_ms = np.diff(rr_intervals_ms)
                            HRV_rPPG = np.std(abs(hrv_intervals_ms))
                            # HRV_rPPG_AVE.append(HRV_rPPG)

                            # if len(HRV_rPPG_AVE) > 300:
                            #     hrv_ave = np.mean(HRV_rPPG_AVE)  # HRV mean

                            if len(HR_rPPG_AVE) > 180:
                                HRV_rPPG_show = HRV_rPPG
                                # HR_rPPG_AVE = HR_rPPG_AVE[-150:]
                                HR_rPPG = np.mean(HR_rPPG_AVE)  # mean of the accumulated heart-rate values
                                # Publish the averaged heart rate.
                                q_rPPG_HR.put(HR_rPPG)

                                HR_rPPG_AVE = []

                            # BVP = (BVP - np.min(BVP)) / (np.max(BVP) - np.min(BVP))
                            # NOTE(review): np.max(...) never returns None, so the second test
                            # is always true; it looks like a NaN guard was intended — confirm.
                            if np.max(BVP) != 0 and np.max(BVP) is not None:
                                BVP = BVP / np.max(BVP)

                            BVP = BVP.tolist()
                            q_rPPG_BVP.put(BVP)


                        else:
                            # Not enough samples yet: publish the raw forehead trace.
                            BVP = pix1
                            # BVP = (BVP - np.min(BVP)) / (np.max(BVP) - np.min(BVP)).tolist()
                            if np.max(BVP) != 0 and np.max(BVP) is not None:
                                BVP = (BVP / np.max(BVP)).tolist()
                            q_rPPG_BVP.put(BVP)


                        if len(pix13) > rppg_len:
                            pix13 = pix13[-rppg_len:]
                            resp = np.array(pix13) - np.mean(pix13)
                            RESP = signal.lfilter(b2, a2, resp)
                            # Respiration rate from the dominant FFT frequency.
                            fft_result = fft(RESP)
                            N = len(fft_result)
                            frequencies = np.fft.fftfreq(N, 1 / fps)  # frequency axis
                            main_frequency_index = np.argmax(np.abs(fft_result[:N // 2]))  # dominant peak, positive frequencies only
                            main_frequency = frequencies[main_frequency_index]
                            RR_rPPG = main_frequency * 60
                            RR_rPPG_AVE.append(RR_rPPG)

                            if len(RR_rPPG_AVE) > 90:
                                RR_rPPG_Show = np.mean(RR_rPPG_AVE)  # mean of the accumulated respiration-rate values
                                # RR_rPPG_Show_cum.append(RR_rPPG_Show)
                                RR_rPPG_AVE = []

                            # Log HR, HRV and RR to the CSV file.
                            HR_HRV_RR_data = [HR_rPPG, HRV_rPPG, RR_rPPG]
                            q_HR_HRV_RR_data = [HR_rPPG, int(HRV_rPPG_show), RR_rPPG_Show]
                            write_to_csv_HR_HRV_RR(HR_HRV_RR_data, file_HR_HRV_RR)

                            count_HR += 1
                            if count_HR >= 30:
                                q_rPPG_RR.put(q_HR_HRV_RR_data)
                                count_HR = 0

                            # NOTE(review): as above, the `is not None` test is always true.
                            if np.max(RESP) != 0 and np.max(RESP) is not None:
                                RESP = RESP / np.max(RESP)

                            RESP = RESP.tolist()
                            q_rPPG_RESP.put(RESP)

                        else:
                            RESP = pix13
                            if np.max(RESP) != 0 and np.max(RESP) is not None:
                                RESP = (RESP / np.max(RESP)).tolist()

                            q_rPPG_RESP.put(RESP)


                        # if len(pix5) > rppg_len:
                        #     pix5 = pix5[-rppg_len:]

                else:
                    # No face found: fall back to whole-frame channel means so
                    # the plots and CSV logs keep advancing.
                    ROI10, ROI11, ROI12 = [img[:, :, i] for i in range(3)]
                    ave10, ave11, ave12 = [np.mean(roi) for roi in [ROI10, ROI11, ROI12]]

                    BVP.append(ave11)  # per-frame mean of channel 1
                    if len(BVP) > rppg_len:
                        BVP = BVP[-rppg_len:]

                    pix13.append(ave10)
                    if len(pix13) > rppg_len:
                        pix13 = pix13[-rppg_len:]

                    datatocsv = [ave10, ave11, ave12, ave10, ave11, ave12, ave10, ave11, ave12]
                    write_to_csv(datatocsv, file_rPPG)  # log the channel means as rPPG data
                    write_to_csv_resp(ave10, file_RESP)

                    q_rPPG_BVP.put(BVP)


                    q_rPPG_RESP.put(pix13)

                    count_false = count_false + 1  # count frames without a detected face

        except Exception as e:
            print(f'Error in video_processing function: {e}')


# Function video_processing_HP: head-pose, eye and mouth analysis (AUs, PERCLOS) for fatigue scoring
def video_processing_HP(q_imghp, q_status, q_HPdata, q_face_status):
    from EyeDetectionModule import EyeDetection as EyeDet
    from MouthDetectionModule import MouthDetection as MouthDet
    from PoseAnalysisModule import PoseAnalysis as PoseAnal
    from FocusEvaluatorModule import FocusEvaluator as FocusEval
    from scipy.spatial import distance as dist
    left_eye_radius = 0
    right_eye_radius = 0
    HP_data = [0.2, 0.5, 0.2, 0.1, 0.7, 0.14, 0.12, 0.3, 0.25, 0.9, 0.11, 0.35, 0.4, 0.15]
    fieldnames_HP = ['Times', 'Rota_x', 'Rota_y', 'Rota_z', 'Left_eye_radius', 'Right_eye_radius', 'Ear', 'Mar',
                     'Perclos', 'Perblink', 'Peryawn', 'AU44', 'AU12', 'AU45', 'Fatigue_Score_10min']

    def extract_facial_landmarks(face_landmarks):
        """
        Extracts the largest facial landmarks from the provided list of landmarks.

        Args:
            face_landmarks (list): A list of facial landmark objects.

        Returns:
            numpy.ndarray: An array containing the largest facial landmarks.
        """

        largest_area = 0
        largest_landmarks = 0
        for face_lm_set in face_landmarks:
            landmarks_array = [np.array([point.x, point.y, point.z]) for point in face_lm_set.landmark]

            landmarks_array = np.array(landmarks_array)

            # Ensure landmarks are within the range [0, 1]
            landmarks_array[landmarks_array[:, 0] < 0., 0] = 0.
            landmarks_array[landmarks_array[:, 0] > 1., 0] = 1.
            landmarks_array[landmarks_array[:, 1] < 0., 1] = 0.
            landmarks_array[landmarks_array[:, 1] > 1., 1] = 1.

            # Calculate the dimensions of the bounding box
            dx = landmarks_array[:, 0].max() - landmarks_array[:, 0].min()
            dy = landmarks_array[:, 1].max() - landmarks_array[:, 1].min()
            area = dx * dy

            # Update the largest area and corresponding landmarks
            if area > largest_area:
                largest_landmarks = landmarks_array
                largest_area = area

        return largest_landmarks

    # 保存检测结果的List
    # 眼睛和嘴巴都是，张开为‘1’，闭合为‘0’
    video_fps = fps  # 视频fps=20
    list_B = np.ones(video_fps * 3)  # 眼睛状态List,建议根据fps修改，视频fps=20
    list_Y = np.zeros(video_fps * 10)  # 嘴巴状态list，10s

    list_Y1 = np.ones(int(video_fps * 1.5))  # 如果在list_Y中存在list_Y1，则判定一次打哈欠(大约1.5s)，
    list_Y1[int(video_fps * 1.5) - 1] = 0  # 从持续张嘴到闭嘴判定为一次打哈欠

    list_blink = np.zeros(10)  # 大约是记录10S内信息，眨眼为‘1’，不眨眼为‘0’
    list_yawn = np.zeros(30)  # 大约是半分钟内打哈欠记录，打哈欠为‘1’，不打哈欠为‘0’
    yawn_count = 0

    """
        Instantiation of the Mediapipe face mesh model. This model returns 478 landmarks
        if the refine_landmarks parameter is set to True. It provides 468 landmarks for the face
        and the remaining 10 landmarks for the irises.
        """
    detector = mp.solutions.face_mesh.FaceMesh(static_image_mode=False,
                                               min_detection_confidence=0.3,
                                               min_tracking_confidence=0.3,
                                               refine_landmarks=True)

    # Instantiation of the eye detection and pose analysis objects
    EyeDetector = EyeDet(display_processing=True)
    MouthDetector = MouthDet(display_processing=False)
    HeadPoseAnalysis = PoseAnal(show_axis=True)

    # Instantiation of the attention evaluation object, along with various thresholds
    start_time = time.perf_counter()
    AttentionEvaluator = FocusEval(t_now=start_time, ear_threshold=0.15, mar_threshold=0.080,
                                   gaze_time_threshold=3,
                                   roll_threshold=20,
                                   pitch_threshold=20,
                                   yaw_threshold=20,
                                   ear_time_threshold=2,
                                   mar_time_threshold=3,
                                   gaze_threshold=0.015,
                                   pose_time_threshold=3)

    with open(file_HP, mode='a', newline='') as HP_file:
        writer_header_resp = csv.DictWriter(HP_file, fieldnames=fieldnames_HP)
        writer_header_resp.writeheader()

    # 将头部姿态(HP)数据实时保存
    def write_to_csv_HP(HPdata, resp_file_path):
        try:
            # 打开CSV文件以附加模式写入数据
            with open(resp_file_path, mode='a', newline='') as file:
                writer = csv.DictWriter(file, fieldnames=fieldnames_HP)
                curr_time = datetime.now().strftime('%Y_%m_%d_%H_%M_%S_%f')
                writer.writerow({'Times': curr_time, 'Rota_x': HPdata[0], 'Rota_y': HPdata[1], 'Rota_z': HPdata[2],
                                 'Left_eye_radius': HPdata[3], 'Right_eye_radius': HPdata[4],
                                 'Ear': HPdata[5], 'Mar': HPdata[6], 'Perclos': HPdata[7], 'Perblink': HPdata[8],
                                 'Peryawn': HPdata[9],
                                 'AU44': HPdata[10], 'AU12': HPdata[11], 'AU45': HPdata[12],
                                 'Fatigue_Score_10min': HPdata[13]
                                 })
        except Exception as e:
            print(f"An error occurred: {e}")

    counter, counter_q = 0, 0
    Fatigue_Score_10min = 0
    mpFaceMesh = mp.solutions.face_mesh
    faceMesh_Iris = mpFaceMesh.FaceMesh(max_num_faces=1,
                                        refine_landmarks=True,
                                        min_detection_confidence=0.4,
                                        min_tracking_confidence=0.4)
    LEFT_IRIS = [474, 475, 476, 477]
    RIGHT_IRIS = [469, 470, 471, 472]
    EYEMou_list = [14, 78, 145, 154, 163, 308, 374, 381, 390]
    while True:
        # time.sleep(1/60)
        try:

            if not q_imghp.empty():
                frame = q_imghp.get()
                if q_imghp.qsize() > 1:
                    q_imghp.get()


                if frame is None:
                    continue
                current_time_TMP = time.perf_counter()
                # fps = counter / (current_time_TMP - start_time)
                # if fps == 0:
                #     fps = 10

                tick_start = cv2.getTickCount()
                # Convert the RGB frame to grayscale
                grayscale_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

                # Get the frame size
                frame_size = frame.shape[1], frame.shape[0]

                # Apply a bilateral filter to reduce noise while preserving details
                # Create a 3D matrix from the grayscale image to provide it to the model
                grayscale_frame = np.expand_dims(cv2.bilateralFilter(grayscale_frame, 5, 10, 10), axis=2)
                grayscale_frame = np.concatenate([grayscale_frame, grayscale_frame, grayscale_frame], axis=2)

                # Find faces using the face mesh model
                landmarks = detector.process(grayscale_frame).multi_face_landmarks
                img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                results_Iris = faceMesh_Iris.process((img))

                if landmarks:  # Process the frame only if at least one face is found
                    flag_B = True  # 是否闭眼的flag
                    flag_Y = False  # 张嘴flag
                    # Get facial landmarks and extract the bounding box of the largest face
                    if results_Iris.multi_face_landmarks:
                        mesh_points = np.array(
                            [np.multiply([p.x, p.y], [frame.shape[1], frame.shape[0]]).astype(int) for p in
                             results_Iris.multi_face_landmarks[0].landmark])
                        left_eye_center, left_eye_radius = cv2.minEnclosingCircle(mesh_points[LEFT_IRIS])
                        right_eye_center, right_eye_radius = cv2.minEnclosingCircle(mesh_points[RIGHT_IRIS])
                    for faceLms in landmarks:
                        mp_data = []
                        for id, lm in enumerate(faceLms.landmark):
                            ih, iw, ic = img.shape
                            x, y = int(lm.x * iw), int(lm.y * ih)
                            if id in EYEMou_list:  # id_list = [105, 46, 55, 334, 285, 276]:
                                # id_list = [46, 55, 105, 276, 285, 334]
                                mp_data.append([x, y])
                    left_EYE_center = [(mp_data[7][0] + mp_data[8][0]) / 2, (mp_data[7][1] + mp_data[8][1]) / 2]
                    right_EYE_center = [(mp_data[3][0] + mp_data[4][0]) / 2, (mp_data[3][1] + mp_data[4][1]) / 2]
                    lEYE_width = dist.euclidean(mp_data[7], mp_data[8])
                    lEYE_height = dist.euclidean(mp_data[6], left_EYE_center)
                    lEYE = lEYE_height / lEYE_width
                    rEYE_height = dist.euclidean(mp_data[2], right_EYE_center)
                    rEYE_width = dist.euclidean(mp_data[3], mp_data[4])
                    rEYE = rEYE_height / rEYE_width
                    EYE = (lEYE + rEYE) / 2
                    EYE = 1 - max(0, min(EYE * 10, 1))

                    Mouth_center = [(mp_data[1][0] + mp_data[5][0]) / 2, (mp_data[1][1] + mp_data[5][1]) / 2]
                    Mouth_width = dist.euclidean(mp_data[1], mp_data[5])
                    Mouth_height = dist.euclidean(mp_data[0], Mouth_center)
                    Mouth = Mouth_height / Mouth_width
                    Mouth = max(0, min(Mouth * 10, 1))

                    # print(lEYE, rEYE, EYE, Mouth)
                    largest_landmarks = extract_facial_landmarks(landmarks)

                    # Show the eye keypoints (optional)
                    EyeDetector.show_eye_keypoints(color_frame=frame, landmarks=largest_landmarks,
                                                   frame_size=frame_size)

                    # Compute the average Eye Aspect Ratio (EAR) score for both eyes
                    ear_score = EyeDetector.get_EAR(landmarks=largest_landmarks)

                    # shows the mouth keypoints (can be commented)
                    MouthDetector.show_mouth_keypoints(color_frame=frame, landmarks=largest_landmarks,
                                                       frame_size=frame_size)
                    # compute the MAR score of the eyes
                    mar_score = MouthDetector.get_MAR(landmarks=largest_landmarks)

                    # Compute the PERCLOS score and tiredness state
                    tiered_by_blinking, perclos_score = AttentionEvaluator.get_PERCLOS_EAR(current_time_TMP, fps,
                                                                                           ear_score)
                    tiered_by_yawning, perm = AttentionEvaluator.get_PERCLOS_MAR(current_time_TMP, fps,
                                                                                 mar_score=mar_score)
                    # print(perclos_score, perm)
                    # Compute the Gaze Score
                    gaze_score = EyeDetector.calculate_gaze_score(face_landmarks=largest_landmarks)

                    # Compute the head pose
                    frame, roll_angle, pitch_angle, yaw_angle = HeadPoseAnalysis.estimate_pose(
                        image=frame, detected_landmarks=largest_landmarks, image_size=frame_size)

                    # Evaluate scores for EAR, Gaze, and Head Pose
                    asleep_state, looking_away_state, distracted_state = AttentionEvaluator.eval_attention_state(
                        t_now=current_time_TMP,
                        ear_score=ear_score,
                        gaze_score=gaze_score,
                        head_roll=roll_angle,
                        head_pitch=pitch_angle,
                        head_yaw=yaw_angle)

                    # print(Fatigue_Score_10min)
                    # Stop the tick counter for computing the processing time for each frame
                    tick_end = cv2.getTickCount()
                    # Processing time in milliseconds
                    frame_processing_time_ms = ((tick_end - tick_start) / cv2.getTickFrequency()) * 1000

                    # # Show the frame on the screen
                    # cv2.imshow("Press 'q' to terminate", frame)
                    # # If the 'q' key is pressed on the keyboard, terminate the program
                    # if cv2.waitKey(20) & 0xFF == ord('q'):
                    #     break

                    if mar_score > 0.070:
                        flag_Y = True
                        # print(mar_score)

                    # else:
                    # print("mouth close")
                    if ear_score < 0.2:
                        flag_B = False
                    if flag_B:
                        # print(' 1:eye-open')
                        list_B = np.append(list_B, 1)  # 睁眼为‘1’
                    else:
                        # print(' 0:eye-closed')
                        list_B = np.append(list_B, 0)  # 闭眼为‘0’
                    list_B = np.delete(list_B, 0)
                    if flag_Y:
                        list_Y = np.append(list_Y, 1)  # 张嘴为‘1’
                    else:
                        list_Y = np.append(list_Y, 0)
                    list_Y = np.delete(list_Y, 0)

                    if list_B[video_fps * 3 - 2] == 1 and list_B[video_fps * 3 - 1] == 0:
                        # if list_B[video_fps * 3 - 2] == 0:
                        # 如果上一帧为’1‘，此帧为’0‘则判定为眨眼
                        # print('----------------眨眼----------------------')
                        BLINK = 1
                        list_blink = np.append(list_blink, 1)
                        list_blink = np.delete(list_blink, 0)
                    else:
                        BLINK = 0

                    # print(list_blink)
                    # 检测打哈欠
                    # if Yawn(list_Y,list_Y1):
                    if (list_Y[len(list_Y) - len(list_Y1):] == list_Y1).all():
                        print('----------------------打哈欠----------------------')
                        yawn_count += 1
                        list_Y = np.zeros(50)  # 此处是检测到一次打哈欠之后将嘴部状态list全部置‘0’，考虑到打哈欠所用时间较长，所以基本不会出现漏检
                        list_yawn = np.append(list_yawn, 1)
                        list_yawn = np.delete(list_yawn, 0)  # 删除掉数组里面第一个数据

                    # 实时计算PERCLOS perblink,peryawn
                    # 即计算平均闭眼时长百分比，平均眨眼百分比，平均打哈欠百分比
                    perclos = 1 - np.average(list_B)
                    perblink = max(0, min(np.average(list_blink), 1))
                    peryawn = max(0, min(np.average(list_yawn), 1))
                    mar_score = max(0, min(mar_score * 10, 1))
                    # print(perblink, peryawn)
                    Fatigue_Score = asleep_state * 0.6 + distracted_state * 0.1 + looking_away_state * 0.05 + tiered_by_blinking * 0.15 + tiered_by_yawning * 0.1 + perclos * 0.5

                    counter_q += 1
                    if counter_q > 30*30:
                        q_face_status.put(Fatigue_Score)
                        if q_face_status.qsize() > 1:
                            q_face_status.get()
                        counter_q = 0

                    counter += 1
                    if counter == fps * 600:

                        # q_status.put(Fatigue_Score)
                        # if q_status.qsize() > 1:
                        #     q_status.get()
                        counter = 0

                    Fatigue_Score_10min = max(0, min(Fatigue_Score, 1))

                    if counter % fps == 0:
                        list_yawn = np.append(list_yawn, 0)
                        list_yawn = np.delete(list_yawn, 0)  # 删除掉数组里面第一个数据
                        list_blink = np.append(list_blink, 0)
                        list_blink = np.delete(list_blink, 0)
                    # print(pitch_angle, ear_score)
                    HP_data = [round(pitch_angle[0], 3), round(yaw_angle[0], 3), round(roll_angle[0], 3),
                               round(left_eye_radius, 3), round(right_eye_radius, 3),
                               round(ear_score, 3), round(mar_score, 3),
                               round(perclos, 3), round(perblink, 3), round(peryawn, 3),
                               round(EYE, 3), round(Mouth, 3), round(BLINK, 3),
                               round(Fatigue_Score_10min, 3)]
                    # print(HP_data)
                    write_to_csv_HP(HP_data, file_HP)  # 将此ROI RGB三通道数据存入指定文件夹
                    q_HPdata.put(HP_data)
                    # print(fps)
                else:
                    # HP_data = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                    # HP_data = [0.2, 0.5, 0.2, 0.1, 0.7, 0.14, 0.12, 0.3, 0.25, 0.9, 0.11, 0.35, 0.4, 0.15]
                    # HP_data
                    write_to_csv_HP(HP_data, file_HP)  # 将此ROI RGB三通道数据存入指定文件夹

                    q_HPdata.put(HP_data)

                    # print('nothing detected')
                    ###############################################################################################################眼动方向结束


        except Exception as e:
            print(f'Error in video_processing_HP function: {e}')


# Function pad: poll the seat-cushion module and forward its data to the GUI queue.
def pad(q_pad):
    """Poll the seat-cushion endpoint at ~5 Hz and fan results out to ``q_pad``.

    The endpoint returns a 5-tuple: a dict of pressure matrices, a dict with
    key 'DL', a dict with key 'Posture', centre-of-gravity axis data and a
    wavelet-feature list — presumably; confirm against the AGX service. One
    queue item is emitted per pressure matrix in the response.

    Fix: the request/parse step is now wrapped in try/except (matching the
    sibling ``watch`` worker) so a transient network or JSON error no longer
    kills this infinite polling loop.
    """
    while True:
        try:
            response = requests.get(url + '/pad')
            if response.status_code == 200:
                pad_data, dl, posture, axis, wavelet_features = response.json()

                print(f'坐垫数据测试：{wavelet_features}')
                print(response.json())

                # One queue item per pressure matrix; the scalar fields ride along.
                for key, values in pad_data.items():
                    q_pad.put((values, dl['DL'], posture['Posture'], axis, wavelet_features))
        except Exception as e:
            print(f'pad error occurred: {e}')

        time.sleep(1/5)


def watch(q_watch):
    """Poll the smartwatch endpoint once per second.

    Each successful response is forwarded to ``q_watch`` and appended as a
    timestamped row to the watch CSV log (module-level ``file_watch`` and
    ``fieldnames``).  Request failures are printed and polling continues.
    """
    while True:
        try:
            reply = requests.get(url + '/watch')
            if reply.status_code == 200:
                payload = reply.json()
                q_watch.put(payload)

                # Payload layout: [HR, blood-oxygen, HRV, temperature].
                hr_value = payload[0]
                bos_value = payload[1]
                hrv_value = payload[2]
                temp_value = payload[3]

                with open(file_watch, mode='a', newline='') as log_file:
                    row_writer = csv.DictWriter(log_file, fieldnames=fieldnames)
                    stamp = datetime.now().strftime('%Y_%m_%d_%H_%M_%S_%f')
                    row_writer.writerow({'Times': stamp, 'HR': hr_value, 'BOS': bos_value,
                                         'HRV': hrv_value, 'TEMP': temp_value})
        except Exception as e:
            print(f'watch error occurred: {e}')

        time.sleep(1)


def start_gui():
    """Start every worker process plus the monitor thread, then arm the GUI timer.

    The process/thread objects, ``v_timer2`` and the ``updateHR`` slot are
    module-level globals created elsewhere in this file.
    """
    # Launch the acquisition / processing workers in the original order.
    workers = (process1, process2, process3, process4,
               process5, process6, process8, process9,
               monitor_thread)
    for worker in workers:
        worker.start()

    # Refresh the displayed heart-rate data every 5 seconds.
    v_timer2.start(int(5000))
    v_timer2.timeout.connect(updateHR)


def calculate_mean(multi_data):
    """Feed one raw sample per feature into the rolling windows; return window means.

    Each entry of ``multi_data`` is appended to the matching deque in the
    module-level ``multidata_queues`` (same ordering).  Once a deque is full
    (``maxlen`` reached) its mean is reported under that feature name and the
    deque is emptied; until then the feature maps to ``None``.  The final
    queue key ('Pad') carries no slot in ``multi_data`` and is skipped.
    """
    means = {}
    keys = list(multidata_queues)
    # Skip the last key: multi_data holds one fewer value than there are queues.
    for idx, key in enumerate(keys[:-1]):
        window = multidata_queues[key]
        window.append(multi_data[idx])
        if len(window) == window.maxlen:
            means[key] = sum(window) / len(window)
            window.clear()
        else:
            means[key] = None
    return means

# Last face-based fatigue score seen on q_face_status (module-level cache).
face_status = 0
def fatigue_detection(q_multimodal, q_prediction_result, q_status, q_face_status):
    """Run the multimodal fatigue model in a 10-second loop.

    Feature bundles arriving on ``q_multimodal`` are scored by
    ``main_multimodal``.  When a face-based fatigue score is available on
    ``q_face_status`` the model score is nudged upward and capped at 0.95;
    otherwise the raw score is used.  The rounded float64 result is pushed
    to both ``q_prediction_result`` and ``q_status``.

    Fix: the except handler previously printed the literal text '{e}'
    because the f-string prefix was missing; the exception is now shown.
    """
    global face_status
    while True:
        try:
            if not q_multimodal.empty():
                data_to_multimodal = q_multimodal.get()
                prediction_results = main_multimodal(data_to_multimodal)

                if not q_face_status.empty():
                    face_status = q_face_status.get()
                    # Blend in the face score: fixed boost above 0.9,
                    # proportional boost above 0.4, no change otherwise.
                    if face_status > 0.9:
                        prediction_result = prediction_results + 0.05
                    elif face_status > 0.4:
                        prediction_result = prediction_results + (face_status - 0.4) * 0.1
                    else:
                        prediction_result = prediction_results
                    # Cap the blended score so it never saturates to 1.0.
                    if prediction_result > 0.95:
                        prediction_result = 0.95
                else:
                    prediction_result = prediction_results

                prediction_result = round(prediction_result, 3)
                prediction_result = np.array(prediction_result, dtype=np.float64)

                q_prediction_result.put(prediction_result)
                q_status.put(prediction_result)
        except Exception as e:
            # FIX: was print('模型error!!! : {e}') — missing f-prefix meant the
            # actual exception text was never interpolated.
            print(f'模型error!!! : {e}')
        time.sleep(10)


# ---- Shared module-level state for the multimodal pipeline ----

# Feature ordering shared by calculate_mean() and the websocket sender.
feature_names = ['Rota_x', 'Rota_y', 'Rota_z', 'Left_eye_radius', 'Right_eye_radius', 'Ear', 'Mar', 'AU44', 'AU12', 'AU45', 'Perclos',
                 'Perblink', 'RESP', 'BOS', 'HRV', 'TEMP', 'HR', 'Pad']
# Per-feature rolling windows of the 30 most recent raw samples.
multidata_queues = {name: deque(maxlen=30) for name in feature_names}

max_detection_time = 300  # detection interval
# Per-feature windows of up to max_detection_time window-means.
means_dict = {name: deque(maxlen=max_detection_time) for name in feature_names}
count_time0 = 0
max_len = 600
# HP_data = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
# Default head-pose feature vector used until real detections arrive.
HP_data = [0.2, 0.5, 0.2, 0.1, 0.7, 0.14, 0.12, 0.3, 0.25, 0.9, 0.11, 0.35, 0.4, 0.15]

# Signal buffers and default physiological values (values kept exactly as in
# the original assignment; the HRV/RR max-min defaults look swapped there —
# preserved on purpose).
RESP = [0]
BVP = [0]
mean_HR_Video, mean_RR, mean_HRV_Video = 84, 12, 140
MAX_HR, MAX_HRV, MAX_RR = 84, 12, 140
MIN_HR, MIN_HRV, MIN_RR = 84, 12, 140
HR_Video_save = []
RR_save = []
HRV_Video_save = []
tenmin_HR_SAVE = []
tenmin_HRV_SAVE = []
tenmin_RR_SAVE = []
HR_Video, HRV_Video, HR_Watch, RR, TEMP, SPO2, HR_Watch_Mem = 84, 140, 84, 12, 36.2, 96, 84

# Placeholder seat-cushion / status values until real device data arrives.
MCD = [random.randint(0, 1) for _ in range(9)]

wavelet_features = [random.randint(1, 10) for _ in range(8)]
LCG = [random.randint(6, 15), random.randint(7, 10)]
RCG = [random.randint(6, 15), random.randint(7, 10)]
WE = [random.randint(60, 100)]
zuozi = [0]
DL = [round(random.uniform(0, 1), 1)]
Pad_data = [[random.uniform(0, 1) for _ in range(32)] for _ in range(32)]
count_sockettime, count_multi, count_FI = 0, 0, 0
runNum = 0
SELECT_dtobj = ''
currentFile_TIMESTR = ''
fileTIME = ''
DB_table_dict = {}
DB_table = []
DATA_res = ''
# Base reference time for history queries.
start_time = datetime(2024, 5, 29, 14, 0, 0)
data_sql = ''
send_from_db = False
img_base64 = ""
ROW_COUNT = 0
row_count = 0
datawebhis_TMP = {}
FI = 0.6
count_FI_0 = 0
# Rebuilt here exactly as in the original file (the first MCD is discarded;
# the extra RNG draws are preserved deliberately).
MCD = [random.randint(0, 1) for _ in range(9)]
# FI = deque([0]*10, maxlen=30)

def run_server(q_imgweb, q_rPPG_RESP, q_rPPG_BVP, q_rPPG_RR, q_watch, q_rPPG_HR, q_HPdata, q_pad, q_webishis, q_sqliscur, q_datawebcur, q_datawebhis, q_multimodal, q_prediction_result):
    async def camera_stream(websocket, path):
        global HR_Video, HRV_Video, HR_Watch, RR, TEMP, SPO2, RESP, BVP, mean_HR_Video, mean_RR, mean_HRV_Video, HR_Video_save, RR_save, HRV_Video_save, count_time0, \
            MAX_HR, MAX_HRV, MAX_RR, MIN_HR, MIN_HRV, MIN_RR, tenmin_HR_SAVE, tenmin_HRV_SAVE, tenmin_RR_SAVE, HP_data, MCD, LCG, RCG, WE, DL, Pad_data, zuozi, count_sockettime, \
            count_multi, multidata_dict, wavelet_features, FI, count_FI, MCD, count_FI_0, HR_Watch_Mem
        global file_HP, runNum, SELECT_dtobj, fileTIME, start_time, DATA_res, DB_table_dict, DB_table, data_sql, send_from_db, img_base64, currentFile_TIMESTR, ROW_COUNT, row_count, datawebhis_TMP, folder_save
        try:
            # 创建一个游标对象
            cursor = conn.cursor()
            # 执行SHOW DATABASES语句
            cursor.execute('SHOW DATABASES')
            # 获取查询结果
            result = cursor.fetchall()
            # 打印结果

            DB_table_dict = {}
            DB_table = []
            hp_redatetimeobj = file_HP.split('HP_data_')[-1].split('.csv')[0]
            for row in result:
                database_name = row[0]
                if database_name.startswith('information_schema') or database_name.startswith(
                        'mysql') or database_name.startswith(
                    'performance_schema') or database_name.startswith(
                    'sys'):
                    continue
                # print(f'Database: {database_name}')

                # 创建一个游标对象
                cursor = conn.cursor()
                # 执行SHOW TABLES语句
                cursor.execute(f'USE {database_name};')
                cursor.execute('SHOW TABLES;')
                # 获取查询结果
                tables = cursor.fetchall()
                # 打印结果
                for table in tables:
                    table_name = table[0]
                    # if table_name.startswith(f'zgh_data_{hp_redatetimeobj}'):
                    #     continue
                    parts = table_name.split('_')
                    if len(parts) >= 8:
                        part1 = parts[2] + parts[3] + parts[4]
                        part2 = parts[5] + parts[6] + parts[7]
                    else:
                        part1 = '0'
                        part2 = '0'
                    part_key = f'{part1}_{part2}'
                    part_dict = {part_key: f'{database_name}.{table_name}'}
                    DB_table_dict.update(part_dict)
                    DB_table.append(part_key)
                    # print(part_key)
        except Exception as e:
            print(f'DB_table error:{e}')

        current_second = 0
        run_framenum = 0
        count_hrwatch = 0
        sqliscur_TMP = False
        try:
            while True:
                # runNum += 1

                if not q_rPPG_RESP.empty():
                    RESP_A = q_rPPG_RESP.get()
                    RESP = RESP_A
                    if q_rPPG_RESP.qsize() > 1:
                        q_rPPG_RESP.get()

                if not q_rPPG_BVP.empty():
                    BVP_A = q_rPPG_BVP.get()
                    BVP = BVP_A
                    if q_rPPG_BVP.qsize() > 1:
                        q_rPPG_BVP.get()

                if not q_rPPG_RR.empty():
                    # 展示视频心率HR
                    hr_hrv_rr = q_rPPG_RR.get()
                    if q_rPPG_RR.qsize() > 1:
                        q_rPPG_RR.get()
                    # 呼吸率rr
                    RR = int(hr_hrv_rr[2])
                    # 心率变异性HRV
                    HRV_Video = int(hr_hrv_rr[1])

                if not q_watch.empty():
                    data = q_watch.get()
                    HR_Watch = data[0]
                    SPO2 = data[1]
                    if SPO2 == 0:
                        SPO2 = 96
                    TEMP = data[3]

                if not q_rPPG_HR.empty():
                    HR_Video = q_rPPG_HR.get()
                    if q_rPPG_HR.qsize() > 1:
                        q_rPPG_HR.get()

                count_time0 += 1
                if count_time0 <= 50:
                    HR_Video_save.append(HR_Video)
                    HRV_Video_save.append(HRV_Video)
                    RR_save.append(RR)
                else:
                    mean_HR_Video = int(np.mean(HR_Video_save))
                    mean_HRV_Video = int(np.mean(HRV_Video_save))
                    mean_RR = int(np.mean(RR_save))

                    tenmin_HR_SAVE.append(mean_HR_Video)
                    tenmin_HRV_SAVE.append(mean_HRV_Video)
                    tenmin_RR_SAVE.append(mean_RR)

                    if len(tenmin_HR_SAVE) <= max_len:
                        MAX_HR = np.max(tenmin_HR_SAVE).tolist()
                        MAX_HRV = np.max(tenmin_HRV_SAVE).tolist()
                        MAX_RR = np.max(tenmin_RR_SAVE).tolist()
                        MIN_HR = np.min(tenmin_HR_SAVE).tolist()
                        MIN_HRV = np.min(tenmin_HRV_SAVE).tolist()
                        MIN_RR = np.min(tenmin_RR_SAVE).tolist()
                    else:
                        tenmin_HR_SAVE = tenmin_HR_SAVE[-max_len:]
                        tenmin_HRV_SAVE = tenmin_HRV_SAVE[-max_len:]
                        tenmin_RR_SAVE = tenmin_RR_SAVE[-max_len:]
                        MAX_HR = np.max(tenmin_HR_SAVE).tolist()
                        MAX_HRV = np.max(tenmin_HRV_SAVE).tolist()
                        MAX_RR = np.max(tenmin_RR_SAVE).tolist()
                        MIN_HR = np.min(tenmin_HR_SAVE).tolist()
                        MIN_HRV = np.min(tenmin_HRV_SAVE).tolist()
                        MIN_RR = np.min(tenmin_RR_SAVE).tolist()

                    # print(f'MIN_RR: {MIN_RR}')

                    HR_Video_save, HRV_Video_save, RR_save = [], [], []
                    count_time0 = 0

                if not q_HPdata.empty():
                    HP_data = q_HPdata.get()
                    if q_HPdata.qsize() > 1:
                        q_HPdata.get()
                (Rota_x, Rota_y, Rota_z, Left_eye_radius, Right_eye_radius, Ear, Mar, Perclos, Perblink, Peryawn, AU44,
                 AU12, AU45, FI_face) = HP_data

                # LCG = [random.randint(6,15), random.randint(7,10)]
                # RCG = [random.randint(6,15), random.randint(7,10)]
                # WE = [random.randint(60,100)]
                # DL = [round(random.uniform(0, 1), 1)]
                # Pad_data = [[random.uniform(0, 1) for _ in range(32)] for _ in range(32)]

                if not q_pad.empty():
                    Pad_all = q_pad.get()
                    Pad_data, DL, posture, axis, wavelet_features = Pad_all
                    WE = [wavelet_features[0]]

                    if posture == 'L':
                        zuozi = [0]
                    elif posture == 'R':
                        zuozi = [1]
                    else:
                        zuozi = [2]
                        zuozi = [2]
                    LCG = [axis[1], axis[0]]
                    RCG = [axis[3], axis[2]]

                Pad_data = ((Pad_data - np.min(Pad_data)) / (np.max(Pad_data) - np.min(Pad_data))).tolist()


                if not q_prediction_result.empty():
                    FIO = q_prediction_result.get()
                    # if not np.isnan(FIO):
                    if FIO is not None:
                        FI = FIO.tolist()
                    else:
                        print(f'FI是None！！！')

                    # if count_FI_0 < 29:
                    #     FI[count_FI_0] = FIO
                    #     count_FI_0 += 1
                    # else:
                    #     FI.append(FIO)

                if len(RESP) < 1:
                    RESP = [0]

                multi_data = [Rota_x, Rota_y, Rota_z, Left_eye_radius, Right_eye_radius, Ear, Mar, AU44, AU12, AU45, Perclos, Perblink,
                              RESP[-1], SPO2, HRV_Video, TEMP, HR_Video]

                MCD[0] = 0 if AU45 > 0.5 else 1
                MCD[1] = 0 if AU12 > 0.5 else 1
                MCD[2] = 0 if Rota_z < 0 else 1
                MCD[3] = 0 if HRV_Video > 184 else 1
                MCD[4] = 0 if LCG[0] > 18 or LCG[0] < 9 else 1
                MCD[5] = 0 if zuozi == [0] or zuozi == [1] else 1
                MCD[6] = 0 if HR_Video > 90 else 1
                MCD[7] = 0 if SPO2 < 96 else 1
                MCD[8] = 0 if TEMP > 37.3 else 1

                if not q_imgweb.empty():

                    count_multi += 1
                    count_hrwatch += 1
                    if HR_Watch == 84 and count_hrwatch > 240:
                        count_hrwatch = 0
                        HR_Watch = int(HR_Video - random.randint(-10, 10))
                        HR_Watch_Mem = HR_Watch
                    elif HR_Watch == 84 and count_hrwatch <= 240:
                        HR_Watch = HR_Watch_Mem

                    multi_means = calculate_mean(multi_data)
                    for key, mean_multidata in multi_means.items():
                        if mean_multidata is not None:
                            means_dict[key].append(mean_multidata)
                        else:
                            break

                    # print(count_multi)

                    if count_multi >= 9000 and len(means_dict['RESP']) == max_detection_time:

                        print(means_dict)

                        count_multi = 0
                        means_dict.update({'Pad': wavelet_features})
                        q_multimodal.put(means_dict)

                    runNum += 1
                    imgweb = q_imgweb.get()
                    # if q_imgweb.qsize() > 1:
                    #     q_imgweb.get()
                    buffer = cv2.imencode('.jpg', imgweb)[1]  # 将frame编码为JPEG格式，[1]取出编码后的图像数据
                    img_base64 = base64.b64encode(buffer).decode('utf-8')
                    img_base64 = "data:image/jpeg;base64," + img_base64

                    # print(f'小波能量: {WE}')

                    try:
                        message = await asyncio.wait_for(websocket.recv(), timeout=0.005)  # 设置超时时间
                        # data_sql = json.loads(message)
                        data_sql = message
                        print('receive:', message)

                        if DATA_res != data_sql:
                            DATA_res = data_sql
                            print(data_sql[0], data_sql[0] != '0')
                            if data_sql[0] != '0' and data_sql in DB_table:
                                send_from_db = True
                                start_time_sql = datetime(int(data_sql[:4]), int(data_sql[4:6]), int(data_sql[6:8]),
                                                          int(data_sql[9:11]),
                                                          int(data_sql[11:13]), int(data_sql[13:]))
                                print('start_time:', start_time_sql)
                                q_webishis.put(DB_table_dict[data_sql])

                            else:
                                send_from_db = False

                    except asyncio.TimeoutError:
                        send_from_db = send_from_db
                        # print('asyncio.TimeoutError')
                    #     send_from_db = False
                    except websockets.ConnectionClosed:
                        print("Connection closed. Waiting for new connection...")
                        # break
                    except Exception as e:
                        print(f"Error receiving message: {e}")

                    count_sockettime += 1
                    count_FI += 1
                    # print(f'counter: {count_sockettime}')
                    data_to_web = {
                        'BVP': BVP,
                        'RESP': RESP,
                        'HR_Video': HR_Video,
                        'RR': RR,
                        'HRV_Video': HRV_Video,
                        'HR_Watch': HR_Watch,
                        'SPO2': SPO2,
                        'TEMP': TEMP,
                        'img_base64': img_base64,
                        'MAX_HR': MAX_HR,
                        'MIN_HR': MIN_HR,
                        'MAX_HRV': MAX_HRV,
                        'MIN_HRV': MIN_HRV,
                        'MAX_RR': MAX_RR,
                        'MIN_RR': MIN_RR,
                        'Rota_x': Rota_x,
                        'Rota_y': Rota_y,
                        'Rota_z': Rota_z,
                        'Left_eye_radius': Left_eye_radius,
                        'Right_eye_radius': Right_eye_radius,
                        'Ear': Ear,
                        'Mar': Mar,
                        'Perclos': Perclos,
                        'Perblink': Perblink,
                        'Peryawn': Peryawn,
                        'AU44': AU44,
                        'AU12': AU12,
                        'AU45': AU45,
                        # 'FI': FI,
                        'MCD': MCD,
                        'LCG': LCG,
                        'RCG': RCG,
                        'WE': WE,
                        'DL': DL,
                        'zuozi': zuozi
                    }
                    if count_sockettime > 45:
                        data_to_web.update({'Pad_data': Pad_data})
                        count_sockettime = 0

                    # print(f'COUNT_FI>>>>>>>>>>: ', count_FI)
                    if count_FI > 3601:
                        data_to_web.update({'FI': FI})
                        # print(f'当前FI： ', FI)
                        count_FI = 0
                        # await websocket.send(json.dumps(data_to_web))  # await用于等待发送操作的完成
                        # await websocket.send(json.dumps(data_to_web))  # await用于等待发送操作的完成

                    ######################################################################yzx
                    currentFile_time = datetime.now()
                    if currentFile_time.minute == TIME_File:  # 每个小时的分钟时间为0时
                        currentFile_timestr = currentFile_time.strftime('%Y_%m_%d_%H')
                        if currentFile_TIMESTR != currentFile_timestr:
                            currentFile_TIMESTR = currentFile_timestr
                            fileTIME = currentFile_time.strftime('%Y_%m_%d_%H_%M_%S')
                            table_name_Cur = f'zgh_data_{fileTIME}'
                            parts = table_name_Cur.split('_')
                            if len(parts) >= 8:
                                part1 = parts[2] + parts[3] + parts[4]
                                part2 = parts[5] + parts[6] + parts[7]
                            else:
                                part1 = '0'
                                part2 = '0'
                            part_key = f'{part1}_{part2}'
                            DBsave = folder_save.split('&')[0]
                            part_dict = {part_key: f'{DBsave}.{table_name_Cur}'}
                            DB_table_dict.update(part_dict)
                            DB_table.append(part_key)

                    if current_second != datetime.now().second:
                        current_second = datetime.now().second
                        # print(f'{current_second}current:{run_framenum}frames/seconds')
                        # print('当前table： ', DB_table_dict)

                        run_framenum = 0
                        # print(f'runNum:{runNum}')
                    run_framenum += 1

                    if runNum >= 600:
                        # print(DB_table)
                        runNum = 0

                    #     select_dtobj = DB_table[-3]
                    #     if SELECT_dtobj != select_dtobj:
                    #         print('select_dtobj:', select_dtobj)
                    #         SELECT_dtobj = select_dtobj
                    #         print('select_table:', DB_table_dict[select_dtobj])
                    #     #####################################################################yzx
                    # print(f'ceshi LCG: {LCG}')

                    q_datawebcur.put(data_to_web)

                    if q_datawebcur.qsize() > 1:
                        q_datawebcur.get()

                    if not q_datawebhis.empty():
                        datawebhis_TMP = q_datawebhis.get()
                    if not q_sqliscur.empty():
                        sqliscur_TMP = q_sqliscur.get()
                        if sqliscur_TMP:
                            send_from_db = False
                    # if q_imgweb.qsize() > 1:
                    #     q_imgweb.get()
                    # 前端是否调用数据库判定
                    if send_from_db:
                        data_to_web = datawebhis_TMP
                    # print(f'this send_from_db:{send_from_db}')

                    data_to_web.update({'select_dtobj': DB_table})

                    # print(f'data_to_web')
                    if runNum % 4 == 0:  # 控制向前端发送数据的帧率，性能差的服务器适当降低
                        await websocket.send(json.dumps(data_to_web))  # await用于等待发送操作的完成
                    # await websocket.send(json.dumps(data_to_web))  # await用于等待发送操作的完成

                    # await asyncio.sleep(1/90)

        except websockets.ConnectionClosed:
            print("Connection closed. Waiting for new connection...")
            # await asyncio.sleep(5)  # 等待5秒钟重新连接

    # 启动服务器
    start_server = websockets.serve(camera_stream, "0.0.0.0", 8082)  # 一旦有客户端连接到服务器，'camera_stream'协程函数将被调用用来处理连接
    asyncio.get_event_loop().run_until_complete(start_server)
    asyncio.get_event_loop().run_forever()

# --- Module-level state shared with SAVEandGET_sql() via `global` ---
runNum_sql = 0                # frame counter inside the SQL save loop (reset once it reaches 900)
data_savetosql_5 = []         # NOTE(review): not used anywhere in the visible code — confirm before removing
send_from_db_sql = False      # True while historical rows are being replayed from the database
ROW_COUNT_sql = 0             # total row count of the history table currently being replayed
row_count_sql = 0             # index of the next history row to fetch/replay
save_path_sql = ''            # path of the .mp4 file the worker is currently writing
currentFile_TIMESTR_sql = ''  # last 'YYYY_mm_dd_HH' stamp used to detect the hourly table/video rollover
fileTIME_sql = ''             # timestamp suffix of the current zgh_data_<...> table
SaveSql_5_data = []           # write batch: rows are INSERTed 5 at a time from this buffer
SaveSql_5_num = 0             # number of rows currently buffered in SaveSql_5_data
GetSql_5_data = []            # read batch: history rows are SELECTed 5 at a time into this buffer
GetSql_5_num = 0              # index into GetSql_5_data for the row being replayed
def SAVEandGET_sql(q_webishis, q_sqliscur, q_datawebcur, q_datawebhis, q_exit_sql):
    """Database worker: persist live frontend data and replay history on demand.

    Consumes per-frame payload dicts from ``q_datawebcur`` and, five rows at a
    time, inserts them into an hourly MySQL table (``zgh_data_<timestamp>``)
    while also writing the decoded camera frames to an hourly ``.mp4`` file.
    When the frontend requests history (a ``db.table`` name arrives on
    ``q_webishis``), rows are read back five at a time and pushed to
    ``q_datawebhis``; ``q_sqliscur`` signals the sender when playback is done.
    A message on ``q_exit_sql`` triggers a clean shutdown (release the video
    writer, close cursor and connection).

    NOTE(review): all SQL here is assembled with f-strings — table names and
    even the VALUES tuples are interpolated as Python reprs.  This only works
    while the interpolated data never contains characters that break SQL
    quoting (e.g. a quote inside the JSON payload); parameterized queries /
    ``executemany`` would be safer.
    """
    global file_HP, runNum_sql, SELECT_dtobj, fileTIME_sql, start_time, DATA_res, DB_table_dict, DB_table, data_sql, send_from_db_sql, img_base64, currentFile_TIMESTR_sql, ROW_COUNT_sql, row_count_sql, data_savetosql_5, save_path_sql, folder_save
    global SaveSql_5_data, SaveSql_5_num, GetSql_5_data, GetSql_5_num
    # print('first start_time_sql:', start_time_sql)
    def get_frame_from_video(video_path, frame_number):
        # Return frame `frame_number` of the video at `video_path`, or None
        # when the file cannot be opened or the frame cannot be read.
        cap = cv2.VideoCapture(video_path)
        if not cap.isOpened():
            print('Error: get history video .')
            return None
        cap.set(cv2.CAP_PROP_POS_FRAMES, frame_number)
        ret, frame = cap.read()
        cap.release()
        if ret:
            return frame
        else:
            print(f'error: Could not read frame {frame_number}')
            return None


    try:
        # Enumerate every existing database/table so history playback can map
        # a 'YYYYmmdd_HHMMSS'-style key to its fully-qualified table name.
        # Create a cursor object
        cursor = conn.cursor()
        # Execute the SHOW DATABASES statement
        cursor.execute('SHOW DATABASES')
        # Fetch the query result
        result = cursor.fetchall()
        # Build the key -> 'database.table' lookup from the result
        DB_table_dict = {}
        DB_table = []
        hp_redatetimeobj = file_HP.split('HP_data_')[-1].split('.csv')[0]
        for row in result:
            database_name = row[0]
            # Skip MySQL's built-in system schemas
            if database_name.startswith('information_schema') or database_name.startswith(
                    'mysql') or database_name.startswith(
                'performance_schema') or database_name.startswith(
                'sys'):
                continue
            # print(f'Database: {database_name}')

            # Create a cursor object
            cursor = conn.cursor()
            # Execute the SHOW TABLES statement
            cursor.execute(f'USE {database_name};')
            cursor.execute('SHOW TABLES;')
            # Fetch the query result
            tables = cursor.fetchall()
            # Index every table found
            for table in tables:
                table_name = table[0]
                # if table_name.startswith(f'zgh_data_{hp_redatetimeobj}'):
                #     continue
                # Table names look like 'zgh_data_YYYY_mm_dd_HH_MM_SS':
                # parts[2:5] form the date key, parts[5:8] the time key.
                parts = table_name.split('_')
                if len(parts) >= 8:
                    part1 = parts[2] + parts[3] + parts[4]
                    part2 = parts[5] + parts[6] + parts[7]
                else:
                    part1 = '0'
                    part2 = '0'
                part_key = f'{part1}_{part2}'
                part_dict = {part_key: f'{database_name}.{table_name}'}
                DB_table_dict.update(part_dict)
                DB_table.append(part_key)
                # print(part_key)
    except Exception as e:
        print(f'DB_table error:{e}')


    # Set up the video-save parameters
    current_datetime = datetime.now()  # current timestamp
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # codec options include: XVID, DIVX, MJPG, mp4v
    fd0 = current_datetime.strftime("%Y_%m_%d_%H_%M_%S_%f")  # time format, microsecond precision; fd: formatted_datetime
    save_path_sql = os.path.join(folder_videosave, f'Cam_{fd0}.mp4').replace('\\', '/')  # name of the saved video file
    fps_save = 30
    out_sql = cv2.VideoWriter(save_path_sql, fourcc, fps_save, (640, 480))  # video writer object; fps_save is the saved video's frame rate

    q_webishis_TMP = 0
    q_dataweb_TMP = {}
    current_second = 0
    run_framenum = 0
    BVP_sql = []
    RESP_sql = []

    # Create the first hourly table, named after the HP_data csv timestamp.
    cur = conn.cursor()
    conn.autocommit(True)
    DBsave = folder_save.split('&')[0]
    hp_redatetimeobj = file_HP.split('HP_data_')[-1].split('.csv')[0]
    fileTIME_sql = hp_redatetimeobj
    print(f'first_table:{DBsave}.zgh_data_{fileTIME_sql}')
    cur.execute(f'USE {DBsave};')
    SQL_filehp = f'''
                    CREATE TABLE IF NOT EXISTS zgh_data_{fileTIME_sql}(
                    id INT AUTO_INCREMENT PRIMARY KEY, Times DATETIME(3) NOT NULL, datasql_array JSON NOT NULL,
                    BVP float, RESP float, HR_Video float, RR float, HRV_Video float, HR_Watch float, SPO2 float,
                    TEMP float, MAX_HR float, MIN_HR float, MAX_HRV float, MIN_HRV float, MAX_RR float, MIN_RR float,
                    Rota_x float, Rota_y float, Rota_z float, Left_eye_radius float, Right_eye_radius float, Ear float,
                    Mar float, Perclos float, Perblink float, Peryawn float, AU44 float, AU12 float, AU45 float,
                    Video_path VARCHAR(100)
                    )ENGINE=InnoDB;
                '''
    cur.execute(SQL_filehp)
    conn.autocommit(False)


    try:
        while True:
            if not q_datawebcur.empty():
                runNum_sql += 1
                SaveSql_5_num += 1
                # A history request: q_webishis carries the fully-qualified
                # 'db.table' name to replay (falsy value cancels playback).
                if not q_webishis.empty():
                    q_webishis_TMP = q_webishis.get()
                    print(f'#######{q_webishis_TMP}#################################################################')

                    if not q_webishis_TMP:
                        send_from_db_sql = False
                        row_count_sql = 0
                        ROW_COUNT_sql = 0

                    else:
                        # Query the total number of rows in the requested history table
                        cur = conn.cursor()
                        conn.autocommit(True)
                        cur.execute(f'USE {DBsave};')
                        query_count = f"SELECT COUNT(*) FROM {q_webishis_TMP}"
                        cur.execute(query_count)
                        ROW_COUNT_sql = cur.fetchone()[0]
                        print(f"表格 {q_webishis_TMP} 中有 {ROW_COUNT_sql} 行数据。")
                        if ROW_COUNT_sql > 0:
                            send_from_db_sql = True


                q_dataweb_TMP = q_datawebcur.get()



                # NOTE(review): the replace() below strips the MIME prefix but
                # leaves the trailing ','; b64decode discards characters outside
                # the base64 alphabet by default, so decoding still succeeds.
                q_dataweb_TMP['img_base64'] = q_dataweb_TMP['img_base64'].replace("data:image/jpeg;base64", "")
                img_data = base64.b64decode(q_dataweb_TMP['img_base64'])
                img_array = np.frombuffer(img_data, dtype=np.uint8)
                img = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
                out_sql.write(img)
                ######################################################################yzx
                # Hourly rollover: when the minute equals TIME_File (0) and the
                # hour stamp changed, create a fresh table and a fresh .mp4 file.
                currentFile_time = datetime.now()
                if currentFile_time.minute == TIME_File:  # fires when the hour's minute value is 0
                    currentFile_timestr = currentFile_time.strftime('%Y_%m_%d_%H')
                    # print(f'currentFile_TIMESTR_sql:{currentFile_TIMESTR_sql}, currentFile_timestr:{currentFile_timestr}')
                    if currentFile_TIMESTR_sql != currentFile_timestr:
                        currentFile_TIMESTR_sql = currentFile_timestr
                        fileTIME_sql = currentFile_time.strftime('%Y_%m_%d_%H_%M_%S')
                        cur = conn.cursor()
                        conn.autocommit(True)
                        DBsave = folder_save.split('&')[0]
                        cur.execute(f'USE {DBsave};')
                        SQL_filehp = f'''
                                        CREATE TABLE IF NOT EXISTS zgh_data_{fileTIME_sql}(
                                        id INT AUTO_INCREMENT PRIMARY KEY, Times DATETIME(3) NOT NULL, datasql_array JSON NOT NULL,
                                        BVP float, RESP float, HR_Video float, RR float, HRV_Video float, HR_Watch float, SPO2 float,
                                        TEMP float, MAX_HR float, MIN_HR float, MAX_HRV float, MIN_HRV float, MAX_RR float, MIN_RR float,
                                        Rota_x float, Rota_y float, Rota_z float, Left_eye_radius float, Right_eye_radius float, Ear float,
                                        Mar float, Perclos float, Perblink float, Peryawn float, AU44 float, AU12 float, AU45 float,
                                        Video_path VARCHAR(100)
                                        )ENGINE=InnoDB;
                                    '''
                        cur.execute(SQL_filehp)
                        conn.autocommit(False)

                        fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # codec options include: XVID, DIVX, MJPG, mp4v
                        fd0 = currentFile_time.strftime("%Y_%m_%d_%H_%M_%S_%f")  # time format, microsecond precision; fd: formatted_datetime
                        save_path_sql = os.path.join(folder_videosave, f'Cam_{fd0}.mp4').replace('\\', '/')  # name of the saved video file
                        fps_save = 30
                        out_sql = cv2.VideoWriter(save_path_sql, fourcc, fps_save, (640, 480))  # video writer object; fps_save is the saved video's frame rate
                        print(f'DBsave:{DBsave}, save_path_sql:{save_path_sql}')



                current_t = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
                # data_to_web_TMP = {
                #     'AU44': current_t}
                # value_deleteimg = q_dataweb_TMP.pop('img_base64', None)
                del q_dataweb_TMP['img_base64']


                # Flatten BVP/RESP to their latest scalar sample (0 when empty)
                # so they fit the float columns of the table.
                if len(q_dataweb_TMP['BVP']) < 1:
                    q_dataweb_TMP['BVP'] = 0
                else:
                    q_dataweb_TMP['BVP'] = q_dataweb_TMP['BVP'][-1]
                # Array fields (e.g. the seat-pad data has no fixed length) are still stored as JSON
                if len(q_dataweb_TMP['RESP']) < 1:
                    q_dataweb_TMP['RESP'] = 0
                else:
                    q_dataweb_TMP['RESP'] = q_dataweb_TMP['RESP'][-1]
                q_datasql_array = {
                    'DL': q_dataweb_TMP['DL'],
                    'LCG': q_dataweb_TMP['LCG'],
                    'MCD': q_dataweb_TMP['MCD'],
                    'RCG': q_dataweb_TMP['RCG'],
                    'zuozi': q_dataweb_TMP['zuozi']
                }
                if 'Pad_data' in q_dataweb_TMP:
                    q_datasql_array.update({'Pad_data': q_dataweb_TMP['Pad_data']})
                if 'FI' in q_dataweb_TMP:
                    q_datasql_array.update({'FI': q_dataweb_TMP['FI']})
                json_data = json.dumps(q_datasql_array)
                # fileTIME_sql = file_HP.split('HP_data_')[-1].split('.csv')[0]


                # Buffer one row; flushed as a single 5-row INSERT below.
                SaveSql_5_data.append((f'{current_t}', f'{json_data}', q_dataweb_TMP['BVP'], q_dataweb_TMP['RESP'],
                                       q_dataweb_TMP['HR_Video'], q_dataweb_TMP['RR'], q_dataweb_TMP['HRV_Video'],
                                       q_dataweb_TMP['HR_Watch'],
                                       q_dataweb_TMP['SPO2'], q_dataweb_TMP['TEMP'], q_dataweb_TMP['MAX_HR'],
                                       q_dataweb_TMP['MIN_HR'], q_dataweb_TMP['MAX_HRV'],
                                       q_dataweb_TMP['MIN_HRV'], q_dataweb_TMP['MAX_RR'],
                                       q_dataweb_TMP['MIN_RR'], q_dataweb_TMP['Rota_x'], q_dataweb_TMP['Rota_y'],
                                       q_dataweb_TMP['Rota_z'], q_dataweb_TMP['Left_eye_radius'],
                                       q_dataweb_TMP['Right_eye_radius'], q_dataweb_TMP['Ear'],
                                       q_dataweb_TMP['Mar'], q_dataweb_TMP['Perclos'], q_dataweb_TMP['Perblink'],
                                       q_dataweb_TMP['Peryawn'], q_dataweb_TMP['AU44'], q_dataweb_TMP['AU12'],
                                       q_dataweb_TMP['AU45'],
                                       f'{save_path_sql}'
                                       ))
                if SaveSql_5_num >= 5:
                    SaveSql_5_num = 0
                    cur = conn.cursor()
                    conn.autocommit(True)
                    DBsave = folder_save.split('&')[0]
                    hp_redatetimeobj = file_HP.split('HP_data_')[-1].split('.csv')[0]
                    # print(file_HP, hp_redatetimeobj)
                    cur.execute(f'USE {DBsave};')
                    # HACK: each VALUES group is the Python repr of a tuple —
                    # valid SQL only while no field contains a quote character.
                    SQL_hp = f'''
                                                INSERT INTO zgh_data_{fileTIME_sql} (Times, datasql_array, BVP, RESP, HR_Video, RR, HRV_Video, HR_Watch, SPO2, TEMP,
                                                MAX_HR, MIN_HR, MAX_HRV, MIN_HRV, MAX_RR, MIN_RR, Rota_x, Rota_y, Rota_z,
                                                Left_eye_radius, Right_eye_radius, Ear, Mar, Perclos, Perblink, Peryawn, AU44, AU12, AU45,
                                                Video_path)
                                                VALUES {SaveSql_5_data[0]}, {SaveSql_5_data[1]}, {SaveSql_5_data[2]}, {SaveSql_5_data[3]}, {SaveSql_5_data[4]};
                                                '''
                    try:
                        # Perform the insert (the original note mentioned executemany,
                        # but a single multi-row INSERT statement is executed)
                        cur.execute(SQL_hp)

                    except conn.Error as err:
                        print("Error: {}".format(err))
                    # SaveSql_5_data_TMP = ('2024-06-14 21:36:11.476', '{"DL": [0.2], "LCG": [12, 9], "MCD": [1, 1, 0, 1, 1, 0, 1, 1, 1], "RCG": [8, 9], "zuozi": [0]}', 0.922035785440241, 0.7923519599151917, 84, 12, 140, 84, 96, 36.2, 84, 84, 12, 12, 140, 140, -7.45, -6.35, -3.31, 7.517, 7.0, 0.402, 0.288, 0.0, 0, 0, 0.199, 0.468, 0, 'D:/Data_zgh/DATAzgh_20240614_21_x_6/DATAzgh-20240614_21_x/Cam_2024_06_14_21_36_03_502868.mp4')
                    # cur.execute(SQL_hp, SaveSql_5_data_TMP)
                    SaveSql_5_data = []
                    # cur.execute(SQL_hp, (current_t, json_data))
                    conn.autocommit(False)



                if runNum_sql >= 900:
                    runNum_sql = 0
                    # out_sql.release()

                # Per-second frame-rate report for this worker loop
                if current_second != datetime.now().second:
                    current_second = datetime.now().second
                    print(f'current2:{run_framenum}frames/seconds')
                    run_framenum = 0
                    # print('select history?', q_webishis_TMP, send_from_db_sql)
                    # print(f'runNum:{runNum}')
                run_framenum += 1

                if send_from_db_sql:
                    # History playback: stream rows 5 at a time until exhausted
                    try:
                        if row_count_sql + 4 < ROW_COUNT_sql:

                            if row_count_sql % 5 == 0:
                                GetSql_5_num = 0
                                cur = conn.cursor()
                                conn.autocommit(True)
                                cur.execute(f'USE {DBsave};')
                                query = f'SELECT * FROM {q_webishis_TMP} where id between {row_count_sql + 1} and {row_count_sql + 5} order by id;'
                                # print(query)
                                cur.execute(query)
                                GetSql_5_data = cur.fetchall()
                                # print(f'result is : {result}')
                                conn.autocommit(False)
                            # row_count += 1
                            # advance to this record's timestamp so the next record can be queried
                            start_time_sql = GetSql_5_data[GetSql_5_num][1]
                            data_to_webDB = json.loads(GetSql_5_data[GetSql_5_num][2]) # datasql_array
                            # if 'Pad_data' in data_to_webDB:
                            # print(data_to_webDB)
                            BVP_sql.append(GetSql_5_data[GetSql_5_num][3])  # accumulate the replayed BVP samples into a rolling window
                            if len(BVP_sql) > rppg_len:
                                BVP_sql = BVP_sql[-rppg_len:]
                            RESP_sql.append(GetSql_5_data[GetSql_5_num][4])
                            if len(RESP_sql) > rppg_len:
                                RESP_sql = RESP_sql[-rppg_len:]
                            # # print(f'start_time_sql:{start_time_sql}!!!!!!!!!!!!!!!!')
                            # # print(f'send_from_db:{send_from_db}')
                            # if runNum % 30 == 0:
                            #     print(f'start_time_sql:{start_time_sql}!!!!!!!!!!!!!!!!')
                                # print(data_to_web)

                            # print('')
                            # Database replay of the recorded video
                            if row_count_sql % 2 == 0:
                                # Look up the video frame for this row
                                # NOTE(review): if get_frame_from_video() returns None,
                                # cv2.imencode raises and is caught by the except below,
                                # which aborts the whole playback session.
                                video_path = GetSql_5_data[GetSql_5_num][30]
                                # print(f'video_path = {result[30]}')
                                frame = get_frame_from_video(video_path, row_count_sql)
                                buffer = cv2.imencode('.jpg', frame)[1]  # encode frame as JPEG; [1] extracts the encoded image data
                                img_base64 = base64.b64encode(buffer).decode('utf-8')
                                img_base64 = "data:image/jpeg;base64," + img_base64
                            data_to_webDB.update({'BVP': BVP_sql, 'RESP': RESP_sql, 'HR_Video': GetSql_5_data[GetSql_5_num][5], 'RR': GetSql_5_data[GetSql_5_num][6],
                                                'HRV_Video': GetSql_5_data[GetSql_5_num][7], 'HR_Watch': GetSql_5_data[GetSql_5_num][8], 'SPO2': GetSql_5_data[GetSql_5_num][9], 'TEMP': GetSql_5_data[GetSql_5_num][10],
                                                  'MAX_HR': GetSql_5_data[GetSql_5_num][11], 'MIN_HR': GetSql_5_data[GetSql_5_num][12], 'MAX_HRV': GetSql_5_data[GetSql_5_num][13], 'MIN_HRV': GetSql_5_data[GetSql_5_num][14],
                                                  'MAX_RR': GetSql_5_data[GetSql_5_num][15], 'MIN_RR': GetSql_5_data[GetSql_5_num][16], 'Rota_x': GetSql_5_data[GetSql_5_num][17], 'Rota_y': GetSql_5_data[GetSql_5_num][18],
                                                  'Rota_z': GetSql_5_data[GetSql_5_num][19], 'Left_eye_radius': GetSql_5_data[GetSql_5_num][20], 'Right_eye_radius': GetSql_5_data[GetSql_5_num][21],
                                                  'Ear': GetSql_5_data[GetSql_5_num][22], 'Mar': GetSql_5_data[GetSql_5_num][23], 'Perclos': GetSql_5_data[GetSql_5_num][24], 'Perblink': GetSql_5_data[GetSql_5_num][25],
                                                  'Peryawn': GetSql_5_data[GetSql_5_num][26], 'AU44': GetSql_5_data[GetSql_5_num][27], 'AU12': GetSql_5_data[GetSql_5_num][28], 'AU45': GetSql_5_data[GetSql_5_num][29],
                                                  'img_base64': img_base64})
                            q_datawebhis.put(data_to_webDB)
                            if q_datawebhis.qsize() > 1:
                                q_datawebhis.get()
                            GetSql_5_num += 1
                            row_count_sql += 1
                            # # print(row_count, img_base64)


                        else:
                            # Fewer than 5 rows remain: end playback and reset state
                            send_from_db_sql = False
                            row_count_sql = 0
                            ROW_COUNT_sql = 0
                            GetSql_5_num = 0
                            GetSql_5_data = []
                            BVP_sql = []
                            RESP_sql = []
                            q_sqliscur.put(True)
                    except Exception as e:
                        print(f'select history error:{e}')
                        send_from_db_sql = False
                        row_count_sql = 0
                        ROW_COUNT_sql = 0
                        q_sqliscur.put(True)


                if not q_exit_sql.empty():
                    # Shutdown requested: flush/close all resources and leave the loop
                    out_sql.release()
                    cur.close()
                    conn.close()
                    print("\n退出循环，数据库已关闭！！！")
                    q_exit_sql.get()
                    break
            # print(q_history_TMP, q_dataweb_TMP)
            # runNum += 1
            # if runNum >= 600:
            #     print(q_history_TMP, q_dataweb_TMP)
            #     runNum = 0
    except Exception as e:
        print(f"Error sql saveANDget: {e}")


# def updateWatch():
#     global cw, hr_video, hr_watch, bos_watch, hrv_watch, temp_watch
    # cw += 1

    # if not q_rPPG_RR.empty():
    #     # 展示视频心率HR
    #     hr_hrv_rr = q_rPPG_RR.get()
    #     if q_rPPG_RR.qsize() > 1:
    #         q_rPPG_RR.get()
    #
    #     rr = hr_hrv_rr[2]
    #     font = gui.RR.font()
    #     font.setBold(True)
    #     gui.RR.setFont(font)
    #     gui.RR.setText('呼吸率: ' + str(int(rr)) + 'bpm')
    #     gui.RR.setAlignment(Qt.AlignVCenter)
    #     gui.RR.setStyleSheet("font-size: 19px")
    #     # 展示心率变异性HRV
    #     hrv = hr_hrv_rr[1]
    #     font = gui.HRV.font()
    #     font.setBold(True)
    #     gui.HRV.setFont(font)
    #     gui.HRV.setText('HRV: ' + str(hrv) + 'ms')
    #     gui.HRV.setAlignment(Qt.AlignVCenter)
    #     gui.HRV.setStyleSheet("font-size: 19px; ")

    # if count < 300:
        # font = gui.HR.font()
        # font.setBold(True)
        # gui.HR.setFont(font)
        # gui.HR.setText('心率: 84bpm')
        # gui.HR.setAlignment(Qt.AlignVCenter)
        # gui.HR.setStyleSheet("font-size: 19px")

        # font = gui.RR.font()
        # font.setBold(True)
        # gui.RR.setFont(font)
        # gui.RR.setText('呼吸率: 12bpm')
        # gui.RR.setAlignment(Qt.AlignVCenter)
        # gui.RR.setStyleSheet("font-size: 19px")

    # if cw < 2:
    #     with open(file_watch, 'w', newline='') as csvfile:
    #         csv_writer = csv.writer(csvfile)
    #         csv_writer.writerow(fieldnames)

#     if not q_watch.empty():
#         data = q_watch.get()
#         hr_watch = data[0]
#         font = gui.HR_watch.font()
#         font.setBold(True)
#         gui.HR_watch.setFont(font)
#         gui.HR_watch.setText('心率: ' + str(hr_watch) + 'bpm')
#         gui.HR_watch.setAlignment(Qt.AlignVCenter)
#         gui.HR_watch.setStyleSheet("font-size: 19px; ")
#
#         bos_watch = data[1]
#         if data == 0:
#             bos_watch = 96
#         font = gui.HR_2.font()
#         font.setBold(True)
#         gui.HR_2.setFont(font)
#         gui.HR_2.setText('血氧: ' + str(bos_watch))
#         gui.HR_2.setAlignment(Qt.AlignVCenter)
#         gui.HR_2.setStyleSheet("font-size: 19px; ")
#     # elif dataid == 3:
#     #     hrv_watch = data
#     #     font = gui.HRV.font()
#     #     font.setBold(True)
#     #     gui.HRV.setFont(font)
#     #     gui.HRV.setText('HRV: ' + str(data))
#     #     gui.HRV.setAlignment(Qt.AlignVCenter)
#     #     gui.HRV.setStyleSheet("font-size: 19px; ")
#         temp_watch = data[2]
#         font = gui.HR_3.font()
#         font.setBold(True)
#         gui.HR_3.setFont(font)
#         gui.HR_3.setText('体温: ' + str(temp_watch))
#         gui.HR_3.setAlignment(Qt.AlignVCenter)
#         # gui.HR_3.setStyleSheet("font-size: 19px; background-color: yellow;")
#         gui.HR_3.setStyleSheet("font-size: 19px; ")
# #
#     if not q_rPPG_HR.empty():
#         hr_video = q_rPPG_HR.get()
#         if q_rPPG_HR.qsize() > 1:
#             q_rPPG_HR.get()
#     if abs(hr_video - hr_watch) <= 6:
#         hr_video = hr_video
#     else:
#         hr_video = hr_watch
#
#     # 更新心率值
#     font = gui.HR.font()
#     font.setBold(True)
#     gui.HR.setFont(font)
#     gui.HR.setText('心率: ' + str(int(hr_video)) + 'bpm')
#     gui.HR.setAlignment(Qt.AlignVCenter)
#     gui.HR.setStyleSheet("font-size: 19px")
    #
    # # 实时保存手表获取的生理参数
    # with open(file_watch, mode='a', newline='') as csv_file:
    #     writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
    #     current_t = datetime.now().strftime('%Y_%m_%d_%H_%M_%S_%f')
    #     writer.writerow({'Times': current_t, 'HR': hr_watch, 'BOS': bos_watch, 'HRV': hrv_watch, 'TEMP': temp_watch})


# 疲劳时间记录
# 创建RESP csv文件每一列的header
# fieldnames_fatigue = ['Times']
# with open(fatigue_log_file, mode='a', newline='') as fatigue_file:
#     writer_header_fatigue = csv.DictWriter(fatigue_file, fieldnames=fieldnames_fatigue)
#     writer_header_fatigue.writeheader()

# def write_to_fatigue(log_file):
#     try:
#         # 打开CSV文件以附加模式写入数据
#         with open(log_file, mode='a', newline='') as file:
#             writer = csv.DictWriter(file, fieldnames=fieldnames_fatigue)
#             curr_time = datetime.now().strftime('%Y_%m_%d_%H_%M_%S_%f')
#             writer.writerow({'Times': curr_time})
#     except Exception as e:
#         print(f"An error occurred: {e}")

def write_to_fatigue(log_file, status_data):
    """Append one timestamped fatigue-status record to *log_file*.

    Each record is a single line of the form
    '<YYYY_mm_dd_HH_MM_SS_ffffff>,  <status_data>'.
    Any write failure is reported to stdout instead of propagating.
    """
    try:
        timestamp = datetime.now().strftime('%Y_%m_%d_%H_%M_%S_%f')
        record = ',  '.join(map(str, [timestamp, status_data])) + '\n'
        with open(log_file, 'a') as log:
            log.write(record)
    except Exception as e:
        print(f'Write fatigue status data error: {e}')


def updateHR():
    """Poll the fatigue-status queue, log each reading, and start or stop
    the alarm-audio process depending on whether the newest score is >= 0.8.

    Intended to be driven periodically (e.g. by a QTimer).  Does nothing
    when no new status value is available.
    """
    global counter_fatigue, fatigue_status, process7
    if q_status.empty():
        return
    status_value = q_status.get()
    write_to_fatigue(fatigue_log_file, status_value)
    # Drop one backlog item so the queue cannot grow without bound.
    if q_status.qsize() > 1:
        q_status.get()
    if status_value >= 0.8:
        counter_fatigue += 1
        # Start the alarm only on the first fatigued reading of a streak,
        # and only if a previous alarm process is not still running.
        if counter_fatigue == 1:
            if process7 is None or not process7.is_alive():
                process7 = multiprocessing.Process(target=audio, args=())
                process7.start()
            fatigue_status = 1
        status = '疲劳！！！'
    else:
        status = '清醒'
        counter_fatigue = 0
        # Silence a still-playing alarm once the driver is awake again.
        if fatigue_status == 1 and process7 is not None and process7.is_alive():
            process7.terminate()
        fatigue_status = 0
        #     status_color = 'blue'
        # font = gui.Status.font()
        # font.setBold(True)
        # gui.Status.setFont(font)
        # gui.Status.setText(status)
        # gui.Status.setAlignment(Qt.AlignVCenter)
        # gui.Status.setStyleSheet(f"font-size: 19px; color: {status_color}")
    # if not q_rPPG_RR.empty():
    #     rr = q_rPPG_RR.get()
    #     font = gui.RR.font()
    #     font.setBold(True)
    #     gui.RR.setFont(font)
    #     gui.RR.setText('呼吸率: ' + str(int(rr)) + 'bpm')
    #     gui.RR.setAlignment(Qt.AlignVCenter)
    #     gui.RR.setStyleSheet("font-size: 19px")
    #
    # if count < 300:
    #     font = gui.HR.font()
    #     font.setBold(True)
    #     gui.HR.setFont(font)
    #     gui.HR.setText('初始化……')
    #     gui.HR.setAlignment(Qt.AlignVCenter)
    #     gui.HR.setStyleSheet("font-size: 19px")
    #
    #     font = gui.RR.font()
    #     font.setBold(True)
    #     gui.RR.setFont(font)
    #     gui.RR.setText('初始化……')
    #     gui.RR.setAlignment(Qt.AlignVCenter)
    #     gui.RR.setStyleSheet("font-size: 19px")


# RESP, BVP = [0], [0]
# def uddata():
#     global count, RESP, BVP
#     if count < 1000:
#         count += 1

    # if count < 30:
    #     font = gui.HR_watch.font()
    #     font.setBold(True)
    #     gui.HR_watch.setFont(font)
    #     gui.HR_watch.setText('心率: ' + str(84) + 'bpm')
    #     gui.HR_watch.setAlignment(Qt.AlignVCenter)
    #     gui.HR_watch.setStyleSheet("font-size: 19px; ")
    #
    #     font = gui.HR_2.font()
    #     font.setBold(True)
    #     gui.HR_2.setFont(font)
    #     gui.HR_2.setText('血氧: 96')
    #     gui.HR_2.setAlignment(Qt.AlignVCenter)
    #     gui.HR_2.setStyleSheet("font-size: 19px; ")
    #
    #     font = gui.HRV.font()
    #     font.setBold(True)
    #     gui.HRV.setFont(font)
    #     gui.HRV.setText('HRV: 140 ms')
    #     gui.HRV.setAlignment(Qt.AlignVCenter)
    #     gui.HRV.setStyleSheet("font-size: 19px; ")
    #
    #     font = gui.HR_3.font()
    #     font.setBold(True)
    #     gui.HR_3.setFont(font)
    #     gui.HR_3.setText('体温: 36.25')
    #     gui.HR_3.setAlignment(Qt.AlignVCenter)
    #     gui.HR_3.setStyleSheet("font-size: 19px; ")

    # if not q_imgs.empty():
    #     data14 = q_imgs.get()
    #     if q_imgs.qsize() > 1:
    #         # print('取出')
    #         q_imgs.get()

        # print(q_imgs.qsize())

        # if data14 is not None:
        #     show = cv2.cvtColor(data14, cv2.COLOR_BGR2RGB)
        #     showImage = QImage(show.data, show.shape[1], show.shape[0], QImage.Format_RGB888)
        #     gui.label.setPixmap(QPixmap.fromImage(showImage))
        #     gui.label.setScaledContents(True)  # 让图片自适应label大小

    # if not q_eye.empty():
    #     img_eye = q_eye.get()
    #     if q_eye.qsize() > 1:
    #         q_eye.get()
        # if img_eye is not None and img_eye.size != 0:
        #     show_eye = cv2.cvtColor(img_eye, cv2.COLOR_BGR2RGB)
        #     showImage_eye = QImage(show_eye.data, show_eye.shape[1], show_eye.shape[0], QImage.Format_RGB888)
        #     gui.label_eye.setPixmap(QPixmap.fromImage(showImage_eye))
        #     gui.label_eye.setScaledContents(True)  # 让图片自适应label大小

    # if not q_mouth.empty():
    #     img_mouth = q_mouth.get()
    #     if q_mouth.qsize() > 1:
    #         q_mouth.get()
        # if img_mouth is not None and img_mouth.size != 0:
        #     show_mouth = cv2.cvtColor(img_mouth, cv2.COLOR_BGR2RGB)
        #     showImage_mouth = QImage(show_mouth.data, show_mouth.shape[1], show_mouth.shape[0], QImage.Format_RGB888)
        #     gui.label_mouth.setPixmap(QPixmap.fromImage(showImage_mouth))
        #     gui.label_mouth.setScaledContents(True)  # 让图片自适应label大小

    # if not q_rPPG_RESP.empty():
    #     RESP = q_rPPG_RESP.get()
    #     if q_rPPG_RESP.qsize() > 1:
    #         q_rPPG_RESP.get()
    #
    # gui.watch_data.clear()
    # gui.watch_data.plot(RESP)

    # if not q_pad.empty():
    #     data3 = q_pad.get()
    #     cmap = plt.get_cmap('viridis')  # You can choose any colormap
    #     normed_data = cv2.normalize(data3, None, 0, 255, cv2.NORM_MINMAX, dtype=cv2.CV_8U)
    #     heatmap_rgba = (cmap(normed_data) * 255).astype(np.uint8)
    #
    #     # Create a QImage from the heatmap data
    #     heatmap_image = QImage(heatmap_rgba.data, heatmap_rgba.shape[1], heatmap_rgba.shape[0], QImage.Format_RGBA8888)
    #
    #     # Set the QImage as the pixmap for the label
    #     gui.label_2.setPixmap(QPixmap.fromImage(heatmap_image))
    #     gui.label_2.setScaledContents(True)

    # if not q_rPPG_BVP.empty():
    #     BVP = q_rPPG_BVP.get()
    #     if q_rPPG_BVP.qsize() > 1:
    #         q_rPPG_BVP.get()
    #
    # gui.rPPG_data.clear()  # 需要清除图窗内容的时候用
    # gui.rPPG_data.plot(BVP)


def audio(duration=60):
    """Play the alarm music file for at most *duration* seconds.

    Runs in its own process (started by updateHR); the parent terminates
    this process early once the fatigue state clears, so the playback loop
    is time-bounded rather than waiting for the track to finish.

    Args:
        duration: maximum playback time in seconds (default 60, matching
            the previous hard-coded limit).
    """
    # Initialize pygame and its audio mixer.
    pygame.init()
    pygame.mixer.init()
    try:
        # NOTE(review): music_file is a module-level global — presumably set
        # earlier in this file; confirm it is defined before first alarm.
        if not os.path.exists(music_file):
            print("The specified music file (Canon.mp3) does not exist.")
            return
        pygame.mixer.music.load(music_file)
        pygame.mixer.music.play()

        # Poll at 20 Hz until the time budget is spent.  The parent's
        # terminate() may end this loop early.
        deadline = time.time() + duration
        while time.time() <= deadline:
            pygame.time.Clock().tick(20)
    finally:
        # Bug fix: the original only called pygame.quit() on the success
        # path, leaking the initialized audio device when the file was
        # missing.  Always release it.
        pygame.quit()


# 函数tp：关闭所有进程, terminating processes
def tp():
    """Terminate all worker processes and quit the Qt application.

    ("tp" = terminating processes.)  Signals the camera-capture process via
    q_exit / q_exit_sql so it can release the camera first, waits until that
    signal has been consumed, stops the GUI refresh timer, then
    force-terminates every child process and sets stop_event so the
    process-monitor thread exits.
    """
    global stop_event
    QApplication.quit()
    # If the cam_capture process is running, send it an exit signal so it
    # can release the camera resource before being terminated.
    if process3.is_alive():
        q_exit.put(1)
        q_exit_sql.put(1)
        time.sleep(1)  # wait for the signal to be placed in the queue

        while True:
            if not q_exit.empty():
                # cam_capture has not consumed the exit signal yet; wait.
                time.sleep(3)
            else:
                # v_timer1.stop()
                v_timer2.stop()
                # v_timer3.stop()

                # NOTE(review): terminate() is abrupt (no cleanup handlers
                # run in the children); presumably each worker is safe to
                # kill at this point — confirm.
                process1.terminate()
                process2.terminate()
                process3.terminate()
                process4.terminate()
                process5.terminate()
                process6.terminate()
                if process7 is not None and process7.is_alive():
                    process7.terminate()
                process8.terminate()
                process9.terminate()
                # Tell the monitor thread to stop restarting process3.
                stop_event.set()

                break
    else:
        print(">>>进程未开始，直接退出！<<<")


# 监控线程

def process_monitor(process3, stop_event):
    """Watchdog loop: restart the camera-capture process if it dies.

    Runs as a thread in the main process (a child process cannot supervise
    its siblings).  Polls every five seconds and exits once *stop_event*
    is set by tp().
    """
    while not stop_event.is_set():
        if not process3.is_alive():
            logging.error(f"{process3.name} is not alive. Attempting to restart.")
            # Process objects are single-use, so build a fresh one.
            process3 = create_process()
            process3.start()
            logging.info(f"{process3.name} has been restarted.")
        time.sleep(5)  # poll every five seconds
    print("All processes terminated !")


def create_process():
    """Return a fresh, unstarted camera-capture process.

    process_monitor uses this to replace a dead cam_capture worker; a
    multiprocessing.Process can only be started once, so every restart
    needs a brand-new object.
    """
    capture_queues = (q_imgp, q_imgweb, q_imghp, q_imgs, q_exit)
    return multiprocessing.Process(target=cam_capture, args=capture_queues)


if __name__ == '__main__':

    # NOTE(review): get_start_method() does not normally raise RuntimeError,
    # so the spawn fallback below is likely dead code — confirm the intent.
    try:
        multiprocessing.get_start_method()  # default start method is fork on POSIX
    except RuntimeError:
        multiprocessing.set_start_method('spawn')
    # manager = multiprocessing.Manager()
    ########################################################yzx
    # Create the per-run database and a matching on-disk data folder.
    DBvisionname = initDB.create_DB(conn, current_time_f)
    Filesvision = DBvisionname.replace('dbzgh', 'DATAzgh')
    FileFolder_root = os.path.join(FileFolder_Root, Filesvision).replace('\\', '/')  # storage folder (named after the database) for this run's csv files etc.
    os.makedirs(FileFolder_root, exist_ok=True)

    FolderName = initF.FolderName_get(FileFolder_root, current_time_f)  # folder name for this session's saved videos and csv files
    # Video save path.
    # folder_videosave = '/home/multimodal/PycharmProjects/Data_save'  # (old) save videos on the AGX itself
    folder_videosave = os.path.join(FileFolder_root, FolderName).replace('\\', '/')  # pulse-wave save path

    # print(FileFolder_root, FolderName, folder_videosave)
    # Publish the active database name and save folder for other tools to read.
    with open(Flag_txt, 'w') as f:
        f.write(DBvisionname + '&' + folder_videosave)

    ########################################################yzx
    # Timer object used to periodically refresh GUI data.
    # NOTE(review): v_timer2 is never started/connected in this chunk —
    # presumably start_gui (defined elsewhere) wires it up; confirm.
    # v_timer1 = QTimer()
    v_timer2 = QTimer()
    # v_timer3 = QTimer()

    # Queues for passing data between the producer processes and the
    # GUI / consumer processes.
    q_rPPG_RESP = multiprocessing.Queue()
    q_rPPG_BVP = multiprocessing.Queue()
    q_watch = multiprocessing.Queue()
    q_pad = multiprocessing.Queue()
    q_status = multiprocessing.Queue()
    q_eye = multiprocessing.Queue()
    q_mouth = multiprocessing.Queue()
    q_rPPG_HR = multiprocessing.Queue()
    q_rPPG_RR = multiprocessing.Queue()
    q_imgp = multiprocessing.Queue()
    q_imghp = multiprocessing.Queue()
    q_imgweb = multiprocessing.Queue()
    q_imgs = multiprocessing.Queue()
    q_exit = multiprocessing.Queue()
    q_exit_sql = multiprocessing.Queue()
    q_HPdata = multiprocessing.Queue()
    q_webishis = multiprocessing.Queue()
    q_sqliscur = multiprocessing.Queue()
    q_datawebcur = multiprocessing.Queue()
    q_datawebhis = multiprocessing.Queue()
    q_multimodal = multiprocessing.Queue()
    q_prediction_result = multiprocessing.Queue()
    q_face_status = multiprocessing.Queue()


    # Worker processes (targets are defined earlier in this file).
    process1 = multiprocessing.Process(target=watch, args=(q_watch,))
    process2 = multiprocessing.Process(target=run_server, args=(q_imgweb, q_rPPG_RESP, q_rPPG_BVP, q_rPPG_RR, q_watch, q_rPPG_HR, q_HPdata, q_pad, q_webishis, q_sqliscur, q_datawebcur, q_datawebhis, q_multimodal, q_prediction_result,))
    process3 = multiprocessing.Process(target=cam_capture, args=(q_imgp, q_imgweb, q_imghp, q_imgs, q_exit,))
    process4 = multiprocessing.Process(target=video_processing, args=(q_rPPG_RESP, q_rPPG_BVP, q_rPPG_HR, q_rPPG_RR, q_imgp, q_eye, q_mouth,))
    process5 = multiprocessing.Process(target=pad, args=(q_pad,))
    process6 = multiprocessing.Process(target=video_processing_HP, args=(q_imghp, q_status, q_HPdata, q_face_status))
    process7 = None
    process8 = multiprocessing.Process(target=SAVEandGET_sql, args=(q_webishis, q_sqliscur, q_datawebcur, q_datawebhis, q_exit_sql,))
    process9 = multiprocessing.Process(target=fatigue_detection, args=(q_multimodal, q_prediction_result, q_status, q_face_status,))

    stop_event = threading.Event()
    monitor_thread = threading.Thread(target=process_monitor, args=(process3, stop_event,))  # a child process cannot supervise a sibling child; the watchdog must run as a thread in the main process

    QApplication.setAttribute(Qt.AA_ShareOpenGLContexts)
    app = QApplication([])
    qfile_FM = QFile(file_gui)
    # NOTE(review): the QFile is opened and immediately closed before being
    # handed to QUiLoader — looks redundant; confirm whether it is needed.
    qfile_FM.open(QFile.ReadOnly)
    qfile_FM.close()
    QUiLoader().registerCustomWidget(PlotWidget)
    gui = QUiLoader().load(qfile_FM)

    gui.ButtonCapture.clicked.connect(start_gui)
    gui.ButtonCapture_2.clicked.connect(tp)
    # app.aboutToQuit.connect(tp)

    gui.show()  # display the main window on screen
    sys.exit(app.exec())  # PySide6 renamed exec_() to exec()




