import argparse

import cv2
import mediapipe as mp
from ffpyplayer.player import MediaPlayer

from datetime import datetime
from matplotlib import pyplot as plt
import mss
import numpy as np
from scipy.signal import find_peaks
from scipy.spatial import distance as dist

from fer import FER

import threading
import time
import sys
from PIL import ImageFont, ImageDraw, Image
import os
import io
import base64

MAX_FRAMES = 120 # modify this to affect calibration period and amount of "lookback"
RECENT_FRAMES = int(MAX_FRAMES / 10) # modify to affect sensitivity to recent changes

EYE_BLINK_HEIGHT = .15 # threshold may depend on relative face shape

SIGNIFICANT_BPM_CHANGE = 8

LIP_COMPRESSION_RATIO = .35 # from testing, ~universal

TELL_MAX_TTL = 30 # how long to display a finding, optionally set in args

TEXT_HEIGHT = 30

FACEMESH_FACE_OVAL = [10, 338, 297, 332, 284, 251, 389, 356, 454, 323, 361, 288, 397, 365, 379, 378, 400, 377, 152, 148, 176, 149, 150, 136, 172, 58, 132, 93, 234, 127, 162, 21, 54, 103, 67, 109, 10]

EPOCH = time.time()


recording = None

tells = dict()

blinks = [False] * MAX_FRAMES
blinks2 = [False] * MAX_FRAMES # for mirroring

hand_on_face = [False] * MAX_FRAMES
hand_on_face2 = [False] * MAX_FRAMES # for mirroring

face_area_size = 0 # relative size of face to total frame

hr_times = list(range(0, MAX_FRAMES))
hr_values = [400] * MAX_FRAMES
avg_bpms = [0] * MAX_FRAMES

gaze_values = [0] * MAX_FRAMES

emotion_detector = FER(mtcnn=True)
calculating_mood = False
mood = ''

meter = cv2.imread('meter.png')

# BPM chart
fig = None
ax = None
line = None
peakpts = None



# 在全局变量区域添加logo加载代码
logo = None
try:
    # 注意：这里使用示例base64数据，您需要替换为实际的logo数据
    logo_data = 'iVBORw0KGgoAAAANSUhEUgAAAGQAAABoCAQAAACtKZ2kAAAACXBIWXMAAAsTAAALEwEAmpwYAAAABGdBTUEAALGOfPtRkwAAACBjSFJNAAB6JQAAgIMAAPn/AACA6QAAdTAAAOpgAAA6mAAAF2+SX8VGAAAfOklEQVR42rycd3Qd133nP7fMzCt4KAQJgmgsYpEoUhQpUoVqkSyXyLEtuSSxnTjFJ7u2YzvZ5CR7Nvl3T87Zk02PI6U6x5vETmI7lmxHoqXIRVYXRZGi2ERSJEABIHp7Zcq9d/94g4f3HkiRKtbMATkA3szc7/3dX/v+fhfiES50lDD4OCwOB4BEYhFYAAwCCWgMFge46idUkhfttLsO2kTW5mKnPNFGEs9ItBAlV2ZOzstpNefmksjDIBAILBaNReCQGBQWEAjAIFFADGgyXPjQvOVD4KRbbXvNgOtxq1yby4rACSTOOUcCDoRwxIjqmJ1IqDAvxtyoOCcHxYiI3voo9FsBAHSYjcm2ZK1d7XJC4ITBgbNIYRFSVmXlcEgEwjnrnLBOuAxZ1iQ7BaIiJtVZ9TLHGBfvPBABrWZ7sstsMm1OOCMsFimFkJQpMa+KckpVkllp/HThGAxeq8mywhZcq8u7jJM4rFN2te2JbxLFeFAd1C+KEfHOABE4ZTfGe5LrWOEgwaKUk/NinAk95A/bMTvrKr6prnlJgCLBEGHxkCgiqXKu1XTRbXttt+1wHVY6I3yzJb5afNA75D3DcVH8iQIRkE1uS/Ym/VYTSyO0iNQ5dUS+oobsrEUiU9PgakbCLbvGssCCHFZVU5JX/fFmc6VZb30SvOjG+AamgmfVI3LyJwJEQGBuDN+X9BFjUFLoQW+/fIHXRDrMN3qkdxXlMf+Ye9CtcTuiXWa9FS52rZX3y73+j7zviTn39gIRmF2Ve81aYkICGamn/Mc5qq15kyCaITkY8UZ42G6wt8fXmTYRulzlQ/FNwSPevst7hb4sWRSie6J3OSsiAjnlP5R5Khkm9R9v3+EATvun9beTW83NZrULTXvxk8E1+qti6DJGeSmHCMkN4b1JryiLjJrVT+hH5KQmQqQuzCEgtUwSD4PB4WHRhEhyqbLHxGQwKGJUapCrOuOlU6KqlqP6xNbkzuhWu8qF+CxkHs08JCrRW3GIwq/8Yni7i4mEHzzuPeDGxNsshwvLRszpb+nvx3eHdziLX/6IuSb/V2L89d4sX/eJHeXfDH+KMlrP5+4P/pYx3rHDwWzw1dz/1cMEshRvWPifbpN4M0AEZn3x96KrKZPLHMv+H/W0450+HJzI/UHmhyIQkVlR+i27W7xRIAK7YeE345XEyst8O/OHjLx5GOItgRHF4O+zfyeMAK/0ueR28UZ0RGA3Vn7D5IXVYf7L9vk3NmyRzo9IzcDiFQgkEolY8iKXJRf1eHY0/KxZIeLip6TJ/PhC92p3gaFEfaUvmLxwMvbuU4ftZQOQQEIEzJLgMUkL48yTkGWAEA9DAcimgNxlmnCHfMX7Qz7v+my08Gli/xl3OUBs2+znkjZhVJT5kjt8aQAScMTEzGOQVMgziSJPCZ85JjHk6SYiJCTCEKCJ0LSh8dOw5lKAvJHMny78ll3jkoVfaZn2TrhL6oha+NWkT8SK/F97h/UlZWApMs2rTDPLFGVcbQmJ9EuhUOmnqyc4QqYYZ4RxxiiTpAvudZYXmfGWv5DTTlq/+BlWqtrT0lNQf0rKHyzvoiL8/FcyL2qCi1gDgcQQMskg55ijnP5MvAFdqkKKmGWC00xRwdW0ablNslRgOHM/FmNWFn+5mqXVjdzh6iLU8KqFn6FMNrNP/zDBYrC1iLZ+CAkzDDHEDPEbBHAhQIKYBYYZZ5b4Ak/TaBQAmeMt/yR8UalcU74rIa47dVwvwEzxk06iMqfy33B1jxFU6l4bs8A4kKDfEoTmZSowlJlHsJqgAYZXrys/sOuLd4ioeG/HYV3nFKRi8fRIfiZaS6JLub8XYaPgNCBQRIwzxQzmbYPQDMcxyzCzNa2pLuKlMyH7NX8QafPlTwhxgaUFcW/5LlERmeBBNxQ33Bwj0FimGWPmLS6lSx8SKDPGeQxZPETTqcqFf5VSRpUd4Y1L45AJ1TMWpXtsDh0cC/6r2RtXZ2WY0UsGZ2+fbByTnCNO7V39Cd5LmR/i26T8IZGrWS0fH58AuT3aQyiT7Ndk3GTYUDhGqLxDIJYkYxhkAUfStLgSMg/KojBhb+lOl/5UVnU+EsW7LM7PHNenoNkkwyjxm1pO4i1FXBLDOUq1zGVRCRxyMniWgDjcK3R1snX1BaYnuopYaPWoWWZsHTNULnsgApmaS4VQLrt4oxOiIiKNfkM6JqkwyDpU06gM+rtqjw3iNaWt+pADdJWkjG50gTDeKXXILJvRaUpIzCVfKdEobFuxa75/bFWpK2kvBy5bERZQjCJDXfFmC1PZsfbhlmE1odEklwElZpJVy6CIcf9AeJt10W1+FQgIXK5yPTEqeETZZnkUL7moBBpFeWBqw1h3uW+hr1SIPIuzWOFwDkH6lUYpEs9kSrmR/GDHcNup1kFp5CWglDhP37JoLHg03IOKt9sBOQjaIkiuMWtIvCG5P256xByl18mGBQpHpXto99DVMxvKWYuwIrHGJhaJwsPHQwKGmIiEJM3VY3/hCrfZEURtwz0vr9kvT2kXvw6UGTzWNK0KfcY/Vt5hg2hvZtChYwThLhyed0glrmGYCZPkXkcO5KauPXfTxBXFgjUiMuUEQZ4OeuhhDSsokE+jNUdIiXmmGOE1Rpm0xQgkkTjfN77+xLvbzq57pnN/ZtJcFMo4WQrYhhF4+ys7icNtgedi7ePaS1tcLK1+vvFjhuJFgmuJJu4evnVoz1x3YmVsSzFZ1rCRK1lHFy14uFpRwtVSrKqSJ5SY4jTHOcFrLo69uCLKG0Y2FT6w9qX+HwQnLrbQJsg0aIpDvqRmTM50m355WkuijaZdGG+Is41AJi5oNCWaUv/pOwZvXCi4yJZjfLrZzg1soA1Bklr6Rr5KNFxl6GM9d7LAGZ7jRYZdFGkWModvOXV9z6HV+1qOK+JlKyBhjvaGzEVMq1eS65DJNcFpbYi3CYHWR2Wdoium09saH+Yxs/Xoe1/bUs6p0JYs3dzMDvppxxKnoaWr2TuJqmODLaZK9dTCHo+tbGOBQQ7xFEPWlSvi1HXnruk8s/kHK5+QrvntIfMUGtOtF+LdziRXeQ9qoZMNJELIY/X3JcxdAMTC6mP3vHJ9WcvQlC3ruIOb6MKSUKkbflVuPo4FppkjxOHTQhsdaOKatByOEPDYwlbexwEe46hLKlIMbxzb1H9L/zdzJ5ptaJmWhuWlz8qK0UkPbTrpMJ0gZ9WZugCMhSYLoXDZsz994s6FVlUhSbiK97OTFkJKDVRClTH0meJHHOIsU0QpB+lToJ+t3EgPpmHhOCIEOe5gL4d5iBddEkpevXLkd9Y9v+4BPZo0QJmirR7KFPN02BbXr20POZyYdQuu9uGFBt8h8Bnf9tInRvpF6MoVruD93EyOkFJKmNa/KMMk/8UPGEmrVDKNDiImGON5vs0e3scGYgyizhRYKsAudrCf7/AyhKE4estrV1/19a4fyQaZQPsSkJI4LzrRZkDbPjRGzSxpiGOsTjs0ru3o+4+8qyxlKWYNP82dFKhQRiKaaAMNPMLXGcZDYVFkyNT8SBmHYo6HeYL38GFaiJYZgwqC69nJ03yDM06V5vPPfnrd7q3/rofiGpQSrTXNE6gRu91Zs1rblVgp9cjSr84T1ngoj5HbDt071ikrSZLnA3yATkJK6Sw30gM+8/wjjyMJMLQzQC+t+DWWZZ5RBjmPR8Q3OcRn2Ey5SbMsjgqSW9nFD/kWo0k2ObtjfMumh3u/JdwilDnaUigCNRgjrO1SH/2gaRNKP8mrFosjYRJHngqQC4597NmfLXoyStjBF7kLTZgWjlOyOX2cwGeCP+F5fCwtXMctrKcVH0WV7wgo0MtGVlBkAZ9xnqOPgbRATY3Vr34Xo9nKHoqcRiRGjm0P1606nis7DIIISSZ9s80le51xnvrZm82ATIL/lBPVh5QoY8mTEK5+/vMn9oqKsZqP8t9YTaXmWUVTtu0xxx9xlAyWddzJemRagRe1zzssglVcAYyhCDnABnobzMoSKEtMgRvp5ARFp+Pp/qldnSOFsQhwaPI1uim+1bbLWfVLxwXZx3IvePj4TBMRY2hn7JqnvjDap8oRV/B57sIQNbysnhCVwH3sJ4NhFzeTIUHV0jBXy/u91IL1kmMQQZlXuCmd2/onLgFyXMluxhhEJ+WWwRtby62nHQ4fTRaNwivJYZLsg+pTJQ7qswqJYIwxckQIJu544tdKWRE53stnWUt4Afu0dBXwCA/gY7iaG9IizrMM0ltTZYdkmtOsQOCwdJFlCMk0ZXankhN1z136N6admwg4Tmwt53YKr+uEsz4JEdnqRI2q59WY+iQGiUYwwSSQxYhXPvr8zyeJNQGf5uNo4tq819PUi1B8zvGXRFg2cwsWh2KB7zPGRnI1k+DxNM+wjlYs4FiNZgifkwywoW55LVF0IjUACZJr2chx5hxmeHuybuXhbChI8FGIlHurmeiYUpWj8I/+6pEPUYltF7/LeymTNIQdi1Spq+mHZB+TQDu7a4tumoSE8Tp7FHIex0j6QkfMVtZicDxAsY4NWExmHa4ufC1xLb/HFirOK76y45nfsJ0y9UJN3K/FVgeln/rskTtkKXSb+X12plzu0gtEXfhXXf2aYZ7Cw7GdQq1JZgSLZbQGXDHFPJphkho4xU58PE5ysEbBuTppNBr4Ej38PrdTQZWGNz3+W0mbSg1IDYhIXb7AE0c/dWy3LJa5it+hl9LyIjLUOoYsDovmBSYRdLKh1tETMwzIOo8keY0QyRTFmtQSulKZPJEOxzXX4Bv+D8ny69xKCV0+2/ej33BtutaVBFLgp68SwcHPnPgpWQq5jd+lg0qd8EQNgq1REiKNSJ9HYrmCTK1NaYoJ2mllimlUGoIPVSlypuu4GcEVKDyOcx5dN02iLn6zdYYlRvPr3EuCqJy54rHfLnV7tVRb1riSYP/njt4syxVu5/O0EtUZwyVh07QyFROcRRPQV9MZxRAlNtBLhZHUHc4zRY51xIykMiuhsHSQQzDNq+i697mGJbykjQKD4lf4GDGqcnbdt78w3s4ir1W17KH4zi8duU4XDe/li2QQBPh4eAT46fXilawTuWKEWRx5WhYpBgzDaHrpBMawCCSzVGjnKhSjJPi8wHdIgDztWBLO1EGoRm1B+r4Av3Z6+GjgF/k4Mbp8rv+Hn410ddHr6jwc/OCxW72ioYc7OUOSLoh6RV+cL0VPHRDJFBZJjnxKo0kmmaBAO+AzQ4RGMELESlbRzhQlBGeYZITNONo5B4zV/I3AoRlloRYYyrrfVRebZg+HOIpfPnXV47/8ri9jHHoezau3PfFhXXYIJvjfNNcMbfqQxcf8DD9XU3jBHKAo8hQOR0IXgiID+LTSwixzrCRmGE0XPqs4wiQVZpE8zzBwHgEUU08iAJ+nuZ9KWhGpsi5LS60KtVo9cKjyi3e0FXd+LXa6gpbH7jJOO4etZXpL8VGjF3dEPMeHa20XYHBI5nguNYZ52oAuFFk6eYVRulMZrcTSw8u8hsaS5zWGgICgptIuXa4vcp58LZMUtbG4OiPj8KqOuXzophUPiJJuw3dd505vUKHDY206QNFEmi7ZLckH8es8sY/A0MW1GCQlnmQCj9VYBKs5yXkE5ymxhjwJK8lzFihwO3OAZIhBSDVvMc1+T7q0qPNdSyGSS2GMUURgvVUTnZFBK3C7vz66YbBbhRF7+EiaIzTKZWkeFF6aqFZ/UwAsit7UwhznNdroxKBYTcB5yoxi6UKSkKOVCRz99NCLw2MWCxQagKznf9Ucp6sLVpZ0M8Mz/A1gdNvC7f+yJkmQkJCduvVL7fM2EHyDH9FOgIeX2gmdXvn4VCnoxnx7JR6CEhUSYgTrqbCaLA5DG63MM8IEAd1Uo9YOQLAGiIgJmUECqxrcYYJMbVTVbnl4eGh0etXGMf6GIkZnxd1/13ZynjLSw0ewZuh9f9E24QLDfXwzTWItNnWBBodJv7cNlVfDatqABWbTlHYt29mUBoY+q4g5yCyttKVGpAfwWYOF1EGCx/q6xerqYoilMbj0yhHwJH/CAkLn7U/9Q9+BuJocVKsLPitPbP+jVeddYPgHvpJWDanRA41KvyR0wyrWYYgYQgGWDHfSVbNB3QjOEdNBgAUsq5C0siIFMkURWMUGkmXlDHEBcyPJ8D3+lFmU12buvr//x2Fj0OiI8Udu/ZPe8yII+A/+GluDUp/jiSbyx6LZiUNxlnItO7c1w92Bh8DRlb7I0MLVXFWL7k5jMWyiI5XhUnrlGiIIkfowzTf5GwzKyxXv/LM1+8MLdT4Y/NE7/7h7NMlkeJS/pJwWiV0DDLHUjpya49104pjh1DLe3pEng8FjdR34PWwiATTjDKFQ7L1AaNpcbHJ4KL7GPyHAbwm3/1n+5eRiLRyGzPk9f9o/kuR8fsgfMJjmYMt7Q13dPd3cQojkZWaboFgyrCChQEedDhhMOriDRMRsYQdRrThdX6iu92YZynyJf0PhMi0LO+/Ln2wsEqlPpXjKTJJHYReu3F/unFynk/M8S5bNKWmw5FHcMu/SzwHmiZlloKmypKlwkk0NyrxYlHieV1Bo/jt9mLo3iFrytTjTkgwH+XMO4JHkBk5u/1LulYgWssQE6Z1NQDwqZCvdzxXUuauFrbj9LHA1WZI6D7skSJdyhB208RSaWUqsbWr5yKHZUiMYlnjkl3kBRcwHeS/hRcumVc1Q7OM+JvCFy17z5M773KTFXBiIoMw4LXiE+IT0HFmx8Nq22PPMYY4xkFaL6oPqemtiGGCM42QZp0gPfk3dq9FC0BC/KRRHeA5FyBY+g6iZdNcARaSymOIf+A8cUmt/23d2fCWJK4g6INVKvPoFHI4YQ4zEJyQgRrLi9JpTs1vmOoJ4lCcRbEqHJ5toiEUq4hqGOIPPOCN00NagKfWZtUeJZzmIJKGbL9KVUhs0kEJVaft4PM2f8yI+ZAoLN/5j7z6wRCmQHAJFDoVG/WLN9bSlViggRmPIj/c/F7VNrpcY+yKv0kNXbSOMaHqtJWAXo5zBo8gZihQI8FANXVuCCid4kmE0Iev4H2xYVvhehKTJMMFX+QrzeJJM//Fd93W8HKNT9j5OnaxIbavYlwYFIQqfEqO0UyKTBuWG6ZsPfGx8ha4kLsP1fJi1RDXFdXWLzOGT8AD/ToSPIaCdVaykBS/lQWY5zyQLSCzwHj5GB2GdyRB1HFjAPN9lH1MEmEyhsuWB3n3CKAwZDPNYcmwAyigK1fuWgFT51DEklRRITEgBsergPUf3RlYkFVbyIe6krQZG1PErDonPAb7OERxeClSiEDhMGiMkGPq5hzvSnzX7C0lAVGXjUUglgoEj277mvRqh8FIgM+TYhEeZ8MJAqhZgjhmyNSAZcsSM7d7/82OrVQUbsZZ3cxvtdXR2vW8JCHmBx3iZEqKBOLVYfNZzC7fTTiU16q7BqflEHOC7HAa0sNm26S0PDHxf2jK2BiRC0k+AI7w4kOqszKUMShVIhjIa23bi7mO3zee90NqIAe7mBlbgiNPBLBHWkoCY0xzlJCPMkuDwaGEV69nKFrLEDV6lep+HpsQLPMxRDFokQdZseXbtf7jzOq0BVIH4BKxON5a9LpDFfodqV84iEFDA5Jrzd5+8bqEgKok19HATe9hAloSkKUEWeGgMZYokODQ5cmgscS11WyIwPGJe4yA/5iQOTxDkop6XNjy66kiZIpk6IAkr6Ko5gQsAMYTIlOGqzmqR+TqJVDnYMisZ7znz7rPXz7XK0JmIgE3s4Vr68DFNcKpeWtX0yC4rWSs0ljEO8yzHmEcipQuy4cBLmx8unCjjE1GuAZEoVtJZZ6hDIhQt1ad9r1rO2jXz0cwTrd91DcXgiSYgJdopYwm6T972yt65ThfLJHEJBTayi22sIZ/GUq7OeTaaVZFqjiBknGPs5zhTgIfQzi/M9z53xfdzZ6reItMEZBVddV5JUN4x95HM8x3fwYKeB4Qq3pusq3TlDsjhpTkLKBAub28mpjB6xb/1PDJ109kbxvqsF0SxOcwhcvSwgU300kVrqm+2ITq1OAxlJhnkFK/yGrMYNL40vnStI3371z8hRjzilEFpfG87LSw0NKMUfy5ZV+qRz4jzoBUgrJyToQzMlXK4Pq5tIeRCex0tMWp67X/275vZcv7mM1fOd1opTZicssd5mCxtrGI1HXSQJ4MHWEJKTDPJOBNMUcKikEJqtCI/s+pA95OZo0HoU74ACLBkWVEXUwDYNXalKMs5URKAloBwwaFkOybe7j3W+IAVTF6kH8WR4EzLke4jPS3TV8xumtk811tqQQiTmEk77l5OvcJiS79ZKsMJKbRyWpEt5c+uPN15suWYmHaEy7LEJRgePXhNgWe4x2WE84/peUcqEQgOVypORlf63XK0kZprbWJVlstGLLQd7D6oMF2Ta2c2zm6Y7yzl48BKh3PWCcA5ISQIIQTS6Thbyo93nl1xsvBqMppxgihlmi9+rEQ0rw2vsotEquxBv77JX4x5I1G/aTF7vW82zkoGSfESewksMQ5vrGNs9XMecctEe7S6snqmNWmVLVPOynxQLrcKUcqU9FRuvGUyM+mms2gMEcmyTu/lRxv5ZflMsi3pw6hJebiaYKU9jcTeM+F64nCn/6BLGudc0bqsM+XCgKqtMmIhWMifU6xA0s4LWLZxmK1U0MRpmS8k4fJ2a9k0OGxsfhIivtOB9k65YlPfr3xazmFNX3i1wTS1p2Zo4fJ2kSxm3Waxmzi1VDb93tS2jV/eYemgs24LzeJp10RXEgsyT1b5Lr3Y9+uTmco+KwJHdI8OltrMF89C6j/f2cPQwQAanZY4Fok6j+RufDx9TB5a5Nvk0rx7D8kZTLIxvl0u630WFCgg3kEwDkeetnTzTFh3RpSvLO8lEgQPxWZxt0Ld0lPj2e+7gKh8t2l3NJ6WmDw9ZFLm/CcPIkMnBVy6cBpOHX7YSudljmUPLRWBZL3Ict/T58GsiD7uNd2s0iA8z0ryTeW3t/ewOALytJHDYUmaphTCu6KriGWS+Xr9nMp6tXYLuQfxqJRuWtgbE9XOSmr8qjTZSvJk0W87GJv6rRV0piCqltCgahPqYdaWPuBCF2Qf807StDGkTqTBj5KdpeuIwk8EQ96QS2uprknsgnYkFSqETdTmm11KFkULeTrxSOpymyrAqBaX28L855OcwB/KfKORoJPNK7Dwz96EkDY//znX6aGxF9QJhyBDgRVkyKWbiN2bAlB90ir6WEsn6gLGWdSMeaJmfyleLRJlW/5FNm3VkZb608BEy5clIkl6Fz6X5OPXGZ6rDWINq8ijUxL79XYXLhkPh8CjlRV0sYZOvEvcF+IofTy8QZRFJv+tzMuqziB7eGhv2U3+S+7fZz8uK+EmvpD9i0v99QWHQ6HJYilgcUTkcSkJ6lIHuJihqLRg1EIOD41IffblOMnKJ8P3UnYtLY9mvm1o3rmo7QWGlnkoDsofoVzZar+Yv19Mu8swl6S0vyLDCgwxFTQJ1yLIcw2t5DDo1EiIukzfXc4mDC/+RPQuUZaZzI/z/3Shz6tPYJedDv8YmWirqJhuc616lemlvxxja2Smq2WAtm7/rUgLyhJHHkcHrQha8GhJE19byx8bm8xkWq5wdY2BaZ29UPlcvFdUyAVPtd+vjWD5KS82x/mvFh4mK6K4q/Tb5nbxppxatUcySZeWfVMGQWA3VX4v3EFZZjNP6b91F3mEvvhAsv9sJyo/a631y58WV+r/50rveLCFwPx0eI/xCUUm2Of/S3LRsEK/3px6+8RY5ddMC5XyLWog+KbeL95REHZd/OHkWhc6oUXmK+rR15On+oXX2eZlkKPBIdFpeoldwdxgNogKoxfXEdLeOpmG8l66LdKm3LppKkhcXEeAtcm94c8nfS4i450ufFk9XW0l8S+iDfpSK10P5f5Y3Vr5iFtBGF0b7dCHvX3ysHM/UUn0xHckt7kcFeEEwQPegyq61G4sfWmlBf147mjy0Wg3jijeFl+tT3qPqxfEwtsPQSi7Lbox3mlyInSJDPyjwb9y+nK8jL48CyQm/PvV5vie6CqLSMxGuyUe9w/ynDgpYvf2QMD1mWui681aK0UkEhHoU9nv6WeMvbzni4cu+mhHhENDLfbVxNeG7zGbbOAS4aRHIobVUXHUndXT0tqUIrVphBaSpYJGkGCR5InSLrh6f+MQedljrjTbzYDNkTiHJxI9GDzmnvSTRaPtY7AIDPla0+CbkEidxrzIi3bA7Q532T6LcLbHDLh3i/lkSg3LswyqcWaJGpuRaaoMCsApWl2nWWMHzIDrpN35aY+I0hPeAX+/OyFM/Ia8zhuSiEpdm0fs2e3JdXazW+WkM84ihBJCJJTkvJzR027WzcWzQRLNaiNIcAiRbUsUBdtmC7aDdtfhctYHDFYIFMhZedZ7Vh6QC9X+owS/FsS8zRKpMwCxfCF4QeTd5mhrstb2kkdgnXNZmxXdsXQCgS07YaNa6DIvF3uVrBNWOBDGKZSM1YR6VR0SJ5iWb9L/v6W/QecQRXXAOyDxukyvvcL0uy5bcDmkE1YIJ3B1IZAD4cAhhBQWIUIxLybFoDekzorzRKIWIb+54y3/VUAHiDE1pg4olG9aXScrbavJuUBmjSbrVOpAnSsJR0WGosg0U2pKztiKSQfw1i3f/x8A9fEZmB+In7MAAAAASUVORK5CYII='
    logo = Image.open(io.BytesIO(base64.b64decode(logo_data)))
    # 调整logo大小
    logo = logo.resize((100, 100), Image.LANCZOS)
except Exception as e:
    print(f"无法加载logo: {e}")


def chart_setup():
  global fig, ax, line, peakpts

  plt.ion()
  fig = plt.figure()
  ax = fig.add_subplot(1,1,1) # 1st 1x1 subplot
  ax.set(ylim=(185, 200))
  line, = ax.plot(hr_times, hr_values, 'b-')
  peakpts, = ax.plot([], [], 'r+')


def decrement_tells(tells):
  for key, tell in tells.copy().items():
    if 'ttl' in tell:
      tell['ttl'] -= 1
      if tell['ttl'] <= 0:
        del tells[key]
  return tells


def main():
  global TELL_MAX_TTL
  global recording
  global DRAW_LANDMARKS, FLIP, RECORD

  parser = argparse.ArgumentParser()
  parser.add_argument('--input', '-i', nargs='*', help='Input video device (number or path), file, or screen dimensions (x y width height), defaults to 0', default=['0'])
  parser.add_argument('--landmarks', '-l', help='Set to any value to draw face and hand landmarks')
  parser.add_argument('--ttl', '-t', help='How many frames for each displayed "tell" to last, defaults to 30', default='30')
  parser.add_argument('--record', '-r', help='Set to any value to save a timestamped AVI in current directory')
  parser.add_argument('--second', '-s', help='Secondary video input device (number or path)')
  args = parser.parse_args()

  if len(args.input) == 1:
    INPUT = int(args.input[0]) if args.input[0].isdigit() else args.input[0]
  elif len(args.input) != 4:
    return print("Wrong number of values for 'input' argument; should be 0, 1, or 4.")

  DRAW_LANDMARKS = args.landmarks is not None
  BPM_CHART = False
  FLIP = False
  if args.ttl and args.ttl.isdigit():
    TELL_MAX_TTL = int(args.ttl)
  RECORD = args.record is not None

  SECOND = int(args.second) if (args.second or "").isdigit() else args.second

  if BPM_CHART:
    chart_setup()

  if SECOND:
    cap2 = cv2.VideoCapture(SECOND)

  calibrated = False
  calibration_frames = 0
  with mp.solutions.face_mesh.FaceMesh(
      max_num_faces=1,
      refine_landmarks=True,
      min_detection_confidence=0.5,
      min_tracking_confidence=0.5) as face_mesh:
    with mp.solutions.hands.Hands(
        max_num_hands=2,
        min_detection_confidence=0.7) as hands:
      if len(args.input) == 4:
        screen = {
          "top": int(args.input[0]),
          "left": int(args.input[1]),
          "width": int(args.input[2]),
          "height": int(args.input[3])
        }
        with mss.mss() as sct: # screenshot
          while True:
            image = np.array(sct.grab(screen))[:, :, :3] # remove alpha channel
            image = np.ascontiguousarray(image)  # 确保数组内存连续，解决OpenCV兼容性问题

            #image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            calibration_frames += process(image, face_mesh, hands, calibrated, DRAW_LANDMARKS, BPM_CHART, FLIP)
            calibrated = (calibration_frames >= MAX_FRAMES)
            if SECOND:
              process_second(cap2, image, face_mesh, hands)
            cv2.imshow('face', image)
            if RECORD:
              recording.write(image)
            if cv2.waitKey(1) & 0xFF == ord('q'):
              break
      else:
        cap = cv2.VideoCapture(INPUT)
        fps = None
        if isinstance(INPUT, str) and INPUT.find('.') > -1: # from file
          fps = cap.get(cv2.CAP_PROP_FPS)
          print("FPS:", fps)
          # cap.set(cv2.CAP_PROP_BUFFERSIZE, 10)
        else: # from device
          cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
          cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
          cap.set(cv2.CAP_PROP_FPS, 30)

        if RECORD:
          RECORDING_FILENAME = str(datetime.now()).replace('.','').replace(':','') + '.avi'
          FPS_OUT = 10
          FRAME_SIZE = (int(cap.get(3)), int(cap.get(4)))
          recording = cv2.VideoWriter(
            RECORDING_FILENAME, cv2.VideoWriter_fourcc(*'MJPG'), FPS_OUT, FRAME_SIZE)

        while cap.isOpened():
          success, image = cap.read()
          if not success: break
          calibration_frames += process(image, face_mesh, hands, calibrated, DRAW_LANDMARKS, BPM_CHART, FLIP, fps)
          calibrated = (calibration_frames >= MAX_FRAMES)
          if SECOND:
            process_second(cap2, image, face_mesh, hands)
          cv2.imshow('face', image)
          if RECORD:
            recording.write(image)
          if cv2.waitKey(1) & 0xFF == ord('q'):
            break

        cap.release()
        if SECOND:
          cap2.release()
        if RECORD:
          recording.release()
  cv2.destroyAllWindows()


def new_tell(result):
  global TELL_MAX_TTL

  return {
    'text': result,
    'ttl': TELL_MAX_TTL
  }


def draw_on_frame(image, face_landmarks, hands_landmarks):
  mp.solutions.drawing_utils.draw_landmarks(
      image,
      face_landmarks,
      mp.solutions.face_mesh.FACEMESH_CONTOURS,
      landmark_drawing_spec=None,
      connection_drawing_spec=mp.solutions.drawing_styles
      .get_default_face_mesh_contours_style())
  mp.solutions.drawing_utils.draw_landmarks(
      image,
      face_landmarks,
      mp.solutions.face_mesh.FACEMESH_IRISES,
      landmark_drawing_spec=None,
      connection_drawing_spec=mp.solutions.drawing_styles
      .get_default_face_mesh_iris_connections_style())
  for hand_landmarks in (hands_landmarks or []):
    mp.solutions.drawing_utils.draw_landmarks(
        image,
        hand_landmarks,
        mp.solutions.hands.HAND_CONNECTIONS,
        mp.solutions.drawing_styles.get_default_hand_landmarks_style(),
        mp.solutions.drawing_styles.get_default_hand_connections_style())


def draw_bpm_histogram(image, bpm_history, width=200, height=160, bar_color=(0, 0, 255)):
    """
    在image左上角画心率历史趋势条形图
    :param image: OpenCV BGR图像
    :param bpm_history: BPM历史列表
    :param width, height: 区域宽高
    :param bar_color: 条形颜色
    """
    x = 10
    y = round(image.shape[0]) - 210  # 向上调整位置
    
    if not bpm_history or max(bpm_history) == 0:
        return
    
    n = len(bpm_history)
    bar_w = max(2, width // max(n, 40))  # 确保至少有2像素宽
    
    # 过滤并确定心率范围
    valid_bpms = [bpm for bpm in bpm_history if bpm > 0]
    if not valid_bpms:
        min_bpm, max_bpm = 60, 120
    else:
        min_bpm = min(valid_bpms)
        max_bpm = max(valid_bpms)
    
    # 确保合理的显示范围
    min_bpm = min(min_bpm, 50)
    max_bpm = max(max_bpm, 130)
    
    # 绘制加粗的条形，无背景和坐标轴
    for i, bpm in enumerate(bpm_history):
        if bpm <= 0:
            continue
        
        bar_h = int((bpm - min_bpm) / (max_bpm - min_bpm + 1e-5) * (height-15))
        
        x1 = x + i * bar_w
        y1 = y + height - bar_h
        x2 = x1 + bar_w - 1
        y2 = y + height - 1
        
        cv2.rectangle(image, (x1, y1), (x2, y2), bar_color, -1)
        cv2.rectangle(image, (x1, y1), (x2, y2), (0, 0, 150), 1)  # 深色边框增强可见性
    
    # 使用PIL处理中文显示问题，增加字体大小和描边效果
    try:
        from PIL import ImageFont, ImageDraw, Image
        # 尝试加载系统中常见的中文字体
        font_path = None
        for path in [
            "simhei.ttf",  # 当前目录
            "msyh.ttc",    # 当前目录
            "C:/Windows/Fonts/simhei.ttf",
            "C:/Windows/Fonts/msyh.ttc",
            "/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc",
            "/System/Library/Fonts/PingFang.ttc",
        ]:
            if os.path.exists(path):
                font_path = path
                break
        
        if font_path:
            pil_img = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
            draw = ImageDraw.Draw(pil_img)
            font = ImageFont.truetype(font_path, 24)  # 增大字体尺寸
            
            # 绘制带描边的文本
            def draw_stroked_text(x, y, text, font):
                # 先绘制黑色粗体描边
                draw.text((x, y), text, font=font, fill=(0, 0, 0), stroke_width=2, stroke_fill=(0, 0, 0))
                # 再绘制白色细体文本
                draw.text((x, y), text, font=font, fill=(255, 255, 255))
            
            # 标注“心率趋势”
            draw_stroked_text(x+5, y-30, "心率趋势", font)
            
            # 显示当前平均心率
            if valid_bpms:
                current_avg = sum(valid_bpms[-5:]) // min(5, len(valid_bpms))
                draw_stroked_text(x, y+height+10, f"当前心率: {current_avg} BPM", font)
            
            image[:] = cv2.cvtColor(np.array(pil_img), cv2.COLOR_RGB2BGR)
        else:
            # 如果找不到中文字体，使用默认字体显示英文
            cv2.putText(image, "Heart Rate", (x+5, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 0), 2)
            if valid_bpms:
                current_avg = sum(valid_bpms[-5:]) // min(5, len(valid_bpms))
                cv2.putText(image, f"Current: {current_avg} BPM", (x, y+height+20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 0), 2)
    except Exception as e:
        # 异常处理，使用默认字体
        cv2.putText(image, "Heart Rate", (x+5, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 0), 2)
        if valid_bpms:
            current_avg = sum(valid_bpms[-5:]) // min(5, len(valid_bpms))
            cv2.putText(image, f"Current: {current_avg} BPM", (x, y+height+20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 0), 2)

def write_log(message, log_file='app.log', log_level='INFO'):
    """
    将日志消息写入文本文件
    
    参数:
    message (str): 要记录的消息
    log_file (str): 日志文件路径，默认为 app.log
    log_level (str): 日志级别，默认为 INFO
    """
    # 获取当前时间戳
    timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    
    # 构建完整的日志行
    log_line = f"{timestamp} - {log_level} - {message}\n"
    
    try:
        # 追加模式打开文件，如果文件不存在则创建
        with open(log_file, 'a', encoding='utf-8') as f:
            f.write(log_line)
    except Exception as e:
        print(f"无法写入日志: {str(e)}")

old_mood_info=""
old_text_cn=""
 
def add_text(image, tells, calibrated):
    global mood,logo,old_mood_info,old_text_cn,all_emotions

    # 情绪翻译映射
    emotion_translations = {
        'angry': '生气',
        'disgust': '厌恶',
        'fear': '害怕',
        'happy': '开心',
        'sad': '悲伤',
        'surprise': '惊讶',
        'neutral': '中性'
    }



    # 在翻译字典中增加颜色信息 (BGR格式)
    emotion_translation = {
        "angry": {"text": "生气", "color": (255, 0, 0)},     # 红色 (BGR转RGB)
        "disgust": {"text": "厌恶", "color": (255, 100, 0)},  # 橙色
        "fear": {"text": "害怕", "color": (0, 0, 255)},      # 蓝色
        "happy": {"text": "开心", "color": (0, 255, 0)},      # 绿色
        "sad": {"text": "悲伤", "color": (0, 0, 139)},       # 深蓝色
        "surprise": {"text": "惊讶", "color": (255, 255, 0)}, # 黄色
        "neutral": {"text": "中性", "color": (192, 192, 192)}, # 灰色
        "Heart rate increasing": {"text": "心率加快", "color": (255, 0, 0)}, # 红色
        "Heart rate decreasing": {"text": "心率减慢", "color": (255, 165, 0)}, # 橙色
        "Change in gaze": {"text": "注视位置变化", "color": (255, 255, 0)}, # 黄色
        "Hand covering face": {"text": "手遮盖脸部", "color": (255, 255, 0)}, # 黄色
        "Finger movements": {"text": "手部动作频繁", "color": (255, 255, 0)}, # 黄色
        "Lip compression": {"text": "抿嘴", "color": (255, 0, 0)}, # 红色
        "Increased blinking": {"text": "眨眼增加", "color": (255, 255, 0)}, # 黄色
        "Decreased blinking": {"text": "眨眼减少", "color": (255, 165, 0)}, # 橙色
    }

    text_y = TEXT_HEIGHT
    if mood:
        # 获取情绪翻译和颜色
        mood_info = emotion_translation.get(mood, {"text": mood, "color": (255, 255, 255)})
        write(f"情绪分析: {mood_info['text']}", image, int(.80 * image.shape[1]), TEXT_HEIGHT, color=mood_info['color'])
        if old_mood_info != mood_info['text']:
            write_log(f"情绪分析: {mood_info['text']}", log_level='MOOD')
            old_mood_info=mood_info['text']
                # 可视化情绪置信度
            
        emotion_y = 150  # 起始Y坐标
        for emotion, score in all_emotions.items():
            # 翻译情绪名称
            chinese_name = emotion_translations.get(emotion, emotion)
            # 计算显示条长度 (最大10个字符)
            bar_length = int(score * 10)
            confidence_bar = '|' * bar_length
            # 显示文本
            text = f'{chinese_name}{confidence_bar} ({score:.2f})'
            # 使用中文字体显示
            
            write(f"{chinese_name}{confidence_bar}", image, int(.90 * image.shape[1]), emotion_y, color=(255, 255, 255), font_size=20)

            emotion_y += 30  # 垂直间距
            

    if calibrated:
        for tell in tells.values():
            text = tell['text']
            text_info = emotion_translation.get(text)
            
            if text_info:
                # 使用翻译和颜色
                text_cn = text_info['text']
                color = text_info['color']
            else:
                # 处理带冒号的文本
                if ':' in text:
                    prefix, suffix = text.split(':', 1)
                    suffix = suffix.strip()
                    suffix_info = emotion_translation.get(suffix)
                    
                    if suffix_info:
                        text_cn = f"{prefix}: {suffix_info['text']}"
                        color = suffix_info['color']
                    else:
                        text_cn = text
                        color = (255, 255, 255)  # 默认白色
                else:
                    text_cn = text
                    color = (255, 255, 255)  # 默认白色
            
            write(text_cn, image, 10, text_y, color=color)
            text_y += TEXT_HEIGHT
            
            if old_text_cn != text_cn:
                write_log(text_cn)
                old_text_cn=text_cn


            
    # 在左上角画心率趋势直方图
    draw_bpm_histogram(image, avg_bpms)
    # 在右下角添加logo
    if logo:
        try:
            # 转换为PIL图像
            pil_img = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
            
            # 计算logo位置（右下角）
            position = (pil_img.width - logo.width - 10, pil_img.height - logo.height - 10)
            
            # 确保logo有alpha通道
            if logo.mode != 'RGBA':
                logo = logo.convert('RGBA')
            
            # 创建一个透明的画布用于放置logo
            overlay = Image.new('RGBA', pil_img.size, (0, 0, 0, 0))
            overlay.paste(logo, position, logo)
            
            # 合并图像
            pil_img = Image.alpha_composite(pil_img.convert('RGBA'), overlay)
            
            # 转回OpenCV格式
            image[:] = cv2.cvtColor(np.array(pil_img.convert('RGB')), cv2.COLOR_RGB2BGR)
        except Exception as e:
            print(f"无法添加logo: {e}")


def write(text, image, x, y, color=(255, 255, 255), font_size=32):
    # 1. 转换为PIL的Image
    if image.shape[2] == 3:
        img_pil = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    else:
        img_pil = Image.fromarray(image)
    draw = ImageDraw.Draw(img_pil)

    # 2. 选择字体
    font_path = None
    for path in [
        "simhei.ttf",  # 当前目录
        "msyh.ttc",    # 当前目录
        "C:/Windows/Fonts/simhei.ttf",
        "C:/Windows/Fonts/msyh.ttc",
        "/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc",
        "/System/Library/Fonts/STHeiti Medium.ttc"
    ]:
        if os.path.exists(path):
            font_path = path
            break
    if font_path is None:
        raise RuntimeError("未找到合适的中文字体文件，请下载simhei.ttf放到脚本目录或自行指定路径。")

    font = ImageFont.truetype(font_path, int(font_size))  # 根据font_size计算字体大小

    # 3. 先画黑色粗体，再画指定颜色的细体（模拟描边）
    draw.text((x, y), text, font=font, fill=(0, 0, 0), stroke_width=2, stroke_fill=(0, 0, 0))
    draw.text((x, y), text, font=font, fill=color)

    # 4. 转回OpenCV格式
    image[:, :, :] = cv2.cvtColor(np.array(img_pil), cv2.COLOR_RGB2BGR)


def get_aspect_ratio(top, bottom, right, left):
  height = dist.euclidean([top.x, top.y], [bottom.x, bottom.y])
  width = dist.euclidean([right.x, right.y], [left.x, left.y])
  return height / width


def get_area(image, draw, topL, topR, bottomR, bottomL):
  topY = int((topR.y+topL.y)/2 * image.shape[0])
  botY = int((bottomR.y+bottomL.y)/2 * image.shape[0])
  leftX = int((topL.x+bottomL.x)/2 * image.shape[1])
  rightX = int((topR.x+bottomR.x)/2 * image.shape[1])

  if draw:
    image = cv2.circle(image, (leftX,topY), 2, (255,0,0), 2)
    image = cv2.circle(image, (leftX,botY), 2, (255,0,0), 2)
    image = cv2.circle(image, (rightX,topY), 2, (255,0,0), 2)
    image = cv2.circle(image, (rightX,botY), 2, (255,0,0), 2)

  return image[topY:botY, rightX:leftX]


def get_bpm_tells(cheekL, cheekR, fps, bpm_chart):
  global hr_times, hr_values, avg_bpms
  global ax, line, peakpts

  cheekLwithoutBlue = np.average(cheekL[:, :, 1:3]) if cheekL.size > 0 else 0
  cheekRwithoutBlue = np.average(cheekR[:, :, 1:3]) if cheekR.size > 0 else 0
  hr_values = hr_values[1:] + [cheekLwithoutBlue + cheekRwithoutBlue]

  if not fps:
    hr_times = hr_times[1:] + [time.time() - EPOCH]

  if bpm_chart:
    line.set_data(hr_times, hr_values)
    ax.relim()
    ax.autoscale()

  peaks, _ = find_peaks(hr_values,
    threshold=.1,
    distance=5,
    prominence=.5,
    wlen=10,
  )

  peak_times = [hr_times[i] for i in peaks]

  if bpm_chart:
    peakpts.set_data(peak_times, [hr_values[i] for i in peaks])

  bpms = 60 * np.diff(peak_times) / (fps or 1)
  bpms = bpms[(bpms > 50) & (bpms < 150)] # filter to reasonable BPM range
  recent_bpms = bpms[(-3 * RECENT_FRAMES):] # HR slower signal than other tells

  recent_avg_bpm = 0
  bpm_display = "心率值: 分析中..."
  if recent_bpms.size > 1:
    recent_avg_bpm = int(np.average(recent_bpms))
    bpm_display = "心率值: {} ({})".format(recent_avg_bpm, len(recent_bpms))

  avg_bpms = avg_bpms[1:] + [recent_avg_bpm]

  bpm_delta = 0
  bpm_change = ""

  if len(recent_bpms) > 2:
    all_bpms = list(filter(lambda bpm: bpm != '-', avg_bpms))
    all_avg_bpm = sum(all_bpms) / len(all_bpms)
    avg_recent_bpm = sum(recent_bpms) / len(recent_bpms)
    bpm_delta = avg_recent_bpm - all_avg_bpm

    if bpm_delta > SIGNIFICANT_BPM_CHANGE:
      bpm_change = "Heart rate increasing"
    elif bpm_delta < -SIGNIFICANT_BPM_CHANGE:
      bpm_change = "Heart rate decreasing"

  return bpm_display, bpm_change


def is_blinking(face):
  eyeR = [face[p] for p in [159, 145, 133, 33]]
  eyeR_ar = get_aspect_ratio(*eyeR)

  eyeL = [face[p] for p in [386, 374, 362, 263]]
  eyeL_ar = get_aspect_ratio(*eyeL)

  eyeA_ar = (eyeR_ar + eyeL_ar) / 2
  return eyeA_ar < EYE_BLINK_HEIGHT


def get_blink_tell(blinks):
  if sum(blinks[:RECENT_FRAMES]) < 3: # not enough blinks for valid comparison
    return None

  recent_closed = 1.0 * sum(blinks[-RECENT_FRAMES:]) / RECENT_FRAMES
  avg_closed = 1.0 * sum(blinks) / MAX_FRAMES

  if recent_closed > (20 * avg_closed):
    return "Increased blinking"
  elif avg_closed >  (20 * recent_closed):
    return "Decreased blinking"
  else:
    return None


def check_hand_on_face(hands_landmarks, face):
  if hands_landmarks:
    face_landmarks = [face[p] for p in FACEMESH_FACE_OVAL]
    face_points = [[[p.x, p.y] for p in face_landmarks]]
    face_contours = np.array(face_points).astype(np.single)

    for hand_landmarks in hands_landmarks:
      hand = []
      for point in hand_landmarks.landmark:
        hand.append( (point.x, point.y) )

      for finger in [4, 8, 20]:
        overlap = cv2.pointPolygonTest(face_contours, hand[finger], False)
        if overlap != -1:
          return True
  return False

# 初始化手指距离历史
finger_distances_history = []
FINGER_RELATIVE_MOVEMENT_THRESHOLD = 0.02  # 手指相对运动阈值，可调整

# 手指尖端关键点索引
FINGER_TIPS = [4, 8, 12, 16, 20]


def get_hand_movement_tell(hands_landmarks):
  global finger_distances_history
  
  if not hands_landmarks:
    # 没有检测到手，重置历史
    if len(finger_distances_history) > 0:
      finger_distances_history = []
    return None
  
  # 计算每只手的手指尖端之间的距离
  current_distances = []
  for hand_landmarks in hands_landmarks:
    hand_tips = []
    for tip_idx in FINGER_TIPS:
      if tip_idx < len(hand_landmarks.landmark):
        point = hand_landmarks.landmark[tip_idx]
        hand_tips.append( (point.x, point.y) )
    
    # 计算这只手的手指尖端之间的所有距离
    for i in range(len(hand_tips)):
      for j in range(i+1, len(hand_tips)):
        distance = dist.euclidean(hand_tips[i], hand_tips[j])
        current_distances.append(distance)
  
  # 如果没有足够的手指尖端来计算距离，返回None
  if len(current_distances) < 2:
    return None
  
  # 计算平均距离
  avg_distance = sum(current_distances) / len(current_distances)
  
  # 跟踪手指距离历史
  if len(finger_distances_history) > 0:
    # 计算与前一帧的距离变化
    prev_avg_distance = finger_distances_history[-1]
    distance_change = abs(avg_distance - prev_avg_distance)
    
    # 添加到历史
    finger_distances_history.append(avg_distance)
    
    # 只保留最近的帧
    if len(finger_distances_history) > RECENT_FRAMES:
      finger_distances_history.pop(0)
    
    # 计算平均距离变化
    total_change = 0
    for i in range(1, len(finger_distances_history)):
      total_change += abs(finger_distances_history[i] - finger_distances_history[i-1])
    
    avg_change = total_change / (len(finger_distances_history) - 1) if len(finger_distances_history) > 1 else 0
    
    # 检查是否超过阈值
    if avg_change > FINGER_RELATIVE_MOVEMENT_THRESHOLD:
      return "Finger movements"
  else:
    # 首次检测到手指，初始化历史
    finger_distances_history.append(avg_distance)
  
  return None


def get_avg_gaze(face):
  gaze_left = get_gaze(face, 476, 474, 263, 362)
  gaze_right = get_gaze(face, 471, 469, 33, 133)
  return round((gaze_left + gaze_right) / 2, 1)


def get_gaze(face, iris_L_side, iris_R_side, eye_L_corner, eye_R_corner):
  iris = (
    face[iris_L_side].x + face[iris_R_side].x,
    face[iris_L_side].y + face[iris_R_side].y,
  )
  eye_center = (
    face[eye_L_corner].x + face[eye_R_corner].x,
    face[eye_L_corner].y + face[eye_R_corner].y,
  )

  gaze_dist = dist.euclidean(iris, eye_center)
  eye_width = abs(face[eye_R_corner].x - face[eye_L_corner].x)
  gaze_relative = gaze_dist / eye_width

  if (eye_center[0] - iris[0]) < 0: # flip along x for looking L vs R
    gaze_relative *= -1

  return gaze_relative


def detect_gaze_change(avg_gaze):
  global gaze_values

  gaze_values = gaze_values[1:] + [avg_gaze]
  gaze_relative_matches = 1.0 * gaze_values.count(avg_gaze) / MAX_FRAMES
  if gaze_relative_matches < .01: # looking in a new direction
    return gaze_relative_matches
  return 0


def get_lip_ratio(face):
  return get_aspect_ratio(face[0], face[17], face[61], face[291])

all_emotions = {}
def get_mood(image):
  global emotion_detector, calculating_mood, mood, all_emotions

  detected_mood, score = emotion_detector.top_emotion(image)
  calculating_mood = False
  if score and (score > .4 or detected_mood == 'neutral'):
    mood = detected_mood
  # Get all emotions using FER's detect_emotions method
  emotion_results = emotion_detector.detect_emotions(image)
  all_emotions = emotion_results[0]['emotions'] if emotion_results else {}
  # Handle case with no detected faces
  if not all_emotions:
      all_emotions = {'angry': 0.0, 'disgust': 0.0, 'fear': 0.0, 'happy': 0.0, 'sad': 0.0, 'surprise': 0.0, 'neutral': 0.0} 


def add_truth_meter(image, tell_count):
  width = image.shape[1]
  sm = int(width / 64)
  bg = int(width / 3.2)

  resized_meter = cv2.resize(meter, (bg,sm), interpolation=cv2.INTER_AREA)
  image[sm:(sm+sm), bg:(bg+bg), 0:3] = resized_meter[:, :, 0:3]

  if tell_count:
    tellX = bg + int(bg/4) * (tell_count - 1) # adjust for always-on BPM
    cv2.rectangle(image, (tellX, int(.9*sm)), (tellX+int(sm/2), int(2.1*sm)), (0,0,0), 2)


def get_face_relative_area(face):
  face_width = abs(max(face[454].x, 0) - max(face[234].x, 0))
  face_height = abs(max(face[152].y, 0) - max(face[10].y, 0))
  return face_width * face_height


def find_face_and_hands(image_original, face_mesh, hands):
  image = image_original.copy()
  image.flags.writeable = False # pass by reference to improve speed
  image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

  faces = face_mesh.process(image)
  hands_landmarks = hands.process(image).multi_hand_landmarks

  face_landmarks = None
  if faces.multi_face_landmarks and len(faces.multi_face_landmarks) > 0:
    face_landmarks = faces.multi_face_landmarks[0] # use first face found

  return face_landmarks, hands_landmarks


def process(image, face_mesh, hands, calibrated=False, draw=False, bpm_chart=False, flip=False, fps=None):
  global tells, calculating_mood
  global blinks, hand_on_face, face_area_size

  tells = decrement_tells(tells)

  face_landmarks, hands_landmarks = find_face_and_hands(image, face_mesh, hands)
  if face_landmarks:
    face = face_landmarks.landmark
    face_area_size = get_face_relative_area(face)

    if not calculating_mood:
      emothread = threading.Thread(target=get_mood, args=(image,))
      emothread.start()
      calculating_mood = True

    # TODO check cheek visibility?
    cheekL = get_area(image, draw, topL=face[449], topR=face[350], bottomR=face[429], bottomL=face[280])
    cheekR = get_area(image, draw, topL=face[121], topR=face[229], bottomR=face[50], bottomL=face[209])

    avg_bpms, bpm_change = get_bpm_tells(cheekL, cheekR, fps, bpm_chart)
    #tells['avg_bpms'] = new_tell(avg_bpms) # always show "..." if BPM missing
    if len(bpm_change):
      tells['bpm_change'] = new_tell(bpm_change)

    # Blinking
    blinks = blinks[1:] + [is_blinking(face)]
    recent_blink_tell = get_blink_tell(blinks)
    if recent_blink_tell:
      tells['blinking'] = new_tell(recent_blink_tell)

    # Hands on face
    recent_hand_on_face = check_hand_on_face(hands_landmarks, face)
    hand_on_face = hand_on_face[1:] + [recent_hand_on_face]
    if recent_hand_on_face:
      tells['hand'] = new_tell("Hand covering face")

    # Hand movement detection
    hand_movement_tell = get_hand_movement_tell(hands_landmarks)
    if hand_movement_tell:
      tells['hand_movement'] = new_tell(hand_movement_tell)

    # Gaze tracking
    avg_gaze = get_avg_gaze(face)
    if detect_gaze_change(avg_gaze):
      tells['gaze'] = new_tell("Change in gaze")

    # Lip compression
    if get_lip_ratio(face) < LIP_COMPRESSION_RATIO:
      tells['lips'] = new_tell("Lip compression")

    if bpm_chart: # update chart
      fig.canvas.draw()
      fig.canvas.flush_events()

    if draw: # overlay face and hand landmarks
      draw_on_frame(image, face_landmarks, hands_landmarks)

  if flip:
    image = cv2.flip(image, 1) # flip image horizontally

  add_text(image, tells, calibrated)
  add_truth_meter(image, len(tells))

  return 1 if (face_landmarks and not calibrated) else 0


def mirror_compare(first, second, rate, less, more):
  if (rate * first) < second:
    return less
  elif first > (rate * second):
    return more
  return None

def get_blink_comparison(blinks1, blinks2):
  return mirror_compare(sum(blinks1), sum(blinks2), 1.8, "Blink less", "Blink more")

def get_hand_face_comparison(hand1, hand2):
  return mirror_compare(sum(hand1), sum(hand2), 2.1, "Stop touching face", "Touch face more")

def get_face_size_comparison(ratio1, ratio2):
  return mirror_compare(ratio1, ratio2, 1.5, "Too close", "Too far")


# process optional second input for mirroring
def process_second(cap, image, face_mesh, hands):
  global blinks, blinks2
  global hand_on_face, hand_on_face2
  global face_area_size

  success2, image2 = cap.read()
  if success2:
    face_landmarks2, hands_landmarks2 = find_face_and_hands(image2, face_mesh, hands)

    if face_landmarks2:
      face2 = face_landmarks2.landmark

      blinks2 = blinks2[1:] + [is_blinking(face2)]
      blink_mirror = get_blink_comparison(blinks, blinks2)

      hand_on_face2 = hand_on_face2[1:] + [check_hand_on_face(hands_landmarks2, face2)]
      hand_face_mirror = get_hand_face_comparison(hand_on_face, hand_on_face2)

      face_area_size2 = get_face_relative_area(face2)
      face_ratio_mirror = get_face_size_comparison(face_area_size, face_area_size2)

      text_y = 2 * TEXT_HEIGHT # show prompts below 'mood' on right side
      for comparison in [blink_mirror, hand_face_mirror, face_ratio_mirror]:
        if comparison:
          write(comparison, image, int(.75 * image.shape[1]), text_y)
          text_y += TEXT_HEIGHT

if __name__ == '__main__':
    write_log("测谎开始", log_level='START')
    main()