import cv2
from PyQt5 import QtCore
from PyQt5.QtCore import Qt, QTimer
from PyQt5.QtGui import QPixmap, QImage
from PyQt5.QtWidgets import (QPushButton, QVBoxLayout, QWidget, QHBoxLayout,
                             QGroupBox, QDialog, QApplication, QLabel, QTextEdit)
import sys
from ultralytics import YOLO
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import time  # used for recognition throttling and log timestamps

class UI(QDialog):
    """Real-time facial-expression recognition window.

    Captures frames from the default webcam, detects faces with an OpenCV
    Haar cascade, classifies each face with a YOLOv8 classification model,
    overlays the result on the live feed, and logs detections to a text box.
    """

    def __init__(self):
        super().__init__()
        self.setWindowTitle('表情识别')
        self.resize(800, 600)
        self.setStyleSheet("""
            color: #333333;
            font-size: 10pt;
            font-family: "黑体";
        """)
        self.cam_img = None

        # Read-only log box for recognition results.
        self.text_edit = QTextEdit(self)
        self.text_edit.setReadOnly(True)
        self.text_edit.setFixedSize(600, 600)

        # Set up video capture (default camera, index 0).
        self.cap = cv2.VideoCapture(0)

        # Initialize UI components.
        self.initUI()

        # Timer drives the frame-refresh loop.
        self.timer = QTimer()
        self.timer.timeout.connect(self.update_frame)
        self.timer.start(30)  # update every 30 milliseconds

        # OpenCV's Haar-cascade detector finds faces; YOLO is used only for
        # per-face expression classification.
        self.face_cascade = cv2.CascadeClassifier(
            cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
        self.emotion_model = YOLO("Yolov8-in-facial-expression-classification-main/runs/classify/train_run22/weights/best.pt")  # expression classifier

        # Throttle state: run the model at most once per interval.
        self.last_recognition_time = 0
        self.recognition_interval = 1.5  # seconds between model inferences
        self.recognition_active = False

        # Latest classified emotion, drawn as the overlay label.
        self.current_emotion = "等待识别"

        # Cached CJK-capable overlay font; loaded lazily on first use so it
        # is not re-read from disk for every face on every frame.
        self._overlay_font = None

    def initUI(self):
        """Build the widget layout: video label, log box, and toggle button."""
        self.label = QLabel(self)
        self.label.setGeometry(QtCore.QRect(0, 0, 600, 600))
        self.label.setScaledContents(True)

        self.button = QPushButton('识别表情', self)
        self.button.clicked.connect(self.recognize_emotion)

        box1 = QGroupBox("实时图像")
        layout_box1 = QHBoxLayout()
        layout_box1.addWidget(self.label)
        layout_box1.addSpacing(30)
        layout_box1.addWidget(self.text_edit)
        box1.setLayout(layout_box1)

        box2 = QGroupBox("功能")
        layout_box2 = QHBoxLayout()
        layout_box2.addWidget(self.button)
        box2.setLayout(layout_box2)

        container = QVBoxLayout()
        container.addWidget(box1)
        container.addWidget(box2)

        self.setLayout(container)

    def get_emotion_name(self, index):
        """Map a classifier class index to its Chinese emotion label.

        Returns "未知表情" for any index outside the trained 0-7 range.
        """
        emotions = {
            0: "愤怒",
            1: "蔑视",
            2: "厌恶",
            3: "恐惧",
            4: "开心",
            5: "中性",
            6: "悲伤",
            7: "惊讶"
        }
        return emotions.get(index, "未知表情")

    def _get_overlay_font(self):
        """Lazily load and cache the Chinese-capable overlay font."""
        if self._overlay_font is None:
            self._overlay_font = ImageFont.truetype("simhei.ttf", 32)
        return self._overlay_font

    def update_frame(self):
        """Grab one frame, optionally detect/classify faces, and display it."""
        ret, frame = self.cap.read()
        if not ret:
            return  # camera read failed; skip this tick

        display_frame = frame.copy()

        if self.recognition_active:
            # Haar-cascade detection works on a grayscale frame.
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = self.face_cascade.detectMultiScale(gray, 1.1, 4)

            for (x, y, w, h) in faces:
                # Outline the detected face on the display copy.
                cv2.rectangle(display_frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

                # Crop the face region for expression classification.
                face_crop = frame[y:y + h, x:x + w]
                if face_crop.size == 0:
                    continue

                current_time = time.time()
                # Run the model only when the throttle interval has elapsed.
                if current_time - self.last_recognition_time >= self.recognition_interval:
                    emotion_results = self.emotion_model(face_crop)[0]
                    top1 = emotion_results.probs.top1
                    self.current_emotion = self.get_emotion_name(top1)

                    # Log the result with a timestamp.
                    current_time_str = time.strftime("%H:%M:%S")
                    self.text_edit.append(f"[{current_time_str}] 检测到表情：{self.current_emotion}")

                    self.last_recognition_time = current_time

                # Draw the current emotion label every frame via PIL so that
                # CJK text renders (OpenCV's putText cannot draw Chinese).
                frame_pil = Image.fromarray(cv2.cvtColor(display_frame, cv2.COLOR_BGR2RGB))
                draw = ImageDraw.Draw(frame_pil)
                font = self._get_overlay_font()

                # Measure the label so the background box fits the text.
                left, top, right, bottom = draw.textbbox((0, 0), self.current_emotion, font=font)
                text_width = right - left
                text_height = bottom - top

                # Solid background keeps the label readable over the video.
                draw.rectangle(
                    [(x, y - text_height - 10), (x + text_width, y)],
                    fill=(0, 255, 0)
                )
                draw.text(
                    (x, y - text_height - 5),
                    self.current_emotion,
                    font=font,
                    fill=(0, 0, 0)
                )

                # Convert back to OpenCV's BGR format.
                display_frame = cv2.cvtColor(np.array(frame_pil), cv2.COLOR_RGB2BGR)

        # Convert BGR -> RGB and hand the frame to Qt.  Pass the row stride
        # explicitly: QImage otherwise assumes width*3 bytes per line, which
        # produces sheared images when the array stride differs.
        frame_rgb = cv2.cvtColor(display_frame, cv2.COLOR_BGR2RGB)
        image = QImage(frame_rgb.data, frame_rgb.shape[1], frame_rgb.shape[0],
                       frame_rgb.strides[0], QImage.Format_RGB888)
        pixmap = QPixmap.fromImage(image)
        self.label.setPixmap(pixmap.scaled(450, 450))

    def recognize_emotion(self):
        """Toggle live recognition on/off and update the button label."""
        self.recognition_active = not self.recognition_active
        if self.recognition_active:
            self.text_edit.append("开始实时识别表情...")
            self.button.setText('停止识别')
        else:
            self.text_edit.append("停止识别")
            self.button.setText('开始识别')

    def closeEvent(self, event):
        """Release the camera and stop the refresh timer when the window closes.

        Without this the VideoCapture handle leaks and the timer keeps firing
        against a dead capture after the dialog is dismissed.
        """
        self.timer.stop()
        if self.cap is not None and self.cap.isOpened():
            self.cap.release()
        super().closeEvent(event)

if __name__ == "__main__":
    # Launch the Qt event loop with the recognition window as the sole view.
    application = QApplication(sys.argv)
    window = UI()
    window.show()
    sys.exit(application.exec_())
