import cv2
import torch
import torch.nn as nn
from torchvision import transforms
from PIL import Image
import numpy as np
import pyautogui
import time

# === Always-on-top window helpers (Windows only) ===
import pygetwindow as gw
import win32gui
import win32con

# === Model definition ===
class EmotionCNN(nn.Module):
    """Three-stage grayscale CNN for 48x48 emotion classification.

    Input:  (N, 1, 48, 48) float tensors.
    Output: (N, num_classes) raw logits (no softmax applied).
    """

    def __init__(self, num_classes):
        super(EmotionCNN, self).__init__()
        # Each stage: Conv -> ReLU -> BatchNorm -> 2x2 max-pool (halves H/W).
        # Built in a loop; module order/count matches the original Sequential,
        # so state-dict keys (features.0 ... features.11) are unchanged.
        stages = []
        in_ch = 1
        for out_ch in (32, 64, 128):
            stages.extend([
                nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1),
                nn.ReLU(),
                nn.BatchNorm2d(out_ch),
                nn.MaxPool2d(2),
            ])
            in_ch = out_ch
        self.features = nn.Sequential(*stages)

        # Spatial size after three pools: 48 -> 24 -> 12 -> 6, hence 128*6*6.
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(128 * 6 * 6, 256),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(256, num_classes),
        )

    def forward(self, x):
        """Return class logits for a batch of single-channel 48x48 images."""
        return self.classifier(self.features(x))

# === Runtime configuration ===
# Fall back to CPU when CUDA is unavailable; the original hard-coded
# torch.device('cuda') and crashed on GPU-less machines.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)
num_classes = 3
# Index order must match the label encoding used at training time.
class_labels = ['angry', 'happy', 'other']

# === Model loading ===
model = EmotionCNN(num_classes=num_classes).to(device)
model.load_state_dict(torch.load('emotion_model_happy_angry_MyPhoto.pth', map_location=device))
model.eval()

# === Image preprocessing ===
# Grayscale, 48x48, tensor in [0,1], then normalized to roughly [-1,1];
# presumably mirrors the training-time pipeline — confirm against training code.
data_transform = transforms.Compose([
    transforms.Grayscale(),
    transforms.Resize((48, 48)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5], std=[0.5])
])

# === Face detector ===
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

# === Open the webcam ===
cap = cv2.VideoCapture(0)
window_name = 'Emotion Recognition'
cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)

# Show a dummy frame so the OS actually creates the window before we try
# to locate it by title for the always-on-top tweak (Windows only).
cv2.imshow(window_name, np.zeros((100, 100), dtype=np.uint8))
cv2.waitKey(1)
time.sleep(0.5)
try:
    win = gw.getWindowsWithTitle(window_name)[0]
    # Minimize/restore nudges the window to the foreground first.
    win.minimize()
    win.restore()
    hwnd = win._hWnd
    # Pin topmost at (100, 100) with a 640x480 frame.
    win32gui.SetWindowPos(hwnd, win32con.HWND_TOPMOST, 100, 100, 640, 480, 0)
    print("✅ 窗口已置顶")
except IndexError:
    print("❌ 找不到窗口进行置顶")

# === State tracking ===
# Last acted-upon label; mouse actions fire only on label transitions.
previous_label = 'other'

print("Press 'l' to exit.")

while True:
    # Grab a frame; a failed read (camera closed/unplugged) ends the loop.
    ret, frame = cap.read()
    if not ret:
        break

    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray_frame, scaleFactor=1.3, minNeighbors=5)

    # Default state when no face is detected this frame.
    current_label = 'other'

    for (x, y, w, h) in faces:
        # Crop the face region and run it through the same transform
        # pipeline configured at startup.
        face_img = gray_frame[y:y+h, x:x+w]
        face_pil = Image.fromarray(face_img)
        input_tensor = data_transform(face_pil).unsqueeze(0).to(device)

        with torch.no_grad():
            output = model(input_tensor)
            _, predicted = torch.max(output, 1)
            label = class_labels[predicted.item()]
            current_label = label

        # Draw the detection box: green=happy, red=angry, cyan=other (BGR).
        color = (0, 255, 0) if label == 'happy' else (0, 0, 255) if label == 'angry' else (255, 255, 0)
        cv2.rectangle(frame, (x, y), (x+w, y+h), color, 2)
        cv2.putText(frame, label, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, color, 2)
        break  # only process the first detected face

    # === Fire mouse actions only on emotion-state transitions ===
    if current_label != previous_label:
        # Release any held buttons before entering the new state.
        pyautogui.mouseUp(button='left')
        pyautogui.mouseUp(button='right')

        if current_label == 'happy':
            # 'happy' holds the left button down until the state changes.
            pyautogui.mouseDown(button='left')
            print("😄 Happy → Hold Left Button")
        elif current_label == 'angry':
            # 'angry' fires a single right click on the transition.
            pyautogui.click(button='right')
            print("😠 Angry → Right Click Once")
        elif current_label == 'other':
            print("😐 Other → Release All Buttons")

        previous_label = current_label

    cv2.imshow(window_name, frame)
    # 'l' quits (matches the startup hint printed above).
    if cv2.waitKey(1) & 0xFF == ord('l'):
        break

# === Cleanup ===
# Ensure the left button is not left held down on exit.
pyautogui.mouseUp(button='left')
cap.release()
cv2.destroyAllWindows()
