
# %%
import cv2
from PIL import Image
import torch
from torchvision import transforms, models
import pyttsx3  # 语音合成库（离线）
import threading  # 异步播放语音（避免画面卡顿）
import time
import csv
from datetime import datetime

# Text-to-speech configuration (pyttsx3 works offline)
engine = pyttsx3.init()
engine.setProperty('rate', 180)    # speech rate (words per minute)
engine.setProperty('volume', 1.0)  # full volume

# Model configuration
# NOTE: index order must match the label order used when the checkpoint was trained.
class_names = ['Angry', 'Other', 'Sad', 'happy']
model_path = "cat_emotion_best_model.pth"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Logging configuration
LOG_FILE = "emotion_log.csv"  # CSV log file name (recreated at startup)
log_fields = ['timestamp', 'emotion', 'confidence']  # CSV column names
# Load the trained classifier and move it to the selected device.
# map_location is required so a checkpoint saved on a GPU machine still
# loads on a CPU-only host (otherwise torch.load raises when CUDA is absent).
model = models.mobilenet_v3_small()
model.classifier[-1] = torch.nn.Linear(1024, len(class_names))
model.load_state_dict(torch.load(model_path, map_location=device))
model.to(device).eval()

# Preprocessing: standard ImageNet resize/center-crop/normalize pipeline.
transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

# Initialize the log file with a header row (truncates any previous run's log).
with open(LOG_FILE, 'w', newline='', encoding='utf-8') as csvfile:
    writer = csv.DictWriter(csvfile, fieldnames=log_fields)
    writer.writeheader()

# Webcam capture and session state.
cap = cv2.VideoCapture(0)
last_emotion = None        # most recently logged/announced emotion (None until the first frame);
                           # the original assigned this twice — the duplicate is removed
start_time = time.time()   # wall-clock start of the session
emotion_start_time = None  # when the current emotion was first observed

total_runtime = 300  # total session length in seconds; adjust as needed

def speak_emotion(emotion):
    """Speak *emotion* via TTS, but only when it differs from the last one spoken.

    Intended to run on a background thread; updates the module-level
    ``last_emotion`` after announcing.
    """
    global last_emotion  # we assign the module-level name below
    if emotion == last_emotion:
        return
    engine.say(f"mood:{emotion}")
    engine.runAndWait()
    last_emotion = emotion
def log_emotion(emotion, confidence):
    """Append one (timestamp, emotion, confidence) row to the CSV log file."""
    # Millisecond-precision timestamp: strip the last 3 digits of %f (microseconds).
    stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
    row = {
        'timestamp': stamp,
        'emotion': emotion,
        'confidence': f"{confidence:.2f}",
    }
    with open(LOG_FILE, 'a', newline='') as fh:
        csv.DictWriter(fh, fieldnames=log_fields).writerow(row)

while True:
    # Enforce the configured session length. The original loop printed a
    # "remaining time" countdown but never actually stopped, so the counter
    # simply went negative forever.
    if time.time() - start_time >= total_runtime:
        break

    ret, frame = cap.read()
    if not ret:
        break

    # Preprocess: OpenCV BGR frame -> RGB -> PIL image -> normalized 1-image batch.
    image_pil = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    input_tensor = transform(image_pil).unsqueeze(0).to(device)

    # Inference without gradient tracking.
    with torch.no_grad():
        outputs = model(input_tensor)
        probs = torch.nn.functional.softmax(outputs, dim=1)
        confidence, pred_idx = torch.max(probs, 1)
        emotion = class_names[pred_idx.item()]
        confidence = confidence.item()  # tensor -> plain Python float

    # Overlay the prediction on the frame and save a snapshot to disk.
    cv2.putText(frame, f"Emotion: {emotion} ({confidence:.2f})",
                (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
    cv2.imwrite("detected_frame.jpg", frame)
    print("帧已保存到 detected_frame.jpg")

    # Log and announce only when the emotion changes.
    if emotion != last_emotion:
        # Report how long the previous emotion lasted (skip on the first detection).
        if last_emotion is not None:
            duration = time.time() - emotion_start_time
            print(f"{last_emotion} 持续了 {duration:.2f} 秒")

        log_emotion(emotion, confidence)
        # Speak asynchronously so TTS does not block frame capture.
        threading.Thread(target=speak_emotion, args=(emotion,), daemon=True).start()

        last_emotion = emotion
        emotion_start_time = time.time()

    # Show the countdown; clamp at zero so the display can never go negative.
    remaining_time = max(0.0, total_runtime - (time.time() - start_time))
    minutes, seconds = divmod(int(remaining_time), 60)
    print(f"\r剩余时间: {minutes:02d}:{seconds:02d} | 当前情绪: {emotion} ({confidence:.2f})", end='')

    # Brief sleep to limit CPU usage.
    time.sleep(0.1)
# Release the camera handle.
cap.release()

# Report the duration of the final emotion (the loop only prints durations
# when the emotion changes, so the last one would otherwise go unreported).
if last_emotion is not None:
    duration = time.time() - emotion_start_time
    print(f"最后一个情绪 {last_emotion} 持续了 {duration:.2f} 秒")

print(f"日志已保存到 {LOG_FILE}")

