from flask import Flask, Response, render_template, request
from flask_cors import CORS
from flask_socketio import SocketIO, emit
import cv2
import os
import time
import numpy as np
import face_recognition
import base64
from io import BytesIO
from threading import Thread, Event
import queue
import sys

app = Flask(__name__)
CORS(app)
# NOTE(review): origin is hard-coded to one LAN address — move to config if
# the host/IP can change.
socketio = SocketIO(app, cors_allowed_origins=["http://192.168.64.144:5000"])

# Open the default camera (device index 0).
cap = cv2.VideoCapture(0)

# Request capture resolution and frame rate (the driver may ignore these).
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 720)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 540)
cap.set(cv2.CAP_PROP_FPS, 30)

# Haar cascade for fast face *detection*; the slower face_recognition
# library is used later only for encoding/deduplication.
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

# Directory where cropped face images are saved (created if missing).
output_dir = 'detected_faces'
if not os.path.exists(output_dir):
    os.makedirs(output_dir)

# Encodings of faces already saved this session; grows unbounded for the
# lifetime of the process (no persistence, no eviction).
known_face_encodings = []

# Socket.IO session ids of currently connected clients.
connected_clients = set()

# Thread-safe hand-off between the capture thread and the web-facing side.
frame_queue = queue.Queue(maxsize=5)  # bounded: drop frames rather than grow
notification_queue = queue.Queue()    # new-face notifications for clients
stop_event = Event()                  # cooperative shutdown flag

def generate_frames():
    """Capture loop, run in a background thread.

    Reads frames from the global camera, detects faces with the Haar
    cascade, saves and announces faces not seen before this session, and
    pushes JPEG-encoded annotated frames onto ``frame_queue`` for the
    MJPEG endpoint.  Runs until ``stop_event`` is set or the camera
    stops delivering frames; always sets ``stop_event`` on exit.
    """
    frame_count = 0
    last_notification_time = time.time()
    try:
        while not stop_event.is_set():
            ret, frame = cap.read()
            if not ret:
                # Camera failure / end of stream: leave the loop; the
                # finally clause signals the other workers to stop.
                break

            frame_count += 1
            if frame_count % 5 != 0:  # process only every 5th frame;
                continue              # skipped frames are never streamed

            # Downscale by half and equalize the histogram: faster
            # detection and better contrast for the Haar cascade.
            small_frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)
            gray = cv2.cvtColor(small_frame, cv2.COLOR_BGR2GRAY)
            gray_eq = cv2.equalizeHist(gray)

            # Detect faces on the half-size grayscale image.
            faces = face_cascade.detectMultiScale(gray_eq, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))

            # Draw boxes on the full-size frame and process each face.
            for (x, y, w, h) in faces:
                # Map coordinates back from the half-size detection image.
                x *= 2
                y *= 2
                w *= 2
                h *= 2
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

                # NOTE: this is a view into `frame`, not a copy.
                face_image = frame[y:y+h, x:x+w]
                if face_image.size != 0:
                    # face_recognition expects RGB; OpenCV frames are BGR.
                    rgb_face = cv2.cvtColor(face_image, cv2.COLOR_BGR2RGB)
                    face_encodings = face_recognition.face_encodings(rgb_face)

                    if len(face_encodings) > 0:
                        face_encoding = face_encodings[0]
                        matches = face_recognition.compare_faces(known_face_encodings, face_encoding)

                        # Unseen face this session: save to disk and notify.
                        if not any(matches):
                            timestamp = int(time.time() * 1000)
                            filename = f"{output_dir}/face_{timestamp}.jpg"
                            
                            # Stamp the capture time onto the face crop.
                            # NOTE(review): since face_image is a view, the
                            # text also appears in the streamed frame —
                            # confirm this is intended.
                            current_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
                            cv2.putText(face_image, current_time, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
                            
                            cv2.imwrite(filename, face_image)
                            known_face_encodings.append(face_encoding)

                            if time.time() - last_notification_time > 5:  # rate-limit: at most one notification per 5 s
                                with open(filename, "rb") as image_file:
                                    encoded_string = base64.b64encode(image_file.read()).decode('utf-8')
                                notification_queue.put({
                                    'message': '检测到新的人脸',
                                    'image': f'data:image/jpeg;base64,{encoded_string}'
                                })
                                last_notification_time = time.time()

            # Queue the annotated frame for the MJPEG stream; if the
            # consumer is behind (queue full) the frame is dropped
            # rather than blocking the capture loop.
            if not frame_queue.full():
                ret, buffer = cv2.imencode('.jpg', frame, [int(cv2.IMWRITE_JPEG_QUALITY), 50])
                frame_queue.put(buffer.tobytes())
    except KeyboardInterrupt:
        print("Interrupted by user, stopping frame generation...")
    finally:
        stop_event.set()

def video_feed():
    """Yield MJPEG multipart chunks built from JPEG bytes in ``frame_queue``.

    Fix: the previous ``frame_queue.get()`` blocked indefinitely, so the
    generator could never observe ``stop_event`` once the queue drained
    (e.g. after the capture thread died).  A timed get lets the loop
    re-check the flag every second.

    NOTE(review): the ``finally`` clause sets ``stop_event`` when this
    generator is closed (client disconnect), which stops the whole
    capture pipeline — preserved as-is, but confirm a single-client
    design is intended.
    """
    try:
        while not stop_event.is_set():
            try:
                frame = frame_queue.get(timeout=1)
            except queue.Empty:
                continue  # no frame yet; re-check stop_event
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
    except KeyboardInterrupt:
        print("Interrupted by user, stopping video feed...")
    finally:
        stop_event.set()

@app.route('/video_feed')
def video_feed_route():
    """HTTP endpoint exposing the camera as a multipart MJPEG stream."""
    mjpeg_stream = video_feed()
    return Response(mjpeg_stream, mimetype='multipart/x-mixed-replace; boundary=frame')

@app.route('/')
def index():
    """Serve the client page that embeds the video stream."""
    page = render_template('index.html')
    return page

@socketio.on('connect')
def handle_connect(auth):
    """Register a newly connected Socket.IO client and greet it."""
    client_id = request.sid
    connected_clients.add(client_id)
    print(f"Client connected: {client_id}")
    emit('message', {'data': 'Connected to server'})

@socketio.on('disconnect')
def handle_disconnect():
    """Drop a disconnecting client from the connected set."""
    client_id = request.sid
    connected_clients.discard(client_id)
    print(f"Client disconnected: {client_id}")

def send_notifications():
    """Forward queued new-face notifications to all Socket.IO clients.

    Runs in a background thread until ``stop_event`` is set.

    Fix: the previous ``while: if not empty()`` pattern was a busy-wait
    that burned a full CPU core while the queue was idle (and raced the
    empty() check against get()).  A blocking get with a timeout sleeps
    while idle yet still notices shutdown within half a second.
    """
    while not stop_event.is_set():
        try:
            notification = notification_queue.get(timeout=0.5)
        except queue.Empty:
            continue  # nothing queued; re-check stop_event
        socketio.emit('notification', notification)

if __name__ == '__main__':
    # Create the worker threads before the try block so the finally
    # clause can never hit a NameError if startup itself raises.
    frame_thread = Thread(target=generate_frames, daemon=True)
    notification_thread = Thread(target=send_notifications, daemon=True)
    try:
        frame_thread.start()
        notification_thread.start()

        # Blocks until the server is shut down (Ctrl+C).
        socketio.run(app, host='0.0.0.0', port=5000)
    except KeyboardInterrupt:
        print("\n程序被用户中断")
    finally:
        stop_event.set()
        # Bounded joins: a worker stuck in a blocking call (e.g.
        # cap.read()) must not hang shutdown forever; both threads are
        # daemons, so the process can still exit.
        frame_thread.join(timeout=2.0)
        notification_thread.join(timeout=2.0)
        cap.release()
        print("摄像头资源已释放")
        print("服务器已停止")