import cv2
import pickle
import face_recognition
import numpy as np
import time
import threading
import queue
import ffmpeg
import requests  # 新增：导入 requests 库

# --- 1. Configuration ---
FACE_ENCODINGS_FILE = 'models/face_encodings.pickle'
FACE_RECOGNITION_TOLERANCE = 0.6  # face_distance threshold; lower = stricter match
VIDEO_SOURCE = "rtmp://localhost:1935/live/agv"

# HTTP POST request configuration
HTTP_POST_URL = "http://localhost:8080/agv/user"  # target URL for recognition reports
HTTP_REQUEST_INTERVAL_SECONDS = 5  # throttle: allow at most one request every 5 seconds

# --- 2. Load known-face data ---
# NOTE(security): pickle.load can execute arbitrary code from the file; only
# load encodings files produced by this project's own enrollment tooling.
print("[INFO] 正在加载已知人脸编码...")
try:
    with open(FACE_ENCODINGS_FILE, "rb") as f:
        data = pickle.load(f)
    known_face_encodings = data["encodings"]
    known_face_names = data["names"]
    print(f"[INFO] {len(known_face_names)} 个已知人脸加载成功。")
except Exception as e:
    print(f"[ERROR] 加载人脸编码失败: {e}")
    # Fix: the previous bare exit() came from the site module and exited with
    # status 0; raise SystemExit(1) so a load failure reports a non-zero
    # status that scripts and supervisors can detect.
    raise SystemExit(1)

# --- Shared state: queues and shutdown flag ---
frame_queue = queue.Queue(maxsize=10)        # raw frames from the capture thread
results_queue = queue.Queue(maxsize=1)       # holds only the latest recognition result
http_request_queue = queue.Queue(maxsize=5)  # label lists pending HTTP POST delivery
stop_event = threading.Event()               # set by any thread to stop all threads

# Expected stream geometry; the capture thread reshapes raw bytes using these.
STREAM_WIDTH = 1280
STREAM_HEIGHT = 720

# --- 线程函数 ---
def video_capture_thread_ffmpeg_py(video_source, q, stop_event):
    """Decode `video_source` with ffmpeg-python and push writable BGR frames into `q`.

    Runs until the stream ends, an error occurs, or `stop_event` is set;
    always sets `stop_event` on exit so sibling threads shut down too.
    Frames are dropped (not queued) when `q` stays full for 1 second.
    """
    print("[INFO] FFmpeg-Python 视频捕获线程启动...")
    process = None
    try:
        process = (
            ffmpeg
            .input(video_source, r=30)
            .output('pipe:', format='rawvideo', pix_fmt='bgr24')
            .run_async(pipe_stdout=True)
        )

        # One raw bgr24 frame: width * height * 3 bytes.
        bytes_per_frame = STREAM_WIDTH * STREAM_HEIGHT * 3

        while not stop_event.is_set():
            in_bytes = process.stdout.read(bytes_per_frame)
            if not in_bytes or len(in_bytes) < bytes_per_frame:
                # Empty read = stream closed; a short read can only happen at
                # EOF and a partial frame cannot be reshaped, so treat both as
                # end-of-stream instead of crashing on reshape.
                print("[INFO] FFmpeg-Python 视频捕获线程: 视频流结束或读取失败。")
                break

            frame = np.frombuffer(in_bytes, np.uint8).reshape([STREAM_HEIGHT, STREAM_WIDTH, 3])
            # frombuffer returns a read-only view over in_bytes; copy so
            # downstream threads may draw on the frame.
            frame_writable = frame.copy()

            try:
                q.put(frame_writable, timeout=1)
            except queue.Full:
                pass  # consumer is behind; drop this frame

    except ffmpeg.Error as e:
        # Fix: e.stderr is None unless run_async was given pipe_stderr=True,
        # so calling .decode() unconditionally crashed the error handler.
        stderr_text = e.stderr.decode() if e.stderr else str(e)
        print(f"[ERROR] FFmpeg-Python 捕获错误: {stderr_text}")
        print(f"[ERROR] 无法打开视频源: {video_source}")
    except Exception as e:
        print(f"[ERROR] 视频捕获线程发生错误: {e}")
    finally:
        # process may be None if run_async itself failed.
        if process is not None and process.poll() is None:
            process.terminate()
            process.wait()
        print("[INFO] FFmpeg-Python 视频捕获线程关闭...")
        stop_event.set()


def face_recognition_thread(frame_q, results_q, stop_event, known_encodings, known_names):
    """Recognize faces on the newest available frame and publish the result.

    Continuously drains `frame_q` to obtain the most recent frame, runs
    detection/recognition on a quarter-scale copy for speed, maps the boxes
    back to full resolution, and replaces whatever is in `results_q` with
    the latest (locations, names) pair.  Sets `stop_event` on exit.
    """
    print("[INFO] 人脸识别线程启动...")

    scale = 0.25  # detect on a quarter-size image; boxes are rescaled below

    while not stop_event.is_set():
        try:
            # Keep only the freshest frame; stale frames are discarded.
            frame = None
            while not frame_q.empty():
                frame = frame_q.get_nowait()

            if frame is None:
                time.sleep(0.001)
                continue

            if not frame.flags['WRITEABLE']:
                frame = frame.copy()

            shrunk = cv2.resize(frame, (0, 0), fx=scale, fy=scale)
            rgb = cv2.cvtColor(shrunk, cv2.COLOR_BGR2RGB)

            locations = face_recognition.face_locations(rgb, model="hog")
            encodings = face_recognition.face_encodings(rgb, locations)

            names = []
            boxes = []
            for loc, enc in zip(locations, encodings):
                hits = face_recognition.compare_faces(known_encodings, enc,
                                                      tolerance=FACE_RECOGNITION_TOLERANCE)
                label = "Unknown"
                if True in hits:
                    dists = face_recognition.face_distance(known_encodings, enc)
                    best = np.argmin(dists)
                    if hits[best]:
                        label = known_names[best]

                # Map the detection box back to full-frame coordinates.
                top, right, bottom, left = (int(v / scale) for v in loc)

                names.append(label)
                boxes.append((top, right, bottom, left))

            # Drop any stale result so consumers only ever see the latest.
            while not results_q.empty():
                results_q.get_nowait()
            results_q.put((boxes, names), timeout=1)

        except queue.Empty:
            time.sleep(0.001)
        except queue.Full:
            pass
        except Exception as e:
            print(f"[ERROR] 人脸识别线程发生错误: {e}")
            break

    print("[INFO] 人脸识别线程关闭...")
    stop_event.set()


def http_post_sender_thread(request_q, stop_event, target_url, min_interval):
    """Drain recognized-label payloads from `request_q` and POST them as JSON.

    Sends `{"users": <labels>}` to `target_url`, at most one request per
    `min_interval` seconds.  Runs until `stop_event` is set; always sets
    `stop_event` on exit so sibling threads shut down too.
    """
    print("[INFO] HTTP POST 发送线程启动...")
    # Fix: use a monotonic clock for rate limiting — time.time() is wall-clock
    # and can jump on NTP adjustments, breaking the interval check.
    last_request_time = 0.0

    while not stop_event.is_set():
        # Enforce the minimum spacing between requests.
        if (time.monotonic() - last_request_time) < min_interval:
            time.sleep(0.1)
            continue

        try:
            # Short timeout keeps the loop responsive to stop_event.
            data_to_send = request_q.get(timeout=0.5)

            # The server expects a JSON object with a "users" list.
            payload = {"users": data_to_send}

            print(f"[INFO] 正在发送 HTTP POST 请求到 {target_url}，数据: {payload}")

            response = requests.post(target_url, json=payload, timeout=5)

            if response.status_code == 200:
                print(f"[INFO] 请求成功！响应: {response.text}")
            else:
                print(f"[WARNING] 请求失败！状态码: {response.status_code}, 响应: {response.text}")

            # Fix: stamp AFTER the request completes.  The old code stored the
            # loop-start timestamp, so the blocking get() (0.5 s) and the
            # request itself (up to 5 s) ate into the configured interval and
            # requests could go out much more often than min_interval.
            last_request_time = time.monotonic()

        except queue.Empty:
            time.sleep(0.1)  # nothing to send yet
        except requests.exceptions.Timeout:
            print(f"[ERROR] HTTP POST 请求超时: {target_url}")
        except requests.exceptions.ConnectionError as e:
            print(f"[ERROR] HTTP POST 连接错误到 {target_url}: {e}")
        except Exception as e:
            print(f"[ERROR] HTTP POST 发送线程发生未知错误: {e}")

    print("[INFO] HTTP POST 发送线程关闭...")
    stop_event.set()


# --- Main program (display and control) ---
def main() -> None:
    """Start the capture/recognition/HTTP worker threads, then run the display loop.

    The main thread drains raw frames, overlays the most recent recognition
    result, shows the video window, and enqueues label *changes* for the HTTP
    sender.  Press 'q' in the window (or let any worker set stop_event) to
    shut everything down; all threads are joined before exit.

    NOTE(review): both this loop and face_recognition_thread drain frame_queue
    with get_nowait, so the two consumers race for frames — presumably
    acceptable since each only wants the newest frame, but confirm intended.
    """
    global STREAM_WIDTH, STREAM_HEIGHT

    # Producer: decodes the RTMP stream into frame_queue.
    capture_thread = threading.Thread(target=video_capture_thread_ffmpeg_py,
                                      args=(VIDEO_SOURCE, frame_queue, stop_event))
    capture_thread.start()

    # Consumer/producer: frames in, (locations, names) out via results_queue.
    recognition_thread = threading.Thread(target=face_recognition_thread,
                                          args=(frame_queue, results_queue, stop_event,
                                                known_face_encodings, known_face_names))
    recognition_thread.start()

    # Consumer: posts label lists from http_request_queue to the backend.
    http_sender_thread = threading.Thread(target=http_post_sender_thread,
                                          args=(http_request_queue, stop_event,
                                                HTTP_POST_URL, HTTP_REQUEST_INTERVAL_SECONDS))
    http_sender_thread.start()

    print("[INFO] 主显示线程启动，按'q'键退出...")

    # Most recent recognition result; reused while waiting for a newer one.
    last_face_locations = []
    last_face_names = []

    last_sent_labels = set()  # labels last handed to the HTTP queue, for deduping

    start_time = time.time()
    fps_display_interval = 30  # report FPS every N displayed frames
    processed_frames_for_fps = 0

    while not stop_event.is_set():
        try:
            # Drain the frame queue so we always display the newest frame.
            display_frame = None
            while not frame_queue.empty():
                display_frame = frame_queue.get_nowait()

            if display_frame is None:
                time.sleep(0.01)
                continue

            # Lazily discover the stream resolution if it was not preconfigured
            # (dead with the current 1280x720 defaults, but kept as a safeguard).
            if STREAM_WIDTH == 0:
                STREAM_WIDTH = display_frame.shape[1]
                STREAM_HEIGHT = display_frame.shape[0]
                print(f"[INFO] 原始视频源分辨率: {STREAM_WIDTH}x{STREAM_HEIGHT}")

            try:
                current_locations, current_names = results_queue.get_nowait()
                last_face_locations = current_locations
                last_face_names = current_names

                # --- Forward recognized labels to the HTTP request queue ---
                if last_face_names:
                    print(f"[INFO] 识别到的人脸: {', '.join(last_face_names)}")

                    # Compare as a set against what was last enqueued.
                    current_labels_set = set(last_face_names)

                    # Only enqueue when the label set changed (someone appeared
                    # or left), so repeated frames of the same faces don't
                    # flood the sender.
                    if current_labels_set != last_sent_labels:
                        try:
                            # The HTTP thread converts this list to JSON.
                            http_request_queue.put(last_face_names, timeout=0.1)
                            last_sent_labels = current_labels_set  # remember what was sent
                        except queue.Full:
                            print("[WARNING] HTTP 请求队列已满，跳过本次请求。")
                else:
                    # No faces this frame: if people were previously reported,
                    # send an empty list to signal "nobody present".
                    if last_sent_labels:
                        try:
                            http_request_queue.put([], timeout=0.1)  # empty list = nobody
                            last_sent_labels = set()  # reset to "nothing reported"
                        except queue.Full:
                            print("[WARNING] HTTP 请求队列已满，无法发送无人信息。")


            except queue.Empty:
                pass  # no new recognition result; keep drawing the old one

            if not display_frame.flags['WRITEABLE']:
                display_frame = display_frame.copy()

            # Draw a box and name label for each remembered face.
            for (top, right, bottom, left), name in zip(last_face_locations, last_face_names):
                cv2.rectangle(display_frame, (left, top), (right, bottom), (0, 0, 255), 2)
                text_y = max(top - 10, 20)  # keep the label on-screen near the top edge
                cv2.putText(display_frame, name, (left, text_y), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)

            # Optional display downscaling (disabled while the factor is 1.0).
            display_scale_factor = 1.0
            if display_scale_factor != 1.0:
                display_frame = cv2.resize(display_frame,
                                           (int(STREAM_WIDTH * display_scale_factor),
                                            int(STREAM_HEIGHT * display_scale_factor)),
                                           interpolation=cv2.INTER_AREA)

            cv2.imshow("Face Recognition Display (Multi-threaded)", display_frame)

            # Periodic FPS report for the display loop.
            processed_frames_for_fps += 1
            if processed_frames_for_fps % fps_display_interval == 0:
                end_time = time.time()
                fps = processed_frames_for_fps / (end_time - start_time)
                print(f"[INFO] 主显示线程帧率: {fps:.2f} FPS")
                processed_frames_for_fps = 0
                start_time = time.time()

            # waitKey also pumps the GUI event loop; 'q' quits.
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        except Exception as e:
            print(f"[ERROR] 主显示线程发生错误: {e}")
            break

    print("[INFO] 主显示线程关闭...")
    stop_event.set()

    # Wait for every worker thread to finish before tearing down the UI.
    capture_thread.join()
    recognition_thread.join()
    http_sender_thread.join()  # wait for the HTTP sender as well

    cv2.destroyAllWindows()
    print("[INFO] 所有线程已关闭，程序退出。")


if __name__ == '__main__':
    # Entry point: only run the pipeline when executed as a script, not on import.
    main()