from stereo.POSE_process import PoseEstimationWorker, BODY_PARTS
from multiprocessing import Process, Event, Queue
from stereo.SGBM_process import StereoMatcherWorker
import numpy as np
import time
import cv2
import traceback
import atexit

from stereo.utils.Filter.Filter_disp import fill_disp_near_keypoints
from stereo.utils.stereoRectify_process import stereo_rectify
from stereo.utils.Filter.OneEuroFilter_python import OneEuroFilter, EMAFilter
from stereo.utils.config import Config
EMAFilter_ins = EMAFilter()
sender_flag = True

class StereoProcessor:
    """Owns the pose-estimation and stereo-matching worker processes plus the
    task/result queues used to feed them frames and collect their outputs.
    """

    def __init__(self, pose_task_queue, pose_result_queue, pose_event,
                       depth_task_queue, depth_result_queue, depth_event):
        # Bug fix: process_frame()/get_frame() read these queues as instance
        # attributes, but the original __init__ never stored them, so both
        # methods raised AttributeError. Keep references here.
        self.pose_task_queue = pose_task_queue
        self.pose_result_queue = pose_result_queue
        self.depth_task_queue = depth_task_queue
        self.depth_result_queue = depth_result_queue

        self.pose_worker = PoseEstimationWorker(pose_task_queue, pose_result_queue, pose_event)
        self.pose_worker.start()

        self.depth_worker = StereoMatcherWorker(depth_task_queue, depth_result_queue, depth_event)
        self.depth_worker.start()

    # One process keeps pushing images into the depth and pose queues for processing.
    def process_frame(self, left_image, right_image):
        """Enqueue one stereo pair: the (left, right) pair for depth, the
        left image alone for pose estimation."""
        self.depth_task_queue.put((left_image, right_image))
        self.pose_task_queue.put(left_image)

    # One process keeps fetching same-frame data from the depth and pose queues.
    def get_frame(self):
        """Block until both workers have produced a result, then return
        (depth_result, pose_result). Polls every 10 ms to avoid busy-spinning."""
        while True:
            if not self.depth_result_queue.empty() and not self.pose_result_queue.empty():
                depth_image, pose_image = self.depth_result_queue.get(), self.pose_result_queue.get()
                return depth_image, pose_image
            else:
                time.sleep(0.01)
                continue
def min_depth(depth: np.ndarray, x_point: int, y_point: int, val_range=5):
    if x_point < 0:
        x_point = 0
    if y_point < 0:
        y_point = 0
    x_max = min((x_point + val_range), 640 - 1)
    x_min = max((x_point - val_range), 0)
    y_max = min((y_point + val_range), 480 - 1)
    y_min = max((y_point - val_range), 0)
    dep_img = depth[y_min:y_max, x_min:x_max]
    valid = dep_img > 0
    if np.any(valid):
        result = np.min(dep_img[valid])
    else:
        result = 0  # 或你想设定的默认值，例如 None 或 np.nan
    return result
def depth_points_2_world_points_pixel(pts_2d, depth):
    """Convert 2D pixel keypoints plus a depth map into 3D points in meters.

    Args:
        pts_2d: sequence of (x, y) pixel coordinates, one per keypoint.
        depth: 2D depth image in millimeters; 0 marks invalid pixels.

    Returns:
        (N, 3) float32 array of points laid out as (Z, -X, -Y), in meters.
    """
    depth_points_with_depth, pts_length = [], len(pts_2d)
    for i in range(pts_length):
        x, y = pts_2d[i]
        depth_value = depth[int(y), int(x)]

        if depth_value > 0:
            depth_points_with_depth.append([y, x, depth_value])
        else:
            # Invalid depth at the exact pixel: fall back to the minimum
            # valid depth in a small neighborhood.
            min_depth_value = min_depth(depth, int(x), int(y))
            depth_points_with_depth.append([y, x, min_depth_value])
    depth_points_with_depth = np.array(depth_points_with_depth)

    # Index 19 is used as the hip reference; other joints are assumed to lie
    # within 500 mm of it. TODO confirm index semantics against BODY_PARTS.
    hip_index = 19
    hip_depth = depth_points_with_depth[hip_index][2]

    for i, point in enumerate(depth_points_with_depth):
        if i == hip_index:
            continue  # skip comparing the reference against itself
        current_depth = point[2]
        if abs(current_depth - hip_depth) <= 500:
            continue  # within tolerance, no correction needed
        # Outlier: re-estimate the depth with a wider neighborhood search.
        # Cast to int: min_depth slices the array with these values, and
        # float keypoint coordinates would break numpy indexing.
        x_i, y_i = pts_2d[i]
        new_depth = min_depth(depth, int(x_i), int(y_i), val_range=10)
        # Bug fix: the original re-checked `current_depth` here, which is
        # always out of tolerance at this point, so the re-estimate was never
        # used. Accept the re-estimate only when it is valid and itself
        # within tolerance of the hip; otherwise clamp to the hip depth.
        depth_points_with_depth[i][2] = new_depth if (new_depth > 0 and abs(new_depth - hip_depth) <= 500) else hip_depth

    # Head point (17): if it disagrees with point 19 by more than 500 mm,
    # re-estimate it; if still invalid, fall back to point 0's depth.
    # NOTE(review): the comparison uses index 19 but the fallback uses
    # index 0 — confirm which reference joint is intended.
    if abs(depth_points_with_depth[17][2] - depth_points_with_depth[19][2]) > 500:
        depth_points_with_depth[17][2] = min_depth(depth, int(pts_2d[17][0]), int(pts_2d[17][1]), 10)
        if depth_points_with_depth[17][2] == 0:
            depth_points_with_depth[17][2] = depth_points_with_depth[0][2]
    # Right ear (4) vs its neighbor (3): same outlier correction scheme.
    if abs(depth_points_with_depth[4][2] - depth_points_with_depth[3][2]) > 500:
        depth_points_with_depth[4][2] = min_depth(depth, int(pts_2d[4][0]), int(pts_2d[4][1]), 10)
        if depth_points_with_depth[4][2] == 0:
            depth_points_with_depth[4][2] = depth_points_with_depth[3][2]

    # Back-project each pixel through the pinhole model, then remap axes to
    # (Z, -X, -Y) and convert millimeters to meters.
    keypoints3d = np.zeros((pts_length, 3), dtype=np.float32)
    for i, (y, x, depth_value) in enumerate(depth_points_with_depth):
        X = (x - stereo_rectify.cx) * depth_value / stereo_rectify.fx
        Y = (y - stereo_rectify.cy) * depth_value / stereo_rectify.fy
        Z = depth_value
        world_point = np.array([Z, -X, -Y]) * 0.001  # mm -> m
        keypoints3d[i] = world_point
    return keypoints3d
    
def pts3d_to_dict(pts3d):
    """Map each row of a 3D-keypoint array to its body-part name.

    Args:
        pts3d: (N, 3) array whose row order matches BODY_PARTS key order.

    Returns:
        dict of body-part name -> 3D point (one entry per row of pts3d).
    """
    # Hoist the key list out of the loop: the original rebuilt
    # list(BODY_PARTS.keys()) on every iteration (accidental O(n^2)).
    part_names = list(BODY_PARTS.keys())
    return {part_names[i]: pts3d[i] for i in range(pts3d.shape[0])}

def process_3dkpt_depth(kps_dict_2d, kps_2d_list, disp):
    """Turn 2D keypoints + a disparity map into filtered 3D keypoints.

    Returns the (possibly keypoint-filled) disparity alongside the 2D inputs,
    the 3D points as both an array and a name-keyed dict.
    """
    # Densify the disparity around the detected keypoints, then convert
    # disparity to metric depth using the rectification intrinsics.
    disp = fill_disp_near_keypoints(disp, kps_2d_list)
    depth_map = stereo_rectify.disparity_to_depth(disp, stereo_rectify.fx, stereo_rectify.baseline)

    # Lift the 2D keypoints into 3D and smooth them with the EMA filter.
    raw_pts_3d = depth_points_2_world_points_pixel(kps_2d_list, depth_map)
    smoothed_pts_3d = EMAFilter_ins(raw_pts_3d)
    smoothed_dict = pts3d_to_dict(smoothed_pts_3d)

    return kps_dict_2d, kps_2d_list, smoothed_dict, smoothed_pts_3d, disp

# Producer process: continuously feeds camera frames into the pose and depth task queues.
def put_frame(pose_queue, depth_queue):
    """Producer loop: grab stereo frames from the camera, push them into the
    pose and depth task queues, and adapt the capture FPS to queue backlog.

    Args:
        pose_queue: task queue for the pose worker; receives (left_image, job_id).
        depth_queue: task queue for the depth worker; receives
            (gray_left, gray_right, left_image, job_id).

    Runs forever; any exception is printed and ends the loop.
    """
    try:
        from stereo.utils.stereo_camera_threaded import StereoCameraCapture_threaded_ins
        job_id = 0
        t1 = time.time()
        frames = 0

        # Dynamic-FPS controller parameters.
        target_fps = Config.fps
        min_fps = Config.fps - 10
        max_fps = Config.fps
        fps_adjust_step = 1.0
        fps_adjust_interval = 2.0  # adjust at most once every 2 s
        last_fps_adjust_time = time.time()
        frame_interval = 1.0 / target_fps

        qsize_increase_threshold = 8
        qsize_decrease_threshold = 3

        while True:
            right_image, left_image , _ = StereoCameraCapture_threaded_ins.get_stereo_frames()
            # left_image, right_image , _ = cv2.imread("data/im0.png"), cv2.imread("data/im1.png"), None
            # Bug fix: validate the frames BEFORE resizing — the original
            # called cv2.resize first, which crashes when a frame is None.
            if left_image is None or right_image is None:
                continue
            left_image = cv2.resize(left_image, (640, 480))
            right_image = cv2.resize(right_image, (640, 480))
            # Pose and depth must consume the SAME left frame; both payloads
            # carry the shared job_id so consumers can correlate them.
            pose_queue.put((left_image, job_id))
            gray_left_image = cv2.cvtColor(left_image, cv2.COLOR_BGR2GRAY)
            gray_right_image = cv2.cvtColor(right_image, cv2.COLOR_BGR2GRAY)
            depth_queue.put((gray_left_image, gray_right_image, left_image, job_id))
            frames += 1
            job_id += 1

            now = time.time()

            # FPS report every ~2 seconds.
            if now - t1 >= 2.0:
                # print(f"[输入] FPS: {frames//2} 帧/秒, 队列大小: pose={pose_queue.qsize()}, depth={depth_queue.qsize()}, cam={StereoCameraCapture_threaded_ins.get_queue_size()}, target_fps={target_fps}")
                print(f"[输入] FPS: {frames//2} 帧/秒, 队列大小: pose={pose_queue.qsize()}, depth={depth_queue.qsize()}, target_fps={target_fps}")
                frames = 0
                t1 = now

                # ---------- dynamic FPS adjustment ----------
                # Back off when either task queue backs up; speed up again
                # when both drain below the low-water mark.
                if now - last_fps_adjust_time > fps_adjust_interval:
                    queue_size = max(pose_queue.qsize(), depth_queue.qsize())
                    if queue_size > qsize_increase_threshold and target_fps > min_fps:
                        target_fps = max(min_fps, target_fps - fps_adjust_step)
                        frame_interval = 1.0 / target_fps
                        print(f"[调节] 队列较满({queue_size}),降低FPS → {target_fps}")
                    elif queue_size < qsize_decrease_threshold and target_fps < max_fps:
                        target_fps = min(max_fps, target_fps + fps_adjust_step)
                        frame_interval = 1.0 / target_fps
                        print(f"[调节] 队列较空({queue_size}),提升FPS → {target_fps}")
                    last_fps_adjust_time = now
                # --------------------------------------------

                # Pacing is enforced by the capture thread, not by sleeping here.
                StereoCameraCapture_threaded_ins.capture_interval = frame_interval
    except Exception:
        traceback.print_exc()
def get_frame(pose_result_queue, depth_result_queue):
    """Main-process consumer loop: pairs one pose result with one depth
    result, computes filtered 3D keypoints, optionally streams everything
    over TCP, and prints output FPS statistics about once per second.

    Runs forever; never returns.
    """
    start_time = time.time()
    last_frame_time = time.time()
    frame_count = 0
    total_time = 0.0
    # Import and construct the TCP sender lazily so worker processes that
    # import this module don't open a socket.
    if sender_flag:
        from stereo.utils.stereo_tcp_server import StereoDataSender
        # from stereo.utils.stereo_tcp_server import StereoImagePublisher_ins
        sender = StereoDataSender()
    while True:
        if not pose_result_queue.empty() and not depth_result_queue.empty():
            # NOTE(review): results are popped independently from each queue;
            # job_id_pose and job_id_depth are printed but never compared, so
            # pose/depth outputs could drift out of sync — confirm upstream
            # ordering guarantees.
            kps_dict_2d, kps_2d_list, box, job_id_pose = pose_result_queue.get()
            disp, rectified_left, job_id_depth = depth_result_queue.get()
            kps_dict_2d, kps_2d_list, pts_3d_dict, pts_3d_list, disp = process_3dkpt_depth(kps_dict_2d, kps_2d_list, disp)
            if sender_flag:
                # Draw the detected person's bounding box on the rectified
                # left image before streaming it out.
                x1, y1, x2, y2 = np.array(box, dtype=np.int32)
                rectified_left = cv2.rectangle(rectified_left, (x1, y1), (x2, y2), (0, 255, 0), 2)
                sender.send_data(kps_dict_2d, kps_2d_list, pts_3d_dict, pts_3d_list, rectified_left, disp)
            # Per-frame latency bookkeeping for the once-per-second report.
            now = time.time()
            elapsed = now - last_frame_time
            total_time += elapsed
            frame_count += 1
            last_frame_time = now
            if now - start_time >= 1.0:
                fps = frame_count / (now - start_time)
                avg_time = total_time / frame_count if frame_count > 0 else 0
                print(f"[输出] FPS: {fps:.2f}, 平均耗时: {avg_time*1000:.2f} ms, 队列大小: {pose_result_queue.qsize()}, {depth_result_queue.qsize()}, 帧ID: {job_id_pose}, {job_id_depth}")
                start_time = now
                total_time = 0
                frame_count = 0
        else:
            # Back off briefly so this poll loop doesn't spin a full core.
            time.sleep(0.005)

import psutil
def terminate_all():
    """Terminate every child process of the current process, waiting for
    each one to actually exit before moving on."""
    me = psutil.Process()
    for child in me.children(recursive=True):
        print(f"正在终止子进程: {child.pid}")
        child.terminate()
        child.wait()


if __name__ == "__main__":
    # Entry point: start the inference workers, spawn the frame producer,
    # and run the consumer loop in this (main) process.
    try:
        # Bug fix: register the cleanup handler BEFORE spawning anything, so
        # child processes are terminated on every exit path. The original
        # registered it only inside the KeyboardInterrupt handler (despite
        # its own comment saying it must precede the main logic), leaving
        # other failure modes with orphaned children.
        atexit.register(terminate_all)

        # Shared queues/events for the pose and depth workers.
        pose_task_queue = Queue()
        pose_result_queue = Queue()
        pose_event = Event()

        depth_task_queue = Queue()
        depth_result_queue = Queue()
        depth_event = Event()

        # Start the inference worker subprocesses.
        processor = StereoProcessor(
            pose_task_queue, pose_result_queue, pose_event,
            depth_task_queue, depth_result_queue, depth_event
        )

        # Producer subprocess feeds frames; the consumer loop below blocks
        # the main process (get_frame never returns normally).
        put_process = Process(target=put_frame, args=(pose_task_queue, depth_task_queue))
        put_process.start()
        get_frame(pose_result_queue, depth_result_queue)
        put_process.join()

    except KeyboardInterrupt:
        print("检测到 Ctrl+C,准备退出...")
    finally:
        print("主进程结束，资源即将释放")