from stereo.POSE_process import PoseEstimationWorker, BODY_PARTS
from multiprocessing import Process, Event, Queue
from stereo.SGBM_process_crop import StereoMatcherWorker
import numpy as np
import time
import cv2
import traceback
import atexit

from stereo.utils.Filter.Filter_disp import fill_disp_near_keypoints
from stereo.utils.stereoRectify_process import stereo_rectify
from stereo.utils.Filter.OneEuroFilter_python import OneEuroFilter, EMAFilter
from stereo.utils.config import Config
from multiprocessing import Value, Array
import requests,json
# Module-level EMA filter instance used to smooth the 3D keypoints across frames.
EMAFilter_ins = EMAFilter()
# When True, get_frame() also streams results over TCP via StereoDataSender.
sender_flag = False

class StereoProcessor:
    """Owns the pose-estimation and stereo-matching worker processes.

    Bug fix: the task/result queues are now stored on the instance.
    ``process_frame`` and ``get_frame`` previously referenced attributes
    (``self.depth_task_queue`` etc.) that ``__init__`` never set, so any
    call raised AttributeError.
    """

    def __init__(self, pose_task_queue, pose_result_queue, pose_event,
                       depth_task_queue, depth_result_queue, depth_event):
        # Keep queue handles so the helper methods below can use them.
        self.pose_task_queue = pose_task_queue
        self.pose_result_queue = pose_result_queue
        self.depth_task_queue = depth_task_queue
        self.depth_result_queue = depth_result_queue

        self.pose_worker = PoseEstimationWorker(pose_task_queue, pose_result_queue, pose_event)
        self.pose_worker.start()

        self.depth_worker = StereoMatcherWorker(depth_task_queue, depth_result_queue, depth_event)
        self.depth_worker.start()

    def process_frame(self, left_image, right_image):
        """Enqueue one stereo pair for depth and the left image for pose."""
        self.depth_task_queue.put((left_image, right_image))
        self.pose_task_queue.put(left_image)

    def get_frame(self):
        """Block until both result queues hold an item; return (depth, pose).

        Note: this pairs results by arrival order, not by job id.
        """
        while True:
            if not self.depth_result_queue.empty() and not self.pose_result_queue.empty():
                return self.depth_result_queue.get(), self.pose_result_queue.get()
            time.sleep(0.01)
def min_depth(depth: np.ndarray, x_point: int, y_point: int, val_range: int = 5):
    """Return the smallest positive depth inside a window around a pixel.

    Args:
        depth: depth image (H, W); values <= 0 are treated as invalid.
        x_point: window centre column (negative values clamp to 0).
        y_point: window centre row (negative values clamp to 0).
        val_range: half-size of the search window in pixels.

    Returns:
        The minimum valid (> 0) depth in the window, or 0 when the window
        contains no valid depth.

    Generalized vs. the original: bounds come from ``depth.shape`` instead of
    hard-coded 640x480, and the coordinates are cast to int so float keypoint
    positions (as passed by some callers) no longer crash the slicing.
    """
    h, w = depth.shape[:2]
    x_point = max(int(x_point), 0)
    y_point = max(int(y_point), 0)
    x_min = max(x_point - val_range, 0)
    x_max = min(x_point + val_range, w - 1)
    y_min = max(y_point - val_range, 0)
    y_max = min(y_point + val_range, h - 1)
    window = depth[y_min:y_max, x_min:x_max]
    valid = window > 0
    # np.min raises on an empty selection, so guard with np.any.
    return np.min(window[valid]) if np.any(valid) else 0
def depth_points_2_world_points_pixel(pts_2d, depth):
    """Lift 2D keypoints to camera-frame 3D points in metres.

    Args:
        pts_2d: sequence of (x, y) keypoint pixel coordinates; indexing below
            assumes the skeleton layout where 19 is the hip, 17 the head and
            3/4 the ears, so at least 20 points are expected.
        depth: full-frame depth image in millimetres.

    Returns:
        np.ndarray of shape (len(pts_2d), 3), float32, with axes remapped to
        (Z, -X, -Y) and converted from millimetres to metres.
    """
    depth_points_with_depth, pts_length = [], len(pts_2d)
    for i in range(pts_length):
        x, y = pts_2d[i]
        depth_value = depth[int(y), int(x)]

        if depth_value > 0:
            depth_points_with_depth.append([y, x, depth_value])
        else:
            # Hole in the depth map: fall back to the smallest valid depth
            # in a small window around the keypoint.
            min_depth_value = min_depth(depth, int(x), int(y))
            depth_points_with_depth.append([y, x, min_depth_value])
    depth_points_with_depth = np.array(depth_points_with_depth)

    # Use the hip keypoint (index 19) as the depth reference for the body.
    hip_index = 19
    hip_depth = depth_points_with_depth[hip_index][2]

    for i, point in enumerate(depth_points_with_depth):
        if i == hip_index:
            continue  # skip the reference point itself
        current_depth = point[2]
        if abs(current_depth - hip_depth) <= 500:
            continue  # within tolerance (mm), no correction needed
        # Outlier: re-estimate from a wider window. Bug fix: accept the
        # re-estimate when *it* (new_depth) lies within tolerance of the hip
        # depth; the old code re-tested current_depth, which was already known
        # to be out of tolerance, so the re-estimate could never be used.
        # Coordinates are cast to int so float keypoints cannot crash slicing.
        new_depth = min_depth(depth, int(pts_2d[i][0]), int(pts_2d[i][1]), val_range=10)
        depth_points_with_depth[i][2] = new_depth if (new_depth > 0 and abs(new_depth - hip_depth) <= 500) else hip_depth

    # If the head point (17) disagrees strongly with point 19, treat its depth
    # as wrong and re-estimate; fall back to point 0's depth.
    # NOTE(review): the original comment mentions comparing 17 with point 0,
    # but the code compares with 19 and falls back to 0 — confirm intent.
    if abs(depth_points_with_depth[17][2] - depth_points_with_depth[19][2]) > 500:
        depth_points_with_depth[17][2] = min_depth(depth, int(pts_2d[17][0]), int(pts_2d[17][1]), 10)
        if depth_points_with_depth[17][2] == 0:
            depth_points_with_depth[17][2] = depth_points_with_depth[0][2]
    # Same correction for the right ear (4) against point 3.
    if abs(depth_points_with_depth[4][2] - depth_points_with_depth[3][2]) > 500:
        depth_points_with_depth[4][2] = min_depth(depth, int(pts_2d[4][0]), int(pts_2d[4][1]), 10)
        if depth_points_with_depth[4][2] == 0:
            depth_points_with_depth[4][2] = depth_points_with_depth[3][2]

    # Back-project each (y, x, depth) pixel to camera coordinates using the
    # (possibly ROI-shifted) principal point, then remap axes and scale mm→m.
    keypoints3d = np.zeros((pts_length, 3), dtype=np.float32)
    for i, (y, x, depth_value) in enumerate(depth_points_with_depth):
        X = (x - stereo_rectify.cali_cx) * depth_value / stereo_rectify.fx
        Y = (y - stereo_rectify.cali_cy) * depth_value / stereo_rectify.fy
        Z = depth_value
        keypoints3d[i] = np.array([Z, -X, -Y]) * 0.001  # convert to metres
    return keypoints3d

def pts3d_to_dict(pts3d):
    """Return {body-part name: 3D point}, pairing rows of ``pts3d`` with the
    keys of BODY_PARTS in order.

    The key list is built once instead of rebuilding
    ``list(BODY_PARTS.keys())`` on every iteration (the original was O(n^2)).
    Still raises IndexError if pts3d has more rows than BODY_PARTS has keys,
    matching the original behaviour.
    """
    names = list(BODY_PARTS.keys())
    return {names[i]: pts3d[i] for i in range(pts3d.shape[0])}

def process_3dkpt_depth(kps_2d_list, disp):
    """Turn 2D keypoints plus a disparity map into smoothed 3D keypoints.

    Fills disparity holes near the keypoints, converts disparity to depth,
    back-projects the 2D keypoints to 3D and smooths them with the module
    EMA filter.

    Returns:
        (kps_2d_list, pts_3d_dict, pts_3d_list, filled_disparity)
    """
    filled_disp = fill_disp_near_keypoints(disp, kps_2d_list)
    depth_map = stereo_rectify.disparity_to_depth(
        filled_disp, stereo_rectify.fx, stereo_rectify.baseline
    )
    # Back-project the 2D keypoints into camera-frame 3D points, then smooth.
    smoothed_pts = EMAFilter_ins(depth_points_2_world_points_pixel(kps_2d_list, depth_map))
    return kps_2d_list, pts3d_to_dict(smoothed_pts), smoothed_pts, filled_disp
def expand_bbox(box, x_expansion=1.2, y_expansion=1.2, frame_w=640, frame_h=480):
    """Expand a bounding box about its centre, clamped to the frame.

    Args:
        box (array-like): original box as [x1, y1, x2, y2].
        x_expansion (float): width scale factor (1.2 = +20% width).
        y_expansion (float): height scale factor.
        frame_w (int): frame width used for clamping (generalized from the
            previously hard-coded 640; default keeps old behaviour).
        frame_h (int): frame height used for clamping (previously 480).

    Returns:
        list[int]: expanded, clamped box [x1, y1, x2, y2].
    """
    x1, y1, x2, y2 = box
    cx = (x1 + x2) / 2
    cy = (y1 + y2) / 2
    half_w = (x2 - x1) * x_expansion / 2
    half_h = (y2 - y1) * y_expansion / 2

    # Clamp each corner to valid pixel coordinates, then truncate toward zero
    # with int() to match the original behaviour.
    new_x1 = int(np.clip(cx - half_w, 0, frame_w - 1))
    new_y1 = int(np.clip(cy - half_h, 0, frame_h - 1))
    new_x2 = int(np.clip(cx + half_w, 0, frame_w - 1))
    new_y2 = int(np.clip(cy + half_h, 0, frame_h - 1))
    return [new_x1, new_y1, new_x2, new_y2]

# Cross-process shared human bounding box, stored as [x1, y1, x2, y2].
# multiprocessing.Array gives a lock-protected int array that the producer
# (put_frame) reads and the consumer (get_frame) writes.
global_box = Array('i', [0, 0, 0, 0])


# Producer: continuously capture stereo frames and feed both the pose queue
# and the depth queue so each stage processes the same frame.
def put_frame(pose_queue, depth_queue, global_box):
    """Frame-producer loop (runs in its own process).

    For every captured stereo pair this pushes:
      * (left_image, job_id) onto ``pose_queue``, and
      * (gray_left, gray_right, left_image, [x1, y1, x2, y2], job_id)
        onto ``depth_queue``, where the gray pair is cropped to the ROI held
        in the shared ``global_box`` when that box is valid.

    It also adapts the camera capture interval to the queue backlog
    (dynamic FPS). Runs forever; any exception is printed via traceback
    and ends the loop.
    """
    try:
        from stereo.utils.stereo_camera_threaded import StereoCameraCapture_threaded_ins
        job_id = 0
        t1 = time.time()
        frames = 0

        # Dynamic-FPS parameters: capture rate is lowered when the task
        # queues back up and raised again when they drain.
        target_fps = Config.fps
        min_fps = Config.fps - 10
        max_fps = Config.fps
        fps_adjust_step = 1.0
        fps_adjust_interval = 2.0  # re-evaluate at most every 2 seconds
        last_fps_adjust_time = time.time()
        frame_interval = 1.0 / target_fps

        qsize_increase_threshold = 8
        qsize_decrease_threshold = 3

        # left_image, right_image , _ = cv2.imread("data/im0.png"), cv2.imread("data/im1.png"), None
        # left_image = cv2.resize(left_image, (640, 480))
        # right_image = cv2.resize(right_image, (640, 480))
        while True:
            right_image, left_image , _ = StereoCameraCapture_threaded_ins.get_stereo_frames()

            if left_image is None or right_image is None:
                continue
            # Pose and depth must see the very same frame, hence the shared job_id.
            pose_queue.put((left_image, job_id))
            gray_left_image = cv2.cvtColor(left_image, cv2.COLOR_BGR2GRAY)
            gray_right_image = cv2.cvtColor(right_image, cv2.COLOR_BGR2GRAY)
            # Snapshot global_box into locals so another process cannot change
            # it between the validity check and its use below.
            x1, y1, x2, y2 = global_box[0], global_box[1], global_box[2], global_box[3]
            x1, y1, x2, y2 = expand_bbox((x1, y1, x2, y2), x_expansion=1.3, y_expansion=1.0)
            
            has_valid_box = x2 > x1 and y2 > y1 and x2 > 0 and y2 > 0
            
            if has_valid_box:
                # Crop the gray pair to the ROI so stereo matching is cheaper.
                try:
                    gray_left_image = gray_left_image[y1:y2, x1:x2]
                    gray_right_image = gray_right_image[y1:y2, x1:x2]
                except Exception as e:
                    print(f"截取ROI区域出错: {e}, box={[x1, y1, x2, y2]}, 图像大小={gray_left_image.shape}")
                    # On failure, reset the shared box so future frames use the full image.
                    with global_box.get_lock():
                        for i in range(4):
                            global_box[i] = 0
            
            depth_queue.put((gray_left_image, gray_right_image, left_image, [x1, y1, x2, y2], job_id))
            frames += 1
            job_id += 1

            now = time.time()

            # Once-per-second FPS statistics.
            if now - t1 >= 1.0:
                # print(f"[输入] FPS: {frames//2} 帧/秒, 队列大小: pose={pose_queue.qsize()}, depth={depth_queue.qsize()}, cam={StereoCameraCapture_threaded_ins.get_queue_size()}, target_fps={target_fps}, box={list(global_box)}, shape={gray_left_image.shape}")
                print(f"[输入] FPS: {frames} 帧/秒, 队列大小: pose={pose_queue.qsize()}, depth={depth_queue.qsize()}, target_fps={target_fps}, box={list(global_box)}, shape={gray_left_image.shape}")
                frames = 0
                t1 = now

                # ---------- dynamic FPS adjustment ----------
                if now - last_fps_adjust_time > fps_adjust_interval:
                    queue_size = max(pose_queue.qsize(), depth_queue.qsize())
                    if queue_size > qsize_increase_threshold and target_fps > min_fps:
                        target_fps = max(min_fps, target_fps - fps_adjust_step)
                        frame_interval = 1.0 / target_fps
                        print(f"[调节] 队列较满({queue_size}),降低FPS → {target_fps}")
                    elif queue_size < qsize_decrease_threshold and target_fps < max_fps:
                        target_fps = min(max_fps, target_fps + fps_adjust_step)
                        frame_interval = 1.0 / target_fps
                        print(f"[调节] 队列较空({queue_size}),提升FPS → {target_fps}")
                    last_fps_adjust_time = now
                # --------------------------------------------
            # time.sleep(frame_interval)
                # NOTE(review): this update runs only inside the once-per-second
                # stats branch, so the camera interval lags FPS changes by up to
                # a second — confirm this throttling placement is intentional.
                StereoCameraCapture_threaded_ins.capture_interval = frame_interval
    except Exception as e:
        traceback.print_exc()
        
def get_frame(pose_result_queue, depth_result_queue, global_box):
    """Consumer loop: pair pose and depth results, build 3D keypoints,
    publish them, and feed the detected person box back to the producer.

    Fixes vs. the original:
      * ``requests.post`` is now wrapped in try/except (with a timeout) so a
        network failure no longer crashes the whole consumer loop;
      * the disparity-error message includes the exception text again (the
        original f-string had a dangling ``: ,`` where ``{e}`` was removed);
      * the full-frame disparity canvas is only allocated when a result pair
        is actually available, not on every idle poll.
    """
    start_time = time.time()
    last_frame_time = time.time()
    frame_count = 0
    total_time = 0.0

    # Remember the calibration principal point; it is shifted below whenever
    # the depth worker operated on an ROI crop.
    stereo_rectify.cali_cx = stereo_rectify.cx
    stereo_rectify.cali_cy = stereo_rectify.cy

    if sender_flag:
        from stereo.utils.stereo_tcp_server import StereoDataSender
        # from stereo.utils.stereo_tcp_server import StereoImagePublisher_ins
        sender = StereoDataSender()
    while True:
        if not pose_result_queue.empty() and not depth_result_queue.empty():
            # Full-frame disparity canvas; ROI results are pasted into it.
            disp_final = np.ones((480, 640), dtype=np.float32)
            kps_dict_2d, kps_2d_list, box, job_id_pose = pose_result_queue.get()
            disp, rectified_left, job_box, job_id_depth = depth_result_queue.get()
            # Snapshot the box the depth worker actually used for this job.
            x1, y1, x2, y2 = job_box[0], job_box[1], job_box[2], job_box[3]
            has_valid_box = x2 > x1 and y2 > y1 and x2 > 0 and y2 > 0

            if has_valid_box:
                try:
                    # Paste the ROI disparity back into the full-size canvas.
                    if disp.shape[0] != 480 or disp.shape[1] != 640:
                        disp_final[y1:y2, x1:x2] = disp
                    else:
                        disp_final = disp
                    # Shift the principal point into ROI coordinates so the
                    # back-projection stays consistent with the crop.
                    stereo_rectify.cali_cx = stereo_rectify.cx - x1
                    stereo_rectify.cali_cy = stereo_rectify.cy - y1
                except Exception as e:
                    traceback.print_exc()
                    print(f"处理视差图出错: {e}, box={[x1, y1, x2, y2]}")
                    # Restore the unshifted principal point on failure.
                    stereo_rectify.cali_cx = stereo_rectify.cx
                    stereo_rectify.cali_cy = stereo_rectify.cy
            else:
                # No valid box: use the full-frame principal point.
                stereo_rectify.cali_cx = stereo_rectify.cx
                stereo_rectify.cali_cy = stereo_rectify.cy

            # Lift the 2D keypoints to smoothed 3D points.
            kps_2d_list, pts_3d_dict, pts_3d_list, disp = process_3dkpt_depth(kps_2d_list, disp_final)

            # Validate the detection box before publishing it to the producer
            # through the shared array.
            if box is not None and len(box) == 4:
                box_valid = all(isinstance(x, (int, float)) for x in box) and box[2] > box[0] and box[3] > box[1]
                if box_valid:
                    with global_box.get_lock():  # atomic update of all 4 coords
                        for i in range(4):
                            global_box[i] = int(box[i])
                else:
                    print(f"收到无效的box: {box}")

            # JSON-serializable copies of the keypoint dictionaries.
            kps_dict_2d = {k: v.tolist() if isinstance(v, np.ndarray) else v
                           for k, v in kps_dict_2d.items()}
            pts_3d_dict = {k: v.tolist() if isinstance(v, np.ndarray) else v
                           for k, v in pts_3d_dict.items()}

            result = {
                'data_2d': kps_dict_2d,
                'data_3d': pts_3d_dict
            }
            # Best-effort HTTP publish: a network failure must not kill the loop.
            try:
                requests.post(
                    "http://192.168.1.164:8000/get_data",
                    headers={"Content-Type": "application/json"},
                    data=json.dumps(result),
                    timeout=1.0,
                )
            except requests.RequestException as e:
                print(f"发送数据出错: {e}")

            if sender_flag and box is not None and len(box) == 4:
                try:
                    x1, y1, x2, y2 = np.array(box, dtype=np.int32)
                    rectified_left = cv2.rectangle(rectified_left, (x1, y1), (x2, y2), (0, 255, 0), 2)
                    sender.send_data(kps_dict_2d, kps_2d_list, pts_3d_dict, pts_3d_list, rectified_left, disp)
                except Exception as e:
                    print(f"发送数据出错: {e}")

            # ---------- throughput statistics ----------
            now = time.time()
            elapsed = now - last_frame_time
            total_time += elapsed
            frame_count += 1
            last_frame_time = now
            if now - start_time >= 1.0:
                fps = frame_count / (now - start_time)
                avg_time = total_time / frame_count if frame_count > 0 else 0
                print(f"[输出] FPS: {fps:.2f}, 平均耗时: {avg_time*1000:.2f} ms, 队列大小: {pose_result_queue.qsize()}, {depth_result_queue.qsize()}, 帧ID: {job_id_pose}, {job_id_depth}, box={list(global_box)}")
                start_time = now
                total_time = 0
                frame_count = 0
        else:
            time.sleep(0.005)

import psutil
def terminate_all():
    """Terminate every child process (recursively) spawned by this process
    and wait for each one to exit."""
    for child in psutil.Process().children(recursive=True):
        print(f"正在终止子进程: {child.pid}")
        child.terminate()
        child.wait()


if __name__ == "__main__":
    # Run as the main process: spawn the inference workers and the frame
    # producer, then run the consumer loop in this process.
    try:
        # Register cleanup BEFORE spawning any child, so children are
        # terminated on every exit path. The original registered this inside
        # the KeyboardInterrupt handler (despite its own comment saying it
        # must come before the main logic), which missed normal exits and
        # all other exceptions.
        atexit.register(terminate_all)

        # Shared resources
        pose_task_queue = Queue()
        pose_result_queue = Queue()
        pose_event = Event()

        depth_task_queue = Queue()
        depth_result_queue = Queue()
        depth_event = Event()

        # Start the inference worker subprocesses.
        processor = StereoProcessor(
            pose_task_queue, pose_result_queue, pose_event,
            depth_task_queue, depth_result_queue, depth_event
        )

        # Producer process feeds frames; this process consumes the results.
        put_process = Process(target=put_frame, args=(pose_task_queue, depth_task_queue, global_box))

        put_process.start()
        get_frame(pose_result_queue, depth_result_queue, global_box)
        # Block the main process until the producer exits.
        put_process.join()

    except KeyboardInterrupt:
        print("检测到 Ctrl+C,准备退出...")
    finally:
        print("主进程结束，资源即将释放")