from stereo.POSE_process import PoseEstimationWorker, BODY_PARTS
from multiprocessing import Process, Event, Queue, Array
from stereo.SGBM_process_crop import StereoMatcherWorker
import numpy as np
import time
import cv2
import traceback
import atexit

from stereo.utils.Filter.Filter_disp import fill_disp_near_keypoints
from stereo.utils.stereoRectify_process import stereo_rectify
from stereo.utils.Filter.OneEuroFilter_python import OneEuroFilter, EMAFilter
from stereo.utils.config import Config
from multiprocessing import Value, Array

# Global configuration
EMAFilter_ins = EMAFilter()  # module-level EMA filter applied to the 3-D keypoints each frame
sender_flag = False  # when True, StereoPipeline constructs a StereoDataSender for network output


class StereoProcessor:
    """Owns the two worker processes of the pipeline.

    Constructing this object creates and immediately starts a pose-estimation
    worker and a stereo-matching (depth) worker, each wired to its own
    task queue, result queue, and event.
    """

    def __init__(self, pose_task_queue, pose_result_queue, pose_event,
                 depth_task_queue, depth_result_queue, depth_event):
        self.pose_worker = PoseEstimationWorker(pose_task_queue, pose_result_queue, pose_event)
        self.depth_worker = StereoMatcherWorker(depth_task_queue, depth_result_queue, depth_event)
        # Starting is a deliberate side effect of construction.
        for worker in (self.pose_worker, self.depth_worker):
            worker.start()


class FrameProcessor:
    """Prepares per-frame inputs for the depth worker.

    During the first ``stable_frames_needed`` frames (or whenever the shared
    person box is invalid) the full frame is used; afterwards the depth input
    is cropped to an expanded person box to reduce matching cost.

    Frame dimensions are parameters (previously hard-coded 640x480); the
    defaults preserve the original behavior exactly.
    """

    def __init__(self, global_box, frame_width=640, frame_height=480,
                 stable_frames_needed=10):
        """
        Args:
            global_box: shared 4-int sequence (x1, y1, x2, y2), written by the
                output loop and read here to choose the crop region.
            frame_width: full-frame width in pixels (default 640).
            frame_height: full-frame height in pixels (default 480).
            stable_frames_needed: frames to wait before enabling crop mode.
        """
        self.global_box = global_box
        self.frame_count = 0
        self.frame_width = frame_width
        self.frame_height = frame_height
        self.stable_frames_needed = stable_frames_needed  # warm-up before crop

    def should_use_crop(self):
        """Return True once enough frames have passed to trust the box."""
        return self.frame_count >= self.stable_frames_needed

    def expand_bbox(self, box, x_expansion=1.2, y_expansion=1.2):
        """Scale ``box`` about its center by the given factors, clipped to the frame.

        Returns:
            [x1, y1, x2, y2] as ints, clipped to [0, width-1] / [0, height-1].
        """
        x1, y1, x2, y2 = box
        x_center = (x1 + x2) / 2
        y_center = (y1 + y2) / 2
        half_width = (x2 - x1) * x_expansion / 2
        half_height = (y2 - y1) * y_expansion / 2

        new_x1 = np.clip(x_center - half_width, 0, self.frame_width - 1)
        new_y1 = np.clip(y_center - half_height, 0, self.frame_height - 1)
        new_x2 = np.clip(x_center + half_width, 0, self.frame_width - 1)
        new_y2 = np.clip(y_center + half_height, 0, self.frame_height - 1)

        return [int(new_x1), int(new_y1), int(new_x2), int(new_y2)]

    def process_frame_for_depth(self, left_image, right_image):
        """Convert the stereo pair to grayscale and pick full-frame vs. crop.

        Returns:
            (gray_left, gray_right, original_left, box_info, frame_count)
            where box_info is the [x1, y1, x2, y2] region the gray images cover.
        """
        gray_left = cv2.cvtColor(left_image, cv2.COLOR_BGR2GRAY)
        gray_right = cv2.cvtColor(right_image, cv2.COLOR_BGR2GRAY)

        full_box = [0, 0, self.frame_width, self.frame_height]

        # Snapshot the shared box.
        x1, y1, x2, y2 = (self.global_box[0], self.global_box[1],
                          self.global_box[2], self.global_box[3])
        has_valid_box = x2 > x1 and y2 > y1 and x2 > 0 and y2 > 0

        # Full frame during warm-up or while the tracked box is invalid.
        if not self.should_use_crop() or not has_valid_box:
            return gray_left, gray_right, left_image, full_box, self.frame_count

        expanded_box = self.expand_bbox((x1, y1, x2, y2), x_expansion=1.3, y_expansion=1.0)
        ex1, ey1, ex2, ey2 = expanded_box

        # Robustness fix: a box hugging the frame edge can clip to zero area;
        # fall back to the full frame instead of emitting an empty crop.
        if ex2 <= ex1 or ey2 <= ey1:
            return gray_left, gray_right, left_image, full_box, self.frame_count

        try:
            crop_gray_left = gray_left[ey1:ey2, ex1:ex2]
            crop_gray_right = gray_right[ey1:ey2, ex1:ex2]
            return crop_gray_left, crop_gray_right, left_image, expanded_box, self.frame_count
        except Exception as e:
            print(f"Crop失败，回退到全图: {e}")
            return gray_left, gray_right, left_image, full_box, self.frame_count

    def increment_frame_count(self):
        """Advance the frame counter used for the warm-up decision."""
        self.frame_count += 1


class StereoPipeline:
    """Top-level stereo pipeline: feeds frames to pose/depth workers and
    consumes paired results in the main process.

    Bug fixes over the previous revision:
    * ``get_frame_loop`` now consumes the values returned by
      ``output_performance_stats``; before, the reset triple was discarded, so
      ``frame_count``/``total_time`` grew without bound and — once 5 s had
      elapsed — the stats line printed on every single frame.
    * Removed the dead ``if True else`` branch and stray debug print.
    """

    def __init__(self):
        # Shared person box (x1, y1, x2, y2): written by the output loop,
        # read by the input loop to decide whether to crop for depth matching.
        self.global_box = Array('i', [0, 0, 0, 0])

        # Task/result queues and events for the two worker processes.
        self.pose_task_queue = Queue()
        self.pose_result_queue = Queue(maxsize=3)
        self.pose_event = Event()

        self.depth_task_queue = Queue()
        self.depth_result_queue = Queue(maxsize=3)
        self.depth_event = Event()

        # Constructing StereoProcessor starts both workers as a side effect.
        self.stereo_processor = StereoProcessor(
            self.pose_task_queue, self.pose_result_queue, self.pose_event,
            self.depth_task_queue, self.depth_result_queue, self.depth_event
        )

        self.frame_processor = FrameProcessor(self.global_box)

        # Optional network sender (disabled unless the module flag is set).
        self.sender = None
        if sender_flag:
            from stereo.utils.stereo_tcp_server import StereoDataSender
            self.sender = StereoDataSender()

    def put_frame_loop(self):
        """Input loop: grab stereo frames, enqueue pose and depth tasks, and
        adapt the capture FPS to the worker-queue backlog."""
        try:
            from stereo.utils.stereo_camera_threaded import StereoCameraCapture_threaded_ins

            # Dynamic-FPS parameters.
            target_fps = Config.fps
            min_fps = Config.fps - 10
            max_fps = Config.fps
            fps_adjust_step = 1.0
            fps_adjust_interval = 2.0
            last_fps_adjust_time = time.time()
            frame_interval = 1.0 / target_fps

            qsize_increase_threshold = 8
            qsize_decrease_threshold = 3

            job_id = 0
            frames = 0
            t1 = time.time()

            while True:
                right_image, left_image, _ = StereoCameraCapture_threaded_ins.get_stereo_frames()

                if left_image is None or right_image is None:
                    continue

                # Pose task always uses the full-resolution left image.
                self.pose_task_queue.put((left_image, job_id))

                # Depth task may use a cropped region once the box is stable.
                gray_left, gray_right, orig_left, box_info, frame_id = \
                    self.frame_processor.process_frame_for_depth(left_image, right_image)

                self.depth_task_queue.put((gray_left, gray_right, orig_left, box_info, job_id))

                frames += 1
                job_id += 1
                self.frame_processor.increment_frame_count()

                # Input-side FPS statistics every 5 s.
                now = time.time()
                if now - t1 >= 5.0:
                    mode = "全图" if not self.frame_processor.should_use_crop() else "Crop"
                    print(f"[输入] FPS: {frames}, 模式: {mode}, 队列: pose={self.pose_task_queue.qsize()}, depth={self.depth_task_queue.qsize()}, box={list(self.global_box)}")
                    frames = 0
                    t1 = now

                    # Dynamic FPS adjustment based on the deeper backlog.
                    if now - last_fps_adjust_time > fps_adjust_interval:
                        queue_size = max(self.pose_task_queue.qsize(), self.depth_task_queue.qsize())
                        if queue_size > qsize_increase_threshold and target_fps > min_fps:
                            target_fps = max(min_fps, target_fps - fps_adjust_step)
                            frame_interval = 1.0 / target_fps
                            print(f"[调节] 队列满({queue_size}), 降FPS → {target_fps}")
                        elif queue_size < qsize_decrease_threshold and target_fps < max_fps:
                            target_fps = min(max_fps, target_fps + fps_adjust_step)
                            frame_interval = 1.0 / target_fps
                            print(f"[调节] 队列空({queue_size}), 升FPS → {target_fps}")
                        last_fps_adjust_time = now

                time.sleep(frame_interval)

        except Exception:
            traceback.print_exc()

    def get_frame_loop(self):
        """Output loop: pair pose and depth results, lift keypoints to 3-D,
        publish the person box, and optionally send data over the network."""
        start_time = time.time()
        last_frame_time = time.time()
        frame_count = 0
        total_time = 0.0

        # Preserve the original principal point so crop-mode adjustments
        # elsewhere can be undone against a known baseline.
        stereo_rectify.cali_cx = stereo_rectify.cx
        stereo_rectify.cali_cy = stereo_rectify.cy

        while True:
            if not self.pose_result_queue.empty() and not self.depth_result_queue.empty():
                # NOTE(review): results are paired by arrival order, not by job
                # id — assumes both workers preserve ordering; verify upstream.
                kps_dict_2d, kps_2d_list, box, job_id_pose = self.pose_result_queue.get()
                disp, rectified_left, job_box, job_id_depth = self.depth_result_queue.get()

                # Merge a (possibly cropped) disparity map to full-frame coords.
                disp_final = self.merge_disparity_map(disp, job_box)

                if disp_final is None or disp_final.size == 0:
                    print("警告: 视差图为空，跳过此帧处理")
                    continue

                kps_2d_list, pts_3d_dict, pts_3d_list, disp = process_3dkpt_depth(kps_2d_list, disp_final)

                # Publish the latest person box for the input loop's crop decision.
                self.update_global_box(box)

                if self.sender and box is not None and len(box) == 4:
                    self.send_data_safe(kps_dict_2d, kps_2d_list, pts_3d_dict, pts_3d_list,
                                      rectified_left, disp, box)

                # Bug fix: consume the (possibly reset) counters — previously
                # the return value was discarded and the stats never reset.
                start_time, total_time, frame_count = self.output_performance_stats(
                    start_time, last_frame_time, frame_count,
                    total_time, job_id_pose, job_id_depth)

                last_frame_time = time.time()
                frame_count += 1
            else:
                time.sleep(0.005)

    def merge_disparity_map(self, disp, job_box):
        """Paste a crop-sized disparity result back into a full 480x640 map.

        Full-frame results pass through unchanged. NOTE(review): the fallback
        canvas is all *ones* (not zeros) — presumably a "no disparity"
        sentinel; confirm downstream handling before changing it.
        """
        disp_final = np.ones((480, 640), dtype=np.float32)

        if job_box is None or len(job_box) != 4:
            return disp_final

        x1, y1, x2, y2 = job_box
        has_valid_box = x2 > x1 and y2 > y1 and x2 > 0 and y2 > 0

        if has_valid_box:
            try:
                # Crop-sized result: paste into the full-frame canvas.
                if disp.shape[0] != 480 or disp.shape[1] != 640:
                    disp_final[y1:y2, x1:x2] = disp
                    # Restore calibration principal point.
                    stereo_rectify.cali_cx = stereo_rectify.cx
                    stereo_rectify.cali_cy = stereo_rectify.cy
                else:
                    disp_final = disp
            except Exception as e:
                print(f"视差图合并失败: {e}")
                # Restore calibration principal point on failure too.
                stereo_rectify.cali_cx = stereo_rectify.cx
                stereo_rectify.cali_cy = stereo_rectify.cy
        else:
            # Invalid box: restore calibration principal point.
            stereo_rectify.cali_cx = stereo_rectify.cx
            stereo_rectify.cali_cy = stereo_rectify.cy

        return disp_final

    def update_global_box(self, box):
        """Validate ``box`` and copy it into the shared array under its lock.

        Invalid boxes (wrong length, non-numeric, or non-positive area) are
        logged and ignored so the input loop keeps its last good region.
        """
        if box is not None and len(box) == 4:
            box_valid = all(isinstance(x, (int, float)) for x in box) and box[2] > box[0] and box[3] > box[1]
            if box_valid:
                with self.global_box.get_lock():
                    for i in range(4):
                        self.global_box[i] = int(box[i])
            else:
                print(f"收到无效box: {box}")

    def send_data_safe(self, kps_dict_2d, kps_2d_list, pts_3d_dict, pts_3d_list, rectified_left, disp, box):
        """Draw the person box on the rectified image and send all data.

        Never raises: any failure is logged and swallowed so the output loop
        keeps running.
        """
        try:
            if rectified_left is None or rectified_left.size == 0:
                print("警告: rectified_left为空，跳过发送")
                return

            x1, y1, x2, y2 = np.array(box, dtype=np.int32)
            rectified_left = cv2.rectangle(rectified_left.copy(), (x1, y1), (x2, y2), (0, 255, 0), 2)

            # The "right image" slot currently carries the disparity map
            # (dead `if True else` branch removed; behavior unchanged).
            right_image = disp.copy()

            self.sender.send_data(kps_dict_2d, kps_2d_list, pts_3d_dict, pts_3d_list,
                                rectified_left, right_image)
        except Exception as e:
            print(f"发送数据失败: {e}")
            traceback.print_exc()

    def output_performance_stats(self, start_time, last_frame_time, frame_count, total_time, job_id_pose, job_id_depth):
        """Accumulate per-frame latency and print a stats line every 5 s.

        Returns:
            (start_time, total_time, frame_count) — reset to (now, 0.0, 0)
            after printing. The caller MUST assign these back.
        """
        now = time.time()
        total_time += now - last_frame_time

        if now - start_time >= 5.0:
            fps = frame_count / (now - start_time)
            avg_time = total_time / frame_count if frame_count > 0 else 0
            print(f"[输出] FPS: {fps:.2f}, 平均耗时: {avg_time*1000:.2f}ms, "
                  f"队列: {self.pose_result_queue.qsize()}, {self.depth_result_queue.qsize()}, "
                  f"帧ID: {job_id_pose}, {job_id_depth}, box={list(self.global_box)}")
            # Start a fresh measurement window.
            return now, 0.0, 0
        return start_time, total_time, frame_count

    def run(self):
        """Start the input process and run the output loop in the main process."""
        try:
            put_process = Process(target=self.put_frame_loop)
            put_process.start()

            # Main process handles output.
            self.get_frame_loop()

            put_process.join()

        except KeyboardInterrupt:
            print("检测到 Ctrl+C, 准备退出...")
            terminate_all()
        finally:
            print("主进程结束，资源即将释放")


# 辅助函数
def min_depth(depth: np.ndarray, x_point: int, y_point: int, val_range: int = 5):
    """Return the smallest positive depth in a window around (x_point, y_point).

    Generalized to any image size by reading bounds from ``depth.shape``
    (previously hard-coded to 640x480); behavior for 480x640 inputs is
    unchanged, including the exclusive upper slice bound at ``dim - 1``.

    Args:
        depth: 2-D depth map indexed as ``depth[y, x]``.
        x_point, y_point: window center in pixel coordinates (clamped in-range).
        val_range: half-size of the search window in pixels.

    Returns:
        The minimum positive value in the window, or 0 if none exists.
    """
    h, w = depth.shape[:2]
    x_point = max(0, min(x_point, w - 1))
    y_point = max(0, min(y_point, h - 1))

    x_max = min(x_point + val_range, w - 1)
    x_min = max(x_point - val_range, 0)
    y_max = min(y_point + val_range, h - 1)
    y_min = max(y_point - val_range, 0)

    dep_img = depth[y_min:y_max, x_min:x_max]
    valid = dep_img > 0

    if np.any(valid):
        return np.min(dep_img[valid])
    return 0


def depth_points_2_world_points_pixel(pts_2d, depth):
    """Lift 2-D keypoints (pixel coordinates) to 3-D world points via the depth map.

    For each (x, y) keypoint the depth is sampled directly; invalid samples
    (<= 0) fall back to the nearest positive depth in a small window
    (``min_depth``). Outlier depths are then pulled toward the hip keypoint's
    depth, and the points are back-projected through the rectified intrinsics.

    NOTE(review): indices 19 (hip), 17, 4, 3 and 0 are hard-coded keypoint
    slots — presumably matching the BODY_PARTS ordering; confirm against the
    pose model. Depth appears to be in millimeters (the `* 0.001` converts to
    meters and the 500 threshold would then be 0.5 m) — verify.
    """
    depth_points_with_depth, pts_length = [], len(pts_2d)

    for i in range(pts_length):
        x, y = pts_2d[i]
        depth_value = depth[int(y), int(x)]

        if depth_value > 0:
            depth_points_with_depth.append([y, x, depth_value])
        else:
            # Invalid sample: search a small window for the nearest valid depth.
            min_depth_value = min_depth(depth, int(x), int(y))
            depth_points_with_depth.append([y, x, min_depth_value])

    depth_points_with_depth = np.array(depth_points_with_depth)

    # Depth-correction pass: clamp any point whose depth deviates from the
    # hip depth by more than 500 units.
    hip_index = 19
    hip_depth = depth_points_with_depth[hip_index][2]

    for i, point in enumerate(depth_points_with_depth):
        if i == hip_index:
            continue
        current_depth = point[2]
        if abs(current_depth - hip_depth) <= 500:
            continue
        # Re-sample with a wider window; fall back to the hip depth if still bad.
        new_depth = min_depth(depth, *pts_2d[i], val_range=10)
        depth_points_with_depth[i][2] = new_depth if (new_depth > 0 and abs(new_depth - hip_depth) <= 500) else hip_depth

    # Special-case repairs for keypoint 17 (vs. 19, fallback to 0) and
    # keypoint 4 (vs. 3, fallback to 3).
    if abs(depth_points_with_depth[17][2] - depth_points_with_depth[19][2]) > 500:
        depth_points_with_depth[17][2] = min_depth(depth, pts_2d[17][0], pts_2d[17][1], 10)
        if depth_points_with_depth[17][2] == 0:
            depth_points_with_depth[17][2] = depth_points_with_depth[0][2]

    if abs(depth_points_with_depth[4][2] - depth_points_with_depth[3][2]) > 500:
        depth_points_with_depth[4][2] = min_depth(depth, pts_2d[4][0], pts_2d[4][1], 10)
        if depth_points_with_depth[4][2] == 0:
            depth_points_with_depth[4][2] = depth_points_with_depth[3][2]

    # Back-project through the pinhole model using the (possibly crop-adjusted)
    # principal point, then remap axes to (Z, -X, -Y) and scale by 0.001.
    keypoints3d = np.zeros((pts_length, 3), dtype=np.float32)
    for i, (y, x, depth_value) in enumerate(depth_points_with_depth):
        X = (x - stereo_rectify.cali_cx) * depth_value / stereo_rectify.fx
        Y = (y - stereo_rectify.cali_cy) * depth_value / stereo_rectify.fy
        Z = depth_value
        world_point = np.array([Z, -X, -Y]) * 0.001
        keypoints3d[i] = world_point

    return keypoints3d


def pts3d_to_dict(pts3d):
    """Map each 3-D keypoint row to its body-part name.

    Performance fix: the key list was rebuilt via ``list(BODY_PARTS.keys())``
    on every iteration (accidental O(n^2)); it is now built once. As before,
    an IndexError is raised if ``pts3d`` has more rows than BODY_PARTS has
    entries.

    Args:
        pts3d: array of shape (N, 3).

    Returns:
        dict mapping the i-th BODY_PARTS key to ``pts3d[i]``.
    """
    names = list(BODY_PARTS.keys())
    return {names[i]: pts3d[i] for i in range(pts3d.shape[0])}


def process_3dkpt_depth(kps_2d_list, disp):
    """Densify disparity around the keypoints, convert it to depth, and lift
    the 2-D keypoints to EMA-filtered 3-D world points.

    Returns:
        (kps_2d_list, pts_3d_dict, pts_3d_list, disp) — the (filled) disparity
        map is returned so callers can reuse it.
    """
    disp = fill_disp_near_keypoints(disp, kps_2d_list)
    depth = stereo_rectify.disparity_to_depth(disp, stereo_rectify.fx, stereo_rectify.baseline)
    world_points = EMAFilter_ins(depth_points_2_world_points_pixel(kps_2d_list, depth))
    return kps_2d_list, pts3d_to_dict(world_points), world_points, disp


def terminate_all():
    """Terminate every child process of the current process and wait for each.

    Best-effort cleanup used on Ctrl+C; imports psutil lazily so the module
    loads even where psutil is absent until this is actually called.
    """
    import psutil
    for child in psutil.Process().children(recursive=True):
        print(f"正在终止子进程: {child.pid}")
        child.terminate()
        child.wait()


if __name__ == "__main__":
    # Entry point: constructing the pipeline starts the worker processes;
    # run() then spawns the input process and blocks in the output loop.
    pipeline = StereoPipeline()
    pipeline.run()