import numpy as np
import cv2
import time
import threading
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from queue import Queue, Empty
from stereo.utils.SGBM import SGBM
from stereo.ppnet.ppnet_rknn_infer_32 import PPnet_Pose_Track_ins, BODY_PARTS
from stereo.utils.stereoRectify_process import stereo_rectify
from stereo.yolov6.yolov6 import model_inference
from stereo.utils.Filter.OneEuroFilter_python import OneEuroFilter
from multiprocessing import Process, Queue as ProcessQueue, Event as ProcessEvent, shared_memory
from multiprocessing.queues import Empty as MpQueueEmpty
import traceback
from stereo.utils.Filter.Filter_disp import fill_disp_near_keypoints
# Module-wide smoothing filter applied to the fused 3D keypoints.
# te=0.01 is presumably the sample period in seconds — TODO confirm against OneEuroFilter's signature.
OneEuroFilter_ins = OneEuroFilter(te=0.01)
import psutil
# p = psutil.Process(os.getpid())
# p.cpu_affinity([4])  # pin the current main process to one CPU (disabled)
def set_process_affinity(core_ids):
    """Bind the calling process to the given CPU core ids (Linux only).

    Accepts any iterable of core indices.  All failures (unsupported
    platform, invalid core ids) are caught and reported on stdout;
    this function never raises.
    """
    try:
        target_cores = set(core_ids)
        pid = os.getpid()
        os.sched_setaffinity(pid, target_cores)
        print(f"[进程 {pid}] 成功绑定到核心: {core_ids}")
    except Exception as e:
        print(f"[进程 {os.getpid()}] 设置亲和性失败: {e}")
# Cap OpenCV's internal thread pool so it does not oversubscribe the CPU
# alongside the depth process and pose thread spawned below.
cv2.setNumThreads(4)
# cv2.setUseOptimized(True)
# set_process_affinity({6, 7}) 
class ParallelStereoProcessor:
    """Parallel stereo pipeline: SGBM depth (child process) + 2D pose (thread).

    The depth worker runs in a separate *process* and publishes each
    disparity map through a shared-memory float32 buffer; the pose worker
    runs in a thread.  process_frame() feeds one rectified stereo pair to
    both workers, blocks until both are done, then back-projects the 2D
    keypoints to metric 3D using the rectified intrinsics.
    """

    def __init__(self):
        # Rectified intrinsics and baseline from the calibration module.
        self.fx, self.fy, self.cx, self.cy = (
            stereo_rectify.fx,
            stereo_rectify.fy,
            stereo_rectify.cx,
            stereo_rectify.cy,
        )
        self.baseline = stereo_rectify.baseline

        # Processing components.
        # self.model_depth = SGBM().create_bm_matcher()
        self.model_depth = SGBM(use_blur=True)
        self.ppnet = PPnet_Pose_Track_ins

        # Synchronisation: depth_event is a multiprocessing.Event set by the
        # child process; pose_event is a threading.Event set by the thread.
        self.depth_event = ProcessEvent()
        self.pose_event = threading.Event()
        self.depth_result = None
        self.pose_result = None
        self.running = True

        # Task queues: a multiprocessing queue feeds the depth process,
        # a thread-safe queue feeds the pose thread.
        self.depth_task_queue = ProcessQueue()
        self.pose_task_queue = Queue()

        # Shared-memory buffer through which the depth process hands the
        # disparity map back to this process (480x640 float32).
        self.disp_shape = (480, 640)
        self.disp_dtype = np.float32
        self.disp_shm = shared_memory.SharedMemory(
            create=True,
            size=int(np.prod(self.disp_shape)) * np.dtype(self.disp_dtype).itemsize,
        )
        self.shared_disp_array = np.ndarray(
            self.disp_shape, dtype=self.disp_dtype, buffer=self.disp_shm.buf
        )

        # Workers.
        # self.depth_thread = threading.Thread(target=self.depth_estimation_worker)
        self.pose_thread = threading.Thread(target=self.pose_estimation_worker)
        self.depth_process = Process(target=self.depth_estimation_worker)
        self.depth_process.start()
        # Pin the depth process to the big cores (assumed to be CPU 4-7).
        try:
            os.sched_setaffinity(self.depth_process.pid, {4, 5, 6, 7})
            print(f"[进程 {self.depth_process.pid}] 成功绑定到核心: {4,5,6,7}")
        except AttributeError:
            print("sched_setaffinity not supported on this platform")
        self.pose_thread.start()

        # Counter driving the "run the detector on frame 0" logic in the
        # pose worker.
        self.frame_count = 0

        # Lazily-opened cv2.VideoCapture used by the demo frame readers.
        self.cap = None

    def min_depth(self, depth: np.ndarray, x_point: int, y_point: int, val_range=5):
        """Return the smallest positive value of `depth` inside a window of
        half-size `val_range` centred on (x_point, y_point), clamped to the
        image bounds.

        Returns 0 when the window contains no valid (>0) depth.
        Generalised: window bounds come from depth.shape instead of the
        previously hard-coded 640x480, and coordinates are cast to int so
        callers may pass float pixel positions.
        """
        h, w = depth.shape[:2]
        x_point = max(int(x_point), 0)
        y_point = max(int(y_point), 0)
        x_max = min(x_point + val_range, w - 1)
        x_min = max(x_point - val_range, 0)
        y_max = min(y_point + val_range, h - 1)
        y_min = max(y_point - val_range, 0)
        window = depth[y_min:y_max, x_min:x_max]
        valid = window > 0
        if np.any(valid):
            return np.min(window[valid])
        return 0  # no valid depth anywhere in the window

    def depth_points_2_world_points_pixel(self, pts_2d, depth):
        """Back-project 2D keypoints into metric 3D camera coordinates.

        pts_2d: sequence of (x, y) pixel coordinates; the hip is assumed to
                be at index 19 and is used as a plausibility anchor.
        depth:  depth map (millimetres, same resolution as the keypoints).
        Returns an (N, 3) float32 array in metres with axes remapped to
        (Z, -X, -Y).
        """
        pts_length = len(pts_2d)
        depth_points_with_depth = []
        for i in range(pts_length):
            x, y = pts_2d[i]
            depth_value = depth[int(y), int(x)]
            if depth_value > 0:
                depth_points_with_depth.append([y, x, depth_value])
            else:
                # Invalid depth at the exact pixel: fall back to the minimum
                # valid depth in a small neighbourhood.
                min_depth_value = self.min_depth(depth, int(x), int(y))
                depth_points_with_depth.append([y, x, min_depth_value])
        depth_points_with_depth = np.array(depth_points_with_depth)

        hip_index = 19
        hip_depth = depth_points_with_depth[hip_index][2]

        for i, point in enumerate(depth_points_with_depth):
            if i == hip_index:
                continue  # skip comparing the anchor with itself
            current_depth = point[2]
            if abs(current_depth - hip_depth) <= 500:
                continue  # within tolerance (mm), keep as-is
            # Outlier: re-estimate from a wider window and keep the
            # re-estimate only if it is itself consistent with the hip.
            # BUGFIX: the original tested `current_depth` here, which is
            # always out of tolerance at this point, so the re-estimated
            # depth was silently never used.
            new_depth = self.min_depth(depth, *pts_2d[i], val_range=10)
            if new_depth > 0 and abs(new_depth - hip_depth) <= 500:
                depth_points_with_depth[i][2] = new_depth
            else:
                depth_points_with_depth[i][2] = hip_depth

        # If the head point (17) disagrees strongly with point 19, re-estimate.
        # NOTE(review): the original comment said "compare with point 0" but
        # the code compares against 19 and falls back to point 0 — confirm
        # which anchor is actually intended.
        if abs(depth_points_with_depth[17][2] - depth_points_with_depth[19][2]) > 500:
            depth_points_with_depth[17][2] = self.min_depth(depth, pts_2d[17][0], pts_2d[17][1], 10)
            if depth_points_with_depth[17][2] == 0:
                depth_points_with_depth[17][2] = depth_points_with_depth[0][2]
        # Same treatment for the right ear (4), anchored on point 3.
        if abs(depth_points_with_depth[4][2] - depth_points_with_depth[3][2]) > 500:
            depth_points_with_depth[4][2] = self.min_depth(depth, pts_2d[4][0], pts_2d[4][1], 10)
            if depth_points_with_depth[4][2] == 0:
                depth_points_with_depth[4][2] = depth_points_with_depth[3][2]

        # Pinhole back-projection (depth in mm, output in metres).
        keypoints3d = np.zeros((pts_length, 3), dtype=np.float32)
        for i, (y, x, depth_value) in enumerate(depth_points_with_depth):
            X = (x - self.cx) * depth_value / self.fx
            Y = (y - self.cy) * depth_value / self.fy
            Z = depth_value
            keypoints3d[i] = np.array([Z, -X, -Y]) * 0.001  # mm -> m
        return keypoints3d

    def pts3d_to_dict(self, pts3d):
        """Map each row of pts3d to its BODY_PARTS joint name.

        The key list is built once instead of per keypoint (the original
        rebuilt list(BODY_PARTS.keys()) inside the loop — O(n^2)).
        """
        names = list(BODY_PARTS.keys())
        return {names[i]: pts3d[i] for i in range(pts3d.shape[0])}

    def depth_estimation_worker(self):
        """Depth worker; runs as the body of a separate *process*.

        NOTE: in the child, `self.running` is a pickled copy, so the parent
        cannot stop this loop by flipping the flag — stop() terminates the
        process instead.  Results are published through the shared-memory
        disparity buffer, then depth_event is set.
        """
        while self.running:
            try:
                left_image, right_image = self.depth_task_queue.get(timeout=0.01)
                disp = self.model_depth.estimate_depth(left_image, right_image)
                np.copyto(self.shared_disp_array, disp)
                self.depth_event.set()
            except MpQueueEmpty:
                continue  # no task yet, poll again
            except Exception as e:
                traceback.print_exc()
                print(f"[Process] Depth estimation error: {e}")
                # Still signal, so process_frame()'s wait never deadlocks.
                self.depth_event.set()

    def depth_estimation_worker2(self):
        """Legacy threaded depth worker — unused, superseded by the process
        version above.

        NOTE(review): depth_task_queue is now a multiprocessing queue, which
        has no task_done(); this worker would raise AttributeError if
        revived.  Kept only for reference.
        """
        while self.running:
            try:
                left_image, right_image = self.depth_task_queue.get(timeout=0.1)
                self.depth_task_queue.task_done()
                disp = self.model_depth.estimate_depth(left_image, right_image)
                self.disp = disp
                self.depth_event.set()
            except Empty:
                continue
            except Exception as e:
                print(f"Depth estimation error: {e}")
                self.depth_event.set()

    def pose_estimation_worker(self):
        """Pose worker thread: pops a left image, runs PPnet (with a YOLO
        detection pass on frame 0), stores (kps_dict_2d, kps_2d_list) in
        self.pose_result and sets pose_event."""
        while self.running:
            try:
                left_image = self.pose_task_queue.get(timeout=0.1)
                self.pose_task_queue.task_done()
                if self.frame_count == 0:
                    left_image = cv2.resize(left_image, (640, 480))
                    boxes = model_inference(left_image)
                    # NOTE(review): on both branches below `boxes` ends up
                    # None when passed to ppnet, so the detection result is
                    # never actually forwarded — confirm whether the detected
                    # boxes were meant to be used.
                    if boxes is None:
                        self.frame_count += 1
                    else:
                        boxes = None
                        self.frame_count = 0
                    kps_dict_2d, kps_2d_list = self.ppnet(left_image, boxes)
                    self.frame_count += 1
                else:
                    kps_dict_2d, kps_2d_list = self.ppnet(left_image)
                self.pose_result = (kps_dict_2d, kps_2d_list)
                self.pose_event.set()
            except Empty:
                continue  # no frame queued; skips the trailing set() below
            except Exception as e:
                traceback.print_exc()
                print(f"Pose estimation error: {e}")
            # Reached on success (redundant second set) and on error — the
            # error path must set the event so process_frame() never hangs.
            self.pose_event.set()

    def process_frame(self, left_image, right_image):
        """Dispatch one rectified stereo pair to both workers, wait for both,
        and fuse the results.

        Returns (kps_dict_2d, kps_2d_list, pts_3d_dict, pts_3d_list,
        left_image, disp) on success, or None (implicitly) when the pose
        worker produced nothing.
        """
        # Re-arm the events and clear stale results.
        self.depth_event.clear()
        self.pose_event.clear()
        self.depth_result = None
        self.pose_result = None
        # Hand the frame to both workers.
        self.depth_task_queue.put((left_image, right_image))
        self.pose_task_queue.put(left_image)

        # Block until both workers signal completion.  Event waits replace
        # the original 1 ms polling loop — same semantics, no busy CPU.
        self.pose_event.wait()
        self.depth_event.wait()

        try:
            # Copy the disparity out of shared memory so the child process
            # may safely overwrite the buffer with the next frame.
            self.disp = np.array(self.shared_disp_array, copy=True)
            if self.disp is not None and self.pose_result is not None:
                kps_dict_2d, kps_2d_list = self.pose_result
                # disparity -> depth (mm) using the rectified intrinsics
                depth = stereo_rectify.disparity_to_depth(self.disp, stereo_rectify.fx, stereo_rectify.baseline)
                pts_3d_list = self.depth_points_2_world_points_pixel(kps_2d_list, depth)
                # Temporal smoothing of the 3D keypoints.
                pts_3d_list = OneEuroFilter_ins(pts_3d_list, 0.01)
                pts_3d_dict = self.pts3d_to_dict(pts_3d_list)
                return kps_dict_2d, kps_2d_list, pts_3d_dict, pts_3d_list, left_image, self.disp
        except AssertionError:
            traceback.print_exc()
            return None

    def rectified_image(self, left_image):
        """Rectify a single left image and resize it to 640x480.

        BUGFIX: the maps were read from self.map1x/self.map1y, which are
        never set on this class (guaranteed AttributeError); use the
        calibration module's maps, as the other helpers already do.
        """
        rectified_left = stereo_rectify.apply_rectification_single(
            left_image, stereo_rectify.map1x, stereo_rectify.map1y
        )
        return cv2.resize(rectified_left, (640, 480))

    def stop(self):
        """Shut down the workers and release shared resources (idempotent).

        BUGFIX: the original only flipped self.running, which never reaches
        the depth *process* (it holds a pickled copy of self), and it leaked
        the shared-memory segment and any open capture.
        """
        self.running = False
        # The child process cannot see self.running — terminate it.
        if self.depth_process is not None and self.depth_process.is_alive():
            self.depth_process.terminate()
            self.depth_process.join(timeout=1.0)
        # The pose thread exits on its next 0.1 s queue timeout.
        if self.pose_thread is not None and self.pose_thread.is_alive():
            self.pose_thread.join(timeout=1.0)
        if self.cap is not None:
            self.cap.release()
            self.cap = None
        # Release and remove the shared-memory disparity buffer.
        try:
            self.disp_shm.close()
            self.disp_shm.unlink()
        except FileNotFoundError:
            pass  # already unlinked by a previous stop()

    def get_video_imgs(self):
        """Read one frame of the pre-rectified demo video and split it into
        the (top) left / (bottom) right views, resized to 640x480.

        Returns (None, None) at end of stream or on read failure.
        """
        if self.cap is None:
            video_path = '/home/orangepi/sjh/SGBM_port_sjh/data/rectified_video_ai_425_10_mjpg.avi'
            self.cap = cv2.VideoCapture(video_path)
        ret, frame = self.cap.read()
        if not ret:
            return None, None
        half = frame.shape[0] // 2
        rectified_left = cv2.resize(frame[:half, :], (640, 480))
        rectified_right = cv2.resize(frame[half:, :], (640, 480))
        return rectified_left, rectified_right

    def get_no_rectified_video_imgs(self):
        """Read one frame of the raw demo video, split top/bottom into
        left/right, rectify the pair, and resize to 640x480.

        Returns (None, None) at end of stream or on read failure.
        """
        if self.cap is None:
            video_path = "data/a0_merged5.avi"
            self.cap = cv2.VideoCapture(video_path)
        ret, frame = self.cap.read()
        if not ret:
            return None, None
        half = frame.shape[0] // 2
        rectified_left, rectified_right = stereo_rectify.apply_rectification(
            frame[:half, :], frame[half:, :],
            stereo_rectify.map1x, stereo_rectify.map1y,
            stereo_rectify.map2x, stereo_rectify.map2y)
        rectified_left = cv2.resize(rectified_left, (640, 480))
        rectified_right = cv2.resize(rectified_right, (640, 480))
        return rectified_left, rectified_right

    def get_image_imgs(self):
        """Load the static demo stereo pair from disk, resized to 640x480.

        NOTE(review): assumes the images on disk are already rectified
        (the rectification call is commented out in the original).
        """
        rectified_left = cv2.imread('./data/im0.png')
        rectified_right = cv2.imread('./data/im1.png')
        rectified_left = cv2.resize(rectified_left, (640, 480))
        rectified_right = cv2.resize(rectified_right, (640, 480))
        return rectified_left, rectified_right

sender_flag = True  # when True, stream results out over the TCP sender

def main():
    """Demo loop: read the static stereo pair, run the parallel pipeline,
    and optionally stream results over TCP.  Ctrl-C exits cleanly.

    BUGFIX: cleanup originally ran only on KeyboardInterrupt, so any other
    exception leaked the depth child process and the shared-memory segment;
    cleanup now runs in a finally block.
    """
    processor = ParallelStereoProcessor()
    sender = None
    if sender_flag:
        from stereo.utils.stereo_tcp_server import StereoDataSender
        sender = StereoDataSender()

    try:
        while True:
            left_image, right_image = processor.get_image_imgs()
            results = processor.process_frame(left_image, right_image)
            if results is not None:
                kps_dict_2d, kps_2d_list, pts_3d_dict, pts_3d_list, left_image, right_image = results
                if sender is not None:
                    sender.send_data(kps_dict_2d, kps_2d_list, pts_3d_dict, pts_3d_list, left_image, right_image)
    except KeyboardInterrupt:
        print("Stopping processor...")
    finally:
        # Always release the worker process/thread and shared memory.
        processor.stop()
        if sender is not None:
            sender.stop()

if __name__ == "__main__":
    main()