import sys
from pathlib import Path
PATH_ROOT = Path(__file__).parents[4]
sys.path.append(str(PATH_ROOT))
PATH_LOGS = PATH_ROOT / "manipulation/logs"
PATH_LOGS.mkdir(exist_ok=True)

import cv2
import numpy as np
from tqdm import tqdm
import pyrealsense2 as rs
from manipulation.scripts.detection.cameras import BaseCamera
from manipulation.scripts.detection.commans.utils import fill_holes_inpaint
from multiprocessing import Process, Queue

import subprocess
import time

class D435Camera(BaseCamera):
    """Intel RealSense D435 wrapper yielding color-aligned RGB/depth frames."""

    # Lens distortion coefficients per RGB resolution for the D435 on the
    # kuavo robot. TODO: calibrate — currently all zeros (no correction).
    dist_coefs = {
        '1920x1080': [0, 0, 0, 0],
        '1280x720': [0, 0, 0, 0],
        '640x480': [0, 0, 0, 0]
    }

    def __init__(self, rgb_width=640, rgb_height=480, fps=15, only_rgb=False, depth_width=640, depth_height=480, **kwargs):
        """Start the RealSense pipeline, read intrinsics, and warm the sensor up.

        Args:
            rgb_width, rgb_height: color stream resolution.
            fps: frame rate requested for both streams.
            only_rgb: when True the depth stream is not enabled and
                ``get_frame`` returns ``None`` for depth.
            depth_width, depth_height: depth stream resolution.
            **kwargs: forwarded to ``BaseCamera``.
        """
        super().__init__(rgb_width, rgb_height, fps, only_rgb, depth_width, depth_height, **kwargs)
        self.pipeline = rs.pipeline()
        self.config = rs.config()
        self.config.enable_stream(rs.stream.color, rgb_width, rgb_height, rs.format.bgr8, fps)
        if not only_rgb:
            self.config.enable_stream(rs.stream.depth, depth_width, depth_height, rs.format.z16, fps)
        print("Building D435 camera frame pipeline...", flush=True, end='')
        self.profile = self.pipeline.start(self.config)
        print("OK")
        # Align depth frames onto the color stream so pixel (u, v) refers to
        # the same scene point in both images.
        self.align = rs.align(rs.stream.color)
        color_stream = self.profile.get_stream(rs.stream.color)
        self.intr = color_stream.as_video_stream_profile().get_intrinsics()
        self.K_coefs = [self.intr.fx, self.intr.fy, self.intr.ppx, self.intr.ppy]
        # 3x3 pinhole intrinsic matrix built from (fx, fy, cx, cy).
        self.K = np.array([
            [self.K_coefs[0], 0, self.K_coefs[2]],
            [0, self.K_coefs[1], self.K_coefs[3]],
            [0, 0, 1],
        ], np.float32)
        try:
            self.dist = self.dist_coefs[f"{rgb_width}x{rgb_height}"]
        except KeyError:
            # Uncalibrated resolution: assume no distortion instead of
            # crashing at startup (all calibrated entries are zeros anyway).
            self.dist = [0, 0, 0, 0]
        # Discard roughly 3 seconds of frames so auto-exposure settles.
        print("Waiting for camera start...", end='', flush=True)
        for i in range(fps * 3):
            if i % fps == 0:
                print(f"{3 - i // fps}s...", end='', flush=True)
            self.get_frame()
        print("OK")

    def get_frame(self) -> tuple[np.ndarray, np.ndarray]:
        """Block until the next frame pair and return ``(rgb, depth)``.

        Returns:
            rgb: HxWx3 uint8 BGR image.
            depth: HxW uint16 depth image aligned to the color frame, or
                ``None`` when the camera was opened with ``only_rgb=True``.
        """
        # NOTE: the original wrapped this body in a `while True` loop that
        # always returned on the first pass; the dead loop is removed.
        frames = self.pipeline.wait_for_frames()
        if not self.only_rgb:
            aligned_frames = self.align.process(frames)
        else:
            aligned_frames = frames

        color_frame = aligned_frames.get_color_frame()
        rgb_img = np.asanyarray(color_frame.get_data())
        if self.only_rgb:
            depth_img = None
        else:
            depth_frame = aligned_frames.get_depth_frame()
            depth_img = np.asanyarray(depth_frame.get_data())
        return rgb_img, depth_img
    
    
def save_frame_data(queue: Queue):
    """Persist frame dicts from *queue* to disk forever.

    Runs inside a daemon process so that disk I/O does not steal time
    from the main process's video-streaming loop.
    """
    out_dir = PATH_LOGS / "files" / time.strftime("%Y%m%d_%H%M%S")
    out_dir.mkdir(exist_ok=True, parents=True)
    saved = 0
    while True:
        frame_dict = queue.get()
        saved += 1
        # Append a running counter so multiple frames saved within the
        # same second get distinct filenames.
        stem = time.strftime("%Y%m%d_%H%M%S_") + str(saved)
        np.savez_compressed(out_dir / f"{stem}.npz", **frame_dict)
        cv2.imwrite(str(out_dir / f"{stem}_rgb.jpg"), frame_dict['rgb'])

    
def debug_camera(rgb_width=640, rgb_height=480, depth_width=640, depth_height=480, fps=15, only_rgb=False, save_video=False, save_files=False, show=True):
    """Stream frames from a D435 for interactive debugging.

    Optionally records an XVID video, periodically hands raw frames to a
    daemon saver process (see ``save_frame_data``), and/or shows live
    RGB/depth windows. Press 'q' in an OpenCV window to quit (requires
    ``show=True``; otherwise the loop runs until interrupted).
    """
    if (save_files or save_video) and fps > 15:
        print("[WARNING] save_files or save_video will slow the video save, try to make fps lower")
    last_save_time = time.time()
    camera = D435Camera(rgb_width=rgb_width, rgb_height=rgb_height, depth_width=depth_width, depth_height=depth_height, fps=fps, only_rgb=only_rgb)
    fps_bar = tqdm()
    avg_fps, frame_count = 0, 0
    writer = None  # created only when video recording is requested
    if save_video:
        path_video = PATH_LOGS / time.strftime("%Y%m%d_%H%M%S.avi")
        writer = cv2.VideoWriter(str(path_video), cv2.VideoWriter_fourcc(*'XVID'), fps=fps, frameSize=(rgb_width, rgb_height))
        print("path_video location:", path_video)
    if save_files:
        save_queue = Queue(maxsize=1)
        save_process = Process(target=save_frame_data, args=(save_queue,), daemon=True)
        save_process.start()
    try:
        while True:
            start_time = time.time()
            rgb_img, depth_img = camera.get_frame()

            if depth_img is not None:
                # NOTE: hole filling (fill_holes_inpaint) takes ~0.2 s per
                # frame and would wreck the stream fps, so it stays disabled.
                depth_normalized = cv2.normalize(depth_img, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)

            # Hand at most one frame per second to the saver process.
            if save_files and time.time() - last_save_time > 1:
                last_save_time = time.time()
                data = {
                    'rgb': rgb_img,
                    'depth': depth_img,
                }
                save_queue.put(data)

            if writer is not None:
                writer.write(rgb_img)

            if show:
                cv2.imshow("debug rgb", rgb_img)
                if depth_img is not None:
                    cv2.imshow("debug depth", depth_normalized)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

            # Incremental running average of the achieved frame rate.
            # Guard against a 0 elapsed time on coarse clocks.
            time_used = max(time.time() - start_time, 1e-9)
            frame_count += 1
            avg_fps += ((1 / time_used) - avg_fps) / frame_count
            fps_bar.set_description(f"avg_fps={avg_fps:.2f}")
    finally:
        # BUG FIX: the original called writer.release() unconditionally,
        # raising NameError on quit whenever save_video was False. Cleanup
        # now also runs when the loop exits via an exception.
        if writer is not None:
            writer.release()
        fps_bar.close()
        if show:
            cv2.destroyAllWindows()
    
if __name__ == '__main__':
    # Default debug run: full-HD color + 720p depth at 30 fps, display only.
    debug_camera(
        rgb_width=1920,
        rgb_height=1080,
        depth_width=1280,
        depth_height=720,
        fps=30,
        only_rgb=False,
        save_video=False,
        save_files=False,
        show=True,
    )
    # Alternative invocations kept for reference:
    # debug_camera(rgb_width=1920, rgb_height=1080, depth_width=1280, depth_height=720, fps=15, only_rgb=False, save_video=True, save_files=True, show=True)
    # debug_camera(save_video=False, save_files=False)
