import json
import os
import socket
import struct
import sys
import threading
import time
import traceback
from collections import deque
from datetime import datetime
from queue import Queue

import cv2
import numpy as np
import pyrealsense2 as rs
import zmq


class RealSenseCamera(object):
    """Intel RealSense camera wrapper producing color (and optional depth) frames.

    Args:
        img_shape: [height, width] requested for the streams.
        fps: requested stream rate in frames per second.
        serial_number: device serial number; None selects the first device found.
        enable_depth: when True, additionally streams z16 depth aligned to color.
    """

    def __init__(self, img_shape, fps, serial_number=None, enable_depth=False) -> None:
        self.img_shape = img_shape
        self.fps = fps
        self.serial_number = serial_number
        self.enable_depth = enable_depth

        # Align every frameset to the color stream so depth pixels line up
        # with color pixels.
        self.align = rs.align(rs.stream.color)
        self.init_realsense()

    def init_realsense(self):
        """Start the pipeline and cache the depth scale and color intrinsics."""
        self.pipeline = rs.pipeline()
        config = rs.config()
        if self.serial_number is not None:
            config.enable_device(self.serial_number)

        # rs.config expects (width, height); img_shape is stored as (height, width).
        config.enable_stream(rs.stream.color, self.img_shape[1], self.img_shape[0], rs.format.bgr8, self.fps)
        if self.enable_depth:
            config.enable_stream(rs.stream.depth, self.img_shape[1], self.img_shape[0], rs.format.z16, self.fps)

        profile = self.pipeline.start(config)
        self._device = profile.get_device()
        if self._device is None:
            print('[Image Server] pipe_profile.get_device() is None .')
        if self.enable_depth:
            assert self._device is not None
            depth_sensor = self._device.first_depth_sensor()
            # Scale factor converting raw z16 depth units to meters.
            self.g_depth_scale = depth_sensor.get_depth_scale()

        self.intrinsics = profile.get_stream(rs.stream.color).as_video_stream_profile().get_intrinsics()

    def get_frame(self):
        """Return a ``(color_image, depth_image)`` pair.

        ``depth_image`` is None when depth streaming is disabled or the depth
        frame is unavailable.  Both are None when the color frame could not be
        read.  Bug fix: the previous version returned a bare ``None`` on
        failure, which crashed callers that unpack the 2-tuple.
        """
        frames = self.pipeline.wait_for_frames()
        aligned_frames = self.align.process(frames)
        color_frame = aligned_frames.get_color_frame()
        if not color_frame:
            return None, None

        color_image = np.asanyarray(color_frame.get_data())
        depth_image = None
        if self.enable_depth:
            depth_frame = aligned_frames.get_depth_frame()
            if depth_frame:
                depth_image = np.asanyarray(depth_frame.get_data())
        return color_image, depth_image

    def release(self):
        """Stop the streaming pipeline."""
        self.pipeline.stop()


class OpenCVCamera:
    """USB camera wrapper using OpenCV's V4L2 backend.

    Args:
        device_id: camera index or '/dev/video*' path.
        img_shape: [height, width] requested for 'wrist' cameras.
        fps: requested capture rate.
        type: 'head' (fixed 1280x720 capture) or 'wrist' (uses img_shape).
    """

    def __init__(self, device_id, img_shape, fps, type='head'):
        self.id = device_id
        self.type = type
        self.fps = fps
        self.img_shape = img_shape

        self.cap = cv2.VideoCapture(self.id, cv2.CAP_V4L2)
        # MJPG keeps USB bandwidth low enough for high-resolution capture.
        self.cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter.fourcc(*'MJPG'))
        # Head cameras always capture at 1280x720; wrist cameras use img_shape.
        if self.type == 'head':
            height, width = 720, 1280
        elif self.type == 'wrist':
            height, width = self.img_shape[0], self.img_shape[1]
        else:
            height = width = None
        if height is not None:
            self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
            self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        self.cap.set(cv2.CAP_PROP_FPS, self.fps)

        # Verify the device actually delivers frames before it is used.
        if not self._can_read_frame():
            print(f"[Image Server] Camera {self.id} Error: Failed to initialize the camera or read frames. Exiting...")
            self.release()

    def _can_read_frame(self):
        """Return True if a frame can currently be grabbed from the device."""
        ok, _ = self.cap.read()
        return ok

    def release(self):
        """Free the underlying capture device."""
        self.cap.release()

    def get_frame(self):
        """Return the latest BGR frame, or None when the read fails."""
        ok, frame = self.cap.read()
        return frame if ok else None


class ImageServer:
    """Publish concatenated JPEG camera frames over a ZeroMQ PUB socket."""

    def __init__(self, config, port = 5555, Unit_Test = False):
        """
        config example1:
        {
            'fps': 30,                                                        # frames per second
            'head_camera_type': 'opencv',                                     # opencv or realsense
            'head_camera_image_shape': [480, 1280],                           # head camera resolution [height, width]
            'head_camera_id_numbers': [0],                                    # '/dev/video0' (opencv)
            'wrist_camera_type': 'realsense',
            'wrist_camera_image_shape': [480, 640],                           # wrist camera resolution [height, width]
            'wrist_camera_id_numbers': ["218622271789", "241222076627"],      # realsense serial numbers
        }

        config example2:
        {
            'fps': 30,                                                        # frames per second
            'head_camera_type': 'realsense',                                  # opencv or realsense
            'head_camera_image_shape': [480, 640],                            # head camera resolution [height, width]
            'head_camera_id_numbers': ["218622271739"],                       # realsense serial number
            'wrist_camera_type': 'opencv',
            'wrist_camera_image_shape': [480, 640],                           # wrist camera resolution [height, width]
            'wrist_camera_id_numbers': [0, 1],                                # '/dev/video0' and '/dev/video1' (opencv)
        }

        If you are not using the wrist (or head) cameras, simply omit the
        corresponding '*_camera_*' keys from the config.
        """
        print(config)
        self.fps = config.get('fps', 30)

        # Head camera configuration (all keys optional).
        self.head_camera_type = config.get('head_camera_type', None)
        self.head_image_shape = config.get('head_camera_image_shape', [480, 640])      # (height, width)
        self.head_camera_id_numbers = config.get('head_camera_id_numbers', None)

        # Wrist camera configuration (all keys optional).
        self.wrist_camera_type = config.get('wrist_camera_type', None)
        self.wrist_image_shape = config.get('wrist_camera_image_shape', [480, 640])    # (height, width)
        self.wrist_camera_id_numbers = config.get('wrist_camera_id_numbers', None)

        self.port = port
        self.Unit_Test = Unit_Test

        # Open the configured cameras (empty list when not configured).
        self.head_cameras = self._open_cameras(
            self.head_camera_type, self.head_camera_id_numbers, self.head_image_shape, 'head')
        if not self.head_cameras:
            print("[Image Server] No head cameras configured.")
        self.wrist_cameras = self._open_cameras(
            self.wrist_camera_type, self.wrist_camera_id_numbers, self.wrist_image_shape, 'wrist')

        # Set up the ZeroMQ PUB socket.
        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.PUB)
        self.socket.bind(f"tcp://*:{self.port}")
        # 4 MB send buffer.
        self.socket.setsockopt(zmq.SNDBUF, 4 * 1024 * 1024)
        # High-water mark of 1000 queued messages.
        self.socket.setsockopt(zmq.SNDHWM, 1000)
        # Disable Nagle's algorithm so frames are sent without batching delay.
        if hasattr(zmq, 'TCP_NODELAY'):
            self.socket.setsockopt(zmq.TCP_NODELAY, 1)
        # Do not queue messages for connections that are not yet complete.
        if hasattr(zmq, 'IMMEDIATE'):
            self.socket.setsockopt(zmq.IMMEDIATE, 1)
        # Drop unsent messages immediately when the socket is closed.
        self.socket.setsockopt(zmq.LINGER, 0)
        print(f"[Image Server] Image server has started, waiting for client connections... on port {self.port}")

        if self.Unit_Test:
            self._init_performance_metrics()

        for label, cams in (('Head', self.head_cameras), ('Wrist', self.wrist_cameras)):
            for cam in cams:
                if isinstance(cam, OpenCVCamera):
                    print(f"[Image Server] {label} camera {cam.id} resolution: {cam.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)} x {cam.cap.get(cv2.CAP_PROP_FRAME_WIDTH)}")
                elif isinstance(cam, RealSenseCamera):
                    print(f"[Image Server] {label} camera {cam.serial_number} resolution: {cam.img_shape[0]} x {cam.img_shape[1]}")
                else:
                    print(f"[Image Server] Unknown camera type in {label.lower()}_cameras.")

    def _open_cameras(self, camera_type, id_numbers, img_shape, position):
        """Instantiate one camera per id; returns [] when not configured.

        position is 'head' or 'wrist' and selects the OpenCV capture profile.
        """
        cameras = []
        if not camera_type or not id_numbers:
            return cameras
        if camera_type == 'opencv':
            for device_id in id_numbers:
                cameras.append(OpenCVCamera(device_id=device_id, img_shape=img_shape, fps=self.fps, type=position))
        elif camera_type == 'realsense':
            for serial_number in id_numbers:
                cameras.append(RealSenseCamera(img_shape=img_shape, fps=self.fps, serial_number=serial_number))
        else:
            print(f"[Image Server] Unsupported {position}_camera_type: {camera_type}")
        return cameras

    def _init_performance_metrics(self):
        """Reset counters used for FPS reporting (Unit_Test mode only)."""
        self.frame_count = 0           # total frames sent
        self.time_window = 1.0         # sliding-window length for FPS (seconds)
        self.frame_times = deque()     # send timestamps inside the window
        self.start_time = time.time()  # streaming start time

    def _update_performance_metrics(self, current_time):
        """Record one sent frame and drop timestamps older than the window."""
        self.frame_times.append(current_time)
        while self.frame_times and self.frame_times[0] < current_time - self.time_window:
            self.frame_times.popleft()
        self.frame_count += 1

    def _print_performance_metrics(self, current_time):
        """Print FPS statistics every 30 frames."""
        if self.frame_count % 30 == 0:
            elapsed_time = current_time - self.start_time
            real_time_fps = len(self.frame_times) / self.time_window
            print(f"[Image Server] Real-time FPS: {real_time_fps:.2f}, Total frames sent: {self.frame_count}, Elapsed time: {elapsed_time:.2f} sec")

    def _close(self):
        """Release all cameras and tear down the ZeroMQ socket and context."""
        for cam in self.head_cameras:
            cam.release()
        for cam in self.wrist_cameras:
            cam.release()
        self.socket.close()
        self.context.term()
        print("[Image Server] The server has been closed.")

    def _grab_color_frames(self, cameras, camera_type, label, resize_shape=None):
        """Read one color frame from each camera.

        Returns a list of BGR images, resized to ``resize_shape``
        ([height, width]) when given.  The list is shorter than ``cameras``
        when any read fails; callers treat that as a fatal error.
        """
        frames = []
        for cam in cameras:
            if camera_type == 'realsense':
                color_image, _ = cam.get_frame()
            else:
                color_image = cam.get_frame()
            if color_image is None:
                print(f"[Image Server] {label} camera frame read is error.")
                break
            if resize_shape is not None:
                color_image = cv2.resize(color_image, (resize_shape[1], resize_shape[0]))
            frames.append(color_image)
        return frames

    def send_process(self):
        """Main loop: read, concatenate, JPEG-encode and publish frames."""
        frame_counter = 0
        skip_frames = 1  # send every (skip_frames + 1)-th frame; tune for network load

        try:
            while True:
                frame_counter += 1
                if frame_counter % (skip_frames + 1) != 0:
                    # Skip this frame, but still drain every camera so the
                    # driver buffers do not accumulate stale images.
                    for cam in self.head_cameras + self.wrist_cameras:
                        cam.get_frame()
                    time.sleep(1.0 / (self.fps * 2))  # brief sleep to reduce CPU usage
                    continue

                # Head cameras: resized to the configured head shape, then tiled.
                if self.head_cameras:
                    head_frames = self._grab_color_frames(
                        self.head_cameras, self.head_camera_type, 'Head',
                        resize_shape=self.head_image_shape)
                    if len(head_frames) != len(self.head_cameras):
                        break  # a camera failed; shut the server down
                    head_color = cv2.hconcat(head_frames)
                else:
                    head_color = None

                # Wrist cameras: sent at native resolution, tiled.
                if self.wrist_cameras:
                    wrist_frames = self._grab_color_frames(
                        self.wrist_cameras, self.wrist_camera_type, 'Wrist')
                    if len(wrist_frames) != len(self.wrist_cameras):
                        break
                    wrist_color = cv2.hconcat(wrist_frames)
                else:
                    wrist_color = None

                # Compose the final image from whichever streams are available.
                if head_color is not None and wrist_color is not None:
                    full_color = cv2.hconcat([head_color, wrist_color])
                elif head_color is not None:
                    full_color = head_color
                elif wrist_color is not None:
                    full_color = wrist_color
                else:
                    print("[Image Server] No camera frames available.")
                    break

                ret, buffer = cv2.imencode('.jpg', full_color)
                if not ret:
                    print("[Image Server] Frame imencode is failed.")
                    continue

                jpg_bytes = buffer.tobytes()

                if self.Unit_Test:
                    # Prepend timestamp + frame id so a test client can measure latency.
                    timestamp = time.time()
                    frame_id = self.frame_count
                    header = struct.pack('dI', timestamp, frame_id)  # 8-byte double, 4-byte unsigned int
                    message = header + jpg_bytes
                else:
                    message = jpg_bytes

                self.socket.send(message)

                if self.Unit_Test:
                    current_time = time.time()
                    self._update_performance_metrics(current_time)
                    self._print_performance_metrics(current_time)

        except KeyboardInterrupt:
            print("[Image Server] Interrupted by user.")
        finally:
            self._close()


if __name__ == "__main__":
    # Example setup: one OpenCV head camera plus two OpenCV wrist cameras.
    config = dict(
        fps=30,
        head_camera_type='opencv',
        head_camera_image_shape=[480, 640],   # head camera resolution [height, width]
        head_camera_id_numbers=[4],
        wrist_camera_type='opencv',
        wrist_camera_image_shape=[480, 640],  # wrist camera resolution [height, width]
        wrist_camera_id_numbers=[6, 8],
    )

    # Alternative: wrist-only setup with a single camera.
    # config = dict(
    #     fps=30,
    #     wrist_camera_type='opencv',
    #     wrist_camera_image_shape=[480, 640],
    #     wrist_camera_id_numbers=[2],
    # )

    server = ImageServer(config, Unit_Test=False)
    server.send_process()