#!/usr/bin/env python3
"""
VideoStreamProcessor ROS2 Node

This node handles video stream reception from ESP32 cameras, processes the streams,
and publishes image data to ROS2 topics for vision analysis.

Features:
- RTSP and HTTP stream reception
- Image format conversion and preprocessing
- ROS2 topic publishing for camera data
- Frame buffering and synchronization
"""

import rclpy
from rclpy.node import Node
from rclpy.qos import QoSProfile, ReliabilityPolicy, HistoryPolicy, DurabilityPolicy
from sensor_msgs.msg import Image, CameraInfo
from std_msgs.msg import Header
from echokitbot_microros_voice.srv import GetSynchronizedFrame
from cv_bridge import CvBridge
import cv2
import numpy as np
import threading
import time
from typing import Optional, Tuple, Dict, Any
import asyncio
import aiohttp
from urllib.parse import urlparse
import logging


class VideoStreamProcessor(Node):
    """
    ROS2 node for processing video streams from ESP32 cameras.

    Receives an RTSP or HTTP (MJPEG) stream in a background thread, resizes
    frames to a target resolution, keeps a small timestamped frame buffer for
    synchronization, and publishes `sensor_msgs/Image` plus `CameraInfo`.
    """

    def __init__(self):
        super().__init__('video_stream_processor')

        # Initialize logging
        self.logger = self.get_logger()
        self.logger.info("Initializing VideoStreamProcessor node")

        # Bridge for OpenCV <-> ROS Image conversion
        self.cv_bridge = CvBridge()

        # Stream configuration: declare parameters, then read them back
        self._declare_parameters()
        self.load_parameters()

        # QoS profiles, publishers, and the synchronized-frame service
        self.setup_qos_profiles()
        self.setup_publishers()

        # Stream processing state, shared with the capture thread.
        # current_frame / frame_count / last_frame_time are guarded by frame_lock.
        self.current_frame: Optional[np.ndarray] = None
        self.frame_lock = threading.Lock()
        self.stream_active = False
        self.frame_count = 0
        self.last_frame_time = time.time()

        # Frame buffer and synchronization state, guarded by buffer_lock.
        self.frame_buffer: list = []
        self.buffer_lock = threading.Lock()
        self.frame_timestamps: list = []
        self.sync_enabled = True

        # Static camera intrinsics (rough estimate; no calibration data)
        self.camera_info = self.create_camera_info()

        # Start the background capture thread
        self.start_stream_processing()

        self.logger.info("VideoStreamProcessor node initialized successfully")

    def _declare_parameters(self):
        """Declare ROS2 parameters for configuration.

        Named with a leading underscore deliberately: rclpy's
        ``Node.declare_parameter()`` delegates to
        ``Node.declare_parameters(namespace, parameters)`` internally, so
        overriding ``declare_parameters`` with a zero-argument method makes
        every ``declare_parameter`` call below raise ``TypeError``.
        """
        # Stream source configuration
        self.declare_parameter('stream_url', 'rtsp://<CAMERA_IP>:8554/stream')
        self.declare_parameter('stream_type', 'rtsp')  # 'rtsp' or 'http'
        self.declare_parameter('http_stream_port', 8080)

        # Image processing parameters
        self.declare_parameter('target_width', 640)
        self.declare_parameter('target_height', 480)
        self.declare_parameter('jpeg_quality', 85)
        self.declare_parameter('frame_rate', 15.0)

        # Buffer and sync parameters
        self.declare_parameter('frame_buffer_size', 5)
        self.declare_parameter('max_frame_age', 1.0)  # seconds
        self.declare_parameter('enable_frame_sync', True)
        self.declare_parameter('sync_tolerance', 0.1)  # seconds

        # Topic names
        self.declare_parameter('image_raw_topic', '/camera/image_raw')
        self.declare_parameter('current_frame_topic', '/vision/current_frame')
        self.declare_parameter('camera_info_topic', '/camera/camera_info')

    def load_parameters(self):
        """Load parameters from the ROS2 parameter server into attributes."""
        self.stream_url = self.get_parameter('stream_url').get_parameter_value().string_value
        self.stream_type = self.get_parameter('stream_type').get_parameter_value().string_value
        self.http_stream_port = self.get_parameter('http_stream_port').get_parameter_value().integer_value

        self.target_width = self.get_parameter('target_width').get_parameter_value().integer_value
        self.target_height = self.get_parameter('target_height').get_parameter_value().integer_value
        self.jpeg_quality = self.get_parameter('jpeg_quality').get_parameter_value().integer_value
        self.frame_rate = self.get_parameter('frame_rate').get_parameter_value().double_value

        self.frame_buffer_size = self.get_parameter('frame_buffer_size').get_parameter_value().integer_value
        self.max_frame_age = self.get_parameter('max_frame_age').get_parameter_value().double_value
        self.enable_frame_sync = self.get_parameter('enable_frame_sync').get_parameter_value().bool_value
        self.sync_tolerance = self.get_parameter('sync_tolerance').get_parameter_value().double_value

        self.image_raw_topic = self.get_parameter('image_raw_topic').get_parameter_value().string_value
        self.current_frame_topic = self.get_parameter('current_frame_topic').get_parameter_value().string_value
        self.camera_info_topic = self.get_parameter('camera_info_topic').get_parameter_value().string_value

        self.logger.info(f"Loaded parameters: stream_url={self.stream_url}, type={self.stream_type}")

    def setup_qos_profiles(self):
        """Setup QoS profiles for different types of data."""
        # Best effort + depth 1: drop stale frames rather than queue them
        self.image_qos = QoSProfile(
            reliability=ReliabilityPolicy.BEST_EFFORT,
            history=HistoryPolicy.KEEP_LAST,
            depth=1,
            durability=DurabilityPolicy.VOLATILE
        )

        # Reliable + transient-local so late subscribers still get camera info
        self.info_qos = QoSProfile(
            reliability=ReliabilityPolicy.RELIABLE,
            history=HistoryPolicy.KEEP_LAST,
            depth=1,
            durability=DurabilityPolicy.TRANSIENT_LOCAL
        )

    def setup_publishers(self):
        """Initialize ROS2 publishers, the camera-info timer, and the sync service."""
        self.image_raw_publisher = self.create_publisher(
            Image,
            self.image_raw_topic,
            self.image_qos
        )

        self.current_frame_publisher = self.create_publisher(
            Image,
            self.current_frame_topic,
            self.image_qos
        )

        self.camera_info_publisher = self.create_publisher(
            CameraInfo,
            self.camera_info_topic,
            self.info_qos
        )

        # Timer for publishing camera info at 1 Hz
        self.camera_info_timer = self.create_timer(1.0, self.publish_camera_info)

        # Service for synchronized frame requests
        self.sync_frame_service = self.create_service(
            GetSynchronizedFrame,
            'get_synchronized_frame',
            self.handle_sync_frame_request
        )

    def create_camera_info(self) -> CameraInfo:
        """Create a CameraInfo message with rough, uncalibrated intrinsics."""
        camera_info = CameraInfo()
        camera_info.header.frame_id = "camera_link"
        camera_info.width = self.target_width
        camera_info.height = self.target_height

        # Basic camera matrix (no calibration data; focal length guessed
        # as the image width, principal point at the image center)
        fx = fy = self.target_width
        cx = self.target_width / 2.0
        cy = self.target_height / 2.0

        camera_info.k = [fx, 0.0, cx, 0.0, fy, cy, 0.0, 0.0, 1.0]
        camera_info.d = [0.0, 0.0, 0.0, 0.0, 0.0]  # No distortion
        camera_info.r = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
        camera_info.p = [fx, 0.0, cx, 0.0, 0.0, fy, cy, 0.0, 0.0, 0.0, 1.0, 0.0]

        return camera_info

    def start_stream_processing(self):
        """Start video stream processing in a daemon thread."""
        if self.stream_type.lower() == 'rtsp':
            self.stream_thread = threading.Thread(target=self.process_rtsp_stream, daemon=True)
        elif self.stream_type.lower() == 'http':
            self.stream_thread = threading.Thread(target=self.process_http_stream, daemon=True)
        else:
            self.logger.error(f"Unsupported stream type: {self.stream_type}")
            return

        self.stream_thread.start()
        self.logger.info(f"Started {self.stream_type.upper()} stream processing thread")

    def process_rtsp_stream(self):
        """Process an RTSP video stream until stopped (runs in the capture thread)."""
        self.logger.info(f"Connecting to RTSP stream: {self.stream_url}")

        cap = cv2.VideoCapture(self.stream_url)
        if not cap.isOpened():
            self.logger.error(f"Failed to open RTSP stream: {self.stream_url}")
            return

        # Configure capture properties (backends may ignore these)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.target_width)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.target_height)
        cap.set(cv2.CAP_PROP_FPS, self.frame_rate)
        cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)  # Minimize latency

        self.stream_active = True
        # Guard against a zero/negative frame_rate parameter (division by zero)
        frame_interval = 1.0 / self.frame_rate if self.frame_rate > 0 else 0.0

        try:
            while self.stream_active and rclpy.ok():
                ret, frame = cap.read()
                if not ret:
                    self.logger.warning("Failed to read frame from RTSP stream")
                    time.sleep(0.1)
                    continue

                # Process and publish frame
                self.process_frame(frame)

                # Control frame rate
                if frame_interval > 0.0:
                    time.sleep(frame_interval)

        except Exception as e:
            self.logger.error(f"Error in RTSP stream processing: {e}")
        finally:
            cap.release()
            self.stream_active = False
            self.logger.info("RTSP stream processing stopped")

    def process_http_stream(self):
        """Process an HTTP (MJPEG) video stream (runs in the capture thread)."""
        self.logger.info(f"Connecting to HTTP stream: {self.stream_url}")

        # This thread owns a private asyncio loop; close it so its
        # resources are released when streaming ends.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        try:
            loop.run_until_complete(self._process_http_stream_async())
        finally:
            loop.close()

    async def _process_http_stream_async(self):
        """Async HTTP stream processing: extract JPEG frames from an MJPEG body."""
        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(self.stream_url) as response:
                    if response.status != 200:
                        self.logger.error(f"HTTP stream error: {response.status}")
                        return

                    self.stream_active = True
                    buffer = b''

                    async for chunk in response.content.iter_chunked(1024):
                        if not self.stream_active or not rclpy.ok():
                            break

                        buffer += chunk

                        # Extract complete JPEG frames (SOI 0xFFD8 .. EOI 0xFFD9).
                        # Consumed and garbage bytes are always trimmed so the
                        # buffer cannot grow without bound when markers are
                        # missing or appear out of order.
                        while True:
                            start = buffer.find(b'\xff\xd8')
                            if start == -1:
                                # No frame start in the buffer: nothing usable
                                buffer = b''
                                break
                            # Search for the end marker only AFTER the start
                            # marker; an earlier stray EOI must not match.
                            end = buffer.find(b'\xff\xd9', start + 2)
                            if end == -1:
                                # Incomplete frame: keep it, drop leading junk
                                buffer = buffer[start:]
                                break

                            jpeg_data = buffer[start:end + 2]
                            buffer = buffer[end + 2:]

                            # Decode JPEG
                            frame = cv2.imdecode(
                                np.frombuffer(jpeg_data, dtype=np.uint8),
                                cv2.IMREAD_COLOR
                            )

                            if frame is not None:
                                self.process_frame(frame)

        except Exception as e:
            self.logger.error(f"Error in HTTP stream processing: {e}")
        finally:
            self.stream_active = False
            self.logger.info("HTTP stream processing stopped")

    def process_frame(self, frame: np.ndarray):
        """Process a single BGR frame: resize, buffer, and publish to ROS topics."""
        try:
            # Resize frame if needed (shape is (rows, cols, channels))
            if frame.shape[1] != self.target_width or frame.shape[0] != self.target_height:
                frame = cv2.resize(frame, (self.target_width, self.target_height))

            current_time = time.time()

            # Add to frame buffer with synchronization
            self.add_frame_to_buffer(frame, current_time)

            # Update current frame with thread safety
            with self.frame_lock:
                self.current_frame = frame.copy()
                self.frame_count += 1
                self.last_frame_time = current_time

            # Convert to ROS Image message
            ros_image = self.cv_bridge.cv2_to_imgmsg(frame, encoding='bgr8')
            ros_image.header = self.create_header()

            # Publish to both topics
            self.image_raw_publisher.publish(ros_image)
            self.current_frame_publisher.publish(ros_image)

            # Log frame processing stats periodically
            if self.frame_count % 100 == 0:
                fps = self.calculate_fps()
                self.logger.info(f"Processed {self.frame_count} frames, FPS: {fps:.2f}")

        except Exception as e:
            self.logger.error(f"Error processing frame: {e}")

    def add_frame_to_buffer(self, frame: np.ndarray, timestamp: float):
        """Add a frame to the sync buffer, evicting over-capacity and stale entries."""
        with self.buffer_lock:
            # Add frame and timestamp (copy: the caller may reuse the array)
            self.frame_buffer.append(frame.copy())
            self.frame_timestamps.append(timestamp)

            # Maintain buffer size
            while len(self.frame_buffer) > self.frame_buffer_size:
                self.frame_buffer.pop(0)
                self.frame_timestamps.pop(0)

            # Remove frames older than max_frame_age
            current_time = time.time()
            while (self.frame_timestamps and
                   current_time - self.frame_timestamps[0] > self.max_frame_age):
                self.frame_buffer.pop(0)
                self.frame_timestamps.pop(0)

    def get_synchronized_frame(self, target_timestamp: Optional[float] = None) -> Optional[np.ndarray]:
        """Return the buffered frame closest to target_timestamp.

        Falls back to the current frame when sync is disabled or no target is
        given; returns None when no frame is within sync_tolerance seconds.
        """
        if not self.enable_frame_sync or not target_timestamp:
            return self.get_current_frame()

        with self.buffer_lock:
            if not self.frame_buffer or not self.frame_timestamps:
                return None

            # Find closest frame to target timestamp
            best_idx = 0
            best_diff = abs(self.frame_timestamps[0] - target_timestamp)

            for i, ts in enumerate(self.frame_timestamps):
                diff = abs(ts - target_timestamp)
                if diff < best_diff:
                    best_diff = diff
                    best_idx = i

            # Check if within sync tolerance
            if best_diff <= self.sync_tolerance:
                return self.frame_buffer[best_idx].copy()

            return None

    def _fps_unlocked(self) -> float:
        """Compute FPS from buffered timestamps. Caller MUST hold buffer_lock."""
        if len(self.frame_timestamps) < 2:
            return 0.0

        time_span = self.frame_timestamps[-1] - self.frame_timestamps[0]
        if time_span > 0:
            return (len(self.frame_timestamps) - 1) / time_span

        return 0.0

    def calculate_fps(self) -> float:
        """Calculate current FPS based on frame timestamps (thread-safe)."""
        with self.buffer_lock:
            return self._fps_unlocked()

    def get_buffer_stats(self) -> Dict[str, Any]:
        """Get frame buffer statistics."""
        with self.buffer_lock:
            # Use the unlocked FPS helper here: buffer_lock is a plain
            # (non-reentrant) threading.Lock, so calling calculate_fps()
            # while holding it would deadlock.
            return {
                'buffer_size': len(self.frame_buffer),
                'max_buffer_size': self.frame_buffer_size,
                'oldest_frame_age': (time.time() - self.frame_timestamps[0]) if self.frame_timestamps else 0,
                'newest_frame_age': (time.time() - self.frame_timestamps[-1]) if self.frame_timestamps else 0,
                'sync_enabled': self.enable_frame_sync,
                'current_fps': self._fps_unlocked()
            }

    def create_header(self) -> Header:
        """Create a standard ROS header with the current node-clock timestamp."""
        header = Header()
        header.stamp = self.get_clock().now().to_msg()
        header.frame_id = "camera_link"
        return header

    def publish_camera_info(self):
        """Timer callback: publish camera info while the stream is active."""
        if self.stream_active:
            self.camera_info.header = self.create_header()
            self.camera_info_publisher.publish(self.camera_info)

    def handle_sync_frame_request(self, request, response):
        """Handle GetSynchronizedFrame service requests.

        A non-positive request.target_timestamp means "give me the latest
        frame". NOTE(review): when a target is given, actual_timestamp echoes
        the request rather than the matched frame's own timestamp — confirm
        this is what the service contract intends.
        """
        try:
            target_timestamp = request.target_timestamp if request.target_timestamp > 0 else None

            if target_timestamp:
                frame = self.get_synchronized_frame(target_timestamp)
                actual_timestamp = target_timestamp
            else:
                frame = self.get_current_frame()
                actual_timestamp = self.last_frame_time

            if frame is not None:
                # Convert to ROS Image message
                ros_image = self.cv_bridge.cv2_to_imgmsg(frame, encoding='bgr8')
                ros_image.header = self.create_header()

                response.success = True
                response.frame = ros_image
                response.actual_timestamp = actual_timestamp
                response.error_message = ""
            else:
                response.success = False
                response.error_message = "No frame available or synchronization failed"

        except Exception as e:
            response.success = False
            response.error_message = f"Service error: {str(e)}"
            self.logger.error(f"Error in sync frame service: {e}")

        return response

    def get_current_frame(self) -> Optional[np.ndarray]:
        """Get a copy of the current frame, or None if absent or older than max_frame_age."""
        with self.frame_lock:
            if self.current_frame is not None:
                # Check if frame is not too old
                age = time.time() - self.last_frame_time
                if age <= self.max_frame_age:
                    return self.current_frame.copy()
        return None

    def convert_frame_format(self, frame: np.ndarray, target_format: str) -> bytes:
        """Encode a frame as 'jpeg', 'png', or 'raw' bytes.

        Raises ValueError for unknown formats or when encoding fails.
        """
        if target_format.lower() == 'jpeg':
            encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), self.jpeg_quality]
            result, encoded_img = cv2.imencode('.jpg', frame, encode_param)
            if result:
                return encoded_img.tobytes()
        elif target_format.lower() == 'png':
            result, encoded_img = cv2.imencode('.png', frame)
            if result:
                return encoded_img.tobytes()
        elif target_format.lower() == 'raw':
            return frame.tobytes()

        raise ValueError(f"Unsupported format: {target_format}")

    def get_stream_stats(self) -> Dict[str, Any]:
        """Get stream processing statistics (stream state plus buffer stats)."""
        stats = {
            'stream_active': self.stream_active,
            'frame_count': self.frame_count,
            'last_frame_time': self.last_frame_time,
            'stream_url': self.stream_url,
            'stream_type': self.stream_type,
            'target_resolution': f"{self.target_width}x{self.target_height}",
            'frame_rate': self.frame_rate
        }

        # Add buffer stats
        stats.update(self.get_buffer_stats())

        return stats

    def stop_stream(self):
        """Stop video stream processing and wait briefly for the thread to exit."""
        self.logger.info("Stopping video stream processing")
        self.stream_active = False

        if hasattr(self, 'stream_thread') and self.stream_thread.is_alive():
            self.stream_thread.join(timeout=5.0)

    def destroy_node(self):
        """Clean up resources when node is destroyed."""
        self.stop_stream()
        super().destroy_node()


def main(args=None):
    """Entry point: initialize rclpy, spin the node, and clean up on exit."""
    rclpy.init(args=args)

    node = None
    try:
        node = VideoStreamProcessor()
        rclpy.spin(node)
    except KeyboardInterrupt:
        # Normal Ctrl+C shutdown — nothing to report
        pass
    except Exception as e:
        print(f"Error in VideoStreamProcessor: {e}")
    finally:
        if node is not None:
            node.destroy_node()
        rclpy.shutdown()


if __name__ == '__main__':
    main()