#!/usr/bin/env python3
"""
VisionInteractionNode ROS2 Node

This node serves as the core vision interaction component, integrating Coze API and local vision models
to process vision queries triggered by voice commands. It coordinates multimodal data (vision + audio)
and provides intelligent routing between different AI services.

Features:
- Integration with Coze API and local vision models
- Vision query processing pipeline
- Multimodal data coordination (image + audio)
- Intelligent service routing and fallback mechanisms
- Voice-triggered vision analysis
- Multi-turn conversation state management
"""

import asyncio
import json
import logging
import threading
import time
import uuid
from dataclasses import dataclass, asdict, field
from datetime import datetime
from enum import Enum
from typing import Optional, Dict, Any, List, Callable

import cv2
import numpy as np
import rclpy
from rclpy.node import Node
from rclpy.qos import QoSProfile, ReliabilityPolicy, HistoryPolicy, DurabilityPolicy
from sensor_msgs.msg import Image
from std_msgs.msg import String, Header
from echokitbot_microros_voice.msg import VisionAnalysis
from cv_bridge import CvBridge

# Import vision components
from .coze_integration import (
    CozeIntegration,
    VisionQueryRequest,
    VisionQueryResponse,
    CozeIntegrationError
)
from .local_vision_model import LocalVisionModel, create_local_vision_model
from .vision_service_router import VisionServiceRouter, ServiceType, create_vision_service_router
from .voice_vision_integration import VoiceVisionIntegration, create_voice_vision_integration


class VisionServiceType(Enum):
    """Enumerates the vision backends this node can route a query to."""
    COZE_API = "coze_api"        # remote Coze cloud service
    LOCAL_MODEL = "local_model"  # on-device vision model
    HYBRID = "hybrid"            # combination of the two


class VisionQueryState(Enum):
    """Lifecycle states of a single vision query tracked by the node."""
    PENDING = "pending"        # accepted, not yet started
    PROCESSING = "processing"  # currently being executed
    COMPLETED = "completed"    # finished with a result
    FAILED = "failed"          # finished with an error
    TIMEOUT = "timeout"        # gave up waiting for a result


@dataclass
class ConversationContext:
    """State for one user's multi-turn vision conversation.

    Tracks the running query/response history plus the most recent camera
    frame snapshot so follow-up questions can refer to the same context.
    """
    conversation_id: str   # unique id for this conversation
    user_id: str           # owner of the conversation
    start_time: datetime   # when the conversation began
    last_activity: datetime  # refreshed on every new query
    # Ordered query/response records. default_factory makes the field
    # optional (backward-compatible: existing callers still pass []).
    query_history: List[Dict[str, Any]] = field(default_factory=list)
    current_image: Optional[np.ndarray] = None  # latest frame snapshot, if any

    def add_query(self, query: str, response: str, model_used: str) -> None:
        """Append a query/response record to history and bump last_activity."""
        self.query_history.append({
            'timestamp': datetime.now().isoformat(),
            'query': query,
            'response': response,
            'model_used': model_used
        })
        self.last_activity = datetime.now()


class VisionInteractionNode(Node):
    """
    Core vision interaction ROS2 node.
    
    Integrates Coze API and local vision models to process vision queries
    triggered by voice commands, with intelligent routing and fallback mechanisms.
    """
    
    def __init__(self):
        """Build the node: parameters, QoS profiles, pub/sub, vision service
        components, shared state containers, and background timers."""
        super().__init__('vision_interaction_node')
        
        # Initialize logging
        self.logger = self.get_logger()
        self.logger.info("Initializing VisionInteractionNode")
        
        # Bridge for converting ROS Image messages to OpenCV ndarrays
        self.cv_bridge = CvBridge()
        
        # Declare and load parameters.
        # NOTE(review): declare_parameters() shadows the rclpy Node method of
        # the same name (different signature) — confirm this is intentional.
        self.declare_parameters()
        self.load_parameters()
        
        # Initialize QoS profiles
        self.setup_qos_profiles()
        
        # Initialize publishers and subscribers
        self.setup_publishers()
        self.setup_subscribers()
        
        # Vision service components (populated during async initialization)
        self.coze_integration: Optional[CozeIntegration] = None
        self.local_vision_model: Optional[LocalVisionModel] = None
        self.service_router: Optional[VisionServiceRouter] = None
        self.voice_vision_integration: Optional[VoiceVisionIntegration] = None
        
        # Latest camera frame; guarded by frame_lock
        self.current_frame: Optional[np.ndarray] = None
        self.frame_lock = threading.Lock()
        # Backend availability flags plus the effective preferred service
        self.service_status = {
            'coze_available': False,
            'local_model_available': False,
            'preferred_service': VisionServiceType.COZE_API
        }
        
        # In-flight query states keyed by query_id; guarded by query_lock
        self.active_queries: Dict[str, VisionQueryState] = {}
        self.query_lock = threading.Lock()
        
        # Per-user conversation contexts; guarded by conversation_lock.
        # FIX: the original re-assigned self.conversation_timeout = 300 here,
        # silently clobbering the 'conversation_timeout' parameter value that
        # load_parameters() already stored above — the override is removed so
        # the configured parameter takes effect.
        self.conversations: Dict[str, ConversationContext] = {}
        self.conversation_lock = threading.Lock()
        
        # Performance metrics counters
        self.metrics = {
            'total_queries': 0,
            'successful_queries': 0,
            'failed_queries': 0,
            'coze_queries': 0,
            'local_queries': 0,
            'average_processing_time': 0.0
        }
        
        # Initialize service router
        self.service_router = create_vision_service_router(self.logger)
        
        # Initialize voice-vision integration
        self.voice_vision_integration = create_voice_vision_integration(self.logger)
        self.voice_vision_integration.set_vision_query_callback(self._handle_voice_vision_query)
        
        # Initialize vision services (asynchronous)
        self.initialize_vision_services()
        
        # Start background tasks
        self.start_background_tasks()
        
        self.logger.info("VisionInteractionNode initialized successfully")
    
    def declare_parameters(self):
        """Declare every ROS2 parameter this node reads, with its default.

        NOTE(review): this shadows rclpy Node's declare_parameters (different
        signature); it is only invoked internally from __init__.
        """
        defaults = (
            # Service selection
            ('preferred_service', 'coze_api'),  # 'coze_api', 'local_model', 'hybrid'
            ('enable_coze_api', True),
            ('enable_local_model', True),
            ('coze_config_dir', ''),
            ('local_model_path', ''),
            # Query processing
            ('query_timeout', 30.0),            # seconds
            ('max_concurrent_queries', 3),
            ('enable_conversation_context', True),
            ('conversation_timeout', 300.0),    # seconds
            # Image handling
            ('max_image_size', 1920),
            ('jpeg_quality', 85),
            ('enable_image_preprocessing', True),
            # Topic names
            ('current_frame_topic', '/vision/current_frame'),
            ('voice_text_topic', '/voice_text'),
            ('vision_analysis_topic', '/vision_analysis'),
            ('vision_response_topic', '/vision_response'),
            # Fallback and retry
            ('enable_service_fallback', True),
            ('max_retry_attempts', 2),
            ('retry_delay', 1.0),               # seconds
            # Performance monitoring
            ('enable_metrics', True),
            ('metrics_publish_interval', 30.0), # seconds
        )
        for name, default in defaults:
            self.declare_parameter(name, default)
    
    def load_parameters(self):
        """Read all declared parameters into instance attributes."""
        def value_of(name):
            # Shorthand: fetch the raw ParameterValue for a parameter name.
            return self.get_parameter(name).get_parameter_value()

        # Service selection
        self.preferred_service = VisionServiceType(value_of('preferred_service').string_value)
        self.enable_coze_api = value_of('enable_coze_api').bool_value
        self.enable_local_model = value_of('enable_local_model').bool_value
        self.coze_config_dir = value_of('coze_config_dir').string_value
        self.local_model_path = value_of('local_model_path').string_value

        # Query processing
        self.query_timeout = value_of('query_timeout').double_value
        self.max_concurrent_queries = value_of('max_concurrent_queries').integer_value
        self.enable_conversation_context = value_of('enable_conversation_context').bool_value
        self.conversation_timeout = value_of('conversation_timeout').double_value

        # Image handling
        self.max_image_size = value_of('max_image_size').integer_value
        self.jpeg_quality = value_of('jpeg_quality').integer_value
        self.enable_image_preprocessing = value_of('enable_image_preprocessing').bool_value

        # Topic names
        self.current_frame_topic = value_of('current_frame_topic').string_value
        self.voice_text_topic = value_of('voice_text_topic').string_value
        self.vision_analysis_topic = value_of('vision_analysis_topic').string_value
        self.vision_response_topic = value_of('vision_response_topic').string_value

        # Fallback and retry
        self.enable_service_fallback = value_of('enable_service_fallback').bool_value
        self.max_retry_attempts = value_of('max_retry_attempts').integer_value
        self.retry_delay = value_of('retry_delay').double_value

        # Performance monitoring
        self.enable_metrics = value_of('enable_metrics').bool_value
        self.metrics_publish_interval = value_of('metrics_publish_interval').double_value

        self.logger.info(f"Loaded parameters: preferred_service={self.preferred_service.value}")
    
    def setup_qos_profiles(self):
        """Create the QoS profiles used by this node's topics."""
        # Image frames: latest-only, lossy delivery keeps latency low.
        self.image_qos = QoSProfile(
            history=HistoryPolicy.KEEP_LAST,
            depth=1,
            reliability=ReliabilityPolicy.BEST_EFFORT,
            durability=DurabilityPolicy.VOLATILE,
        )

        # Text and analysis messages: reliable delivery with a short backlog.
        self.text_qos = QoSProfile(
            history=HistoryPolicy.KEEP_LAST,
            depth=10,
            reliability=ReliabilityPolicy.RELIABLE,
            durability=DurabilityPolicy.VOLATILE,
        )
    
    def setup_publishers(self):
        """Create publishers for analysis results and spoken responses."""
        # Structured analysis output
        self.vision_analysis_publisher = self.create_publisher(
            VisionAnalysis, self.vision_analysis_topic, self.text_qos)

        # Plain-text output destined for voice synthesis
        self.vision_response_publisher = self.create_publisher(
            String, self.vision_response_topic, self.text_qos)

        self.logger.info("Publishers initialized")
    
    def setup_subscribers(self):
        """Create subscriptions for camera frames and recognized voice text."""
        # Camera frames (lossy, latest-only QoS)
        self.current_frame_subscriber = self.create_subscription(
            Image, self.current_frame_topic,
            self.current_frame_callback, self.image_qos)

        # Recognized speech text (reliable QoS)
        self.voice_text_subscriber = self.create_subscription(
            String, self.voice_text_topic,
            self.voice_text_callback, self.text_qos)

        self.logger.info("Subscribers initialized")
    
    def initialize_vision_services(self):
        """Kick off asynchronous initialization of the vision backends.

        asyncio.create_task() raises RuntimeError when no event loop is
        running in the current thread — which is the normal case when this
        node is constructed from synchronous code. In that case the
        coroutine is driven to completion on a daemon thread with its own
        short-lived event loop instead.
        """
        try:
            asyncio.get_running_loop()
        except RuntimeError:
            # No running loop in this thread: run on a background thread.
            threading.Thread(
                target=asyncio.run,
                args=(self._async_initialize_vision_services(),),
                daemon=True,
            ).start()
        else:
            asyncio.create_task(self._async_initialize_vision_services())
    
    async def _async_initialize_vision_services(self):
        """Bring up the configured vision backends and configure routing."""
        try:
            # Coze cloud backend
            if self.enable_coze_api:
                await self._initialize_coze_integration()
                if self.coze_integration:
                    self.service_router.register_service(ServiceType.COZE_API, self.coze_integration)

            # On-device model (requires a configured model path)
            if self.enable_local_model and self.local_model_path:
                await self._initialize_local_model()
                if self.local_vision_model:
                    self.service_router.register_service(ServiceType.LOCAL_MODEL, self.local_vision_model)

            # Tell the router which backend to try first; HYBRID leaves the
            # router's own default in place.
            route_preference = {
                VisionServiceType.COZE_API: ServiceType.COZE_API,
                VisionServiceType.LOCAL_MODEL: ServiceType.LOCAL_MODEL,
            }.get(self.preferred_service)
            if route_preference is not None:
                self.service_router.set_preferred_service(route_preference)

            self.service_router.enable_service_fallback(self.enable_service_fallback)

            # Record which backends actually came up
            self._update_service_status()

            self.logger.info("Vision services initialization completed")

        except Exception as e:
            self.logger.error(f"Failed to initialize vision services: {e}")
    
    async def _initialize_coze_integration(self):
        """Create and initialize the Coze client, recording availability."""
        try:
            # Empty config dir parameter means "use the client's default".
            self.coze_integration = CozeIntegration(self.coze_config_dir or None)

            ok = await self.coze_integration.initialize()
            if ok:
                self.service_status['coze_available'] = True
                self.logger.info("Coze integration initialized successfully")
            else:
                self.logger.warning("Coze integration initialization failed")

        except Exception as e:
            self.logger.error(f"Failed to initialize Coze integration: {e}")
    
    async def _initialize_local_model(self):
        """Load the on-device vision model, recording availability."""
        try:
            model = create_local_vision_model(self.local_model_path)
            self.local_vision_model = model

            if model.is_model_loaded():
                self.service_status['local_model_available'] = True
                self.logger.info("Local vision model initialized successfully")
            else:
                self.logger.warning("Local vision model initialization failed")

        except Exception as e:
            self.logger.error(f"Failed to initialize local vision model: {e}")
    
    def _update_service_status(self):
        """Fall back to whichever backend is actually available.

        If the configured preference is unavailable while the alternative is
        up, flip the effective preference recorded in service_status.
        """
        coze_up = self.service_status['coze_available']
        local_up = self.service_status['local_model_available']

        if self.preferred_service == VisionServiceType.COZE_API and not coze_up and local_up:
            self.service_status['preferred_service'] = VisionServiceType.LOCAL_MODEL
            self.logger.info("Switched to local model due to Coze unavailability")
        elif self.preferred_service == VisionServiceType.LOCAL_MODEL and not local_up and coze_up:
            self.service_status['preferred_service'] = VisionServiceType.COZE_API
            self.logger.info("Switched to Coze API due to local model unavailability")

        self.logger.info(f"Service status: {self.service_status}")
    
    def current_frame_callback(self, msg: Image):
        """Cache the newest camera frame as a BGR ndarray for later queries."""
        try:
            # Convert outside the lock to keep the critical section short.
            frame = self.cv_bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
            with self.frame_lock:
                self.current_frame = frame

        except Exception as e:
            self.logger.error(f"Failed to process current frame: {e}")
    
    def voice_text_callback(self, msg: String):
        """Handle recognized voice text and trigger vision analysis if needed.

        Runs inside the ROS executor callback, where there is normally no
        running asyncio event loop, so coroutines are dispatched through
        _run_async() instead of a bare asyncio.create_task() (which raises
        RuntimeError without a running loop).
        """
        voice_text = msg.data.strip()

        # Use voice-vision integration to process input
        if self.voice_vision_integration:
            result = self.voice_vision_integration.process_voice_input(voice_text)

            if result:
                if result['type'] == 'vision_query':
                    self.logger.info(f"Vision query detected: {voice_text} (confidence: {result['confidence']:.2f})")
                    self._run_async(self._process_voice_vision_query(result))
                elif result['type'] == 'error':
                    self.logger.warning(f"Voice processing error: {result['message']}")
                    self._run_async(self._publish_vision_response(result['message']))
        else:
            # Fallback to simple keyword detection
            if self._is_vision_query(voice_text.lower()):
                self.logger.info(f"Vision query detected (fallback): {voice_text}")
                self._run_async(self._process_vision_query(voice_text))

    def _run_async(self, coro):
        """Schedule *coro*: on the running event loop when one exists in this
        thread, otherwise on a daemon thread with its own short-lived loop."""
        try:
            asyncio.get_running_loop()
        except RuntimeError:
            threading.Thread(target=asyncio.run, args=(coro,), daemon=True).start()
        else:
            asyncio.create_task(coro)
    
    def _is_vision_query(self, text: str) -> bool:
        """Return True when *text* contains a vision-related keyword.

        Simple substring matching over a fixed bilingual keyword list; used
        only as a fallback when the voice-vision integration is unavailable.
        """
        keywords = (
            '你看到', '看到了什么', '这是什么', '描述一下', '图像', '画面',
            'what do you see', 'describe', 'what is this', 'image', 'picture',
            '分析', '识别', 'analyze', 'identify', '观察', 'observe'
        )
        for keyword in keywords:
            if keyword in text:
                return True
        return False
    
    async def _process_voice_vision_query(self, voice_result: Dict[str, Any]):
        """Process a vision query produced by the voice-vision integration.

        Snapshots the current frame, enforces the concurrent-query limit,
        executes the query through the router, and publishes both the
        structured analysis and the formatted spoken-response text.
        """
        turn = voice_result['turn']
        # Kept even though unused below: also validates the key's presence,
        # routing malformed payloads into the except branch.
        conversation_id = voice_result['conversation_id']
        vision_query = voice_result['vision_query']

        try:
            # Snapshot the latest frame under the lock; bail out politely
            # when no image has been received yet.
            with self.frame_lock:
                if self.current_frame is None:
                    error_response = self.voice_vision_integration.handle_error_response('no_image')
                    await self._publish_vision_response(error_response)
                    return
                current_image = self.current_frame.copy()

            # Register the query, rejecting it when at capacity.
            with self.query_lock:
                if len(self.active_queries) >= self.max_concurrent_queries:
                    error_response = self.voice_vision_integration.handle_error_response('service_unavailable')
                    await self._publish_vision_response(error_response)
                    return
                self.active_queries[turn.query_id] = VisionQueryState.PROCESSING

            self.metrics['total_queries'] += 1

            # Execute and time the query (milliseconds).
            start_time = time.time()
            response = await self._execute_vision_query(turn.query_id, vision_query, current_image, turn.user_input)
            processing_time = int((time.time() - start_time) * 1000)

            # Let the integration layer format the spoken response.
            formatted_response = self.voice_vision_integration.process_vision_response(
                turn, response.response_text, processing_time
            )

            await self._publish_vision_analysis(turn.query_id, vision_query, response, processing_time)
            await self._publish_vision_response(formatted_response)

            self.metrics['successful_queries'] += 1
            self._update_average_processing_time(processing_time)

            with self.query_lock:
                self.active_queries[turn.query_id] = VisionQueryState.COMPLETED

            self.logger.info(f"Voice vision query completed: {turn.query_id}")

        except Exception as e:
            self.logger.error(f"Failed to process voice vision query: {e}")
            self.metrics['failed_queries'] += 1
            with self.query_lock:
                self.active_queries[turn.query_id] = VisionQueryState.FAILED
            error_response = self.voice_vision_integration.handle_error_response('processing_error')
            await self._publish_vision_response(error_response)

        finally:
            # Drop the query record after 30s. rclpy timers are periodic, so
            # the callback cancels its own timer to make this a one-shot —
            # the original leaked a timer per query that fired forever.
            cleanup_timer = None

            def _cleanup_once():
                self._cleanup_query(turn.query_id)
                if cleanup_timer is not None:
                    cleanup_timer.cancel()

            cleanup_timer = self.create_timer(30.0, _cleanup_once)
    
    def _handle_voice_vision_query(self, query: str, user_id: str, conversation_id: str):
        """Callback hook from the voice-vision integration.

        Currently a no-op beyond debug logging; reserved for future use.
        """
        self.logger.debug(f"Voice vision query callback: {query} for user {user_id}")
    
    async def _process_vision_query(self, query: str, user_id: str = "default"):
        """Process a vision query end-to-end (fallback path used when the
        voice-vision integration layer is unavailable).

        Snapshots the current frame, enforces the concurrency limit, runs the
        query, updates conversation context and metrics, and publishes both
        the structured analysis and the spoken-response text.
        """
        query_id = str(uuid.uuid4())

        try:
            # Snapshot the latest frame; report an error if none received yet.
            with self.frame_lock:
                if self.current_frame is None:
                    await self._publish_error_response(query_id, "没有可用的图像数据")
                    return
                current_image = self.current_frame.copy()

            # Register the query, rejecting it when at capacity.
            with self.query_lock:
                if len(self.active_queries) >= self.max_concurrent_queries:
                    await self._publish_error_response(query_id, "查询队列已满，请稍后重试")
                    return
                self.active_queries[query_id] = VisionQueryState.PROCESSING

            self.metrics['total_queries'] += 1

            # Execute and time the query (milliseconds).
            start_time = time.time()
            response = await self._execute_vision_query(query_id, query, current_image, user_id)
            processing_time = int((time.time() - start_time) * 1000)

            # Record the exchange for multi-turn context if enabled.
            if self.enable_conversation_context:
                self._update_conversation_context(user_id, query, response.response_text, response.model_used)

            await self._publish_vision_analysis(query_id, query, response, processing_time)
            await self._publish_vision_response(response.response_text)

            self.metrics['successful_queries'] += 1
            self._update_average_processing_time(processing_time)

            with self.query_lock:
                self.active_queries[query_id] = VisionQueryState.COMPLETED

            self.logger.info(f"Vision query completed: {query_id}")

        except Exception as e:
            self.logger.error(f"Failed to process vision query: {e}")
            self.metrics['failed_queries'] += 1
            with self.query_lock:
                self.active_queries[query_id] = VisionQueryState.FAILED
            await self._publish_error_response(query_id, f"处理查询时出错: {str(e)}")

        finally:
            # Drop the query record after 30s. rclpy timers are periodic, so
            # the callback cancels its own timer to make this a one-shot —
            # the original leaked a timer per query that fired forever.
            cleanup_timer = None

            def _cleanup_once():
                self._cleanup_query(query_id)
                if cleanup_timer is not None:
                    cleanup_timer.cancel()

            cleanup_timer = self.create_timer(30.0, _cleanup_once)
    
    async def _execute_vision_query(self, query_id: str, query: str, image: np.ndarray, user_id: str) -> VisionQueryResponse:
        """Run one vision query and return the backend's response."""
        # Optionally shrink the frame before encoding.
        if self.enable_image_preprocessing:
            image = self._preprocess_image(image)

        request = VisionQueryRequest(
            query_id=query_id,
            user_query=query,
            image_data=self._image_to_bytes(image),
            use_websocket=False,  # Default to OpenAPI for now
            timeout=int(self.query_timeout)
        )

        # Prefer the router (it handles selection and fallback); otherwise
        # call the currently preferred backend directly.
        if self.service_router:
            return await self.service_router.route_request(request)
        return await self._try_vision_service_direct(request, self.service_status['preferred_service'])
    
    async def _try_vision_service_direct(self, request: VisionQueryRequest, service_type: VisionServiceType) -> VisionQueryResponse:
        """Call one specific backend directly, bypassing the router."""
        def _failure(text: str, model: str, err: str) -> VisionQueryResponse:
            # Uniform shape for unavailable/exception results.
            return VisionQueryResponse(
                query_id=request.query_id,
                response_text=text,
                confidence_score=0.0,
                processing_time_ms=0,
                model_used=model,
                method_used="error",
                timestamp=datetime.now(),
                error=err
            )

        try:
            if service_type == VisionServiceType.COZE_API and self.coze_integration:
                result = await self.coze_integration.process_vision_query(request)
                self.metrics['coze_queries'] += 1
                return result

            if service_type == VisionServiceType.LOCAL_MODEL and self.local_vision_model:
                # The local model wants a numpy image, not encoded bytes.
                frame = self._bytes_to_image(request.image_data)
                answer = self.local_vision_model.analyze_image(frame, request.user_query)

                result = VisionQueryResponse(
                    query_id=request.query_id,
                    response_text=answer,
                    confidence_score=0.8,  # fixed default for the local model
                    processing_time_ms=0,  # caller measures wall time
                    model_used="local_model",
                    method_used="local",
                    timestamp=datetime.now()
                )
                self.metrics['local_queries'] += 1
                return result

            return _failure("服务不可用", "none", f"Service {service_type.value} not available")

        except Exception as e:
            self.logger.error(f"Vision service {service_type.value} failed: {e}")
            return _failure("处理请求时出现错误", "error", str(e))
    
    def _get_fallback_service(self) -> VisionServiceType:
        """Return the alternative backend to the currently preferred one."""
        preferred = self.service_status['preferred_service']
        return (VisionServiceType.LOCAL_MODEL
                if preferred == VisionServiceType.COZE_API
                else VisionServiceType.COZE_API)
    
    def _preprocess_image(self, image: np.ndarray) -> np.ndarray:
        """Downscale *image* so its longest side fits max_image_size.

        Returns the input unchanged when it is already small enough, or on
        any preprocessing error.
        """
        try:
            height, width = image.shape[:2]
            longest = max(height, width)
            if longest > self.max_image_size:
                scale = self.max_image_size / longest
                new_size = (int(width * scale), int(height * scale))
                image = cv2.resize(image, new_size, interpolation=cv2.INTER_AREA)
            return image

        except Exception as e:
            self.logger.error(f"Image preprocessing failed: {e}")
            return image
    
    def _image_to_bytes(self, image: np.ndarray) -> bytes:
        """JPEG-encode *image* at the configured quality; b'' on failure.

        The previous function-local `import cv2` was redundant (cv2 is
        imported at module level) and has been removed. imencode's success
        flag is now checked explicitly, since it can fail without raising.
        """
        try:
            ok, buffer = cv2.imencode('.jpg', image, [cv2.IMWRITE_JPEG_QUALITY, self.jpeg_quality])
            if not ok:
                self.logger.error("Image to bytes conversion failed: imencode returned failure")
                return b''
            return buffer.tobytes()
        except Exception as e:
            self.logger.error(f"Image to bytes conversion failed: {e}")
            return b''
    
    def _bytes_to_image(self, image_bytes: bytes) -> np.ndarray:
        """Decode image bytes to a BGR ndarray; empty array on failure.

        cv2.imdecode returns None (it does not raise) for undecodable data,
        so that case is normalized to the same empty-array failure value as
        the exception path instead of leaking None to callers. The previous
        function-local `import cv2` was redundant (cv2 is imported at module
        level) and has been removed.
        """
        try:
            nparr = np.frombuffer(image_bytes, np.uint8)
            image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
            if image is None:
                self.logger.error("Bytes to image conversion failed: undecodable image data")
                return np.array([])
            return image
        except Exception as e:
            self.logger.error(f"Bytes to image conversion failed: {e}")
            return np.array([])
    
    def _update_conversation_context(self, user_id: str, query: str, response: str, model_used: str):
        """Record a query/response pair in the user's conversation context,
        creating the conversation on first use, and snapshot the frame."""
        with self.conversation_lock:
            context = self.conversations.get(user_id)
            if context is None:
                context = ConversationContext(
                    conversation_id=str(uuid.uuid4()),
                    user_id=user_id,
                    start_time=datetime.now(),
                    last_activity=datetime.now(),
                    query_history=[]
                )
                self.conversations[user_id] = context

            context.add_query(query, response, model_used)

            # Keep the most recent frame with the conversation so follow-up
            # questions can refer to the same image. (frame_lock is acquired
            # inside conversation_lock here; elsewhere in this file
            # frame_lock is only ever taken on its own.)
            with self.frame_lock:
                if self.current_frame is not None:
                    context.current_image = self.current_frame.copy()
    
    async def _publish_vision_analysis(self, query_id: str, query: str, response: VisionQueryResponse, processing_time: int):
        """Publish the structured VisionAnalysis message for one query."""
        try:
            msg = VisionAnalysis()
            msg.header = Header()
            msg.header.stamp = self.get_clock().now().to_msg()
            msg.header.frame_id = "vision_analysis"

            msg.query_id = query_id
            msg.user_query = query
            msg.analysis_result = response.response_text
            msg.confidence_score = response.confidence_score
            msg.processing_time_ms = processing_time
            msg.model_used = response.model_used

            self.vision_analysis_publisher.publish(msg)

        except Exception as e:
            self.logger.error(f"Failed to publish vision analysis: {e}")
    
    async def _publish_vision_response(self, response_text: str):
        """Publish plain response text for downstream voice synthesis."""
        try:
            msg = String()
            msg.data = response_text
            self.vision_response_publisher.publish(msg)

        except Exception as e:
            self.logger.error(f"Failed to publish vision response: {e}")
    
    async def _publish_error_response(self, query_id: str, error_message: str):
        """Publish an error as both a VisionAnalysis message and plain text."""
        try:
            # Structured analysis message carrying the error
            analysis = VisionAnalysis()
            analysis.header = Header()
            analysis.header.stamp = self.get_clock().now().to_msg()
            analysis.header.frame_id = "vision_analysis"

            analysis.query_id = query_id
            analysis.user_query = "error"
            analysis.analysis_result = error_message
            analysis.confidence_score = 0.0
            analysis.processing_time_ms = 0
            analysis.model_used = "error"

            self.vision_analysis_publisher.publish(analysis)

            # Plain-text response for voice synthesis
            text_msg = String()
            text_msg.data = error_message
            self.vision_response_publisher.publish(text_msg)

        except Exception as e:
            self.logger.error(f"Failed to publish error response: {e}")
    
    def _cleanup_query(self, query_id: str):
        """Clean up completed query."""
        with self.query_lock:
            if query_id in self.active_queries:
                del self.active_queries[query_id]
    
    def _update_average_processing_time(self, processing_time: int):
        """Update average processing time metric."""
        current_avg = self.metrics['average_processing_time']
        successful_queries = self.metrics['successful_queries']
        
        if successful_queries == 1:
            self.metrics['average_processing_time'] = float(processing_time)
        else:
            # Calculate running average
            self.metrics['average_processing_time'] = (
                (current_avg * (successful_queries - 1) + processing_time) / successful_queries
            )
    
    def start_background_tasks(self):
        """Register the node's periodic maintenance timers."""
        if self.enable_conversation_context:
            # Purge stale conversations once per minute.
            self.create_timer(60.0, self._cleanup_conversations)

        if self.enable_metrics:
            # Export performance counters at the configured cadence.
            self.create_timer(self.metrics_publish_interval, self._publish_metrics)

        # Probe downstream vision services twice a minute.
        self.create_timer(30.0, self._check_service_health)
    
    def _cleanup_conversations(self):
        """Clean up expired conversations."""
        try:
            current_time = datetime.now()
            expired_conversations = []
            
            with self.conversation_lock:
                for user_id, context in self.conversations.items():
                    time_diff = (current_time - context.last_activity).total_seconds()
                    if time_diff > self.conversation_timeout:
                        expired_conversations.append(user_id)
                
                for user_id in expired_conversations:
                    del self.conversations[user_id]
            
            if expired_conversations:
                self.logger.info(f"Cleaned up {len(expired_conversations)} expired conversations")
                
        except Exception as e:
            self.logger.error(f"Conversation cleanup failed: {e}")
    
    def _publish_metrics(self):
        """Publish performance metrics."""
        try:
            metrics_msg = String()
            metrics_msg.data = json.dumps(self.metrics)
            
            # Create a metrics publisher if it doesn't exist
            if not hasattr(self, 'metrics_publisher'):
                self.metrics_publisher = self.create_publisher(String, '/vision/metrics', self.text_qos)
            
            self.metrics_publisher.publish(metrics_msg)
            
        except Exception as e:
            self.logger.error(f"Failed to publish metrics: {e}")
    
    def _check_service_health(self):
        """Check health of vision services."""
        asyncio.create_task(self._async_check_service_health())
    
    async def _async_check_service_health(self):
        """Asynchronously check service health."""
        try:
            if self.service_router:
                # Get status from service router
                router_status = self.service_router.get_service_status()
                
                # Update local service status
                self.service_status['coze_available'] = router_status['services'].get('coze_api', {}).get('available', False)
                self.service_status['local_model_available'] = router_status['services'].get('local_model', {}).get('available', False)
            else:
                # Fallback to direct checks
                if self.coze_integration:
                    connection_status = await self.coze_integration.test_connection()
                    self.service_status['coze_available'] = connection_status.get('overall', False)
                
                if self.local_vision_model:
                    self.service_status['local_model_available'] = self.local_vision_model.is_model_loaded()
            
            # Update preferred service if needed
            self._update_service_status()
            
        except Exception as e:
            self.logger.error(f"Service health check failed: {e}")
    
    def get_node_status(self) -> Dict[str, Any]:
        """Assemble a snapshot of node health, workload, and metrics.

        Returns:
            Dict with base status fields, plus router and voice-integration
            sections when those components are configured.
        """
        status: Dict[str, Any] = {
            'service_status': self.service_status,
            'active_queries': len(self.active_queries),
            'active_conversations': len(self.conversations),
            'metrics': self.metrics,
            'has_current_frame': self.current_frame is not None,
        }

        router = self.service_router
        if router:
            status['router_status'] = router.get_service_status()
            status['router_metrics'] = router.get_metrics_summary()

        integration = self.voice_vision_integration
        if integration:
            status['voice_conversations'] = integration.get_all_conversations_status()

        return status
    
    async def shutdown(self):
        """Release external resources before the node is destroyed.

        Shuts down the service router, then the Coze integration (same order
        as before), logging — not raising — on failure.
        """
        try:
            router = self.service_router
            if router:
                await router.shutdown()

            coze = self.coze_integration
            if coze:
                await coze.shutdown()

            self.logger.info("VisionInteractionNode shutdown complete")

        except Exception as e:
            self.logger.error(f"Error during shutdown: {e}")


def main(args=None):
    """Entry point for the vision interaction node.

    Initializes rclpy, spins a VisionInteractionNode on a multi-threaded
    executor (so async work and timer callbacks can overlap), and tears
    everything down on exit or Ctrl-C.
    """
    rclpy.init(args=args)

    try:
        node = VisionInteractionNode()

        # Imported here to keep module import lightweight, as in the original.
        from rclpy.executors import MultiThreadedExecutor

        executor = MultiThreadedExecutor()
        executor.add_node(node)
        try:
            executor.spin()
        except KeyboardInterrupt:
            pass  # Ctrl-C is a normal way to stop the node.
        finally:
            executor.shutdown()
            node.destroy_node()

    except Exception as e:
        print(f"Failed to start VisionInteractionNode: {e}")
    finally:
        rclpy.shutdown()


if __name__ == '__main__':
    main()