#!/usr/bin/env python3
"""
Voice-Vision Integration Module

Handles the integration between voice commands and vision analysis,
including voice trigger detection, multi-turn conversation management,
and response formatting for voice synthesis.

Features:
- Voice trigger pattern recognition for vision queries
- Multi-turn conversation state management
- Context-aware response generation
- Voice response formatting and optimization
- Conversation flow control
"""

import re
import logging
from typing import Optional, Dict, Any, List, Tuple, Callable
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from enum import Enum
import json
import uuid

import rclpy
from rclpy.node import Node
from std_msgs.msg import String


class VoiceTriggerType(Enum):
    """Categories of voice triggers that map a spoken request to a vision query.

    The comments show typical Chinese utterances that activate each type,
    with English glosses.
    """
    DIRECT_VISION = "direct_vision"  # e.g. "你看到了什么" ("what do you see?")
    OBJECT_INQUIRY = "object_inquiry"  # e.g. "这是什么" ("what is this?")
    SCENE_DESCRIPTION = "scene_description"  # e.g. "描述一下画面" ("describe the scene")
    SPECIFIC_QUESTION = "specific_question"  # e.g. "有多少个人" ("how many people?")
    COMPARISON = "comparison"  # e.g. "和刚才的图片比较" ("compare with the previous image")
    FOLLOW_UP = "follow_up"  # e.g. "还有呢" / "详细一点" ("anything else?" / "more detail")


@dataclass
class VoiceTriggerPattern:
    """Definition of one voice trigger: regex patterns plus matching policy."""
    trigger_type: VoiceTriggerType  # the trigger category these patterns detect
    patterns: List[str]  # regex patterns matched (re.search) against lowercased input
    confidence_threshold: float = 0.8  # minimum match confidence to accept this trigger
    requires_context: bool = False  # True if prior conversation turns are needed (e.g. follow-ups)
    response_template: Optional[str] = None  # format string with '{content}' placeholder for the vision answer


@dataclass
class ConversationTurn:
    """One user request / vision answer exchange within a conversation."""
    turn_id: str  # unique id (uuid4 string)
    timestamp: datetime  # when the turn was created
    user_input: str  # raw voice input text
    trigger_type: VoiceTriggerType  # detected trigger category
    vision_query: str  # context-enriched query sent to the vision backend
    vision_response: str  # raw vision answer (filled in after the query completes)
    formatted_response: str  # voice-ready answer (filled in after formatting)
    confidence: float  # trigger-detection confidence in [0, 1]
    processing_time_ms: int  # vision processing latency (filled in after the query completes)


@dataclass
class VoiceConversationContext:
    """Per-user conversation state for multi-turn voice/vision dialogue."""
    conversation_id: str
    user_id: str
    start_time: datetime
    last_activity: datetime
    turns: List[ConversationTurn] = field(default_factory=list)
    current_topic: Optional[str] = None
    context_keywords: List[str] = field(default_factory=list)

    def add_turn(self, turn: ConversationTurn):
        """Record a completed turn and refresh activity/context state."""
        self.turns.append(turn)
        self.last_activity = datetime.now()
        self._update_context_keywords(turn)

    def _update_context_keywords(self, turn: ConversationTurn):
        """Merge the turn's keywords into the rolling context window."""
        combined = turn.user_input + " " + turn.vision_response
        for candidate in self._extract_keywords(combined):
            if candidate not in self.context_keywords:
                self.context_keywords.append(candidate)
        # Keep only the 20 most recently added keywords.
        if len(self.context_keywords) > 20:
            del self.context_keywords[:-20]

    def _extract_keywords(self, text: str) -> List[str]:
        """Return up to 10 candidate keywords from *text*, stop-words removed."""
        # Simple keyword extraction (can be enhanced with NLP)
        common_words = {'的', '了', '是', '在', '有', '和', '与', '或', '但', '而', '也', '都', '很', '非常', '比较', '一个', '一些', '这个', '那个', '什么', '怎么', '为什么', 'the', 'is', 'are', 'and', 'or', 'but', 'with', 'in', 'on', 'at', 'to', 'for', 'of', 'a', 'an', 'this', 'that', 'what', 'how', 'why'}

        picked: List[str] = []
        for word in re.findall(r'\b\w+\b', text.lower()):
            if len(word) > 2 and word not in common_words:
                picked.append(word)
        return picked[:10]

    def get_recent_context(self, max_turns: int = 3) -> str:
        """Render the last *max_turns* turns as a user/reply transcript."""
        lines: List[str] = []
        for turn in self.turns[-max_turns:]:
            lines.append(f"用户: {turn.user_input}")
            lines.append(f"回复: {turn.vision_response}")
        # Empty string when there are no turns yet.
        return "\n".join(lines)


class VoiceVisionIntegration:
    """
    Voice-Vision integration manager.

    Detects vision-related voice triggers, manages per-user multi-turn
    conversation state, and formats vision analysis results so they are
    suitable for voice synthesis.
    """
    
    def __init__(self, logger=None):
        """
        Initialize voice-vision integration.
        
        Args:
            logger: Logger instance; defaults to a module-level logger.
        """
        self.logger = logger or logging.getLogger(__name__)
        
        # Voice trigger patterns, keyed by trigger type.
        self.trigger_patterns = self._initialize_trigger_patterns()
        
        # Conversation management: one active conversation per user id.
        self.conversations: Dict[str, VoiceConversationContext] = {}
        self.conversation_timeout = 300  # seconds of inactivity (5 minutes)
        self.max_conversation_turns = 20
        
        # Response formatting
        self.response_templates = self._initialize_response_templates()
        self.max_response_length = 200  # characters for voice synthesis
        
        # Optional callback invoked as callback(query, user_id, conversation_id).
        self.vision_query_callback: Optional[Callable[[str, str, str], Any]] = None
        
        self.logger.info("Voice-Vision integration initialized")
    
    def _initialize_trigger_patterns(self) -> Dict[VoiceTriggerType, VoiceTriggerPattern]:
        """Build the regex pattern table for every trigger type."""
        patterns = {
            VoiceTriggerType.DIRECT_VISION: VoiceTriggerPattern(
                trigger_type=VoiceTriggerType.DIRECT_VISION,
                patterns=[
                    r'你看到.*什么',
                    r'看到.*什么',
                    r'画面.*什么',
                    r'图像.*什么',
                    r'what.*see',
                    r'what.*visible',
                    r'describe.*image',
                    r'tell.*about.*picture'
                ],
                response_template="我看到{content}"
            ),
            
            VoiceTriggerType.OBJECT_INQUIRY: VoiceTriggerPattern(
                trigger_type=VoiceTriggerType.OBJECT_INQUIRY,
                patterns=[
                    r'这是什么',
                    r'那是什么',
                    r'这个.*什么',
                    r'那个.*什么',
                    r'what.*this',
                    r'what.*that',
                    r'identify.*object',
                    r'recognize.*item'
                ],
                response_template="这是{content}"
            ),
            
            VoiceTriggerType.SCENE_DESCRIPTION: VoiceTriggerPattern(
                trigger_type=VoiceTriggerType.SCENE_DESCRIPTION,
                patterns=[
                    r'描述.*画面',
                    r'描述.*图像',
                    r'说说.*看到',
                    r'介绍.*场景',
                    r'describe.*scene',
                    r'explain.*picture',
                    r'tell.*about.*scene'
                ],
                response_template="画面中{content}"
            ),
            
            VoiceTriggerType.SPECIFIC_QUESTION: VoiceTriggerPattern(
                trigger_type=VoiceTriggerType.SPECIFIC_QUESTION,
                patterns=[
                    r'有多少.*',
                    r'几个.*',
                    r'什么颜色',
                    r'多大.*',
                    r'在哪里.*',
                    r'how many.*',
                    r'what color.*',
                    r'how big.*',
                    r'where.*located'
                ],
                response_template="根据图像，{content}"
            ),
            
            VoiceTriggerType.COMPARISON: VoiceTriggerPattern(
                trigger_type=VoiceTriggerType.COMPARISON,
                patterns=[
                    r'和.*比较',
                    r'与.*对比',
                    r'有什么不同',
                    r'有什么变化',
                    r'compare.*with',
                    r'difference.*between',
                    r'what.*changed'
                ],
                requires_context=True,
                response_template="对比之前的图像，{content}"
            ),
            
            VoiceTriggerType.FOLLOW_UP: VoiceTriggerPattern(
                trigger_type=VoiceTriggerType.FOLLOW_UP,
                patterns=[
                    # Anchored at the start: follow-ups begin the utterance.
                    r'^还有.*',
                    r'^继续.*',
                    r'^详细.*',
                    r'^更多.*',
                    r'^more.*',
                    r'^continue.*',
                    r'^detail.*',
                    r'^elaborate.*'
                ],
                requires_context=True,
                response_template="另外，{content}"
            )
        }
        
        return patterns
    
    def _initialize_response_templates(self) -> Dict[str, str]:
        """Build canned (Chinese) voice responses for error scenarios."""
        return {
            'no_image': "抱歉，我现在看不到任何图像。请确保摄像头正常工作。",
            'processing_error': "处理图像时出现了问题，请稍后再试。",
            'service_unavailable': "视觉分析服务暂时不可用，请稍后再试。",
            'timeout': "图像分析超时，请重新尝试。",
            'context_required': "请先问一个关于图像的问题，然后我可以提供更多信息。",
            'conversation_limit': "对话已达到最大轮数，让我们开始新的对话吧。"
        }
    
    def set_vision_query_callback(self, callback: Callable[[str, str, str], Any]):
        """
        Set callback function for vision queries.
        
        Args:
            callback: Function to call for vision queries (query, user_id, conversation_id)
        """
        self.vision_query_callback = callback
    
    def detect_voice_trigger(self, text: str) -> Dict[str, Any]:
        """
        Detect whether *text* is a vision trigger (public convenience wrapper).
        
        Added because external callers (e.g. the ROS2 node wrapper) invoke
        this method; it adapts _detect_trigger's tuple into a dict.
        
        Args:
            text: Voice input text
            
        Returns:
            Dict with keys 'is_trigger' (bool), 'trigger_type' (enum value
            string or None), 'confidence' (float) and 'matched_pattern'.
        """
        result = self._detect_trigger(text)
        if result is None:
            return {
                'is_trigger': False,
                'trigger_type': None,
                'confidence': 0.0,
                'matched_pattern': None
            }
        
        trigger_type, confidence, matched_pattern = result
        return {
            'is_trigger': True,
            'trigger_type': trigger_type.value,
            'confidence': confidence,
            'matched_pattern': matched_pattern
        }
    
    def format_voice_response(self, result: Dict[str, Any]) -> str:
        """
        Format a raw vision result dict for voice synthesis (public wrapper).
        
        Added because external callers (e.g. the ROS2 node wrapper) invoke
        this method with a parsed vision-result payload.
        
        Args:
            result: Parsed vision result; the answer text is looked up under
                'response', 'result' or 'text'. An optional 'trigger_type'
                (enum value string) selects the response template.
            
        Returns:
            Voice-ready response string (an error template when no text found).
        """
        text = result.get('response') or result.get('result') or result.get('text') or ''
        if not text:
            return self.response_templates['processing_error']
        
        try:
            trigger_type = VoiceTriggerType(result.get('trigger_type'))
        except ValueError:
            # Unknown or missing trigger type: fall back to direct vision.
            trigger_type = VoiceTriggerType.DIRECT_VISION
        
        return self._format_response_for_voice(text, trigger_type)
    
    def process_voice_input(self, voice_text: str, user_id: str = "default") -> Optional[Dict[str, Any]]:
        """
        Process voice input and determine if it's a vision query.
        
        Args:
            voice_text: Voice input text
            user_id: User identifier
            
        Returns:
            Dict describing either a 'vision_query' (with a prepared
            ConversationTurn) or an 'error', or None if the input is not
            a vision query at all.
        """
        # Detect trigger type
        trigger_result = self._detect_trigger(voice_text)
        
        if not trigger_result:
            return None
        
        trigger_type, confidence, matched_pattern = trigger_result
        
        # Get or create conversation context
        conversation = self._get_or_create_conversation(user_id)
        
        # Context-dependent triggers (follow-up, comparison) need prior turns.
        if self.trigger_patterns[trigger_type].requires_context and not conversation.turns:
            return {
                'type': 'error',
                'message': self.response_templates['context_required'],
                'trigger_type': trigger_type.value,
                'confidence': confidence
            }
        
        # Enforce the per-conversation turn limit; reset so the next input
        # starts a fresh conversation.
        if len(conversation.turns) >= self.max_conversation_turns:
            self._reset_conversation(user_id)
            return {
                'type': 'error',
                'message': self.response_templates['conversation_limit'],
                'trigger_type': trigger_type.value,
                'confidence': confidence
            }
        
        # Generate vision query
        vision_query = self._generate_vision_query(voice_text, trigger_type, conversation)
        
        # Create the turn now; response fields are filled in later by
        # process_vision_response().
        turn = ConversationTurn(
            turn_id=str(uuid.uuid4()),
            timestamp=datetime.now(),
            user_input=voice_text,
            trigger_type=trigger_type,
            vision_query=vision_query,
            vision_response="",  # Will be filled later
            formatted_response="",  # Will be filled later
            confidence=confidence,
            processing_time_ms=0  # Will be filled later
        )
        
        return {
            'type': 'vision_query',
            'turn': turn,
            'conversation_id': conversation.conversation_id,
            'vision_query': vision_query,
            'trigger_type': trigger_type.value,
            'confidence': confidence,
            'requires_context': self.trigger_patterns[trigger_type].requires_context
        }
    
    def _detect_trigger(self, text: str) -> Optional[Tuple[VoiceTriggerType, float, str]]:
        """
        Detect voice trigger type and confidence.
        
        Confidence is a heuristic: the fraction of the input covered by the
        regex match, scaled by 1.5 and capped at 1.0, so short matches in a
        long utterance score lower.
        
        Args:
            text: Input text
            
        Returns:
            Tuple of (trigger_type, confidence, matched_pattern) or None
            when no pattern clears its trigger's confidence threshold.
        """
        text_lower = text.lower().strip()
        best_match = None
        best_confidence = 0.0
        best_pattern = ""
        
        for trigger_type, pattern_info in self.trigger_patterns.items():
            for pattern in pattern_info.patterns:
                match = re.search(pattern, text_lower)
                if match:
                    # Calculate confidence based on match coverage.
                    match_length = len(match.group(0))
                    text_length = len(text_lower)
                    confidence = min(1.0, match_length / text_length * 1.5)
                    
                    if confidence > best_confidence and confidence >= pattern_info.confidence_threshold:
                        best_match = trigger_type
                        best_confidence = confidence
                        best_pattern = pattern
        
        return (best_match, best_confidence, best_pattern) if best_match else None
    
    def _get_or_create_conversation(self, user_id: str) -> VoiceConversationContext:
        """Get the user's active conversation, creating one if necessary."""
        # Clean up expired conversations first so stale state is never reused.
        self._cleanup_expired_conversations()
        
        if user_id not in self.conversations:
            self.conversations[user_id] = VoiceConversationContext(
                conversation_id=str(uuid.uuid4()),
                user_id=user_id,
                start_time=datetime.now(),
                last_activity=datetime.now()
            )
        
        return self.conversations[user_id]
    
    def _generate_vision_query(self, voice_text: str, trigger_type: VoiceTriggerType, conversation: VoiceConversationContext) -> str:
        """
        Generate vision query based on voice input and context.
        
        Args:
            voice_text: Original voice input
            trigger_type: Detected trigger type
            conversation: Conversation context
            
        Returns:
            Generated vision query, enriched with prior-turn context for
            follow-up/comparison triggers and with recent keywords.
        """
        # Base query from voice text
        base_query = voice_text
        
        # Follow-up: prefix with the previous user question.
        if trigger_type == VoiceTriggerType.FOLLOW_UP and conversation.turns:
            last_turn = conversation.turns[-1]
            base_query = f"继续关于 '{last_turn.user_input}' 的问题: {voice_text}"
        
        # Comparison: prefix with a short transcript of recent turns.
        elif trigger_type == VoiceTriggerType.COMPARISON and conversation.turns:
            context = conversation.get_recent_context(2)
            base_query = f"对比之前的对话: {context}\n当前问题: {voice_text}"
        
        # Enhance query with the most recent conversation keywords.
        if conversation.context_keywords:
            keywords = ", ".join(conversation.context_keywords[-5:])  # Last 5 keywords
            base_query += f" (相关上下文: {keywords})"
        
        return base_query
    
    def process_vision_response(self, turn: ConversationTurn, vision_response: str, processing_time_ms: int, user_id: str = "default") -> str:
        """
        Process vision response and format for voice output.
        
        Args:
            turn: Conversation turn (as returned by process_voice_input)
            vision_response: Raw vision response
            processing_time_ms: Processing time
            user_id: User identifier
            
        Returns:
            Formatted response for voice synthesis
        """
        # Update turn with response
        turn.vision_response = vision_response
        turn.processing_time_ms = processing_time_ms
        
        # Format response for voice
        formatted_response = self._format_response_for_voice(vision_response, turn.trigger_type)
        turn.formatted_response = formatted_response
        
        # Record the turn. NOTE(review): if the conversation expired while
        # the vision query was in flight, the turn is silently dropped.
        if user_id in self.conversations:
            self.conversations[user_id].add_turn(turn)
        
        return formatted_response
    
    def _format_response_for_voice(self, response: str, trigger_type: VoiceTriggerType) -> str:
        """
        Format response for voice synthesis.
        
        Args:
            response: Raw response text
            trigger_type: Trigger type for context
            
        Returns:
            Cleaned, templated response truncated to max_response_length.
        """
        # Get response template
        template = self.trigger_patterns[trigger_type].response_template
        
        # Clean up response text
        cleaned_response = self._clean_response_text(response)
        
        # Apply template if available
        if template and '{content}' in template:
            formatted = template.format(content=cleaned_response)
        else:
            formatted = cleaned_response
        
        # Ensure response is not too long for voice synthesis
        if len(formatted) > self.max_response_length:
            formatted = formatted[:self.max_response_length-3] + "..."
        
        return formatted
    
    def _clean_response_text(self, text: str) -> str:
        """Strip punctuation runs, markdown markup and URLs for voice output."""
        # Collapse repeated sentence-ending punctuation.
        text = re.sub(r'[.]{2,}', '.', text)
        text = re.sub(r'[!]{2,}', '!', text)
        text = re.sub(r'[?]{2,}', '?', text)
        
        # Remove markdown formatting
        text = re.sub(r'\*\*(.*?)\*\*', r'\1', text)  # Bold
        text = re.sub(r'\*(.*?)\*', r'\1', text)      # Italic
        text = re.sub(r'`(.*?)`', r'\1', text)        # Code
        
        # Remove URLs. BUG FIX: the original character class [!*\\(\\),]
        # contained a doubly-escaped backslash, so it also matched literal
        # backslashes; parentheses need no escaping inside a class.
        text = re.sub(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*(),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', '', text)
        
        # Clean up whitespace
        text = re.sub(r'\s+', ' ', text).strip()
        
        return text
    
    def _cleanup_expired_conversations(self):
        """Drop conversations idle longer than conversation_timeout seconds."""
        current_time = datetime.now()
        expired_users = []
        
        for user_id, conversation in self.conversations.items():
            time_diff = (current_time - conversation.last_activity).total_seconds()
            if time_diff > self.conversation_timeout:
                expired_users.append(user_id)
        
        for user_id in expired_users:
            del self.conversations[user_id]
        
        if expired_users:
            self.logger.info(f"Cleaned up {len(expired_users)} expired voice conversations")
    
    def _reset_conversation(self, user_id: str):
        """Discard the user's conversation so the next input starts fresh."""
        if user_id in self.conversations:
            del self.conversations[user_id]
        
        self.logger.info(f"Reset conversation for user: {user_id}")
    
    def get_conversation_status(self, user_id: str) -> Optional[Dict[str, Any]]:
        """Return a status snapshot for the user's conversation, or None."""
        if user_id not in self.conversations:
            return None
        
        conversation = self.conversations[user_id]
        
        return {
            'conversation_id': conversation.conversation_id,
            'start_time': conversation.start_time.isoformat(),
            'last_activity': conversation.last_activity.isoformat(),
            'turn_count': len(conversation.turns),
            'current_topic': conversation.current_topic,
            'context_keywords': conversation.context_keywords,
            'recent_context': conversation.get_recent_context()
        }
    
    def get_all_conversations_status(self) -> Dict[str, Any]:
        """Return status snapshots for all active conversations."""
        return {
            'active_conversations': len(self.conversations),
            'conversations': {
                user_id: self.get_conversation_status(user_id)
                for user_id in self.conversations.keys()
            }
        }
    
    def handle_error_response(self, error_type: str, user_id: str = "default") -> str:
        """
        Handle error responses for voice output.
        
        Args:
            error_type: Type of error (key into response_templates)
            user_id: User identifier (currently unused; kept for API stability)
            
        Returns:
            Formatted error response (generic apology for unknown types)
        """
        return self.response_templates.get(error_type, "抱歉，处理您的请求时出现了问题。")


# Factory function
def create_voice_vision_integration(logger=None) -> VoiceVisionIntegration:
    """Factory helper returning a ready-to-use VoiceVisionIntegration.

    Args:
        logger: Optional logger forwarded to the integration instance.

    Returns:
        A newly constructed VoiceVisionIntegration.
    """
    return VoiceVisionIntegration(logger)


class VoiceVisionIntegrationNode(Node):
    """ROS2 node wrapper for Voice-Vision Integration.

    Bridges /voice_command to /vision_query requests and turns
    /vision_result payloads into /voice_response messages.
    """
    
    def __init__(self):
        super().__init__('voice_vision_integration')
        
        # Core integration logic (trigger detection, conversations, formatting).
        self.integration = VoiceVisionIntegration(self.get_logger())
        
        # Turns awaiting a vision result, keyed by conversation id, so the
        # result can be attached to the turn that produced the query.
        self._pending_turns: Dict[str, ConversationTurn] = {}
        
        # Subscribers
        self.voice_sub = self.create_subscription(
            String,
            '/voice_command',
            self.voice_callback,
            10
        )
        
        self.vision_sub = self.create_subscription(
            String,
            '/vision_result',
            self.vision_callback,
            10
        )
        
        # Publishers
        self.vision_query_pub = self.create_publisher(String, '/vision_query', 10)
        self.voice_response_pub = self.create_publisher(String, '/voice_response', 10)
        
        self.get_logger().info('Voice-Vision Integration Node started')
    
    def voice_callback(self, msg):
        """Handle an incoming voice command message."""
        command = msg.data
        self.get_logger().info(f'Received voice command: {command}')
        
        # BUG FIX: the previous version called integration.detect_voice_trigger(),
        # which did not exist; use the public process_voice_input() API.
        result = self.integration.process_voice_input(command)
        if result is None:
            # Not a vision-related command; nothing to do here.
            return
        
        if result['type'] == 'error':
            # Missing context or conversation limit reached: speak the error.
            response_msg = String()
            response_msg.data = result['message']
            self.voice_response_pub.publish(response_msg)
            return
        
        self.get_logger().info(f'Vision trigger detected: {result["trigger_type"]}')
        
        # Remember the turn so vision_callback can complete it later.
        self._pending_turns[result['conversation_id']] = result['turn']
        
        # Publish vision query request (original fields kept; 'query' and
        # 'conversation_id' added so the vision side can correlate results).
        query_msg = String()
        query_msg.data = json.dumps({
            'command': command,
            'query': result['vision_query'],
            'conversation_id': result['conversation_id'],
            'trigger_type': result['trigger_type'],
            'timestamp': datetime.now().isoformat()
        })
        self.vision_query_pub.publish(query_msg)
    
    def vision_callback(self, msg):
        """Handle a vision analysis result message."""
        try:
            result = json.loads(msg.data)
        except json.JSONDecodeError as e:
            self.get_logger().error(f'Failed to parse vision result: {e}')
            return
        
        self.get_logger().info('Received vision result')
        
        # Response text key is not fixed by a schema; try common candidates.
        response_text = result.get('response') or result.get('result') or ''
        turn = self._pending_turns.pop(result.get('conversation_id'), None)
        
        if turn is not None:
            # BUG FIX: the previous version called the nonexistent
            # integration.format_voice_response(); complete the pending turn
            # through process_vision_response() instead.
            response = self.integration.process_vision_response(
                turn,
                response_text,
                int(result.get('processing_time_ms', 0))
            )
        elif response_text:
            # No matching pending turn (late or unsolicited result):
            # speak the cleaned raw text.
            response = self.integration._clean_response_text(response_text)
        else:
            response = self.integration.handle_error_response('processing_error')
        
        # Publish voice response
        response_msg = String()
        response_msg.data = response
        self.voice_response_pub.publish(response_msg)


def main(args=None):
    """ROS2 entry point.

    Initializes rclpy, spins a VoiceVisionIntegrationNode until the process
    is interrupted, then tears down the node and the rclpy context.
    """
    rclpy.init(args=args)
    node = VoiceVisionIntegrationNode()

    try:
        rclpy.spin(node)
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop the node; exit quietly.
        pass
    finally:
        # Always release node resources and shut rclpy down, even on error.
        node.destroy_node()
        rclpy.shutdown()


if __name__ == '__main__':
    main()
