"""
Real-time Paper Classification

Real-time classification system for processing new papers as they arrive
from the ArXiv scraper service. Provides immediate classification and
recommendation updates.
"""

import asyncio
import json
import logging
import threading
import time
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from queue import Empty, Full, Queue
from typing import Any, Callable, Dict, List, Optional

from ..core.classification_engine import AIClassificationEngine
from ..core.paper_classifier import SemanticPaperClassifier
from ..core.recommendation_engine import PersonalizedRecommendationEngine
from ..learning.user_preference_learner import UserPreferenceLearner


class RealtimeClassifier:
    """
    Real-time paper classification system that processes papers as they arrive.
    
    Features:
    - Real-time paper processing from scraper queue
    - Immediate classification using AI engine
    - Automatic recommendation updates for affected users
    - Priority queue for high-importance papers
    - Error handling and retry logic
    - Performance monitoring
    - Integration with existing ArXiv scraper service
    """
    
    def __init__(self, config: Dict[str, Any], database_manager=None):
        """Initialize real-time classifier.
        
        Args:
            config: Configuration dictionary
            database_manager: Database connection manager
        """
        self.config = config
        self.db = database_manager
        self.logger = logging.getLogger(__name__)
        
        # Initialize AI components
        self.classification_engine = AIClassificationEngine(config, database_manager)
        self.paper_classifier = SemanticPaperClassifier(config)
        self.recommendation_engine = PersonalizedRecommendationEngine(config, database_manager)
        
        # Real-time processing configuration
        self.processing_threads = config.get('realtime_threads', 2)
        self.queue_size = config.get('queue_size', 1000)
        self.batch_size = config.get('realtime_batch_size', 5)
        self.processing_timeout = config.get('processing_timeout', 300)  # 5 minutes
        self.retry_attempts = config.get('retry_attempts', 3)
        
        # Processing queues
        self.paper_queue = Queue(maxsize=self.queue_size)
        self.priority_queue = Queue(maxsize=100)  # High priority papers
        self.result_queue = Queue()  # Unbounded: workers must never block on results
        
        # Threading control
        self.processing_active = False
        self.worker_threads = []
        self.thread_pool = ThreadPoolExecutor(max_workers=self.processing_threads)
        
        # Guards realtime_stats: counters are mutated from multiple worker
        # threads, and an unsynchronized "+=" is a read-modify-write race.
        self._stats_lock = threading.Lock()
        
        # Performance monitoring
        self.realtime_stats = {
            'papers_processed': 0,
            'papers_queued': 0,
            'processing_errors': 0,
            'average_processing_time': 0.0,
            'queue_full_events': 0,
            'priority_papers_processed': 0
        }
        
        # Callbacks for different events
        self.callbacks = {
            'paper_classified': [],
            'processing_error': [],
            'queue_full': []
        }
        
        self.logger.info(f"Real-time Classifier initialized with {self.processing_threads} threads")
    
    def start_processing(self):
        """Start real-time processing threads (workers + result processor).
        
        Idempotent: a warning is logged and nothing happens if processing
        is already active. Threads are daemonic so they never block
        interpreter shutdown.
        """
        if self.processing_active:
            self.logger.warning("Real-time processing is already active")
            return
        
        self.processing_active = True
        
        # Start worker threads
        for i in range(self.processing_threads):
            thread = threading.Thread(
                target=self._worker_thread,
                name=f"RealtimeClassifier-Worker-{i}",
                daemon=True
            )
            thread.start()
            self.worker_threads.append(thread)
        
        # Start result processor thread
        result_thread = threading.Thread(
            target=self._result_processor_thread,
            name="RealtimeClassifier-ResultProcessor",
            daemon=True
        )
        result_thread.start()
        self.worker_threads.append(result_thread)
        
        self.logger.info(f"Started {len(self.worker_threads)} real-time processing threads")
    
    def stop_processing(self):
        """Stop real-time processing threads.
        
        Signals all threads to stop via the `processing_active` flag, waits
        (bounded) for them to drain their current item, then shuts down the
        thread pool used for async recommendation updates.
        """
        if not self.processing_active:
            self.logger.warning("Real-time processing is not active")
            return
        
        self.processing_active = False
        
        # Wait for threads to finish current work
        for thread in self.worker_threads:
            if thread.is_alive():
                thread.join(timeout=30)  # 30-second timeout
        
        self.worker_threads.clear()
        self.thread_pool.shutdown(wait=True)
        
        self.logger.info("Stopped real-time processing threads")
    
    def add_paper_for_classification(self, paper_data: Dict[str, Any], 
                                   priority: bool = False) -> bool:
        """
        Add paper to classification queue.
        
        Args:
            paper_data: Paper metadata dictionary
            priority: Whether to add to priority queue
            
        Returns:
            True if successfully queued, False if invalid or queue is full
        """
        try:
            # Validate paper data
            if not self._validate_paper_data(paper_data):
                self.logger.error(f"Invalid paper data: {paper_data.get('arxiv_id', 'unknown')}")
                return False
            
            # Add timestamp
            paper_data['queued_at'] = datetime.utcnow().isoformat()
            
            # Choose queue based on priority
            target_queue = self.priority_queue if priority else self.paper_queue
            
            try:
                target_queue.put_nowait(paper_data)
                with self._stats_lock:
                    self.realtime_stats['papers_queued'] += 1
                
                if priority:
                    self.logger.info(f"Added priority paper to queue: {paper_data.get('arxiv_id')}")
                
                return True
                
            except Full:
                # Queue is full — record the event and notify subscribers.
                # (Catching queue.Full specifically; a bare except here would
                # also swallow KeyboardInterrupt/SystemExit.)
                with self._stats_lock:
                    self.realtime_stats['queue_full_events'] += 1
                self._trigger_callbacks('queue_full', {
                    'queue_type': 'priority' if priority else 'normal',
                    'paper_id': paper_data.get('arxiv_id', 'unknown')
                })
                return False
                
        except Exception as e:
            self.logger.error(f"Error adding paper to queue: {e}")
            return False
    
    def add_papers_batch(self, papers_data: List[Dict[str, Any]]) -> int:
        """
        Add multiple papers to classification queue.
        
        Args:
            papers_data: List of paper metadata dictionaries
            
        Returns:
            Number of papers successfully queued
        """
        successful_count = sum(
            1 for paper_data in papers_data
            if self.add_paper_for_classification(paper_data)
        )
        
        self.logger.info(f"Queued {successful_count}/{len(papers_data)} papers for real-time classification")
        return successful_count
    
    def _worker_thread(self):
        """Worker thread for processing papers from queue.
        
        Drains the priority queue first; falls back to a 1-second blocking
        wait on the normal queue so the `processing_active` flag is
        re-checked at least once per second.
        """
        thread_name = threading.current_thread().name
        self.logger.info(f"Started worker thread: {thread_name}")
        
        while self.processing_active:
            try:
                # Check priority queue first
                paper_data = None
                queue_type = None
                
                try:
                    paper_data = self.priority_queue.get_nowait()
                    queue_type = 'priority'
                except Empty:
                    try:
                        paper_data = self.paper_queue.get(timeout=1.0)
                        queue_type = 'normal'
                    except Empty:
                        continue
                
                if paper_data:
                    # Process paper
                    result = self._process_paper_realtime(paper_data, queue_type)
                    
                    # Put result in result queue (unbounded, but guard anyway)
                    try:
                        self.result_queue.put_nowait(result)
                    except Full:
                        self.logger.error(f"Result queue full, dropping result for {paper_data.get('arxiv_id')}")
                    
                    # Mark queue task as done
                    if queue_type == 'priority':
                        self.priority_queue.task_done()
                    else:
                        self.paper_queue.task_done()
                
            except Exception as e:
                self.logger.error(f"Error in worker thread {thread_name}: {e}")
                time.sleep(1)  # Brief pause before continuing
        
        self.logger.info(f"Worker thread {thread_name} stopped")
    
    def _result_processor_thread(self):
        """Thread for processing classification results.
        
        `task_done()` is always paired with a successful `get()` (even on
        handler failure or a falsy result) so a future `join()` on the
        result queue can never hang.
        """
        self.logger.info("Started result processor thread")
        
        while self.processing_active:
            try:
                result = self.result_queue.get(timeout=1.0)
            except Empty:
                continue
            
            try:
                if result:
                    self._handle_classification_result(result)
            except Exception as e:
                self.logger.error(f"Error in result processor thread: {e}")
                time.sleep(1)
            finally:
                self.result_queue.task_done()
        
        self.logger.info("Result processor thread stopped")
    
    def _record_success_stats(self, processing_time: float, queue_type: str):
        """Update throughput counters and the running-mean processing time.
        
        Shared by the first-attempt and retry success paths so both update
        statistics identically (the original retry path skipped the
        priority counter and the running average).
        """
        with self._stats_lock:
            self.realtime_stats['papers_processed'] += 1
            if queue_type == 'priority':
                self.realtime_stats['priority_papers_processed'] += 1
            # Incremental running mean: new_avg = (old_avg*(n-1) + x) / n
            count = self.realtime_stats['papers_processed']
            current_avg = self.realtime_stats['average_processing_time']
            self.realtime_stats['average_processing_time'] = (
                (current_avg * (count - 1) + processing_time) / count
            )
    
    def _process_paper_realtime(self, paper_data: Dict[str, Any], 
                              queue_type: str) -> Dict[str, Any]:
        """Process a single paper in real-time.
        
        Classifies the paper via the AI engine, retrying with exponential
        backoff (2**attempt seconds) on failure.
        
        Args:
            paper_data: Paper metadata dictionary
            queue_type: Originating queue label ('priority', 'normal', or
                'synchronous')
        
        Returns:
            Result dict with 'success', 'paper_id', 'paper_data',
            'processing_time', 'queue_type', 'processed_at', and either
            'classification_result' (plus optional 'retry_attempt') on
            success or 'error' on failure.
        """
        start_time = time.time()
        paper_id = paper_data.get('paper_id', paper_data.get('arxiv_id', 'unknown'))
        
        try:
            self.logger.info(f"Processing paper {paper_id} from {queue_type} queue")
            
            # Classify paper using AI engine
            classification_result = self.classification_engine.classify_paper(paper_data)
            
            processing_time = time.time() - start_time
            self._record_success_stats(processing_time, queue_type)
            
            return {
                'success': True,
                'paper_id': paper_id,
                'paper_data': paper_data,
                'classification_result': classification_result,
                'processing_time': processing_time,
                'queue_type': queue_type,
                'processed_at': datetime.utcnow().isoformat()
            }
            
        except Exception as e:
            self.logger.error(f"Error processing paper {paper_id}: {e}")
            with self._stats_lock:
                self.realtime_stats['processing_errors'] += 1
            
            # Try again with retry logic
            for attempt in range(self.retry_attempts):
                try:
                    time.sleep(2 ** attempt)  # Exponential backoff
                    classification_result = self.classification_engine.classify_paper(paper_data)
                    
                    processing_time = time.time() - start_time
                    self._record_success_stats(processing_time, queue_type)
                    
                    return {
                        'success': True,
                        'paper_id': paper_id,
                        'paper_data': paper_data,
                        'classification_result': classification_result,
                        'processing_time': processing_time,
                        'queue_type': queue_type,
                        'processed_at': datetime.utcnow().isoformat(),
                        'retry_attempt': attempt + 1
                    }
                    
                except Exception as retry_error:
                    self.logger.error(f"Retry {attempt + 1} failed for paper {paper_id}: {retry_error}")
                    continue
            
            # All retries failed — report the original error
            return {
                'success': False,
                'paper_id': paper_id,
                'paper_data': paper_data,
                'error': str(e),
                'processing_time': time.time() - start_time,
                'queue_type': queue_type,
                'processed_at': datetime.utcnow().isoformat()
            }
    
    def _handle_classification_result(self, result: Dict[str, Any]):
        """Handle a classification result.
        
        On success: kicks off an async recommendation refresh (if a DB is
        configured) and fires 'paper_classified' callbacks. On failure:
        fires 'processing_error' callbacks. Never raises.
        """
        try:
            paper_id = result['paper_id']
            
            if result['success']:
                # Successful classification
                self.logger.info(f"Successfully classified paper {paper_id}")
                
                # Update recommendations for interested users (async)
                if self.db:
                    self._update_user_recommendations_async(result)
                
                # Trigger success callbacks
                self._trigger_callbacks('paper_classified', result)
                
            else:
                # Classification failed
                self.logger.error(f"Failed to classify paper {paper_id}: {result.get('error')}")
                
                # Trigger error callbacks
                self._trigger_callbacks('processing_error', result)
                
        except Exception as e:
            self.logger.error(f"Error handling classification result: {e}")
    
    def _update_user_recommendations_async(self, result: Dict[str, Any]):
        """Update user recommendations asynchronously after new paper classification.
        
        Submits the work to the thread pool so the result processor thread
        is never blocked on database queries.
        """
        def update_recommendations():
            try:
                classification_result = result['classification_result']
                
                # Find users who might be interested in this paper
                interested_users = self._find_interested_users(classification_result)
                
                # Update recommendations for these users
                for user_id in interested_users[:100]:  # Limit to top 100 users
                    try:
                        # Clear cache to trigger refresh
                        self.recommendation_engine._clear_recommendation_cache(user_id)
                    except Exception as e:
                        self.logger.error(f"Error updating recommendations for user {user_id}: {e}")
                
                if interested_users:
                    self.logger.info(f"Updated recommendations for {len(interested_users)} users after classifying {result['paper_id']}")
                    
            except Exception as e:
                self.logger.error(f"Error in async recommendation update: {e}")
        
        # Run in thread pool to avoid blocking
        self.thread_pool.submit(update_recommendations)
    
    def _find_interested_users(self, classification_result: Dict[str, Any]) -> List[str]:
        """Find users who might be interested in the classified paper.
        
        Matches the paper's top semantic topics / arXiv categories against
        learned user preferences stored as JSONB. Returns at most 200
        user ids; empty list on any error or when no DB is configured.
        """
        if not self.db:
            return []
        
        try:
            # Extract paper topics and categories
            paper_topics = [topic.get('topic', '') for topic in 
                          classification_result.get('semantic_topics', [])]
            paper_categories = classification_result.get('arxiv_categories', [])
            
            if not paper_topics and not paper_categories:
                return []
            
            # Find users with matching preferences (parameterized — values
            # are passed as bind parameters, never interpolated)
            query_conditions = []
            query_params = []
            
            if paper_topics:
                query_conditions.append("preference_value::jsonb->'topics' @> %s")
                query_params.append(json.dumps(paper_topics[:3]))  # Top 3 topics
            
            if paper_categories:
                query_conditions.append("preference_value::jsonb->'arxiv_categories' @> %s")
                query_params.append(json.dumps(paper_categories[:2]))  # Top 2 categories
            
            if not query_conditions:
                return []
            
            query = f"""
                SELECT DISTINCT user_id
                FROM user_preferences
                WHERE preference_key = 'ai_learned_preferences'
                    AND ({' OR '.join(query_conditions)})
                LIMIT 200
            """
            
            cursor = self.db.execute(query, query_params)
            return [row[0] for row in cursor.fetchall()]
            
        except Exception as e:
            self.logger.error(f"Error finding interested users: {e}")
            return []
    
    def _validate_paper_data(self, paper_data: Dict[str, Any]) -> bool:
        """Validate paper data before processing.
        
        Requires a non-empty title and abstract, plus at least one of
        'paper_id' / 'arxiv_id'.
        """
        required_fields = ['title', 'abstract']
        
        for field in required_fields:
            if not paper_data.get(field):
                return False
        
        # Must have either paper_id or arxiv_id
        if not (paper_data.get('paper_id') or paper_data.get('arxiv_id')):
            return False
        
        return True
    
    def _trigger_callbacks(self, event_type: str, data: Any):
        """Trigger registered callbacks for an event.
        
        Each callback is isolated: one failing callback is logged and does
        not prevent the others from running.
        """
        callbacks = self.callbacks.get(event_type, [])
        
        for callback in callbacks:
            try:
                callback(data)
            except Exception as e:
                self.logger.error(f"Error in callback for {event_type}: {e}")
    
    def register_callback(self, event_type: str, callback: Callable):
        """
        Register callback for specific event.
        
        Args:
            event_type: Type of event ('paper_classified', 'processing_error', 'queue_full')
            callback: Callback function
        """
        if event_type in self.callbacks:
            self.callbacks[event_type].append(callback)
        else:
            self.logger.error(f"Unknown event type: {event_type}")
    
    def get_queue_status(self) -> Dict[str, Any]:
        """Get current queue status.
        
        Note: qsize() values are approximate under concurrent access.
        """
        return {
            'normal_queue_size': self.paper_queue.qsize(),
            'priority_queue_size': self.priority_queue.qsize(),
            'result_queue_size': self.result_queue.qsize(),
            'processing_active': self.processing_active,
            'worker_threads_active': len([t for t in self.worker_threads if t.is_alive()]),
            'stats': self.get_realtime_stats()
        }
    
    def get_realtime_stats(self) -> Dict[str, Any]:
        """Get a consistent snapshot of real-time processing statistics."""
        with self._stats_lock:
            return self.realtime_stats.copy()
    
    def clear_queues(self):
        """Clear all processing queues (use with caution).
        
        Drains every queue, pairing each removed item with task_done() so
        pending join() calls are released rather than left hanging.
        """
        for queue in (self.paper_queue, self.priority_queue, self.result_queue):
            while not queue.empty():
                try:
                    queue.get_nowait()
                    queue.task_done()
                except Empty:
                    break
        
        self.logger.warning("All processing queues cleared")
    
    def process_paper_sync(self, paper_data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Process a paper synchronously (for testing or urgent processing).
        
        Args:
            paper_data: Paper metadata dictionary
            
        Returns:
            Classification result
        """
        return self._process_paper_realtime(paper_data, 'synchronous')
    
    def integrate_with_scraper_service(self, scraper_callback: Callable[[Dict[str, Any]], None]):
        """
        Integrate with ArXiv scraper service.
        
        Args:
            scraper_callback: Callback function to receive new papers from scraper
        """
        
        def paper_received_callback(paper_data: Dict[str, Any]):
            """Handle new paper from scraper service."""
            try:
                # Determine priority based on paper characteristics
                priority = self._determine_paper_priority(paper_data)
                
                # Add to processing queue
                success = self.add_paper_for_classification(paper_data, priority=priority)
                
                if not success:
                    self.logger.warning(f"Failed to queue paper from scraper: {paper_data.get('arxiv_id')}")
                
            except Exception as e:
                self.logger.error(f"Error handling paper from scraper service: {e}")
        
        # Register our callback with scraper service
        scraper_callback(paper_received_callback)
        
        self.logger.info("Integrated with ArXiv scraper service")
    
    def _determine_paper_priority(self, paper_data: Dict[str, Any]) -> bool:
        """Determine if paper should be processed with high priority.
        
        Currently category-based only: core ML/AI arXiv categories are
        prioritized.
        """
        # High priority criteria
        high_priority_categories = ['cs.AI', 'cs.LG', 'cs.CV', 'cs.CL']
        primary_category = paper_data.get('primary_category', '')
        
        if primary_category in high_priority_categories:
            return True
        
        # Check for trending authors (would need author popularity data)
        # For now, just use category-based priority
        
        return False