"""
AI Classification Engine

Main orchestrator that extends existing qwen3 integration for intelligent
paper classification and personalized user matching.
"""

import json
import logging
import re
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple

# Import existing qwen3 integration. These modules live in sibling projects,
# so the import can legitimately fail when this package is used standalone; in
# that case the engine degrades to keyword-based fallback classification.
try:
    from document_review_system.integrations.qwen3_client import Qwen3DocumentGenerator
    from arxiv_processor.paper_analyzer import PaperStructureAnalyzer
except ImportError:
    # Use logging rather than print so the message respects the host
    # application's logging configuration (handlers, levels, destinations).
    logging.getLogger(__name__).warning(
        "Could not import existing qwen3 integration. "
        "Classification features will be disabled."
    )
    Qwen3DocumentGenerator = None
    PaperStructureAnalyzer = None

from ..embeddings.similarity_engine import SemanticSimilarityEngine
from ..learning.user_preference_learner import UserPreferenceLearner
from ..learning.feedback_system import FeedbackLearningSystem
from ..utils.scoring import RecommendationScorer
from ..utils.text_processing import EnhancedTextProcessor


class AIClassificationEngine:
    """
    Main AI Classification Engine that orchestrates intelligent paper classification
    and personalized user matching using existing qwen3 infrastructure.
    
    Features:
    - Extends existing paper_analyzer.py with semantic understanding
    - Uses qwen3 for deep content analysis beyond keyword matching
    - Learns user preferences from interaction history
    - Provides confidence scores for all classifications
    - Supports both real-time and batch processing
    """
    
    def __init__(self, config: Dict[str, Any], database_manager=None):
        """Initialize the AI Classification Engine.
        
        Args:
            config: Configuration dictionary with model settings
            database_manager: Database connection manager (optional; when
                absent, classification results are not persisted)
        """
        self.config = config
        self.db = database_manager
        self.logger = logging.getLogger(__name__)
        
        # Initialize text processor
        self.text_processor = EnhancedTextProcessor()
        
        # Initialize existing qwen3 components (may be unavailable)
        self._init_qwen3_components()
        
        # Initialize new AI components
        self._init_ai_components()
        
        # Standard topic categories for classification
        self.standard_topics = [
            "machine_learning", "deep_learning", "neural_networks", "computer_vision",
            "natural_language_processing", "reinforcement_learning", "optimization",
            "statistics", "probability", "mathematics", "physics", "numerical_analysis",
            "partial_differential_equations", "finite_element_methods", "algorithms",
            "data_structures", "distributed_systems", "cryptography", "quantum_computing",
            "bioinformatics", "computational_biology", "robotics", "human_computer_interaction",
            "software_engineering", "information_retrieval", "databases", "networks",
            "security", "graphics", "theoretical_computer_science", "artificial_intelligence"
        ]
        
        self.logger.info("AI Classification Engine initialized successfully")
    
    def _init_qwen3_components(self):
        """Initialize existing qwen3 components.
        
        Either component may be None when the sibling projects are not
        importable; all callers check for None before use.
        """
        try:
            # Existing paper structure analyzer with qwen3
            self.paper_analyzer = PaperStructureAnalyzer() if PaperStructureAnalyzer else None
            
            # Existing qwen3 document generator
            self.qwen3_generator = Qwen3DocumentGenerator() if Qwen3DocumentGenerator else None
            
            if self.paper_analyzer and self.qwen3_generator:
                self.logger.info("Successfully connected to existing qwen3 infrastructure")
            else:
                self.logger.warning("qwen3 components not available - using fallback methods")
                
        except Exception as e:
            # Constructor failures are downgraded to fallback mode rather than
            # propagating, so the engine stays usable without qwen3.
            self.logger.error(f"Failed to initialize qwen3 components: {e}")
            self.paper_analyzer = None
            self.qwen3_generator = None
    
    def _init_ai_components(self):
        """Initialize new AI classification components.
        
        Each component receives its own sub-section of the config dict so the
        sections can evolve independently.
        """
        # Semantic similarity engine for topic matching
        self.similarity_engine = SemanticSimilarityEngine(self.config.get('embedding', {}))
        
        # User preference learning system
        self.preference_learner = UserPreferenceLearner(
            self.config.get('learning', {}), 
            self.db
        )
        
        # Feedback learning system
        self.feedback_system = FeedbackLearningSystem(
            self.config.get('feedback', {}), 
            self.db
        )
        
        # Recommendation scorer
        self.scorer = RecommendationScorer(self.config.get('scoring', {}))
    
    def classify_paper(self, paper_data: Dict[str, Any], 
                      user_context: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """
        Perform intelligent paper classification with confidence scores.
        
        Args:
            paper_data: Paper metadata (title, abstract, authors, etc.)
            user_context: Optional user context for personalized classification
            
        Returns:
            Classification results with topics, confidence scores, and reasoning.
            On any internal failure a minimal fallback classification is
            returned instead of raising.
        """
        try:
            self.logger.info(f"Classifying paper: {paper_data.get('title', 'Unknown')[:100]}...")
            
            # Step 1: Extract semantic topics using qwen3
            semantic_topics = self._extract_semantic_topics(paper_data)
            
            # Step 2: Classify into standard categories
            standard_classification = self._classify_standard_categories(paper_data, semantic_topics)
            
            # Step 3: Generate embeddings for similarity matching
            paper_embedding = self.similarity_engine.generate_paper_embedding(paper_data)
            
            # Step 4: Compute topic similarities
            topic_similarities = self._compute_topic_similarities(paper_embedding, semantic_topics)
            
            # Step 5: Generate confidence scores
            confidence_scores = self._compute_confidence_scores(
                semantic_topics, standard_classification, topic_similarities
            )
            
            # Step 6: Create classification result
            classification_result = {
                'paper_id': paper_data.get('paper_id'),
                'arxiv_id': paper_data.get('arxiv_id'),
                'semantic_topics': semantic_topics,
                'standard_categories': standard_classification,
                'topic_similarities': topic_similarities,
                'confidence_scores': confidence_scores,
                'paper_embedding': paper_embedding.tolist() if paper_embedding is not None else None,
                # datetime.utcnow() is deprecated (Python 3.12+); use an aware
                # UTC timestamp. NOTE: the ISO string now carries a "+00:00"
                # offset suffix that the naive form did not.
                'classification_timestamp': datetime.now(timezone.utc).isoformat(),
                'algorithm_version': self.config.get('version', '1.0'),
                'reasoning': self._generate_classification_reasoning(
                    semantic_topics, standard_classification
                )
            }
            
            # Step 7: Store classification results
            if self.db:
                self._store_classification_results(classification_result)
            
            self.logger.info(f"Successfully classified paper with {len(semantic_topics)} topics")
            return classification_result
            
        except Exception as e:
            self.logger.error(f"Error classifying paper: {e}")
            return self._create_fallback_classification(paper_data)
    
    def _extract_semantic_topics(self, paper_data: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Extract semantic topics using qwen3 analysis.
        
        Falls back to keyword-based extraction when the qwen3 generator is
        unavailable or the generation call fails.
        """
        if not self.qwen3_generator:
            return self._fallback_topic_extraction(paper_data)
        
        try:
            # Create comprehensive analysis prompt
            prompt = self._create_topic_extraction_prompt(paper_data)
            
            # Get qwen3 analysis
            response = self.qwen3_generator.generate_document(prompt, "semantic_topic_analysis")
            
            # Parse qwen3 response to extract structured topics
            topics = self._parse_qwen3_topics(response)
            
            return topics
            
        except Exception as e:
            self.logger.error(f"qwen3 topic extraction failed: {e}")
            return self._fallback_topic_extraction(paper_data)
    
    def _create_topic_extraction_prompt(self, paper_data: Dict[str, Any]) -> str:
        """Create prompt for qwen3 semantic topic extraction.
        
        The prompt requests JSON output in the exact schema consumed by
        _parse_qwen3_topics; keep the two in sync when changing either.
        """
        title = paper_data.get('title', '')
        abstract = paper_data.get('abstract', '')
        categories = paper_data.get('subject_categories', [])
        
        prompt = f"""
        Analyze this academic paper and extract semantic topics with detailed understanding.
        Go beyond simple keyword matching to understand the core concepts, methodologies, 
        and application areas.

        Paper Title: {title}

        Abstract: {abstract}

        ArXiv Categories: {', '.join(categories) if categories else 'None provided'}

        Please provide a structured analysis in JSON format with the following information:

        {{
            "primary_topics": [
                {{
                    "topic": "specific_topic_name",
                    "confidence": 0.95,
                    "reasoning": "Why this topic is relevant",
                    "keywords": ["key", "terms", "related"]
                }}
            ],
            "secondary_topics": [
                {{
                    "topic": "secondary_topic", 
                    "confidence": 0.75,
                    "reasoning": "Connection to paper",
                    "keywords": ["relevant", "terms"]
                }}
            ],
            "methodologies": [
                {{
                    "method": "methodology_used",
                    "confidence": 0.85,
                    "context": "How it's applied in the paper"
                }}
            ],
            "application_domains": [
                {{
                    "domain": "application_area",
                    "confidence": 0.80,
                    "relevance": "How the work applies to this domain"
                }}
            ],
            "technical_level": "theoretical/experimental/applied",
            "novelty_indicators": ["novel aspects identified"],
            "interdisciplinary_connections": ["fields this work connects"]
        }}

        Focus on understanding the semantic meaning and relationships between concepts.
        Consider both explicit mentions and implicit connections in the text.
        Provide confidence scores based on how clearly each topic is indicated in the content.
        """
        
        return prompt
    
    def _parse_qwen3_topics(self, response: str) -> List[Dict[str, Any]]:
        """Parse qwen3 response to extract structured topics.
        
        Extracts the outermost {...} block (qwen3 often wraps JSON in prose),
        parses it, and flattens primary topics, secondary topics and
        methodologies into one list of topic dicts. Falls back to line-based
        text parsing when no usable JSON is found.
        """
        try:
            # Greedy match grabs from the first "{" to the last "}" so nested
            # objects inside the JSON do not truncate the match.
            match = re.search(r'\{.*\}', response, re.DOTALL)
            
            if match:
                parsed_data = json.loads(match.group(0))
                
                # Convert to standard format
                topics = []
                
                # Primary and secondary topics share the same entry shape, so
                # one loop handles both (replaces two duplicated loops).
                for topic_type in ('primary', 'secondary'):
                    for topic in parsed_data.get(f'{topic_type}_topics', []):
                        topics.append({
                            'topic': topic.get('topic'),
                            'type': topic_type,
                            'confidence': float(topic.get('confidence', 0.5)),
                            'reasoning': topic.get('reasoning', ''),
                            'keywords': topic.get('keywords', [])
                        })
                
                # Methodologies use 'method'/'context' keys instead.
                for method in parsed_data.get('methodologies', []):
                    topics.append({
                        'topic': method.get('method'),
                        'type': 'methodology',
                        'confidence': float(method.get('confidence', 0.5)),
                        'reasoning': method.get('context', ''),
                        'keywords': []
                    })
                
                return topics
                
        # The original "except (json.JSONDecodeError, Exception)" was a
        # redundant tuple equivalent to catching Exception; model output is
        # arbitrary, so a broad catch (logged) is intentional here.
        except Exception as e:
            self.logger.error(f"Failed to parse qwen3 topics: {e}")
        
        # Fallback parsing
        return self._fallback_parse_topics(response)
    
    def _fallback_topic_extraction(self, paper_data: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Fallback topic extraction without qwen3.
        
        BUG FIX: the keyword matching below never used self.paper_analyzer,
        yet was previously gated on its presence — when the analyzer was
        missing this method silently returned [] instead of extracting
        anything. It now always runs the keyword-based analysis.
        """
        topics = []
        
        try:
            title = paper_data.get('title', '')
            abstract = paper_data.get('abstract', '')
            
            # Enhanced keyword-based analysis over title + abstract
            text = f"{title} {abstract}".lower()
            
            for topic in self.standard_topics:
                keywords = self._get_topic_keywords(topic)
                matches = sum(1 for keyword in keywords if keyword in text)
                
                if matches > 0:
                    # Cap at 0.9: keyword matching alone never yields full
                    # confidence.
                    confidence = min(0.9, matches / len(keywords))
                    if confidence > 0.3:
                        topics.append({
                            'topic': topic,
                            'type': 'primary' if confidence > 0.6 else 'secondary',
                            'confidence': confidence,
                            'reasoning': f'Keyword matching with {matches}/{len(keywords)} keywords',
                            'keywords': [k for k in keywords if k in text]
                        })
            
        except Exception as e:
            self.logger.error(f"Fallback topic extraction failed: {e}")
        
        return topics[:10]  # Limit to top 10 topics
    
    def _get_topic_keywords(self, topic: str) -> List[str]:
        """Get keywords associated with a topic.
        
        Topics without a curated keyword list fall back to the topic name
        itself with underscores replaced by spaces.
        """
        keyword_map = {
            'machine_learning': ['machine learning', 'ml', 'learning algorithm', 'supervised learning', 'unsupervised learning'],
            'deep_learning': ['deep learning', 'neural network', 'deep neural', 'cnn', 'rnn', 'lstm', 'transformer'],
            'computer_vision': ['computer vision', 'image processing', 'object detection', 'image classification'],
            'natural_language_processing': ['nlp', 'natural language', 'text processing', 'language model'],
            'optimization': ['optimization', 'minimize', 'maximize', 'gradient descent', 'convex optimization'],
            'numerical_analysis': ['numerical', 'finite difference', 'finite element', 'discretization'],
            'partial_differential_equations': ['pde', 'partial differential', 'differential equation', 'boundary value'],
            'statistics': ['statistics', 'statistical', 'probability', 'bayesian', 'inference'],
            'algorithms': ['algorithm', 'complexity', 'time complexity', 'data structure'],
            'artificial_intelligence': ['artificial intelligence', 'ai', 'intelligent systems', 'expert systems']
        }
        
        return keyword_map.get(topic, [topic.replace('_', ' ')])
    
    def _classify_standard_categories(self, paper_data: Dict[str, Any], 
                                    semantic_topics: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Classify paper into standard categories.
        
        Scores each standard category by its best semantic-similarity match
        against the extracted topics, weighted by topic confidence.
        (Removed an unused `topic_names` local from the original.)
        """
        classifications = []
        
        for category in self.standard_topics:
            # Keep the best confidence-weighted similarity across all topics.
            relevance_score = 0.0
            matching_topics = []
            
            for topic in semantic_topics:
                similarity = self.similarity_engine.compute_text_similarity(
                    category.replace('_', ' '), 
                    topic['topic']
                )
                if similarity > 0.5:
                    relevance_score = max(relevance_score, similarity * topic['confidence'])
                    matching_topics.append(topic['topic'])
            
            if relevance_score > 0.3:
                classifications.append({
                    'category': category,
                    'confidence': relevance_score,
                    'matching_topics': matching_topics,
                    'is_primary': relevance_score > 0.7
                })
        
        # Sort by confidence
        classifications.sort(key=lambda x: x['confidence'], reverse=True)
        return classifications[:10]  # Top 10 categories
    
    def _compute_topic_similarities(self, paper_embedding, semantic_topics: List[Dict[str, Any]]) -> Dict[str, float]:
        """Compute similarity scores for various topic combinations.
        
        Returns a mapping of broad research-area name -> cosine similarity
        between the paper embedding and that area's text embedding. Empty
        when no paper embedding is available.
        """
        if paper_embedding is None:
            return {}
        
        similarities = {}
        
        # Compute similarities to standard research areas
        research_areas = [
            'artificial intelligence', 'machine learning', 'data science',
            'computer vision', 'natural language processing', 'robotics',
            'theoretical computer science', 'mathematics', 'physics',
            'engineering', 'biology', 'chemistry', 'medicine'
        ]
        
        for area in research_areas:
            area_embedding = self.similarity_engine.generate_text_embedding(area)
            if area_embedding is not None:
                similarity = self.similarity_engine.cosine_similarity(paper_embedding, area_embedding)
                similarities[area] = float(similarity)
        
        return similarities
    
    def _compute_confidence_scores(self, semantic_topics: List[Dict[str, Any]], 
                                 standard_classification: List[Dict[str, Any]], 
                                 topic_similarities: Dict[str, float]) -> Dict[str, float]:
        """Compute overall confidence scores for the classification.
        
        Each component score is the mean over its inputs, with a floor of 0.1
        when the input is empty; the overall score is a fixed-weight blend
        (0.4 semantic / 0.3 category / 0.3 similarity).
        """
        scores = {}
        
        # Overall classification confidence
        if semantic_topics:
            avg_semantic_confidence = sum(t['confidence'] for t in semantic_topics) / len(semantic_topics)
            scores['semantic_classification'] = avg_semantic_confidence
        else:
            scores['semantic_classification'] = 0.1
        
        # Standard category confidence
        if standard_classification:
            avg_standard_confidence = sum(c['confidence'] for c in standard_classification) / len(standard_classification)
            scores['category_classification'] = avg_standard_confidence
        else:
            scores['category_classification'] = 0.1
        
        # Topic similarity confidence
        if topic_similarities:
            avg_similarity = sum(topic_similarities.values()) / len(topic_similarities)
            scores['topic_similarity'] = avg_similarity
        else:
            scores['topic_similarity'] = 0.1
        
        # Combined confidence score
        scores['overall_confidence'] = (
            scores['semantic_classification'] * 0.4 +
            scores['category_classification'] * 0.3 +
            scores['topic_similarity'] * 0.3
        )
        
        return scores
    
    def _generate_classification_reasoning(self, semantic_topics: List[Dict[str, Any]], 
                                         standard_classification: List[Dict[str, Any]]) -> str:
        """Generate human-readable reasoning for the classification."""
        reasoning_parts = []
        
        if semantic_topics:
            primary_topics = [t['topic'] for t in semantic_topics if t['type'] == 'primary']
            if primary_topics:
                reasoning_parts.append(f"Primary topics identified: {', '.join(primary_topics[:3])}")
        
        if standard_classification:
            top_categories = [c['category'] for c in standard_classification[:3]]
            reasoning_parts.append(f"Top categories: {', '.join(top_categories)}")
        
        if not reasoning_parts:
            reasoning_parts.append("Classification based on basic content analysis")
        
        return ". ".join(reasoning_parts) + "."
    
    def _store_classification_results(self, classification_result: Dict[str, Any]):
        """Store classification results in database.
        
        Writes semantic topics and AI categories to paper_classifications
        (upsert on conflict) and the remaining metadata as one JSON blob in
        paper_metadata. Rolls back on any failure; errors are logged, not
        raised, so a storage problem never breaks classification.
        """
        if not self.db:
            return
        
        try:
            # Store in paper_classifications table
            paper_id = classification_result.get('paper_id')
            if not paper_id:
                # Without a primary key there is nothing to upsert against.
                return
            
            # Store semantic topics
            for topic in classification_result.get('semantic_topics', []):
                self.db.execute("""
                    INSERT INTO paper_classifications 
                    (paper_id, classification_type, classification_value, confidence_score, is_primary, created_by)
                    VALUES (%s, %s, %s, %s, %s, %s)
                    ON CONFLICT (paper_id, classification_type, classification_value) 
                    DO UPDATE SET confidence_score = EXCLUDED.confidence_score
                """, (
                    paper_id, 'semantic_topic', topic['topic'], 
                    topic['confidence'], topic['type'] == 'primary', 'ai_classifier'
                ))
            
            # Store standard categories
            for category in classification_result.get('standard_categories', []):
                self.db.execute("""
                    INSERT INTO paper_classifications 
                    (paper_id, classification_type, classification_value, confidence_score, is_primary, created_by)
                    VALUES (%s, %s, %s, %s, %s, %s)
                    ON CONFLICT (paper_id, classification_type, classification_value)
                    DO UPDATE SET confidence_score = EXCLUDED.confidence_score
                """, (
                    paper_id, 'ai_category', category['category'], 
                    category['confidence'], category['is_primary'], 'ai_classifier'
                ))
            
            # Store paper metadata
            metadata = {
                'confidence_scores': classification_result.get('confidence_scores'),
                'topic_similarities': classification_result.get('topic_similarities'),
                'algorithm_version': classification_result.get('algorithm_version'),
                'reasoning': classification_result.get('reasoning'),
                'paper_embedding': classification_result.get('paper_embedding')
            }
            
            self.db.execute("""
                INSERT INTO paper_metadata (paper_id, metadata_key, metadata_value)
                VALUES (%s, %s, %s)
                ON CONFLICT (paper_id, metadata_key)
                DO UPDATE SET metadata_value = EXCLUDED.metadata_value, updated_at = CURRENT_TIMESTAMP
            """, (paper_id, 'ai_classification', json.dumps(metadata)))
            
            self.db.commit()
            
        except Exception as e:
            self.logger.error(f"Failed to store classification results: {e}")
            if self.db:
                self.db.rollback()
    
    def _create_fallback_classification(self, paper_data: Dict[str, Any]) -> Dict[str, Any]:
        """Create fallback classification when qwen3 is not available.
        
        Mirrors the shape of a full classify_paper() result so downstream
        consumers never need to special-case the fallback.
        """
        return {
            'paper_id': paper_data.get('paper_id'),
            'arxiv_id': paper_data.get('arxiv_id'),
            'semantic_topics': [],
            'standard_categories': [],
            'topic_similarities': {},
            'confidence_scores': {'overall_confidence': 0.1},
            'paper_embedding': None,
            # Aware UTC timestamp; utcnow() is deprecated in Python 3.12+.
            'classification_timestamp': datetime.now(timezone.utc).isoformat(),
            'algorithm_version': 'fallback',
            'reasoning': 'Fallback classification - qwen3 not available'
        }
    
    def _fallback_parse_topics(self, response: str) -> List[Dict[str, Any]]:
        """Fallback parsing when JSON parsing fails.
        
        Heuristically scans "key: value" lines mentioning topic-like words
        and treats the value as a topic name with fixed 0.5 confidence.
        """
        topics = []
        
        # Simple text parsing to extract topics
        lines = response.split('\n')
        for line in lines:
            line = line.strip()
            if any(keyword in line.lower() for keyword in ['topic', 'category', 'domain', 'method']):
                # Extract potential topic name
                parts = line.split(':')
                if len(parts) > 1:
                    topic_name = parts[1].strip().strip('"\'')
                    # Length check filters out bare punctuation/abbreviations.
                    if topic_name and len(topic_name) > 3:
                        topics.append({
                            'topic': topic_name,
                            'type': 'inferred',
                            'confidence': 0.5,
                            'reasoning': 'Extracted from text response',
                            'keywords': []
                        })
        
        return topics[:5]  # Limit fallback topics
    
    def get_user_recommendations(self, user_id: str, limit: int = 10, 
                               paper_pool: Optional[List[str]] = None) -> List[Dict[str, Any]]:
        """
        Generate personalized recommendations for a user.
        
        Args:
            user_id: User identifier
            limit: Maximum number of recommendations
            paper_pool: Optional list of paper IDs to consider; defaults to
                papers from the last 7 days (up to 1000)
            
        Returns:
            List of paper recommendations with scores, best first. Empty on
            any internal failure.
        """
        try:
            # Get user preferences
            user_preferences = self.preference_learner.get_user_preferences(user_id)
            
            # Get papers to consider
            if paper_pool is None:
                paper_pool = self._get_recent_papers(days=7, limit=1000)
            
            recommendations = []
            
            for paper_id in paper_pool:
                # Get paper classification; unclassified papers are skipped.
                paper_classification = self._get_paper_classification(paper_id)
                
                if paper_classification:
                    # Compute recommendation score
                    score = self.scorer.compute_recommendation_score(
                        user_preferences, paper_classification
                    )
                    
                    if score > 0.1:  # Minimum threshold
                        recommendations.append({
                            'paper_id': paper_id,
                            'recommendation_score': score,
                            'reasoning': self._generate_recommendation_reasoning(
                                user_preferences, paper_classification
                            )
                        })
            
            # Sort by score and return top results
            recommendations.sort(key=lambda x: x['recommendation_score'], reverse=True)
            return recommendations[:limit]
            
        except Exception as e:
            self.logger.error(f"Error generating recommendations for user {user_id}: {e}")
            return []
    
    def _get_recent_papers(self, days: int, limit: int) -> List[str]:
        """Get recent papers for recommendation.
        
        Args:
            days: How far back to look, in whole days
            limit: Maximum number of paper IDs to return
        """
        if not self.db:
            return []
        
        try:
            # BUG FIX: the original used INTERVAL '%s days', placing the bind
            # placeholder inside a quoted SQL literal, where drivers do not
            # reliably substitute it. Multiplying a unit interval by the bound
            # integer keeps the query properly parameterized.
            cursor = self.db.execute("""
                SELECT paper_id FROM papers 
                WHERE submission_date >= CURRENT_DATE - (%s * INTERVAL '1 day')
                ORDER BY submission_date DESC
                LIMIT %s
            """, (days, limit))
            
            return [row[0] for row in cursor.fetchall()]
            
        except Exception as e:
            self.logger.error(f"Error getting recent papers: {e}")
            return []
    
    def _get_paper_classification(self, paper_id: str) -> Optional[Dict[str, Any]]:
        """Get stored classification for a paper.
        
        Reads back the JSON blob written by _store_classification_results;
        returns None when the paper has no stored classification or on error.
        """
        if not self.db:
            return None
        
        try:
            cursor = self.db.execute("""
                SELECT metadata_value FROM paper_metadata
                WHERE paper_id = %s AND metadata_key = 'ai_classification'
            """, (paper_id,))
            
            result = cursor.fetchone()
            if result:
                return json.loads(result[0])
            
            return None
            
        except Exception as e:
            self.logger.error(f"Error getting paper classification: {e}")
            return None
    
    def _generate_recommendation_reasoning(self, user_preferences: Dict[str, Any], 
                                         paper_classification: Dict[str, Any]) -> str:
        """Generate reasoning for recommendation.
        
        Names up to two overlapping topics and two overlapping categories
        between the user's preferences and the paper's classification.
        """
        reasons = []
        
        # Check topic matches
        user_topics = user_preferences.get('preferred_topics', [])
        paper_topics = [t['topic'] for t in paper_classification.get('semantic_topics', [])]
        
        common_topics = set(user_topics) & set(paper_topics)
        if common_topics:
            reasons.append(f"Matches your interests in: {', '.join(list(common_topics)[:2])}")
        
        # Check category matches
        user_categories = user_preferences.get('preferred_categories', [])
        paper_categories = [c['category'] for c in paper_classification.get('standard_categories', [])]
        
        common_categories = set(user_categories) & set(paper_categories)
        if common_categories:
            reasons.append(f"Relevant to: {', '.join(list(common_categories)[:2])}")
        
        return "; ".join(reasons) if reasons else "General interest match"