"""
Semantic Paper Classifier

Advanced paper classification using qwen3 for deep semantic understanding
of academic content beyond simple keyword matching.
"""

import json
import logging
import re
import time
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional, Tuple

try:
    from document_review_system.integrations.qwen3_client import Qwen3DocumentGenerator
except ImportError:
    Qwen3DocumentGenerator = None


class SemanticPaperClassifier:
    """
    Advanced semantic paper classifier that uses qwen3 for intelligent
    content analysis and topic extraction.

    Features:
    - Deep semantic understanding using qwen3
    - Context-aware topic extraction
    - Confidence scoring for all classifications
    - Multi-level analysis (topics, methodologies, applications)
    - Reasoning generation for classifications
    """

    def __init__(self, config: Dict[str, Any]):
        """Initialize the semantic paper classifier.

        Args:
            config: Configuration dictionary. Recognized keys:
                - min_confidence (float): confidence floor, default 0.3
                - primary_threshold (float): primary-topic cutoff, default 0.7
                - version (str): version tag stamped onto every result
        """
        self.config = config
        self.logger = logging.getLogger(__name__)

        # Initialize qwen3 client (sets self.qwen3_client, possibly to None)
        self._init_qwen3()

        # Classification schemas used by the keyword-based fallback path
        self.topic_hierarchy = self._load_topic_hierarchy()
        self.methodology_types = self._load_methodology_types()
        self.application_domains = self._load_application_domains()

        # Confidence thresholds.
        # NOTE(review): loaded but not yet consulted by the classification
        # paths below — presumably reserved for downstream filtering; confirm
        # before removing.
        self.min_confidence = config.get('min_confidence', 0.3)
        self.primary_threshold = config.get('primary_threshold', 0.7)

        self.logger.info("Semantic Paper Classifier initialized")

    def _init_qwen3(self):
        """Initialize the qwen3 client; fall back to None on any failure.

        Any exception (including the client class being unavailable) leaves
        self.qwen3_client as None, which routes classification through the
        keyword-based fallback path.
        """
        try:
            if Qwen3DocumentGenerator:
                self.qwen3_client = Qwen3DocumentGenerator()
                self.logger.info("Connected to qwen3 for semantic analysis")
            else:
                self.qwen3_client = None
                self.logger.warning("qwen3 not available - using fallback classification")
        except Exception as e:
            self.logger.error("Failed to initialize qwen3: %s", e)
            self.qwen3_client = None

    def _load_topic_hierarchy(self) -> Dict[str, List[str]]:
        """Return the parent-topic -> subtopics map used for keyword matching."""
        return {
            'artificial_intelligence': [
                'machine_learning', 'deep_learning', 'neural_networks',
                'reinforcement_learning', 'computer_vision', 'natural_language_processing',
                'knowledge_representation', 'expert_systems', 'robotics'
            ],
            'machine_learning': [
                'supervised_learning', 'unsupervised_learning', 'semi_supervised_learning',
                'transfer_learning', 'meta_learning', 'online_learning',
                'ensemble_methods', 'feature_selection'
            ],
            'deep_learning': [
                'convolutional_neural_networks', 'recurrent_neural_networks',
                'transformers', 'generative_models', 'autoencoders',
                'adversarial_networks', 'graph_neural_networks'
            ],
            'mathematics': [
                'analysis', 'algebra', 'geometry', 'topology', 'number_theory',
                'combinatorics', 'probability', 'statistics', 'optimization',
                'numerical_analysis', 'partial_differential_equations'
            ],
            'computer_science': [
                'algorithms', 'data_structures', 'theoretical_computer_science',
                'distributed_systems', 'databases', 'software_engineering',
                'human_computer_interaction', 'computer_graphics', 'security'
            ],
            'physics': [
                'theoretical_physics', 'mathematical_physics', 'quantum_physics',
                'condensed_matter', 'particle_physics', 'astrophysics',
                'statistical_mechanics', 'thermodynamics'
            ]
        }

    def _load_methodology_types(self) -> List[str]:
        """Return the flat list of recognized methodology types."""
        return [
            'theoretical_analysis', 'experimental_validation', 'simulation_study',
            'empirical_analysis', 'comparative_study', 'survey_study',
            'case_study', 'mathematical_modeling', 'algorithm_design',
            'system_implementation', 'dataset_creation', 'benchmark_evaluation'
        ]

    def _load_application_domains(self) -> List[str]:
        """Return the flat list of recognized application domains."""
        return [
            'healthcare', 'finance', 'autonomous_vehicles', 'natural_language',
            'computer_vision', 'robotics', 'cybersecurity', 'education',
            'environment', 'manufacturing', 'telecommunications', 'entertainment',
            'social_networks', 'recommendation_systems', 'scientific_computing'
        ]

    @staticmethod
    def _safe_float(value: Any, default: float = 0.5) -> float:
        """Coerce a model-supplied score to float; return default on junk.

        qwen3 output is untrusted JSON: a confidence may arrive as None,
        a string, or be missing entirely. A bare float() call would raise
        TypeError/ValueError and abort the whole classification.
        """
        try:
            return float(value)
        except (TypeError, ValueError):
            return default

    def classify_paper(self, paper_data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Perform comprehensive semantic classification of a paper.

        Args:
            paper_data: Paper metadata; 'title', 'abstract', and
                'subject_categories' drive the analysis, while 'paper_id'
                and 'arxiv_id' are copied through to the result.

        Returns:
            Comprehensive classification results. On any internal failure an
            error-marked classification (method='error') is returned instead
            of raising.
        """
        try:
            self.logger.info("Classifying paper: %s...", paper_data.get('title', 'Unknown')[:100])

            # Extract core information
            title = paper_data.get('title', '')
            abstract = paper_data.get('abstract', '')
            categories = paper_data.get('subject_categories', [])

            # Perform multi-level semantic analysis (qwen3 when available,
            # keyword-based fallback otherwise)
            if self.qwen3_client:
                classification_result = self._qwen3_semantic_analysis(title, abstract, categories)
            else:
                classification_result = self._fallback_classification(title, abstract, categories)

            # Enhance with confidence scoring (mutates in place)
            self._add_confidence_scores(classification_result)

            # Add metadata. Timestamp is timezone-aware UTC: datetime.utcnow()
            # is deprecated (3.12+) and produced naive, unlabeled timestamps.
            classification_result.update({
                'paper_id': paper_data.get('paper_id'),
                'arxiv_id': paper_data.get('arxiv_id'),
                'classification_timestamp': datetime.now(timezone.utc).isoformat(),
                'classifier_version': self.config.get('version', '1.0'),
                'method': 'qwen3_semantic' if self.qwen3_client else 'fallback'
            })

            self.logger.info("Paper classification completed successfully")
            return classification_result

        except Exception as e:
            self.logger.error("Error in paper classification: %s", e)
            return self._create_error_classification(paper_data)

    def _qwen3_semantic_analysis(self, title: str, abstract: str,
                               categories: List[str]) -> Dict[str, Any]:
        """Run the full qwen3 round trip: prompt -> generate -> parse."""

        # Create comprehensive analysis prompt
        prompt = self._create_analysis_prompt(title, abstract, categories)

        # Get qwen3 analysis
        response = self.qwen3_client.generate_document(prompt, "semantic_paper_analysis")

        # Parse structured response
        return self._parse_qwen3_analysis(response)

    def _create_analysis_prompt(self, title: str, abstract: str, categories: List[str]) -> str:
        """Create the detailed JSON-schema prompt for qwen3 semantic analysis."""

        prompt = f"""
        Perform deep semantic analysis of this academic paper. Go beyond keyword matching 
        to understand the fundamental concepts, methodologies, and contributions.

        Paper Title: {title}

        Abstract: {abstract}

        ArXiv Categories: {', '.join(categories) if categories else 'None provided'}

        Provide a comprehensive analysis in the following JSON format:

        {{
            "semantic_topics": {{
                "primary_topics": [
                    {{
                        "topic": "specific_topic_name",
                        "confidence": 0.95,
                        "evidence": "specific text evidence from title/abstract",
                        "reasoning": "detailed explanation of why this topic is primary",
                        "hierarchy_level": "which parent category this belongs to",
                        "related_concepts": ["concept1", "concept2"]
                    }}
                ],
                "secondary_topics": [
                    {{
                        "topic": "secondary_topic_name",
                        "confidence": 0.75,
                        "evidence": "supporting evidence",
                        "reasoning": "why this is relevant but secondary",
                        "connection": "how it relates to primary topics"
                    }}
                ]
            }},
            "methodological_analysis": {{
                "primary_methodology": {{
                    "type": "methodology_type",
                    "confidence": 0.90,
                    "description": "detailed description of the methodology",
                    "novelty": "what's novel about the approach",
                    "techniques": ["technique1", "technique2"]
                }},
                "supporting_methods": [
                    {{
                        "type": "supporting_method",
                        "role": "how this method supports the main approach"
                    }}
                ]
            }},
            "application_domains": [
                {{
                    "domain": "application_area",
                    "relevance": 0.85,
                    "context": "how the work applies to this domain",
                    "potential_impact": "expected impact in this domain"
                }}
            ],
            "technical_characteristics": {{
                "complexity_level": "basic/intermediate/advanced/expert",
                "theoretical_vs_practical": "theoretical/practical/mixed",
                "novelty_assessment": "incremental/significant/breakthrough",
                "interdisciplinary_nature": ["field1", "field2"] or []
            }},
            "contribution_analysis": {{
                "main_contributions": [
                    "contribution 1 description",
                    "contribution 2 description"
                ],
                "significance": "local/moderate/high/transformative",
                "target_audience": "specific research community"
            }},
            "semantic_relationships": {{
                "builds_upon": ["related work areas"],
                "enables": ["what this work enables"],
                "challenges": ["what problems this addresses"],
                "limitations": ["identified limitations"]
            }}
        }}

        Key Instructions:
        1. Focus on semantic meaning, not just keyword presence
        2. Consider implicit connections and relationships
        3. Evaluate the depth and breadth of contributions
        4. Assess interdisciplinary connections
        5. Provide confidence scores based on evidence strength
        6. Include specific evidence from the text when possible
        7. Consider the work's position in the broader research landscape

        Provide only the JSON response without additional commentary.
        """

        return prompt

    def _parse_qwen3_analysis(self, response: str) -> Dict[str, Any]:
        """Parse the qwen3 response, degrading gracefully on bad output.

        Tries JSON extraction first, then plain-text scraping, then an
        empty classification — never raises.
        """
        try:
            # Extract the outermost JSON object from the response
            json_match = re.search(r'\{.*\}', response, re.DOTALL)
            if json_match:
                json_str = json_match.group(0)
                parsed_data = json.loads(json_str)

                return self._normalize_qwen3_output(parsed_data)
            else:
                self.logger.warning("No JSON found in qwen3 response")
                return self._parse_text_response(response)

        except json.JSONDecodeError as e:
            self.logger.error("JSON parsing error: %s", e)
            return self._parse_text_response(response)
        except Exception as e:
            self.logger.error("Error parsing qwen3 analysis: %s", e)
            return self._create_empty_classification()

    def _normalize_qwen3_output(self, parsed_data: Dict[str, Any]) -> Dict[str, Any]:
        """Normalize parsed qwen3 JSON to the standard classification shape.

        All numeric scores go through _safe_float so malformed values in the
        model output cannot abort normalization.
        """

        normalized = {
            'topics': [],
            'methodologies': [],
            'applications': [],
            'technical_level': 'intermediate',
            'contributions': [],
            'semantic_relationships': {},
            'confidence_scores': {}
        }

        # Extract semantic topics
        semantic_topics = parsed_data.get('semantic_topics', {})

        # Primary topics
        for topic in semantic_topics.get('primary_topics', []):
            normalized['topics'].append({
                'name': topic.get('topic'),
                'type': 'primary',
                'confidence': self._safe_float(topic.get('confidence', 0.5)),
                'evidence': topic.get('evidence', ''),
                'reasoning': topic.get('reasoning', ''),
                'hierarchy': topic.get('hierarchy_level', ''),
                'related_concepts': topic.get('related_concepts', [])
            })

        # Secondary topics
        for topic in semantic_topics.get('secondary_topics', []):
            normalized['topics'].append({
                'name': topic.get('topic'),
                'type': 'secondary',
                'confidence': self._safe_float(topic.get('confidence', 0.5)),
                'evidence': topic.get('evidence', ''),
                'reasoning': topic.get('reasoning', ''),
                'connection': topic.get('connection', '')
            })

        # Methodological analysis
        method_analysis = parsed_data.get('methodological_analysis', {})
        primary_method = method_analysis.get('primary_methodology', {})

        if primary_method:
            normalized['methodologies'].append({
                'type': primary_method.get('type'),
                'role': 'primary',
                'confidence': self._safe_float(primary_method.get('confidence', 0.5)),
                'description': primary_method.get('description', ''),
                'novelty': primary_method.get('novelty', ''),
                'techniques': primary_method.get('techniques', [])
            })

        # Supporting methods (fixed confidence: the schema carries no score)
        for method in method_analysis.get('supporting_methods', []):
            normalized['methodologies'].append({
                'type': method.get('type'),
                'role': 'supporting',
                'confidence': 0.6,
                'description': method.get('role', '')
            })

        # Application domains
        for app in parsed_data.get('application_domains', []):
            normalized['applications'].append({
                'domain': app.get('domain'),
                'relevance': self._safe_float(app.get('relevance', 0.5)),
                'context': app.get('context', ''),
                'impact': app.get('potential_impact', '')
            })

        # Technical characteristics
        tech_chars = parsed_data.get('technical_characteristics', {})
        normalized['technical_level'] = tech_chars.get('complexity_level', 'intermediate')
        normalized['theoretical_practical'] = tech_chars.get('theoretical_vs_practical', 'mixed')
        normalized['novelty'] = tech_chars.get('novelty_assessment', 'incremental')
        normalized['interdisciplinary'] = tech_chars.get('interdisciplinary_nature', [])

        # Contributions
        contrib_analysis = parsed_data.get('contribution_analysis', {})
        normalized['contributions'] = contrib_analysis.get('main_contributions', [])
        normalized['significance'] = contrib_analysis.get('significance', 'moderate')
        normalized['target_audience'] = contrib_analysis.get('target_audience', '')

        # Semantic relationships
        normalized['semantic_relationships'] = parsed_data.get('semantic_relationships', {})

        return normalized

    def _parse_text_response(self, response: str) -> Dict[str, Any]:
        """Best-effort parse of a free-text response when JSON parsing fails.

        Scans line by line, tracking which section ('topics', 'methods',
        'applications') the text is in, and extracts 'label: value' entries
        for each section. All text is lowercased before matching.
        """

        classification = self._create_empty_classification()

        # Simple text parsing to extract information
        lines = response.lower().split('\n')

        current_section = None
        for line in lines:
            line = line.strip()

            # Identify sections
            if 'topic' in line or 'subject' in line:
                current_section = 'topics'
            elif 'method' in line or 'approach' in line:
                current_section = 'methods'
            elif 'application' in line or 'domain' in line:
                current_section = 'applications'

            # Extract 'label: value' content based on the current section.
            # (':' in line guarantees split(':', 1) yields two parts.)
            if current_section == 'topics' and ':' in line:
                topic_name = line.split(':', 1)[1].strip()
                if len(topic_name) > 3:
                    classification['topics'].append({
                        'name': topic_name,
                        'type': 'inferred',
                        'confidence': 0.5,
                        'evidence': line,
                        'reasoning': 'Extracted from text response'
                    })
            elif current_section == 'methods' and ':' in line:
                method_name = line.split(':', 1)[1].strip()
                if len(method_name) > 3:
                    classification['methodologies'].append({
                        'type': method_name,
                        'role': 'inferred',
                        'confidence': 0.5,
                        'description': 'Extracted from text response'
                    })
            elif current_section == 'applications' and ':' in line:
                domain_name = line.split(':', 1)[1].strip()
                if len(domain_name) > 3:
                    classification['applications'].append({
                        'domain': domain_name,
                        'relevance': 0.5,
                        'context': 'Extracted from text response'
                    })

        return classification

    def _fallback_classification(self, title: str, abstract: str,
                               categories: List[str]) -> Dict[str, Any]:
        """Keyword-based classification used when qwen3 is unavailable.

        Scores each parent topic 0.3 for a direct hit plus 0.1 per matched
        subtopic; topics scoring above 0.2 are kept (primary above 0.5).
        Methodologies and application domains are simple substring hits.
        """

        classification = self._create_empty_classification()

        # Combine text for analysis (case-insensitive substring matching)
        text = f"{title} {abstract}".lower()

        # Topic detection based on hierarchy
        for parent_topic, subtopics in self.topic_hierarchy.items():
            parent_score = 0
            matched_subtopics = []

            # Check parent topic
            if parent_topic.replace('_', ' ') in text:
                parent_score += 0.3

            # Check subtopics
            for subtopic in subtopics:
                subtopic_words = subtopic.replace('_', ' ')
                if subtopic_words in text:
                    matched_subtopics.append(subtopic)
                    parent_score += 0.1

            if parent_score > 0.2:
                classification['topics'].append({
                    'name': parent_topic,
                    'type': 'primary' if parent_score > 0.5 else 'secondary',
                    'confidence': min(0.9, parent_score),
                    'evidence': f"Keywords found: {', '.join(matched_subtopics[:3])}",
                    'reasoning': 'Keyword-based detection',
                    'subtopics': matched_subtopics
                })

        # Methodology detection
        for methodology in self.methodology_types:
            method_words = methodology.replace('_', ' ')
            if method_words in text:
                classification['methodologies'].append({
                    'type': methodology,
                    'role': 'primary',
                    'confidence': 0.6,
                    'description': f'Detected via keyword: {method_words}'
                })

        # Application domain detection
        for domain in self.application_domains:
            domain_words = domain.replace('_', ' ')
            if domain_words in text:
                classification['applications'].append({
                    'domain': domain,
                    'relevance': 0.5,
                    'context': f'Detected via keyword: {domain_words}'
                })

        # Set basic characteristics
        classification['technical_level'] = self._infer_technical_level(text)
        classification['theoretical_practical'] = self._infer_theoretical_practical(text)

        return classification

    def _infer_technical_level(self, text: str) -> str:
        """Infer technical level by counting indicator keywords.

        Returns 'advanced' or 'basic' only when one side leads the other
        by more than 2 hits; otherwise 'intermediate'.
        """
        advanced_indicators = [
            'theorem', 'proof', 'asymptotic', 'complexity', 'convergence',
            'optimal', 'rigorous', 'mathematical', 'analytical'
        ]

        basic_indicators = [
            'introduction', 'overview', 'survey', 'tutorial', 'basic',
            'simple', 'preliminary'
        ]

        advanced_count = sum(1 for indicator in advanced_indicators if indicator in text)
        basic_count = sum(1 for indicator in basic_indicators if indicator in text)

        if advanced_count > basic_count + 2:
            return 'advanced'
        elif basic_count > advanced_count + 2:
            return 'basic'
        else:
            return 'intermediate'

    def _infer_theoretical_practical(self, text: str) -> str:
        """Infer theoretical vs practical orientation via indicator counts.

        Same margin-of-2 rule as _infer_technical_level; ties are 'mixed'.
        """
        theoretical_indicators = [
            'theory', 'theorem', 'proof', 'mathematical', 'analytical',
            'formal', 'abstract'
        ]

        practical_indicators = [
            'implementation', 'system', 'application', 'experiment',
            'evaluation', 'practical', 'real-world', 'benchmark'
        ]

        theo_count = sum(1 for indicator in theoretical_indicators if indicator in text)
        prac_count = sum(1 for indicator in practical_indicators if indicator in text)

        if theo_count > prac_count + 2:
            return 'theoretical'
        elif prac_count > theo_count + 2:
            return 'practical'
        else:
            return 'mixed'

    def _add_confidence_scores(self, classification: Dict[str, Any]) -> None:
        """Attach per-category and weighted overall confidence (in place).

        Each category score is the mean of its items' confidences (0.1 when
        the category is empty); overall = 0.5*topics + 0.3*methods + 0.2*apps.
        """

        confidence_scores = {}

        # Topic confidence
        if classification.get('topics'):
            topic_confidences = [t.get('confidence', 0) for t in classification['topics']]
            confidence_scores['topics'] = sum(topic_confidences) / len(topic_confidences)
        else:
            confidence_scores['topics'] = 0.1

        # Methodology confidence
        if classification.get('methodologies'):
            method_confidences = [m.get('confidence', 0) for m in classification['methodologies']]
            confidence_scores['methodologies'] = sum(method_confidences) / len(method_confidences)
        else:
            confidence_scores['methodologies'] = 0.1

        # Application confidence
        if classification.get('applications'):
            app_relevances = [a.get('relevance', 0) for a in classification['applications']]
            confidence_scores['applications'] = sum(app_relevances) / len(app_relevances)
        else:
            confidence_scores['applications'] = 0.1

        # Overall confidence
        confidence_scores['overall'] = (
            confidence_scores['topics'] * 0.5 +
            confidence_scores['methodologies'] * 0.3 +
            confidence_scores['applications'] * 0.2
        )

        classification['confidence_scores'] = confidence_scores

    def _create_empty_classification(self) -> Dict[str, Any]:
        """Create the empty classification skeleton all paths start from."""
        return {
            'topics': [],
            'methodologies': [],
            'applications': [],
            'technical_level': 'unknown',
            'theoretical_practical': 'unknown',
            'novelty': 'unknown',
            'interdisciplinary': [],
            'contributions': [],
            'significance': 'unknown',
            'target_audience': 'unknown',
            'semantic_relationships': {},
            'confidence_scores': {}
        }

    def _create_error_classification(self, paper_data: Dict[str, Any]) -> Dict[str, Any]:
        """Create an error-marked classification carrying the paper's ids."""
        error_classification = self._create_empty_classification()
        error_classification.update({
            'paper_id': paper_data.get('paper_id'),
            'arxiv_id': paper_data.get('arxiv_id'),
            'error': 'Classification failed',
            # Timezone-aware UTC; datetime.utcnow() is deprecated (3.12+).
            'classification_timestamp': datetime.now(timezone.utc).isoformat(),
            'classifier_version': self.config.get('version', '1.0'),
            'method': 'error'
        })
        return error_classification

    def batch_classify_papers(self, papers_data: List[Dict[str, Any]],
                            batch_size: int = 10) -> List[Dict[str, Any]]:
        """
        Classify multiple papers in batches for efficiency.

        Args:
            papers_data: List of paper data dictionaries
            batch_size: Number of papers to process in each batch

        Returns:
            List of classification results, in input order
        """
        results: List[Dict[str, Any]] = []
        total_batches = (len(papers_data) + batch_size - 1) // batch_size

        for batch_index, start in enumerate(range(0, len(papers_data), batch_size)):
            batch = papers_data[start:start + batch_size]

            self.logger.info("Processing batch %d/%d", batch_index + 1, total_batches)

            results.extend(self.classify_paper(paper) for paper in batch)

            # Brief pause between batches to avoid overwhelming the system
            # (skipped after the final batch — nothing follows it)
            if start + batch_size < len(papers_data):
                time.sleep(0.1)

        return results