"""
Semantic Similarity Engine

Embedding-based similarity engine for intelligent topic matching and 
paper recommendation. Uses pre-trained models for semantic understanding.
"""

import logging
import numpy as np
from typing import Dict, List, Any, Optional, Tuple, Union
import json
from datetime import datetime
import hashlib

try:
    from sentence_transformers import SentenceTransformer
    SENTENCE_TRANSFORMERS_AVAILABLE = True
except ImportError:
    SENTENCE_TRANSFORMERS_AVAILABLE = False
    SentenceTransformer = None

try:
    from sklearn.metrics.pairwise import cosine_similarity
    from sklearn.feature_extraction.text import TfidfVectorizer
    SKLEARN_AVAILABLE = True
except ImportError:
    SKLEARN_AVAILABLE = False


class SemanticSimilarityEngine:
    """
    Semantic similarity engine using embeddings for intelligent topic matching.
    
    Features:
    - Multiple embedding models (sentence-transformers, TF-IDF fallback)
    - Caching for improved performance
    - Similarity computation between papers and user preferences
    - Topic clustering and similarity analysis
    - Batch processing for efficiency
    """
    
    def __init__(self, config: Dict[str, Any]):
        """Initialize semantic similarity engine.

        Args:
            config: Configuration dictionary (model, cache and threshold
                settings; every key is optional and has a default)
        """
        self.config = config
        self.logger = logging.getLogger(__name__)

        # Model configuration
        self.model_name = config.get('model_name', 'all-MiniLM-L6-v2')
        self.use_gpu = config.get('use_gpu', False)
        self.batch_size = config.get('batch_size', 32)

        # Embedding cache configuration
        self.cache_embeddings = config.get('cache_embeddings', True)
        self.embedding_cache = {}
        self.max_cache_size = config.get('max_cache_size', 10000)

        # Embedding backends; _init_embedding_model fills in whichever is available
        self.embedding_model = None
        self.tfidf_vectorizer = None
        self._init_embedding_model()

        # Score cut-offs consumed by get_similarity_category
        self.similarity_thresholds = {
            level: config.get(f'{level}_similarity_threshold', default)
            for level, default in (('high', 0.8), ('medium', 0.6), ('low', 0.4))
        }

        self.logger.info(f"Semantic Similarity Engine initialized with model: {self.model_name}")
    
    def _init_embedding_model(self):
        """Initialize the embedding backend.

        Prefers a sentence-transformers model; falls back to a TF-IDF
        vectorizer when sentence-transformers is unavailable or fails to
        load. Leaves both backends as None (and logs an error) when
        neither library is installed.
        """
        def make_tfidf():
            # Single source of truth for the fallback vectorizer settings
            # (previously duplicated in the normal and error paths).
            return TfidfVectorizer(
                max_features=5000,
                stop_words='english',
                ngram_range=(1, 2)
            )

        try:
            if SENTENCE_TRANSFORMERS_AVAILABLE:
                # Try to load sentence-transformers model
                device = 'cuda' if self.use_gpu else 'cpu'
                self.embedding_model = SentenceTransformer(self.model_name, device=device)
                self.logger.info(f"Loaded sentence-transformers model: {self.model_name}")

            elif SKLEARN_AVAILABLE:
                # Fallback to TF-IDF
                self.tfidf_vectorizer = make_tfidf()
                self.logger.info("Using TF-IDF fallback for embeddings")

            else:
                self.logger.error("No embedding model available - install sentence-transformers or scikit-learn")

        except Exception as e:
            self.logger.error(f"Error initializing embedding model: {e}")
            # Fallback to TF-IDF if sentence-transformers loading failed
            if SKLEARN_AVAILABLE:
                self.tfidf_vectorizer = make_tfidf()
                self.logger.info("Fallback to TF-IDF after sentence-transformers failed")
    
    def generate_paper_embedding(self, paper_data: Dict[str, Any]) -> Optional[np.ndarray]:
        """
        Generate embedding for a paper from its metadata.
        
        Args:
            paper_data: Paper metadata (title, abstract, etc.)
            
        Returns:
            Paper embedding vector or None if generation fails
        """
        try:
            # Create text representation of paper
            title = paper_data.get('title', '')
            abstract = paper_data.get('abstract', '')
            categories = paper_data.get('subject_categories', [])
            
            # Combine text elements
            text_parts = [title, abstract]
            if categories:
                text_parts.append(' '.join(categories))
            
            paper_text = ' '.join(filter(None, text_parts))
            
            # Generate embedding
            return self.generate_text_embedding(paper_text)
            
        except Exception as e:
            self.logger.error(f"Error generating paper embedding: {e}")
            return None
    
    def generate_text_embedding(self, text: str) -> Optional[np.ndarray]:
        """
        Generate embedding for arbitrary text.
        
        Args:
            text: Input text
            
        Returns:
            Text embedding vector or None if generation fails
        """
        if not text or not text.strip():
            return None
        
        try:
            # Check cache first
            if self.cache_embeddings:
                cache_key = self._get_cache_key(text)
                if cache_key in self.embedding_cache:
                    return self.embedding_cache[cache_key]
            
            # Generate embedding
            if self.embedding_model:
                # Use sentence-transformers
                embedding = self.embedding_model.encode(
                    text, 
                    convert_to_numpy=True,
                    normalize_embeddings=True
                )
            elif self.tfidf_vectorizer:
                # Use TF-IDF (requires fitting first if not done)
                if not hasattr(self.tfidf_vectorizer, 'vocabulary_'):
                    # Need to fit vectorizer - use provided text as minimal corpus
                    self.tfidf_vectorizer.fit([text])
                
                embedding = self.tfidf_vectorizer.transform([text]).toarray()[0]
            else:
                self.logger.error("No embedding model available")
                return None
            
            # Cache embedding
            if self.cache_embeddings and embedding is not None:
                self._cache_embedding(text, embedding)
            
            return embedding
            
        except Exception as e:
            self.logger.error(f"Error generating text embedding: {e}")
            return None
    
    def compute_similarity_matrix(self, embeddings: List[np.ndarray]) -> np.ndarray:
        """
        Compute pairwise similarity matrix for a list of embeddings.
        
        Args:
            embeddings: List of embedding vectors
            
        Returns:
            Similarity matrix
        """
        try:
            if not embeddings:
                return np.array([])
            
            # Stack embeddings
            embedding_matrix = np.stack(embeddings)
            
            # Compute cosine similarity matrix
            similarity_matrix = cosine_similarity(embedding_matrix)
            
            return similarity_matrix
            
        except Exception as e:
            self.logger.error(f"Error computing similarity matrix: {e}")
            return np.array([])
    
    def cosine_similarity(self, embedding1: np.ndarray, embedding2: np.ndarray) -> float:
        """
        Compute cosine similarity between two embeddings.
        
        Args:
            embedding1: First embedding vector
            embedding2: Second embedding vector
            
        Returns:
            Cosine similarity score (0-1)
        """
        try:
            # Handle different dimensionalities
            if embedding1.shape != embedding2.shape:
                self.logger.warning(f"Embedding dimension mismatch: {embedding1.shape} vs {embedding2.shape}")
                return 0.0
            
            # Compute cosine similarity
            if SKLEARN_AVAILABLE:
                similarity = cosine_similarity(
                    embedding1.reshape(1, -1), 
                    embedding2.reshape(1, -1)
                )[0, 0]
            else:
                # Manual cosine similarity computation
                dot_product = np.dot(embedding1, embedding2)
                norm1 = np.linalg.norm(embedding1)
                norm2 = np.linalg.norm(embedding2)
                
                if norm1 == 0 or norm2 == 0:
                    return 0.0
                
                similarity = dot_product / (norm1 * norm2)
            
            # Ensure similarity is in [0, 1] range (cosine can be negative)
            similarity = max(0.0, min(1.0, (similarity + 1) / 2))
            
            return float(similarity)
            
        except Exception as e:
            self.logger.error(f"Error computing cosine similarity: {e}")
            return 0.0
    
    def compute_text_similarity(self, text1: str, text2: str) -> float:
        """
        Compute semantic similarity between two text strings.
        
        Args:
            text1: First text string
            text2: Second text string
            
        Returns:
            Similarity score (0-1)
        """
        try:
            # Generate embeddings
            embedding1 = self.generate_text_embedding(text1)
            embedding2 = self.generate_text_embedding(text2)
            
            if embedding1 is None or embedding2 is None:
                # Fallback to simple text overlap
                return self._simple_text_similarity(text1, text2)
            
            # Compute similarity
            return self.cosine_similarity(embedding1, embedding2)
            
        except Exception as e:
            self.logger.error(f"Error computing text similarity: {e}")
            return 0.0
    
    def find_similar_papers(self, query_embedding: np.ndarray, 
                           paper_embeddings: List[Tuple[str, np.ndarray]], 
                           top_k: int = 10) -> List[Tuple[str, float]]:
        """
        Find papers most similar to a query embedding.
        
        Args:
            query_embedding: Query embedding vector
            paper_embeddings: List of (paper_id, embedding) tuples
            top_k: Number of top results to return
            
        Returns:
            List of (paper_id, similarity_score) tuples
        """
        try:
            similarities = []
            
            for paper_id, paper_embedding in paper_embeddings:
                similarity = self.cosine_similarity(query_embedding, paper_embedding)
                similarities.append((paper_id, similarity))
            
            # Sort by similarity descending
            similarities.sort(key=lambda x: x[1], reverse=True)
            
            return similarities[:top_k]
            
        except Exception as e:
            self.logger.error(f"Error finding similar papers: {e}")
            return []
    
    def compute_user_paper_similarity(self, user_preferences: Dict[str, Any], 
                                    paper_classification: Dict[str, Any]) -> Dict[str, float]:
        """
        Compute similarity between user preferences and paper classification.
        
        Args:
            user_preferences: User preference profile
            paper_classification: Paper classification results
            
        Returns:
            Dictionary of similarity scores by category
        """
        try:
            similarities = {}
            
            # Topic similarity
            user_topics = [item[0] for item in user_preferences.get('topics', [])]
            paper_topics = [topic.get('topic', '') for topic in paper_classification.get('semantic_topics', [])]
            
            if user_topics and paper_topics:
                topic_similarities = []
                for user_topic in user_topics[:5]:  # Top 5 user topics
                    max_sim = 0.0
                    for paper_topic in paper_topics:
                        sim = self.compute_text_similarity(user_topic, paper_topic)
                        max_sim = max(max_sim, sim)
                    topic_similarities.append(max_sim)
                
                similarities['topics'] = np.mean(topic_similarities) if topic_similarities else 0.0
            else:
                similarities['topics'] = 0.0
            
            # Category similarity
            user_categories = [item[0] for item in user_preferences.get('categories', [])]
            paper_categories = [cat.get('category', '') for cat in paper_classification.get('standard_categories', [])]
            
            if user_categories and paper_categories:
                category_similarities = []
                for user_cat in user_categories[:5]:  # Top 5 user categories
                    max_sim = 0.0
                    for paper_cat in paper_categories:
                        sim = self.compute_text_similarity(user_cat.replace('_', ' '), paper_cat.replace('_', ' '))
                        max_sim = max(max_sim, sim)
                    category_similarities.append(max_sim)
                
                similarities['categories'] = np.mean(category_similarities) if category_similarities else 0.0
            else:
                similarities['categories'] = 0.0
            
            # ArXiv category similarity (exact matching)
            user_arxiv_cats = [item[0] for item in user_preferences.get('arxiv_categories', [])]
            paper_arxiv_cats = paper_classification.get('arxiv_categories', [])
            
            if user_arxiv_cats and paper_arxiv_cats:
                # Exact match scoring
                matches = len(set(user_arxiv_cats) & set(paper_arxiv_cats))
                similarities['arxiv_categories'] = matches / max(len(user_arxiv_cats), len(paper_arxiv_cats))
            else:
                similarities['arxiv_categories'] = 0.0
            
            return similarities
            
        except Exception as e:
            self.logger.error(f"Error computing user-paper similarity: {e}")
            return {'topics': 0.0, 'categories': 0.0, 'arxiv_categories': 0.0}
    
    def cluster_topics(self, topics: List[str], n_clusters: int = 5) -> Dict[str, List[str]]:
        """
        Cluster topics based on semantic similarity.
        
        Uses KMeans over topic embeddings when scikit-learn is importable;
        otherwise falls back to greedy similarity grouping via
        _simple_topic_clustering. Topics whose embedding cannot be
        generated are silently dropped before clustering.
        
        Args:
            topics: List of topic strings
            n_clusters: Number of clusters to create
            
        Returns:
            Dictionary mapping cluster names ("cluster_<i>") to topic lists
        """
        try:
            if len(topics) < n_clusters:
                # Not enough topics to cluster: one singleton cluster per topic
                return {f"cluster_{i}": [topics[i]] for i in range(len(topics))}
            
            # Generate embeddings for all topics, keeping only those that succeed
            embeddings = []
            valid_topics = []
            
            for topic in topics:
                embedding = self.generate_text_embedding(topic)
                if embedding is not None:
                    embeddings.append(embedding)
                    valid_topics.append(topic)
            
            # Re-check: dropping failed embeddings may leave too few to cluster
            if len(embeddings) < n_clusters:
                return {f"cluster_{i}": [valid_topics[i]] for i in range(len(valid_topics))}
            
            # Perform clustering
            try:
                from sklearn.cluster import KMeans
                
                embeddings_matrix = np.stack(embeddings)
                # Fixed random_state keeps cluster assignments reproducible
                kmeans = KMeans(n_clusters=n_clusters, random_state=42)
                cluster_labels = kmeans.fit_predict(embeddings_matrix)
                
                # Group topics by their assigned cluster label
                clusters = {}
                for topic, label in zip(valid_topics, cluster_labels):
                    cluster_name = f"cluster_{label}"
                    if cluster_name not in clusters:
                        clusters[cluster_name] = []
                    clusters[cluster_name].append(topic)
                
                return clusters
                
            except ImportError:
                # Fallback: simple similarity-based grouping
                return self._simple_topic_clustering(valid_topics, embeddings, n_clusters)
            
        except Exception as e:
            self.logger.error(f"Error clustering topics: {e}")
            # Last-resort degradation: singleton clusters for the first topics
            return {f"cluster_{i}": [topics[i]] for i in range(min(len(topics), n_clusters))}
    
    def _simple_topic_clustering(self, topics: List[str], 
                               embeddings: List[np.ndarray], 
                               n_clusters: int) -> Dict[str, List[str]]:
        """Simple clustering based on similarity."""
        clusters = {}
        used_topics = set()
        
        for i, (topic, embedding) in enumerate(zip(topics, embeddings)):
            if topic in used_topics:
                continue
            
            cluster_name = f"cluster_{len(clusters)}"
            clusters[cluster_name] = [topic]
            used_topics.add(topic)
            
            # Find similar topics
            for j, (other_topic, other_embedding) in enumerate(zip(topics, embeddings)):
                if j != i and other_topic not in used_topics:
                    similarity = self.cosine_similarity(embedding, other_embedding)
                    if similarity > self.similarity_thresholds['medium']:
                        clusters[cluster_name].append(other_topic)
                        used_topics.add(other_topic)
            
            if len(clusters) >= n_clusters:
                break
        
        # Add remaining topics to existing clusters or create new ones
        for topic in topics:
            if topic not in used_topics:
                if len(clusters) < n_clusters:
                    cluster_name = f"cluster_{len(clusters)}"
                    clusters[cluster_name] = [topic]
                else:
                    # Add to smallest cluster
                    smallest_cluster = min(clusters.keys(), key=lambda k: len(clusters[k]))
                    clusters[smallest_cluster].append(topic)
        
        return clusters
    
    def _simple_text_similarity(self, text1: str, text2: str) -> float:
        """Jaccard word-overlap similarity between two texts (fallback scorer)."""
        try:
            tokens_a = set(text1.lower().split())
            tokens_b = set(text2.lower().split())

            # No words on either side means nothing to compare
            if not tokens_a or not tokens_b:
                return 0.0

            union = tokens_a | tokens_b
            return len(tokens_a & tokens_b) / len(union) if union else 0.0

        except Exception:
            return 0.0
    
    def _get_cache_key(self, text: str) -> str:
        """Generate cache key for text."""
        return hashlib.md5(text.encode('utf-8')).hexdigest()
    
    def _cache_embedding(self, text: str, embedding: np.ndarray):
        """Cache an embedding with LRU-like eviction."""
        if len(self.embedding_cache) >= self.max_cache_size:
            # Remove oldest entry (simple approach)
            oldest_key = next(iter(self.embedding_cache))
            del self.embedding_cache[oldest_key]
        
        cache_key = self._get_cache_key(text)
        self.embedding_cache[cache_key] = embedding.copy()
    
    def batch_generate_embeddings(self, texts: List[str], 
                                 batch_size: Optional[int] = None) -> List[Optional[np.ndarray]]:
        """
        Generate embeddings for multiple texts efficiently.
        
        Args:
            texts: List of text strings
            batch_size: Batch size for processing (uses instance default if None)
            
        Returns:
            List of embedding vectors (same order as input)
        """
        try:
            if not texts:
                return []
            
            batch_size = batch_size or self.batch_size
            embeddings = []
            
            # Process in batches for efficiency
            for i in range(0, len(texts), batch_size):
                batch_texts = texts[i:i + batch_size]
                
                if self.embedding_model:
                    # Use sentence-transformers batch processing
                    batch_embeddings = self.embedding_model.encode(
                        batch_texts,
                        convert_to_numpy=True,
                        normalize_embeddings=True,
                        batch_size=len(batch_texts)
                    )
                    embeddings.extend(batch_embeddings)
                    
                else:
                    # Process individually for TF-IDF or fallback
                    for text in batch_texts:
                        embedding = self.generate_text_embedding(text)
                        embeddings.append(embedding)
            
            return embeddings
            
        except Exception as e:
            self.logger.error(f"Error in batch embedding generation: {e}")
            return [None] * len(texts)
    
    def get_similarity_category(self, similarity_score: float) -> str:
        """
        Categorize a similarity score as high/medium/low/very_low.

        Args:
            similarity_score: Similarity score (0-1)

        Returns:
            Similarity category string
        """
        # Walk the bands from strictest to loosest; first match wins
        for category in ('high', 'medium', 'low'):
            if similarity_score >= self.similarity_thresholds[category]:
                return category
        return 'very_low'
    
    def clear_cache(self):
        """Clear the embedding cache."""
        self.embedding_cache.clear()
        self.logger.info("Embedding cache cleared")
    
    def get_cache_info(self) -> Dict[str, Any]:
        """Return cache statistics (size, capacity, observed hit ratio).

        The hit/request counters are created lazily, so default both to 0
        and guard the division explicitly: the previous getattr default of
        1 for requests masked — but did not fix — a ZeroDivisionError when
        the attribute existed with value 0.
        """
        requests = getattr(self, '_cache_requests', 0)
        hits = getattr(self, '_cache_hits', 0)
        return {
            'cache_size': len(self.embedding_cache),
            'max_cache_size': self.max_cache_size,
            'cache_hit_ratio': (hits / requests) if requests else 0.0
        }