"""
Recommendation Scoring Algorithm

Advanced scoring system that combines multiple signals to generate
personalized recommendation scores (0-1 scale) for papers based on
user preferences and interaction patterns.
"""

import logging
import math
from datetime import datetime, timedelta, timezone
from typing import Any, Dict, List, Optional, Tuple

import numpy as np


class RecommendationScorer:
    """
    Advanced recommendation scoring system that combines multiple signals
    to generate personalized paper recommendations with confidence scores.
    
    Features:
    - Multi-signal scoring (semantic similarity, interaction patterns, novelty)
    - Time-aware recommendations (recent papers prioritized)
    - User behavior modeling (reading patterns, preferences)
    - Diversity promotion (avoiding echo chambers)
    - Confidence estimation for all scores
    - Explanation generation for recommendations
    """
    
    def __init__(self, config: Dict[str, Any]):
        """Initialize recommendation scorer.
        
        Args:
            config: Configuration dictionary. Optional keys:
                ``scoring_weights``, ``novelty_decay_days``,
                ``diversity_threshold``, ``min_confidence_threshold``,
                ``popularity_weight_decay``, ``interaction_type_weights``.
                Sensible defaults are used for anything missing.
        """
        self.config = config
        self.logger = logging.getLogger(__name__)
        
        # Relative contribution of each signal to the final weighted score.
        self.scoring_weights = config.get('scoring_weights', {
            'semantic_similarity': 0.35,    # Topic/content similarity
            'category_match': 0.25,         # ArXiv category preferences
            'author_preference': 0.15,      # Preferred authors
            'novelty_score': 0.10,          # Paper novelty/recency
            'popularity_score': 0.05,       # Trending papers
            'diversity_bonus': 0.10         # Diversity promotion
        })
        
        # Renormalize caller-supplied weights so they always sum to 1.0.
        # The 0.01 tolerance avoids churn from float rounding in configs.
        total_weight = sum(self.scoring_weights.values())
        if abs(total_weight - 1.0) > 0.01:
            for key in self.scoring_weights:
                self.scoring_weights[key] /= total_weight
        
        # Scoring parameters
        self.novelty_decay_days = config.get('novelty_decay_days', 30)
        self.diversity_threshold = config.get('diversity_threshold', 0.7)
        self.min_confidence_threshold = config.get('min_confidence_threshold', 0.1)
        self.popularity_weight_decay = config.get('popularity_weight_decay', 0.95)
        
        # Relative strength of each interaction type for behavior modeling.
        self.interaction_type_weights = config.get('interaction_type_weights', {
            'view': 1.0,
            'download': 2.5,
            'bookmark': 4.0,
            'rate': 3.5,
            'comment': 3.0,
            'share': 2.0
        })
        
        self.logger.info("Recommendation Scorer initialized")
    
    def compute_recommendation_score(self, user_preferences: Dict[str, Any], 
                                   paper_classification: Dict[str, Any],
                                   paper_metadata: Optional[Dict[str, Any]] = None,
                                   user_history: Optional[List[Dict[str, Any]]] = None) -> float:
        """
        Compute comprehensive recommendation score for a user-paper pair.
        
        Combines six component scores (see ``self.scoring_weights``) into a
        weighted sum, then scales by an estimated confidence and clamps to
        [0, 1]. Any internal failure is logged and yields 0.0 rather than
        propagating to the caller.
        
        Args:
            user_preferences: User preference profile
            paper_classification: Paper classification results
            paper_metadata: Additional paper metadata (submission date, popularity, etc.)
            user_history: User's recent interaction history
            
        Returns:
            Recommendation score (0-1)
        """
        try:
            # Each component scorer is defensive and returns a fallback on
            # missing data, so this dict always has all six keys.
            scores = {}
            
            # 1. Semantic similarity score
            scores['semantic_similarity'] = self._compute_semantic_similarity_score(
                user_preferences, paper_classification
            )
            
            # 2. Category match score
            scores['category_match'] = self._compute_category_match_score(
                user_preferences, paper_classification
            )
            
            # 3. Author preference score
            scores['author_preference'] = self._compute_author_preference_score(
                user_preferences, paper_metadata
            )
            
            # 4. Novelty score
            scores['novelty_score'] = self._compute_novelty_score(
                paper_metadata, user_preferences
            )
            
            # 5. Popularity score
            scores['popularity_score'] = self._compute_popularity_score(
                paper_metadata
            )
            
            # 6. Diversity bonus
            scores['diversity_bonus'] = self._compute_diversity_bonus(
                paper_classification, user_history
            )
            
            # Weighted combination of all components.
            final_score = 0.0
            for component, weight in self.scoring_weights.items():
                component_score = scores.get(component, 0.0)
                final_score += weight * component_score
            
            # Scale down scores we are less confident about (sparse signals
            # or an incomplete user profile).
            confidence = self._compute_score_confidence(scores, user_preferences)
            final_score *= confidence
            
            # Ensure score is in [0, 1] range
            final_score = max(0.0, min(1.0, final_score))
            
            # Log detailed scoring for debugging
            self._log_scoring_details(scores, final_score, confidence)
            
            return final_score
            
        except Exception as e:
            self.logger.error(f"Error computing recommendation score: {e}")
            return 0.0
    
    def _compute_semantic_similarity_score(self, user_preferences: Dict[str, Any], 
                                         paper_classification: Dict[str, Any]) -> float:
        """Compute semantic similarity between user preferences and paper.
        
        For each of the user's top topics, finds the best-matching paper
        topic (text similarity scaled by the paper topic's confidence), then
        averages those best matches weighted by the user's topic weights.
        Returns a 0.1 floor when either side has no topic data.
        """
        try:
            # user_preferences['topics'] is expected to be a list of
            # (topic, weight) pairs, strongest first.
            user_topics = user_preferences.get('topics', [])
            if not user_topics:
                return 0.1
            
            # Paper topics are dicts with 'topic' and 'confidence' keys.
            paper_topics = paper_classification.get('semantic_topics', [])
            if not paper_topics:
                return 0.1
            
            # Best paper-topic match per user topic.
            topic_scores = []
            for user_topic, user_weight in user_topics[:10]:  # Top 10 user topics
                max_match_score = 0.0
                
                for paper_topic in paper_topics:
                    paper_topic_name = paper_topic.get('topic', '')
                    paper_confidence = paper_topic.get('confidence', 0.5)
                    
                    # Simple text similarity (can be enhanced with embeddings).
                    # BUGFIX: do NOT multiply by user_weight here -- the
                    # aggregation below already weights by user preference,
                    # and doing both counted the weight twice (squared it).
                    similarity = self._compute_text_similarity(user_topic, paper_topic_name)
                    match_score = similarity * paper_confidence
                    max_match_score = max(max_match_score, match_score)
                
                topic_scores.append(max_match_score)
            
            # Aggregate topic scores
            if topic_scores:
                # Weighted average using the user's preference weights.
                user_weights = [weight for _, weight in user_topics[:len(topic_scores)]]
                weighted_score = sum(score * weight for score, weight in zip(topic_scores, user_weights))
                total_weight = sum(user_weights)
                return weighted_score / total_weight if total_weight > 0 else 0.1
            
            return 0.1
            
        except Exception as e:
            self.logger.error(f"Error computing semantic similarity score: {e}")
            return 0.1
    
    def _compute_category_match_score(self, user_preferences: Dict[str, Any], 
                                    paper_classification: Dict[str, Any]) -> float:
        """Compute category matching score.
        
        Prefers exact ArXiv-category matches (returns the strongest user
        weight among matches); otherwise tries hierarchical prefix matches
        (e.g. cs.AI vs cs.LG both under 'cs') at a 0.7 discount; falls back
        to standard categories, then to a 0.1 floor.
        """
        try:
            # ArXiv category preferences: list of (category, weight) pairs.
            user_arxiv_cats = dict(user_preferences.get('arxiv_categories', []))
            paper_arxiv_cats = paper_classification.get('arxiv_categories', [])
            
            if not user_arxiv_cats or not paper_arxiv_cats:
                # Check standard categories as fallback
                user_std_cats = dict(user_preferences.get('categories', []))
                paper_std_cats = [cat.get('category', '') for cat in 
                                paper_classification.get('standard_categories', [])]
                
                if user_std_cats and paper_std_cats:
                    match_score = 0.0
                    for paper_cat in paper_std_cats:
                        if paper_cat in user_std_cats:
                            match_score = max(match_score, user_std_cats[paper_cat])
                    return match_score
                
                return 0.1
            
            # Compute ArXiv category match
            exact_matches = set(user_arxiv_cats.keys()) & set(paper_arxiv_cats)
            
            if exact_matches:
                # Weight by user preference strength
                match_scores = [user_arxiv_cats[cat] for cat in exact_matches]
                return max(match_scores)  # Best matching category
            
            # Check for hierarchical matches (e.g., cs.AI matches cs.LG)
            hierarchical_score = 0.0
            for paper_cat in paper_arxiv_cats:
                paper_prefix = paper_cat.split('.')[0] if '.' in paper_cat else paper_cat
                for user_cat, user_weight in user_arxiv_cats.items():
                    user_prefix = user_cat.split('.')[0] if '.' in user_cat else user_cat
                    if paper_prefix == user_prefix:
                        # Same top-level archive, different subfield: 0.7 discount.
                        hierarchical_score = max(hierarchical_score, user_weight * 0.7)
            
            return max(0.1, hierarchical_score)
            
        except Exception as e:
            self.logger.error(f"Error computing category match score: {e}")
            return 0.1
    
    def _compute_author_preference_score(self, user_preferences: Dict[str, Any], 
                                       paper_metadata: Optional[Dict[str, Any]]) -> float:
        """Compute author preference score.
        
        Exact (normalized) name matches score the full user weight; a
        last-name-only match scores 60% of it. Returns 0.1 when either
        side lacks author data. Note: defaults to 0.0 when authors exist
        but none match (so a known non-match scores below "unknown").
        """
        try:
            # user_preferences['authors'] is a list of (name, weight) pairs.
            user_authors = dict(user_preferences.get('authors', []))
            
            if not user_authors or not paper_metadata:
                return 0.1
            
            paper_authors = paper_metadata.get('authors', [])
            if not paper_authors:
                return 0.1
            
            # Best match across all (paper author, preferred author) pairs.
            max_author_score = 0.0
            for paper_author in paper_authors:
                # Normalize author name for comparison
                paper_author_norm = self._normalize_author_name(paper_author)
                
                for user_author, user_weight in user_authors.items():
                    user_author_norm = self._normalize_author_name(user_author)
                    
                    # Exact match
                    if paper_author_norm == user_author_norm:
                        max_author_score = max(max_author_score, user_weight)
                    else:
                        # Partial match (last name)
                        paper_last = paper_author_norm.split()[-1] if paper_author_norm else ''
                        user_last = user_author_norm.split()[-1] if user_author_norm else ''
                        
                        if paper_last and user_last and paper_last == user_last:
                            max_author_score = max(max_author_score, user_weight * 0.6)
            
            return max_author_score
            
        except Exception as e:
            self.logger.error(f"Error computing author preference score: {e}")
            return 0.1
    
    def _compute_novelty_score(self, paper_metadata: Optional[Dict[str, Any]], 
                             user_preferences: Dict[str, Any]) -> float:
        """Compute paper novelty score based on recency and user preferences.
        
        Blends three signals: exponential time decay since submission (40%),
        an AI-assessed content-novelty label (40%), and the user's average
        novelty preference (20%). Each signal defaults to a neutral 0.5
        when its data is unavailable.
        """
        try:
            if not paper_metadata:
                return 0.5  # Neutral score when metadata unavailable
            
            # Time-based novelty
            submission_date = paper_metadata.get('submission_date')
            if isinstance(submission_date, str):
                try:
                    # Accept a trailing 'Z' (Zulu/UTC), which older
                    # fromisoformat versions cannot parse directly.
                    submission_date = datetime.fromisoformat(submission_date.replace('Z', '+00:00'))
                except (ValueError, TypeError):
                    # Unparseable date string: fall back to neutral time novelty.
                    submission_date = None
            
            time_novelty = 0.5  # Default
            if submission_date:
                # Compare naive-UTC against naive-UTC. datetime.utcnow() is
                # deprecated, so derive "now" from an aware UTC timestamp.
                now_utc = datetime.now(timezone.utc).replace(tzinfo=None)
                days_since_submission = (now_utc - submission_date.replace(tzinfo=None)).days
                # Clamp future-dated papers to "today" so exp() stays <= 1.
                days_since_submission = max(0, days_since_submission)
                # Exponential decay: newer papers get higher scores
                time_novelty = math.exp(-days_since_submission / self.novelty_decay_days)
            
            # Content novelty (if available in classification)
            content_novelty = 0.5
            ai_classification = paper_metadata.get('ai_classification', {})
            if ai_classification:
                novelty_assessment = ai_classification.get('novelty', 'unknown')
                novelty_mapping = {
                    'breakthrough': 1.0,
                    'significant': 0.8,
                    'incremental': 0.4,
                    'unknown': 0.5
                }
                content_novelty = novelty_mapping.get(novelty_assessment, 0.5)
            
            # User's novelty preference (if available)
            user_novelty_pref = 0.5
            novelty_prefs = dict(user_preferences.get('novelty', []))
            if novelty_prefs:
                # Average user's novelty preferences
                user_novelty_pref = sum(novelty_prefs.values()) / len(novelty_prefs)
            
            # Combine novelty signals
            combined_novelty = (
                time_novelty * 0.4 + 
                content_novelty * 0.4 + 
                user_novelty_pref * 0.2
            )
            
            return max(0.1, min(1.0, combined_novelty))
            
        except Exception as e:
            self.logger.error(f"Error computing novelty score: {e}")
            return 0.5
    
    def _compute_popularity_score(self, paper_metadata: Optional[Dict[str, Any]]) -> float:
        """Compute paper popularity score.
        
        Normalizes view/download/citation/bookmark counts against fixed
        caps, then combines them with citation-heavy weights. Returns the
        0.1 floor when metadata is missing.
        """
        try:
            if not paper_metadata:
                return 0.1
            
            # Metrics that indicate popularity
            view_count = paper_metadata.get('view_count', 0)
            download_count = paper_metadata.get('download_count', 0)
            citation_count = paper_metadata.get('citation_count', 0)
            bookmark_count = paper_metadata.get('bookmark_count', 0)
            
            # Fixed normalization caps (simple approach; anything above the
            # cap saturates at 1.0).
            max_views = 10000
            max_downloads = 5000
            max_citations = 100
            max_bookmarks = 1000
            
            view_score = min(1.0, view_count / max_views)
            download_score = min(1.0, download_count / max_downloads)
            citation_score = min(1.0, citation_count / max_citations)
            bookmark_score = min(1.0, bookmark_count / max_bookmarks)
            
            # Weight different popularity metrics
            popularity_score = (
                view_score * 0.2 +
                download_score * 0.3 +
                citation_score * 0.4 +
                bookmark_score * 0.1
            )
            
            return max(0.1, popularity_score)
            
        except Exception as e:
            self.logger.error(f"Error computing popularity score: {e}")
            return 0.1
    
    def _compute_diversity_bonus(self, paper_classification: Dict[str, Any], 
                               user_history: Optional[List[Dict[str, Any]]]) -> float:
        """Compute diversity bonus to avoid echo chambers.
        
        Compares the paper's topics with topics from the user's last 20
        interactions: the smaller the overlap, the larger the bonus (0.8 /
        0.6 / 0.3 tiers). Neutral 0.5 when either side has no topic data.
        """
        try:
            if not user_history:
                return 0.5  # Neutral when no history available
            
            # Extract topics from user's recent interactions.
            recent_topics = set()
            for interaction in user_history[-20:]:  # Last 20 interactions
                interaction_classification = interaction.get('ai_classification', {})
                for topic in interaction_classification.get('semantic_topics', []):
                    name = topic.get('topic', '')
                    # BUGFIX: skip empty names -- '' on both sides used to
                    # count as a shared topic and wrongly lower diversity.
                    if name:
                        recent_topics.add(name)
            
            if not recent_topics:
                return 0.5
            
            # Check paper topics for diversity
            paper_topics = set()
            for topic in paper_classification.get('semantic_topics', []):
                name = topic.get('topic', '')
                if name:
                    paper_topics.add(name)
            
            if not paper_topics:
                return 0.5
            
            # Fraction of the paper's topics the user has NOT recently seen.
            common_topics = recent_topics & paper_topics
            diversity_ratio = 1.0 - (len(common_topics) / len(paper_topics))
            
            # Apply diversity bonus if paper introduces new topics
            if diversity_ratio > self.diversity_threshold:
                return 0.8  # Bonus for diverse content
            elif diversity_ratio > 0.5:
                return 0.6  # Moderate diversity
            else:
                return 0.3  # Similar to recent history
            
        except Exception as e:
            self.logger.error(f"Error computing diversity bonus: {e}")
            return 0.5
    
    def _compute_score_confidence(self, scores: Dict[str, float], 
                                user_preferences: Dict[str, Any]) -> float:
        """Compute confidence in the recommendation score.
        
        Blends signal coverage (fraction of components above their 0.1
        floor), user-profile completeness, and the profile's own
        'confidence_score'. Never returns below min_confidence_threshold.
        """
        try:
            # Strictly > 0.1 so components sitting on their fallback floor
            # do not count as real signals.
            available_signals = sum(1 for score in scores.values() if score > 0.1)
            total_signals = len(self.scoring_weights)
            signal_coverage = available_signals / total_signals
            
            # Confidence based on user preference completeness (0.25 each).
            pref_completeness = 0.0
            pref_categories = ['topics', 'categories', 'authors', 'arxiv_categories']
            
            for category in pref_categories:
                if user_preferences.get(category):
                    pref_completeness += 0.25
            
            # Learning confidence from user preferences
            learning_confidence = user_preferences.get('confidence_score', 0.5)
            
            # Combined confidence
            combined_confidence = (
                signal_coverage * 0.4 +
                pref_completeness * 0.3 +
                learning_confidence * 0.3
            )
            
            return max(self.min_confidence_threshold, combined_confidence)
            
        except Exception as e:
            self.logger.error(f"Error computing score confidence: {e}")
            return self.min_confidence_threshold
    
    def _compute_text_similarity(self, text1: str, text2: str) -> float:
        """Compute simple text similarity (can be enhanced with embeddings).
        
        Jaccard similarity over lowercase word sets; 0.0 when either text
        is empty.
        """
        if not text1 or not text2:
            return 0.0
        
        # Simple word overlap similarity
        words1 = set(text1.lower().split())
        words2 = set(text2.lower().split())
        
        if not words1 or not words2:
            return 0.0
        
        intersection = words1 & words2
        union = words1 | words2
        
        return len(intersection) / len(union) if union else 0.0
    
    def _normalize_author_name(self, author_name: str) -> str:
        """Normalize author name for comparison.
        
        Strips common honorifics, converts "Last, First" to "First Last",
        and lowercases the result. Returns '' for empty input.
        """
        if not author_name:
            return ''
        
        # Remove common prefixes/suffixes
        name = author_name.strip()
        name = name.replace('Dr. ', '').replace('Prof. ', '').replace('Mr. ', '').replace('Ms. ', '')
        
        # Handle different formats (Last, First vs First Last)
        if ',' in name:
            parts = name.split(',')
            if len(parts) >= 2:
                last_name = parts[0].strip()
                first_name = parts[1].strip()
                name = f"{first_name} {last_name}"
        
        return name.lower().strip()
    
    def _log_scoring_details(self, scores: Dict[str, float], 
                           final_score: float, confidence: float):
        """Log detailed scoring information for debugging."""
        # Guard avoids building the formatted string unless DEBUG is on.
        if self.logger.isEnabledFor(logging.DEBUG):
            score_details = ', '.join([f"{k}: {v:.3f}" for k, v in scores.items()])
            self.logger.debug(f"Scoring details - {score_details}, final: {final_score:.3f}, confidence: {confidence:.3f}")
    
    def compute_batch_scores(self, user_preferences: Dict[str, Any], 
                           papers_data: List[Tuple[Dict[str, Any], Dict[str, Any]]], 
                           user_history: Optional[List[Dict[str, Any]]] = None) -> List[float]:
        """
        Compute recommendation scores for multiple papers efficiently.
        
        Args:
            user_preferences: User preference profile
            papers_data: List of (paper_classification, paper_metadata) tuples
            user_history: User's recent interaction history
            
        Returns:
            List of recommendation scores, one per input paper (all zeros
            on unexpected failure).
        """
        try:
            scores = []
            
            for paper_classification, paper_metadata in papers_data:
                score = self.compute_recommendation_score(
                    user_preferences, paper_classification, paper_metadata, user_history
                )
                scores.append(score)
            
            return scores
            
        except Exception as e:
            self.logger.error(f"Error computing batch scores: {e}")
            return [0.0] * len(papers_data)
    
    def explain_recommendation(self, user_preferences: Dict[str, Any], 
                             paper_classification: Dict[str, Any],
                             paper_metadata: Optional[Dict[str, Any]] = None,
                             score: Optional[float] = None) -> Dict[str, Any]:
        """
        Generate explanation for a recommendation score.
        
        Args:
            user_preferences: User preference profile
            paper_classification: Paper classification results
            paper_metadata: Additional paper metadata
            score: Pre-computed score (optional; computed when omitted)
            
        Returns:
            Explanation dictionary with reasoning factors, matching
            interests, novelty aspects, and potential concerns.
        """
        try:
            if score is None:
                score = self.compute_recommendation_score(
                    user_preferences, paper_classification, paper_metadata
                )
            
            explanation = {
                'recommendation_score': score,
                'score_category': self._get_score_category(score),
                'reasoning_factors': [],
                'matching_interests': [],
                'novelty_aspects': [],
                'potential_concerns': []
            }
            
            # Compare the user's top 5 topics with the paper's top 3.
            user_topics = [item[0] for item in user_preferences.get('topics', [])[:5]]
            paper_topics = [topic.get('topic', '') for topic in 
                          paper_classification.get('semantic_topics', [])[:3]]
            
            for user_topic in user_topics:
                for paper_topic in paper_topics:
                    similarity = self._compute_text_similarity(user_topic, paper_topic)
                    if similarity > 0.3:
                        explanation['matching_interests'].append({
                            'user_interest': user_topic,
                            'paper_topic': paper_topic,
                            'similarity': similarity
                        })
            
            # Generate reasoning factors
            if explanation['matching_interests']:
                explanation['reasoning_factors'].append(
                    f"Matches {len(explanation['matching_interests'])} of your research interests"
                )
            
            # Category matches
            user_categories = set(item[0] for item in user_preferences.get('arxiv_categories', []))
            paper_categories = set(paper_classification.get('arxiv_categories', []))
            category_matches = user_categories & paper_categories
            
            if category_matches:
                explanation['reasoning_factors'].append(
                    f"Published in preferred categories: {', '.join(list(category_matches)[:2])}"
                )
            
            # Novelty aspects
            if paper_metadata:
                submission_date = paper_metadata.get('submission_date')
                if submission_date:
                    explanation['novelty_aspects'].append('Recent publication')
            
            return explanation
            
        except Exception as e:
            self.logger.error(f"Error generating explanation: {e}")
            return {'recommendation_score': score or 0.0, 'error': 'Could not generate explanation'}
    
    def _get_score_category(self, score: float) -> str:
        """Categorize recommendation score into a human-readable bucket."""
        if score >= 0.8:
            return 'highly_recommended'
        elif score >= 0.6:
            return 'recommended'
        elif score >= 0.4:
            return 'moderately_relevant'
        elif score >= 0.2:
            return 'somewhat_relevant'
        else:
            return 'low_relevance'