"""
Personalized Recommendation Engine

High-level recommendation engine that orchestrates all AI classification
components to provide personalized paper recommendations for users.
"""

import logging
import time
from typing import Dict, List, Any, Optional, Tuple
from datetime import datetime, timedelta
import json

from .classification_engine import AIClassificationEngine
from .paper_classifier import SemanticPaperClassifier
from ..learning.user_preference_learner import UserPreferenceLearner
from ..learning.feedback_system import FeedbackLearningSystem
from ..embeddings.similarity_engine import SemanticSimilarityEngine
from ..utils.scoring import RecommendationScorer
from ..batch.batch_processor import BatchClassificationProcessor


class PersonalizedRecommendationEngine:
    """
    Main recommendation engine that provides personalized paper recommendations
    by coordinating all AI classification components.

    Features:
    - Real-time personalized recommendations
    - Multi-faceted scoring (content, preferences, novelty, diversity)
    - Adaptive recommendations based on user feedback
    - Cold-start handling for new users
    - Explanation generation for recommendations
    - A/B testing support

    NOTE(review): the "cold-start handling" and "A/B testing" bullets are not
    visibly implemented in this class — confirm they live in the collaborating
    components (preference learner / feedback system) before relying on them.
    """
    
    def __init__(self, config: Dict[str, Any], database_manager=None):
        """Set up the engine and wire together its collaborating components.

        Args:
            config: Top-level configuration dictionary; component-specific
                sub-sections are extracted by key ('learning', 'feedback',
                'embedding', 'scoring').
            database_manager: Optional database connection manager shared with
                the components that persist or read state.
        """
        self.config = config
        self.db = database_manager
        self.logger = logging.getLogger(__name__)

        # Component wiring — each collaborator gets only the config section
        # it needs (or the full config where the component expects it).
        self.classification_engine = AIClassificationEngine(config, database_manager)
        self.paper_classifier = SemanticPaperClassifier(config)
        self.preference_learner = UserPreferenceLearner(config.get('learning', {}), database_manager)
        self.feedback_system = FeedbackLearningSystem(config.get('feedback', {}), database_manager)
        self.similarity_engine = SemanticSimilarityEngine(config.get('embedding', {}))
        self.recommendation_scorer = RecommendationScorer(config.get('scoring', {}))
        self.batch_processor = BatchClassificationProcessor(config, database_manager)

        # Tunable recommendation behaviour, with defaults.
        self.default_recommendation_count = config.get('default_recommendation_count', 20)
        self.max_recommendation_count = config.get('max_recommendation_count', 100)
        self.recommendation_cache_hours = config.get('recommendation_cache_hours', 6)
        self.diversity_factor = config.get('diversity_factor', 0.2)

        # Counters surfaced by get_recommendation_stats().
        self.recommendation_stats = {
            'recommendations_generated': 0,
            'total_recommendation_time': 0.0,
            'cache_hits': 0,
            'cache_misses': 0,
        }

        self.logger.info("Personalized Recommendation Engine initialized")
    
    def get_recommendations(self, user_id: str, 
                          count: Optional[int] = None,
                          paper_pool: Optional[List[str]] = None,
                          refresh_cache: bool = False,
                          include_explanations: bool = True) -> List[Dict[str, Any]]:
        """
        Get personalized paper recommendations for a user.

        Pipeline: cache lookup -> preference/history load -> candidate scoring
        -> diversity re-ranking -> formatting (with optional explanations) ->
        cache write and stats update.

        Args:
            user_id: User identifier
            count: Number of recommendations to return; falls back to the
                configured default and is clamped to the configured maximum
            paper_pool: Optional list of paper IDs to consider; when None,
                candidates are drawn from the user's preferred categories
            refresh_cache: Force refresh of recommendations (skip cache read)
            include_explanations: Include explanation for each recommendation

        Returns:
            List of recommendation dictionaries; empty list on any error.
        """
        try:
            start_time = time.time()
            # A falsy count (None or 0) falls back to the configured default.
            count = count or self.default_recommendation_count
            count = min(count, self.max_recommendation_count)
            
            self.logger.info(f"Generating {count} recommendations for user {user_id}")
            
            # Serve from cache when allowed and fresh entries exist.
            # NOTE(review): cache hits return early and do not update
            # recommendations_generated or timing statistics.
            if not refresh_cache:
                cached_recommendations = self._get_cached_recommendations(user_id, count)
                if cached_recommendations:
                    self.recommendation_stats['cache_hits'] += 1
                    return cached_recommendations
            
            self.recommendation_stats['cache_misses'] += 1
            
            # Get user preferences
            user_preferences = self.preference_learner.get_user_preferences(user_id)
            
            # Get user's interaction history (last 30 days) for diversity
            user_history = self._get_user_history(user_id, days=30)
            
            # Get candidate papers; over-fetch 3x so scoring thresholds and
            # diversity filtering still leave enough to fill `count` slots
            if paper_pool is None:
                paper_pool = self._get_candidate_papers(user_preferences, count * 3)
            
            # Score all candidate papers
            paper_scores = []
            for paper_id in paper_pool:
                # Get paper classification; papers without one are skipped
                paper_classification = self._get_paper_classification(paper_id)
                if not paper_classification:
                    continue
                
                # Get paper metadata
                paper_metadata = self._get_paper_metadata(paper_id)
                
                # Compute recommendation score
                score = self.recommendation_scorer.compute_recommendation_score(
                    user_preferences, paper_classification, paper_metadata, user_history
                )
                
                if score > 0.1:  # Minimum threshold
                    paper_scores.append({
                        'paper_id': paper_id,
                        'score': score,
                        'classification': paper_classification,
                        'metadata': paper_metadata
                    })
            
            # Sort by score
            paper_scores.sort(key=lambda x: x['score'], reverse=True)
            
            # Apply diversity filtering over the top 2x candidates
            diverse_recommendations = self._apply_diversity_filtering(
                paper_scores[:count * 2], user_history, count
            )
            
            # Generate final recommendations with explanations.
            # Note: 'recommendation_score' reports the raw score, not the
            # diversity-adjusted score used for the final ordering.
            recommendations = []
            for item in diverse_recommendations[:count]:
                recommendation = {
                    'paper_id': item['paper_id'],
                    'recommendation_score': item['score'],
                    'paper_title': item['metadata'].get('title', 'Unknown'),
                    'paper_authors': item['metadata'].get('authors', []),
                    'paper_abstract': item['metadata'].get('abstract', ''),
                    'arxiv_id': item['metadata'].get('arxiv_id'),
                    'primary_category': item['metadata'].get('primary_category'),
                    'submission_date': item['metadata'].get('submission_date'),
                    'recommendation_reason': 'Personalized match',
                    'generated_at': datetime.utcnow().isoformat()
                }
                
                # Add explanation if requested
                if include_explanations:
                    explanation = self.recommendation_scorer.explain_recommendation(
                        user_preferences, item['classification'], item['metadata'], item['score']
                    )
                    recommendation['explanation'] = explanation
                
                recommendations.append(recommendation)
            
            # Cache recommendations for subsequent calls
            self._cache_recommendations(user_id, recommendations)
            
            # Update statistics (only full generation runs are counted)
            processing_time = time.time() - start_time
            self.recommendation_stats['recommendations_generated'] += 1
            self.recommendation_stats['total_recommendation_time'] += processing_time
            
            self.logger.info(f"Generated {len(recommendations)} recommendations for user {user_id} in {processing_time:.2f}s")
            return recommendations
            
        except Exception as e:
            self.logger.error(f"Error generating recommendations for user {user_id}: {e}")
            return []
    
    def get_trending_recommendations(self, count: int = 20) -> List[Dict[str, Any]]:
        """
        Get trending papers for general recommendations.
        
        Args:
            count: Number of trending papers to return
            
        Returns:
            List of trending paper recommendations
        """
        try:
            self.logger.info(f"Getting {count} trending recommendations")
            
            # Get trending papers from last week
            trending_papers = self._get_trending_papers(days=7, limit=count * 2)
            
            recommendations = []
            for paper in trending_papers:
                recommendation = {
                    'paper_id': paper['paper_id'],
                    'paper_title': paper['title'],
                    'paper_authors': paper.get('authors', []),
                    'paper_abstract': paper.get('abstract', ''),
                    'arxiv_id': paper.get('arxiv_id'),
                    'primary_category': paper.get('primary_category'),
                    'submission_date': paper.get('submission_date'),
                    'trending_score': paper.get('trending_score', 0.0),
                    'view_count': paper.get('view_count', 0),
                    'download_count': paper.get('download_count', 0),
                    'recommendation_reason': 'Trending paper',
                    'generated_at': datetime.utcnow().isoformat()
                }
                recommendations.append(recommendation)
            
            return recommendations[:count]
            
        except Exception as e:
            self.logger.error(f"Error getting trending recommendations: {e}")
            return []
    
    def get_similar_papers(self, paper_id: str, count: int = 10) -> List[Dict[str, Any]]:
        """
        Get papers similar to a given paper.
        
        Args:
            paper_id: Reference paper ID
            count: Number of similar papers to return
            
        Returns:
            List of similar paper recommendations
        """
        try:
            self.logger.info(f"Finding {count} papers similar to {paper_id}")
            
            # Get reference paper classification
            reference_classification = self._get_paper_classification(paper_id)
            if not reference_classification:
                return []
            
            # Get reference paper embedding
            reference_metadata = self._get_paper_metadata(paper_id)
            reference_embedding = self.similarity_engine.generate_paper_embedding(reference_metadata)
            
            if reference_embedding is None:
                # Fallback to topic-based similarity
                return self._get_topic_similar_papers(reference_classification, count)
            
            # Get candidate papers for similarity comparison
            candidate_papers = self._get_recent_papers(days=90, limit=1000)
            
            # Compute similarities
            similarities = []
            for candidate in candidate_papers:
                if candidate['paper_id'] == paper_id:
                    continue
                
                candidate_embedding = self.similarity_engine.generate_paper_embedding(candidate)
                if candidate_embedding is not None:
                    similarity = self.similarity_engine.cosine_similarity(
                        reference_embedding, candidate_embedding
                    )
                    
                    if similarity > 0.3:  # Minimum similarity threshold
                        similarities.append({
                            'paper_id': candidate['paper_id'],
                            'similarity_score': similarity,
                            'metadata': candidate
                        })
            
            # Sort by similarity
            similarities.sort(key=lambda x: x['similarity_score'], reverse=True)
            
            # Format recommendations
            recommendations = []
            for item in similarities[:count]:
                recommendation = {
                    'paper_id': item['paper_id'],
                    'similarity_score': item['similarity_score'],
                    'paper_title': item['metadata'].get('title', 'Unknown'),
                    'paper_authors': item['metadata'].get('authors', []),
                    'arxiv_id': item['metadata'].get('arxiv_id'),
                    'primary_category': item['metadata'].get('primary_category'),
                    'submission_date': item['metadata'].get('submission_date'),
                    'recommendation_reason': f'Similar to: {reference_metadata.get("title", "reference paper")}',
                    'generated_at': datetime.utcnow().isoformat()
                }
                recommendations.append(recommendation)
            
            return recommendations
            
        except Exception as e:
            self.logger.error(f"Error finding similar papers to {paper_id}: {e}")
            return []
    
    def update_recommendations_from_feedback(self, user_id: str, paper_id: str, 
                                           feedback_type: str, feedback_data: Dict[str, Any]):
        """
        Update user recommendations based on feedback.
        
        Args:
            user_id: User identifier
            paper_id: Paper identifier that received feedback
            feedback_type: Type of feedback
            feedback_data: Feedback data
        """
        try:
            # Process feedback through feedback system
            result = self.feedback_system.process_feedback(
                user_id, paper_id, feedback_type, feedback_data
            )
            
            if result.get('success') and result.get('preference_updates', {}).get('preferences_changed'):
                # Clear recommendation cache to trigger refresh
                self._clear_recommendation_cache(user_id)
                self.logger.info(f"Cleared recommendation cache for user {user_id} after feedback")
            
        except Exception as e:
            self.logger.error(f"Error updating recommendations from feedback: {e}")
    
    def _get_cached_recommendations(self, user_id: str, count: int) -> Optional[List[Dict[str, Any]]]:
        """Return fresh cached recommendations for a user, or None.

        Reads rows from user_recommendations newer than the configured cache
        window and re-hydrates each with current paper metadata. Returns
        None when there is no database, the cache is empty/stale, or a
        lookup error occurs.
        """
        if not self.db:
            return None

        try:
            freshness_cutoff = datetime.utcnow() - timedelta(hours=self.recommendation_cache_hours)

            cur = self.db.execute("""
                SELECT paper_id, recommendation_score, recommendation_reason, generated_at
                FROM user_recommendations
                WHERE user_id = %s AND generated_at >= %s
                ORDER BY recommendation_score DESC
                LIMIT %s
            """, (user_id, freshness_cutoff, count))

            hydrated = []
            for paper_id, score, reason, generated in cur.fetchall():
                meta = self._get_paper_metadata(paper_id)
                if not meta:
                    # Paper metadata missing — drop this cached row.
                    continue
                hydrated.append({
                    'paper_id': paper_id,
                    'recommendation_score': float(score),
                    'paper_title': meta.get('title', 'Unknown'),
                    'paper_authors': meta.get('authors', []),
                    'paper_abstract': meta.get('abstract', ''),
                    'arxiv_id': meta.get('arxiv_id'),
                    'primary_category': meta.get('primary_category'),
                    'submission_date': meta.get('submission_date'),
                    'recommendation_reason': reason,
                    'generated_at': generated.isoformat()
                })

            # Empty result means "cache miss" to the caller.
            return hydrated or None

        except Exception as e:
            self.logger.error(f"Error getting cached recommendations: {e}")
            return None
    
    def _cache_recommendations(self, user_id: str, recommendations: List[Dict[str, Any]]):
        """Upsert freshly generated recommendations into the cache table.

        Rows older than 24 hours are purged first; each new row is stamped
        with the configured expiry window. No-op without a database; logs
        and rolls back on failure.
        """
        if not self.db:
            return

        try:
            # Purge stale rows before writing the new batch.
            self.db.execute("""
                DELETE FROM user_recommendations
                WHERE user_id = %s AND generated_at < CURRENT_TIMESTAMP - INTERVAL '24 hours'
            """, (user_id,))

            # Upsert each recommendation under the current algorithm version.
            for entry in recommendations:
                row = (
                    user_id,
                    entry['paper_id'],
                    entry['recommendation_score'],
                    entry.get('recommendation_reason', ''),
                    'personalized_v1',
                    datetime.utcnow(),
                    datetime.utcnow() + timedelta(hours=self.recommendation_cache_hours),
                )
                self.db.execute("""
                    INSERT INTO user_recommendations 
                    (user_id, paper_id, recommendation_score, recommendation_reason, 
                     algorithm_version, generated_at, expires_at)
                    VALUES (%s, %s, %s, %s, %s, %s, %s)
                    ON CONFLICT (user_id, paper_id, algorithm_version)
                    DO UPDATE SET 
                        recommendation_score = EXCLUDED.recommendation_score,
                        recommendation_reason = EXCLUDED.recommendation_reason,
                        generated_at = EXCLUDED.generated_at,
                        expires_at = EXCLUDED.expires_at
                """, row)

            self.db.commit()

        except Exception as e:
            self.logger.error(f"Error caching recommendations: {e}")
            if self.db:
                self.db.rollback()
    
    def _clear_recommendation_cache(self, user_id: str):
        """Drop all cached 'personalized_v1' recommendations for one user.

        No-op without a database; logs and rolls back on failure.
        """
        if not self.db:
            return

        try:
            self.db.execute("""
                DELETE FROM user_recommendations
                WHERE user_id = %s AND algorithm_version = 'personalized_v1'
            """, (user_id,))
            self.db.commit()
        except Exception as e:
            self.logger.error(f"Error clearing recommendation cache: {e}")
            if self.db:
                self.db.rollback()
    
    def _get_candidate_papers(self, user_preferences: Dict[str, Any], limit: int) -> List[str]:
        """Collect recent candidate paper IDs, biased toward preferred categories.

        Uses the user's top five arXiv categories when available; otherwise
        falls back to simply-recent papers (last 30 days). Returns [] when
        no database is configured or the query fails.
        """
        if not self.db:
            return []

        try:
            # First element of each preference entry is the category name.
            top_categories = [entry[0] for entry in user_preferences.get('arxiv_categories', [])[:5]]

            if top_categories:
                placeholders = ','.join(['%s'] * len(top_categories))
                cursor = self.db.execute(f"""
                    SELECT DISTINCT p.paper_id
                    FROM papers p
                    WHERE (p.primary_category IN ({placeholders}) 
                           OR p.secondary_categories && %s)
                        AND p.submission_date >= CURRENT_DATE - INTERVAL '30 days'
                    ORDER BY p.submission_date DESC
                    LIMIT %s
                """, top_categories + [top_categories, limit])
            else:
                # No category preferences — fall back to recent papers.
                cursor = self.db.execute("""
                    SELECT paper_id FROM papers
                    WHERE submission_date >= CURRENT_DATE - INTERVAL '30 days'
                    ORDER BY submission_date DESC
                    LIMIT %s
                """, (limit,))

            return [record[0] for record in cursor.fetchall()]

        except Exception as e:
            self.logger.error(f"Error getting candidate papers: {e}")
            return []
    
    def _get_paper_classification(self, paper_id: str) -> Optional[Dict[str, Any]]:
        """Load the stored AI classification for a paper, if one exists.

        The classification lives in paper_metadata as serialized JSON under
        the 'ai_classification' key. Returns None without a database, when
        no row exists, or on any lookup/parse error.
        """
        if not self.db:
            return None

        try:
            cursor = self.db.execute("""
                SELECT metadata_value FROM paper_metadata
                WHERE paper_id = %s AND metadata_key = 'ai_classification'
            """, (paper_id,))

            row = cursor.fetchone()
            return json.loads(row[0]) if row else None

        except Exception as e:
            self.logger.error(f"Error getting paper classification: {e}")
            return None
    
    def _get_paper_metadata(self, paper_id: str) -> Optional[Dict[str, Any]]:
        """Fetch a paper's core metadata plus its aggregated author names.

        Returns None without a database, when the paper does not exist, or
        on query failure. Missing counters default to 0 and missing
        category/author lists to [].
        """
        if not self.db:
            return None

        try:
            cursor = self.db.execute("""
                SELECT p.paper_id, p.arxiv_id, p.title, p.abstract, 
                       p.primary_category, p.secondary_categories, p.submission_date,
                       p.view_count, p.download_count, p.citation_count,
                       array_agg(a.full_name) as authors
                FROM papers p
                LEFT JOIN paper_authors pa ON p.paper_id = pa.paper_id
                LEFT JOIN authors a ON pa.author_id = a.author_id
                WHERE p.paper_id = %s
                GROUP BY p.paper_id, p.arxiv_id, p.title, p.abstract, 
                         p.primary_category, p.secondary_categories, p.submission_date,
                         p.view_count, p.download_count, p.citation_count
            """, (paper_id,))

            row = cursor.fetchone()
            if not row:
                return None

            (pid, arxiv, title, abstract, primary, secondary,
             submitted, views, downloads, citations, author_names) = row

            return {
                'paper_id': pid,
                'arxiv_id': arxiv,
                'title': title,
                'abstract': abstract,
                'primary_category': primary,
                'secondary_categories': secondary or [],
                'submission_date': submitted,
                'view_count': views or 0,
                'download_count': downloads or 0,
                'citation_count': citations or 0,
                # array_agg over a LEFT JOIN can yield NULL entries — drop them.
                'authors': [name for name in author_names if name] if author_names else []
            }

        except Exception as e:
            self.logger.error(f"Error getting paper metadata: {e}")
            return None
    
    def _get_user_history(self, user_id: str, days: int) -> List[Dict[str, Any]]:
        """Return up to 50 of the user's most recent paper interactions.

        Each entry carries the interaction row plus the paper's parsed AI
        classification when one is stored. Returns [] without a database or
        on query failure.
        """
        if not self.db:
            return []

        try:
            since = datetime.utcnow() - timedelta(days=days)

            cursor = self.db.execute("""
                SELECT upi.paper_id, upi.interaction_type, upi.interaction_date,
                       pm.metadata_value as ai_classification
                FROM user_paper_interactions upi
                LEFT JOIN paper_metadata pm ON upi.paper_id = pm.paper_id 
                    AND pm.metadata_key = 'ai_classification'
                WHERE upi.user_id = %s AND upi.interaction_date >= %s
                ORDER BY upi.interaction_date DESC
                LIMIT 50
            """, (user_id, since))

            return [
                {
                    'paper_id': pid,
                    'interaction_type': kind,
                    'interaction_date': when,
                    # LEFT JOIN leaves NULL when no classification is stored.
                    'ai_classification': json.loads(raw) if raw else None
                }
                for pid, kind, when, raw in cursor.fetchall()
            ]

        except Exception as e:
            self.logger.error(f"Error getting user history: {e}")
            return []
    
    def _apply_diversity_filtering(self, scored_papers: List[Dict[str, Any]], 
                                 user_history: List[Dict[str, Any]], 
                                 target_count: int) -> List[Dict[str, Any]]:
        """Re-rank top-scored papers with a bonus for topical novelty.

        Papers introducing topics not yet covered by earlier selections get
        a bonus of ``self.diversity_factor`` per new topic; papers whose
        topics mostly (>80%) overlap the user's recent history are mildly
        penalized. Without any history, the top ``target_count`` papers pass
        through unchanged.
        """
        if not user_history:
            return scored_papers[:target_count]

        # Topics appearing in the user's recent interaction history.
        seen_before = set()
        for event in user_history:
            classification = event.get('ai_classification')
            if classification:
                for entry in classification.get('semantic_topics', []):
                    seen_before.add(entry.get('topic', ''))

        reranked = []
        covered = set()  # topics already contributed by selected papers

        for candidate in scored_papers:
            if len(reranked) >= target_count:
                break

            topics = {entry.get('topic', '')
                      for entry in candidate['classification'].get('semantic_topics', [])}

            overlap_with_history = len(topics & seen_before)
            novel_topic_count = len(topics - covered)

            adjusted = candidate['score'] + novel_topic_count * self.diversity_factor
            # Heavy overlap with history suggests an echo chamber — damp it.
            if overlap_with_history > len(topics) * 0.8:
                adjusted *= 0.8

            reranked.append({**candidate, 'adjusted_score': adjusted})
            covered |= topics

        # Final ordering by the diversity-adjusted score.
        reranked.sort(key=lambda item: item.get('adjusted_score', item['score']), reverse=True)

        return reranked
    
    def _get_trending_papers(self, days: int, limit: int) -> List[Dict[str, Any]]:
        """Fetch papers from the last ``days`` days ordered by trending score.

        Joins analytics counters and aggregated author names. Returns []
        without a database or on query failure.
        """
        if not self.db:
            return []

        try:
            since = datetime.utcnow() - timedelta(days=days)

            cursor = self.db.execute("""
                SELECT p.paper_id, p.arxiv_id, p.title, p.abstract, 
                       p.primary_category, p.submission_date,
                       pa.trending_score, pa.view_count, pa.download_count,
                       array_agg(a.full_name) as authors
                FROM papers p
                LEFT JOIN paper_analytics pa ON p.paper_id = pa.paper_id
                LEFT JOIN paper_authors pauth ON p.paper_id = pauth.paper_id
                LEFT JOIN authors a ON pauth.author_id = a.author_id
                WHERE p.submission_date >= %s
                GROUP BY p.paper_id, p.arxiv_id, p.title, p.abstract, 
                         p.primary_category, p.submission_date,
                         pa.trending_score, pa.view_count, pa.download_count
                ORDER BY pa.trending_score DESC NULLS LAST
                LIMIT %s
            """, (since, limit))

            results = []
            for (pid, arxiv, title, abstract, category, submitted,
                 trend, views, downloads, author_names) in cursor.fetchall():
                results.append({
                    'paper_id': pid,
                    'arxiv_id': arxiv,
                    'title': title,
                    'abstract': abstract,
                    'primary_category': category,
                    'submission_date': submitted,
                    # Analytics columns may be NULL from the LEFT JOIN.
                    'trending_score': float(trend) if trend else 0.0,
                    'view_count': views or 0,
                    'download_count': downloads or 0,
                    'authors': [name for name in author_names if name] if author_names else []
                })

            return results

        except Exception as e:
            self.logger.error(f"Error getting trending papers: {e}")
            return []
    
    def _get_recent_papers(self, days: int, limit: int) -> List[Dict[str, Any]]:
        """Fetch lightweight rows for papers submitted in the last ``days`` days.

        Used as the candidate pool for embedding-based similarity search.
        Returns [] without a database or on query failure.
        """
        if not self.db:
            return []

        try:
            since = datetime.utcnow() - timedelta(days=days)

            cursor = self.db.execute("""
                SELECT paper_id, arxiv_id, title, abstract, primary_category, submission_date
                FROM papers
                WHERE submission_date >= %s
                ORDER BY submission_date DESC
                LIMIT %s
            """, (since, limit))

            # Column order matches the SELECT list above.
            fields = ('paper_id', 'arxiv_id', 'title', 'abstract',
                      'primary_category', 'submission_date')
            return [dict(zip(fields, row)) for row in cursor.fetchall()]

        except Exception as e:
            self.logger.error(f"Error getting recent papers: {e}")
            return []
    
    def _get_topic_similar_papers(self, reference_classification: Dict[str, Any], 
                                count: int) -> List[Dict[str, Any]]:
        """Topic-overlap fallback used when no embedding is available.

        Finds recent (last 90 days) papers sharing any semantic topic with
        the reference classification, ranked by the stored classification
        confidence. Returns [] when there are no reference topics, no
        database, or the query fails.
        """
        topics = [entry.get('topic', '') for entry in
                  reference_classification.get('semantic_topics', [])]

        if not topics or not self.db:
            return []

        try:
            cursor = self.db.execute("""
                SELECT DISTINCT p.paper_id, p.title, p.arxiv_id, 
                       p.primary_category, p.submission_date, 
                       pc.classification_value, pc.confidence_score
                FROM papers p
                JOIN paper_classifications pc ON p.paper_id = pc.paper_id
                WHERE pc.classification_type = 'semantic_topic'
                    AND pc.classification_value = ANY(%s)
                    AND p.submission_date >= CURRENT_DATE - INTERVAL '90 days'
                ORDER BY pc.confidence_score DESC
                LIMIT %s
            """, (topics, count * 2))

            matches = [
                {
                    'paper_id': row[0],
                    'similarity_score': float(row[6]),  # confidence stands in for similarity
                    'paper_title': row[1],
                    'arxiv_id': row[2],
                    'primary_category': row[3],
                    'submission_date': row[4],
                    'recommendation_reason': f'Similar topics: {row[5]}',
                    'generated_at': datetime.utcnow().isoformat()
                }
                for row in cursor.fetchall()
            ]

            return matches[:count]

        except Exception as e:
            self.logger.error(f"Error getting topic similar papers: {e}")
            return []
    
    def get_recommendation_stats(self) -> Dict[str, Any]:
        """Return a snapshot of engine counters plus derived metrics.

        Returns:
            Dict with the raw counters (recommendations_generated,
            total_recommendation_time, cache_hits, cache_misses) and the
            derived 'average_recommendation_time' and 'cache_hit_rate'
            (each 0.0 when its denominator is zero).
        """
        stats = self.recommendation_stats.copy()

        # Average time only covers full (non-cached) generation runs.
        generated = stats['recommendations_generated']
        stats['average_recommendation_time'] = (
            stats['total_recommendation_time'] / generated if generated > 0 else 0.0
        )

        # BUG FIX: the hit rate was previously computed only when at least
        # one full generation had run, but cache hits return early without
        # incrementing recommendations_generated — so an all-cache-hit
        # workload reported a 0.0 hit rate. Use the total number of cache
        # lookups as the denominator instead.
        lookups = stats['cache_hits'] + stats['cache_misses']
        stats['cache_hit_rate'] = stats['cache_hits'] / lookups if lookups > 0 else 0.0

        return stats