"""
User Preference Learning System

Learns user preferences from interaction history to enable personalized
paper recommendations. Analyzes reading patterns, bookmark behavior,
and explicit feedback to build comprehensive user preference profiles.
"""

import logging
import json
import numpy as np
from typing import Dict, List, Any, Optional, Tuple
from datetime import datetime, timedelta
from collections import defaultdict, Counter
import math


class UserPreferenceLearner:
    """
    User preference learning system that builds comprehensive user profiles
    from interaction history for personalized recommendations.
    
    Features:
    - Multi-faceted preference modeling (topics, authors, categories, complexity)
    - Time-weighted learning (recent interactions matter more)
    - Implicit feedback analysis (views, downloads, time spent)
    - Explicit feedback integration (ratings, bookmarks)
    - Preference decay over time
    - Cold-start handling for new users
    """
    
    def __init__(self, config: Dict[str, Any], database_manager=None):
        """Initialize user preference learner.
        
        Args:
            config: Configuration dictionary
            database_manager: Database connection manager
        """
        self.config = config
        self.db = database_manager
        self.logger = logging.getLogger(__name__)
        
        # Learning parameters
        self.time_decay_factor = config.get('time_decay_factor', 0.95)  # How much to decay older interactions
        self.min_interactions = config.get('min_interactions', 5)  # Minimum interactions before learning
        self.interaction_weights = config.get('interaction_weights', {
            'view': 1.0,
            'download': 3.0,
            'bookmark': 5.0,
            'rate': 4.0,
            'comment': 3.0,
            'share': 2.0
        })
        
        # Preference categories
        self.preference_categories = [
            'topics', 'authors', 'categories', 'methodologies', 
            'application_domains', 'technical_level', 'novelty'
        ]
        
        # Cache for user preferences: user_id -> (preferences, cached_at)
        self.preference_cache = {}
        self.cache_expiry = config.get('cache_expiry_hours', 24)
        
        self.logger.info("User Preference Learner initialized")
    
    def learn_user_preferences(self, user_id: str, 
                             refresh_cache: bool = False) -> Dict[str, Any]:
        """
        Learn comprehensive user preferences from interaction history.
        
        Args:
            user_id: User identifier
            refresh_cache: Force refresh of cached preferences
            
        Returns:
            User preference profile
        """
        try:
            # Check cache first
            if not refresh_cache and user_id in self.preference_cache:
                cached_prefs, cache_time = self.preference_cache[user_id]
                if (datetime.utcnow() - cache_time).total_seconds() < self.cache_expiry * 3600:
                    return cached_prefs
            
            self.logger.info(f"Learning preferences for user {user_id}")
            
            # Get user interaction history (newest first — see ORDER BY in query)
            interactions = self._get_user_interactions(user_id)
            
            is_coldstart = len(interactions) < self.min_interactions
            if is_coldstart:
                # Handle cold-start scenario
                preferences = self._get_coldstart_preferences(user_id)
            else:
                # Learn from interaction history
                preferences = self._analyze_user_interactions(interactions)
            
            # Enhance with explicit preferences
            explicit_prefs = self._get_explicit_preferences(user_id)
            preferences = self._merge_preferences(preferences, explicit_prefs)
            
            # Add metadata.
            # NOTE: interactions are ordered DESC by date, so index 0 is the
            # MOST RECENT interaction (previously [-1] reported the oldest).
            preferences.update({
                'user_id': user_id,
                'learning_timestamp': datetime.utcnow().isoformat(),
                'total_interactions': len(interactions),
                'learning_method': 'coldstart' if is_coldstart else 'interaction_based',
                'confidence_score': self._compute_learning_confidence(interactions),
                'last_interaction': interactions[0]['interaction_date'].isoformat() if interactions else None
            })
            
            # Cache preferences
            self.preference_cache[user_id] = (preferences, datetime.utcnow())
            
            # Store in database
            self._store_user_preferences(user_id, preferences)
            
            self.logger.info(f"Learned preferences for user {user_id} from {len(interactions)} interactions")
            return preferences
            
        except Exception as e:
            self.logger.error(f"Error learning preferences for user {user_id}: {e}")
            return self._get_default_preferences(user_id)
    
    def _get_user_interactions(self, user_id: str, 
                             days_back: int = 180) -> List[Dict[str, Any]]:
        """Get user interaction history from database.
        
        Args:
            user_id: User identifier
            days_back: How far back in time to look for interactions
            
        Returns:
            Interaction dicts ordered newest-first; empty list on error / no DB.
        """
        if not self.db:
            return []
        
        try:
            cutoff_date = datetime.utcnow() - timedelta(days=days_back)
            
            cursor = self.db.execute("""
                SELECT 
                    upi.interaction_type,
                    upi.interaction_date,
                    upi.interaction_data,
                    p.paper_id,
                    p.arxiv_id,
                    p.title,
                    p.primary_category,
                    p.secondary_categories,
                    pm.metadata_value as ai_classification,
                    ur.rating,
                    ub.notes as bookmark_notes
                FROM user_paper_interactions upi
                JOIN papers p ON upi.paper_id = p.paper_id
                LEFT JOIN paper_metadata pm ON p.paper_id = pm.paper_id 
                    AND pm.metadata_key = 'ai_classification'
                LEFT JOIN user_ratings ur ON upi.user_id = ur.user_id 
                    AND upi.paper_id = ur.paper_id
                LEFT JOIN user_bookmarks ub ON upi.user_id = ub.user_id 
                    AND upi.paper_id = ub.paper_id
                WHERE upi.user_id = %s 
                    AND upi.interaction_date >= %s
                ORDER BY upi.interaction_date DESC
            """, (user_id, cutoff_date))
            
            interactions = []
            for row in cursor.fetchall():
                interaction = {
                    'interaction_type': row[0],
                    'interaction_date': row[1],
                    'interaction_data': json.loads(row[2]) if row[2] else {},
                    'paper_id': row[3],
                    'arxiv_id': row[4],
                    'title': row[5],
                    'primary_category': row[6],
                    'secondary_categories': row[7] or [],
                    'ai_classification': json.loads(row[8]) if row[8] else None,
                    'rating': row[9],
                    'bookmark_notes': row[10]
                }
                interactions.append(interaction)
            
            return interactions
            
        except Exception as e:
            self.logger.error(f"Error getting user interactions: {e}")
            return []
    
    def _analyze_user_interactions(self, interactions: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Analyze user interactions to extract weighted, normalized preferences.
        
        Each interaction contributes weight = time_decay * type_weight * rating_boost
        to every preference signal extracted from the paper it touched.
        """
        
        preferences = {
            'topics': {},
            'authors': {},
            'categories': {},
            'methodologies': {},
            'application_domains': {},
            'technical_level': {},
            'novelty': {},
            'arxiv_categories': {},
            'interaction_patterns': {},
            'temporal_preferences': {}
        }
        
        # Time weights for interactions (more recent = higher weight)
        now = datetime.utcnow()
        
        for interaction in interactions:
            # Calculate time weight: decay by time_decay_factor every 30 days
            days_ago = (now - interaction['interaction_date']).days
            time_weight = self.time_decay_factor ** (days_ago / 30)
            
            # Get interaction weight (unknown types count as 1.0)
            interaction_weight = self.interaction_weights.get(
                interaction['interaction_type'], 1.0
            )
            
            # Combined weight
            total_weight = time_weight * interaction_weight
            
            # Rating boost (if user rated the paper highly)
            rating_boost = 1.0
            if interaction['rating']:
                # Scale rating 1-5 to boost 0.5-1.0 (floor at 0.5 so low
                # ratings dampen but never zero out the signal)
                rating_boost = max(0.5, interaction['rating'] / 5.0)
            
            final_weight = total_weight * rating_boost
            
            # Extract preferences from paper
            self._extract_paper_preferences(interaction, final_weight, preferences)
        
        # Normalize and rank preferences
        return self._normalize_preferences(preferences)
    
    def _extract_paper_preferences(self, interaction: Dict[str, Any], 
                                 weight: float, preferences: Dict[str, Any]):
        """Extract preferences from a single paper interaction.
        
        Mutates `preferences` in place, accumulating `weight` into each
        signal found on the paper (categories, AI topics, temporal patterns).
        """
        
        # ArXiv categories (secondary categories count at half weight)
        primary_cat = interaction['primary_category']
        if primary_cat:
            preferences['arxiv_categories'][primary_cat] = (
                preferences['arxiv_categories'].get(primary_cat, 0) + weight
            )
        
        for secondary_cat in interaction['secondary_categories']:
            preferences['arxiv_categories'][secondary_cat] = (
                preferences['arxiv_categories'].get(secondary_cat, 0) + weight * 0.5
            )
        
        # AI classification-based preferences
        ai_classification = interaction['ai_classification']
        if ai_classification:
            # Semantic topics, scaled by classifier confidence
            for topic in ai_classification.get('semantic_topics', []):
                topic_name = topic.get('topic')
                topic_confidence = topic.get('confidence', 0.5)
                if topic_name:
                    topic_weight = weight * topic_confidence
                    preferences['topics'][topic_name] = (
                        preferences['topics'].get(topic_name, 0) + topic_weight
                    )
            
            # Standard categories, scaled by classifier confidence
            for category in ai_classification.get('standard_categories', []):
                cat_name = category.get('category')
                cat_confidence = category.get('confidence', 0.5)
                if cat_name:
                    cat_weight = weight * cat_confidence
                    preferences['categories'][cat_name] = (
                        preferences['categories'].get(cat_name, 0) + cat_weight
                    )
            
            # Technical characteristics
            confidence_scores = ai_classification.get('confidence_scores', {})
            overall_confidence = confidence_scores.get('overall_confidence', 0.5)
            
            # Technical level (if available in classification)
            tech_level = ai_classification.get('technical_level')
            if tech_level:
                preferences['technical_level'][tech_level] = (
                    preferences['technical_level'].get(tech_level, 0) + weight * overall_confidence
                )
        
        # Interaction patterns (plain counts, not weighted)
        interaction_type = interaction['interaction_type']
        preferences['interaction_patterns'][interaction_type] = (
            preferences['interaction_patterns'].get(interaction_type, 0) + 1
        )
        
        # Temporal patterns (hour of day, day of week)
        interaction_time = interaction['interaction_date']
        hour = interaction_time.hour
        day_of_week = interaction_time.strftime('%A')
        
        temporal = preferences['temporal_preferences']
        temporal.setdefault('hours', {})
        temporal.setdefault('days', {})
        
        temporal['hours'][hour] = temporal['hours'].get(hour, 0) + weight
        temporal['days'][day_of_week] = temporal['days'].get(day_of_week, 0) + weight
    
    def _normalize_preferences(self, preferences: Dict[str, Any]) -> Dict[str, Any]:
        """Normalize accumulated weights to sum to 1 and rank descending.
        
        Regular categories become sorted lists of (item, score) tuples;
        'temporal_preferences' keeps its nested dict-of-dicts shape.
        """
        
        normalized = {}
        
        for category, prefs in preferences.items():
            if isinstance(prefs, dict) and prefs:
                if category == 'temporal_preferences':
                    # Handle nested temporal preferences
                    normalized[category] = {}
                    for temp_type, temp_prefs in prefs.items():
                        if temp_prefs:
                            total_weight = sum(temp_prefs.values())
                            normalized[category][temp_type] = {
                                item: score / total_weight
                                for item, score in sorted(
                                    temp_prefs.items(), 
                                    key=lambda x: x[1], 
                                    reverse=True
                                )
                            }
                else:
                    # Normalize regular preferences
                    total_weight = sum(prefs.values())
                    if total_weight > 0:
                        # Convert to sorted list of (item, normalized_score) tuples
                        normalized[category] = [
                            (item, score / total_weight)
                            for item, score in sorted(
                                prefs.items(), 
                                key=lambda x: x[1], 
                                reverse=True
                            )
                        ]
                    else:
                        normalized[category] = []
            else:
                normalized[category] = prefs
        
        return normalized
    
    def _get_explicit_preferences(self, user_id: str) -> Dict[str, Any]:
        """Get explicitly stated user preferences.
        
        Returns:
            Mapping of preference category -> stored value (JSON-decoded),
            with the 'ai_preference_' key prefix stripped. Empty dict on
            error or when no DB is configured.
        """
        if not self.db:
            return {}
        
        try:
            # NOTE: literal '%' must be doubled ('%%') because the query is
            # parameterized with %s-style placeholders — a single '%' would
            # be consumed by the driver's format step and raise.
            # (The '_' before it is a LIKE single-char wildcard; acceptable
            # here since keys all share the 'ai_preference_' prefix.)
            cursor = self.db.execute("""
                SELECT preference_key, preference_value
                FROM user_preferences
                WHERE user_id = %s AND preference_key LIKE 'ai_preference_%%'
            """, (user_id,))
            
            explicit_prefs = {}
            for row in cursor.fetchall():
                pref_key = row[0].replace('ai_preference_', '')
                pref_value = json.loads(row[1])
                explicit_prefs[pref_key] = pref_value
            
            return explicit_prefs
            
        except Exception as e:
            self.logger.error(f"Error getting explicit preferences: {e}")
            return {}
    
    def _merge_preferences(self, learned_prefs: Dict[str, Any], 
                          explicit_prefs: Dict[str, Any]) -> Dict[str, Any]:
        """Merge learned and explicit preferences without mutating inputs.
        
        Explicit items boost matching learned items by `explicit_weight`;
        unseen explicit items are appended at 0.8 * explicit_weight.
        """
        
        # Copy per-category lists so we never mutate the caller's data
        # (a plain .copy() would share the inner lists).
        merged = {
            category: list(value) if isinstance(value, list) else value
            for category, value in learned_prefs.items()
        }
        
        # Weight explicit preferences higher
        explicit_weight = 1.5
        
        for category, explicit_items in explicit_prefs.items():
            if category in merged and isinstance(merged[category], list):
                # Convert explicit preferences to same format
                if isinstance(explicit_items, list):
                    for item in explicit_items:
                        # Find if item already exists in learned preferences
                        found = False
                        for i, (learned_item, learned_score) in enumerate(merged[category]):
                            if learned_item == item:
                                # Boost existing preference
                                merged[category][i] = (learned_item, learned_score * explicit_weight)
                                found = True
                                break
                        
                        if not found:
                            # Add new explicit preference
                            merged[category].append((item, 0.8 * explicit_weight))
                
                # Re-sort after merging
                merged[category] = sorted(merged[category], key=lambda x: x[1], reverse=True)
        
        return merged
    
    def _get_coldstart_preferences(self, user_id: str) -> Dict[str, Any]:
        """Handle cold-start scenario for new users.
        
        Falls back to explicit preferences (high confidence 0.8) where
        stated, otherwise globally popular items (low confidence 0.3).
        """
        
        # Get global popularity trends
        popular_preferences = self._get_popular_preferences()
        
        # Get any explicit preferences
        explicit_prefs = self._get_explicit_preferences(user_id)
        
        # Combine with low confidence scores
        coldstart_prefs = {}
        
        for category in self.preference_categories:
            if category in explicit_prefs:
                # Use explicit preferences with high confidence
                coldstart_prefs[category] = [(item, 0.8) for item in explicit_prefs[category]]
            elif category in popular_preferences:
                # Use popular items with low confidence
                popular_items = popular_preferences[category][:5]  # Top 5
                coldstart_prefs[category] = [(item, 0.3) for item in popular_items]
            else:
                coldstart_prefs[category] = []
        
        return coldstart_prefs
    
    def _get_popular_preferences(self) -> Dict[str, List[str]]:
        """Get globally popular preferences for cold-start.
        
        Aggregates AI classifications over the last 30 days, keeping values
        seen by at least 10 distinct users, ranked by reach then confidence.
        """
        if not self.db:
            return {}
        
        try:
            # Get most popular topics/categories across all users
            cursor = self.db.execute("""
                SELECT 
                    pc.classification_value,
                    pc.classification_type,
                    COUNT(DISTINCT upi.user_id) as user_count,
                    AVG(pc.confidence_score) as avg_confidence
                FROM paper_classifications pc
                JOIN user_paper_interactions upi ON pc.paper_id = upi.paper_id
                WHERE pc.created_by = 'ai_classifier'
                    AND upi.interaction_date >= CURRENT_DATE - INTERVAL '30 days'
                GROUP BY pc.classification_value, pc.classification_type
                HAVING COUNT(DISTINCT upi.user_id) >= 10
                ORDER BY user_count DESC, avg_confidence DESC
            """)
            
            popular = defaultdict(list)
            for row in cursor.fetchall():
                classification_value, classification_type, user_count, avg_confidence = row
                
                if classification_type == 'semantic_topic':
                    popular['topics'].append(classification_value)
                elif classification_type == 'ai_category':
                    popular['categories'].append(classification_value)
            
            return dict(popular)
            
        except Exception as e:
            self.logger.error(f"Error getting popular preferences: {e}")
            return {}
    
    def _compute_learning_confidence(self, interactions: List[Dict[str, Any]]) -> float:
        """Compute confidence in learned preferences, in [0.1, 0.95].
        
        Combines interaction volume, diversity of interaction types, and the
        share of interactions carrying explicit feedback.
        """
        if not interactions:
            return 0.1
        
        # Base confidence on number of interactions (max at 50 interactions)
        base_confidence = min(0.9, len(interactions) / 50)
        
        # Boost for diversity of interaction types (guard against an empty
        # interaction_weights config)
        interaction_types = set(i['interaction_type'] for i in interactions)
        diversity_boost = len(interaction_types) / max(1, len(self.interaction_weights))
        
        # Boost for explicit ratings/bookmarks
        explicit_count = sum(1 for i in interactions if i['rating'] or i['bookmark_notes'])
        explicit_boost = min(0.2, explicit_count / len(interactions))
        
        total_confidence = base_confidence + diversity_boost * 0.1 + explicit_boost
        return min(0.95, total_confidence)
    
    def _store_user_preferences(self, user_id: str, preferences: Dict[str, Any]):
        """Store learned preferences as JSON in the user_preferences table.
        
        Upserts under the 'ai_learned_preferences' key; rolls back on error.
        """
        if not self.db:
            return
        
        try:
            # Store as JSON in user_preferences table
            self.db.execute("""
                INSERT INTO user_preferences (user_id, preference_key, preference_value)
                VALUES (%s, %s, %s)
                ON CONFLICT (user_id, preference_key)
                DO UPDATE SET preference_value = EXCLUDED.preference_value, updated_at = CURRENT_TIMESTAMP
            """, (user_id, 'ai_learned_preferences', json.dumps(preferences)))
            
            self.db.commit()
            
        except Exception as e:
            self.logger.error(f"Error storing user preferences: {e}")
            if self.db:
                self.db.rollback()
    
    def _get_default_preferences(self, user_id: str) -> Dict[str, Any]:
        """Get default (empty) preferences for error scenarios."""
        return {
            'user_id': user_id,
            'topics': [],
            'authors': [],
            'categories': [],
            'methodologies': [],
            'application_domains': [],
            'technical_level': [],
            'novelty': [],
            'arxiv_categories': [],
            'interaction_patterns': {},
            'temporal_preferences': {},
            'learning_timestamp': datetime.utcnow().isoformat(),
            'total_interactions': 0,
            'learning_method': 'default',
            'confidence_score': 0.1,
            'last_interaction': None
        }
    
    def get_user_preferences(self, user_id: str) -> Dict[str, Any]:
        """Get cached or stored user preferences.
        
        Lookup order: in-memory cache -> database -> fresh learning run.
        
        Args:
            user_id: User identifier
            
        Returns:
            User preference profile
        """
        # Check cache first
        if user_id in self.preference_cache:
            cached_prefs, cache_time = self.preference_cache[user_id]
            if (datetime.utcnow() - cache_time).total_seconds() < self.cache_expiry * 3600:
                return cached_prefs
        
        # Try to load from database
        if self.db:
            try:
                cursor = self.db.execute("""
                    SELECT preference_value FROM user_preferences
                    WHERE user_id = %s AND preference_key = 'ai_learned_preferences'
                """, (user_id,))
                
                result = cursor.fetchone()
                if result:
                    preferences = json.loads(result[0])
                    # Update cache
                    self.preference_cache[user_id] = (preferences, datetime.utcnow())
                    return preferences
                    
            except Exception as e:
                self.logger.error(f"Error loading user preferences: {e}")
        
        # Fall back to learning preferences
        return self.learn_user_preferences(user_id)
    
    def update_preferences_from_feedback(self, user_id: str, paper_id: str, 
                                       feedback_type: str, feedback_value: Any):
        """Update user preferences based on explicit feedback.
        
        Args:
            user_id: User identifier
            paper_id: Paper identifier
            feedback_type: Type of feedback ('like', 'dislike', 'rating', 'bookmark')
            feedback_value: Value of the feedback
        """
        try:
            # Get current preferences
            preferences = self.get_user_preferences(user_id)
            
            # Get paper classification
            paper_classification = self._get_paper_classification(paper_id)
            
            if paper_classification:
                # Update preferences based on feedback
                weight = self._get_feedback_weight(feedback_type, feedback_value)
                
                # Update topic preferences
                for topic in paper_classification.get('semantic_topics', []):
                    topic_name = topic.get('topic')
                    if topic_name:
                        self._update_preference_item(preferences, 'topics', topic_name, weight)
                
                # Update category preferences
                for category in paper_classification.get('standard_categories', []):
                    cat_name = category.get('category')
                    if cat_name:
                        self._update_preference_item(preferences, 'categories', cat_name, weight)
                
                # Store updated preferences
                self._store_user_preferences(user_id, preferences)
                
                # Update cache
                self.preference_cache[user_id] = (preferences, datetime.utcnow())
                
        except Exception as e:
            self.logger.error(f"Error updating preferences from feedback: {e}")
    
    def _get_paper_classification(self, paper_id: str) -> Optional[Dict[str, Any]]:
        """Get a paper's AI classification JSON from the database, or None."""
        if not self.db:
            return None
        
        try:
            cursor = self.db.execute("""
                SELECT metadata_value FROM paper_metadata
                WHERE paper_id = %s AND metadata_key = 'ai_classification'
            """, (paper_id,))
            
            result = cursor.fetchone()
            if result:
                return json.loads(result[0])
            
            return None
            
        except Exception as e:
            self.logger.error(f"Error getting paper classification: {e}")
            return None
    
    def _get_feedback_weight(self, feedback_type: str, feedback_value: Any) -> float:
        """Calculate signed preference weight from feedback type and value.
        
        Negative weights (dislike, unbookmark) push preferences down;
        unknown feedback types get a small positive default of 0.5.
        """
        feedback_weights = {
            'like': 2.0,
            'dislike': -1.0,
            'bookmark': 3.0,
            'unbookmark': -1.5,
            'share': 1.5
        }
        
        if feedback_type == 'rating':
            # Center rating 1-5 on 3, yielding -2.0 to +2.0
            return float(feedback_value - 3)
        
        return feedback_weights.get(feedback_type, 0.5)
    
    def _update_preference_item(self, preferences: Dict[str, Any], 
                              category: str, item: str, weight: float):
        """Update a single preference item in place.
        
        Existing items move by weight * 0.1 (clamped to [0.1, 1.0]); new
        items are only added for positive weight. The category list is kept
        sorted descending and truncated to its top 20 entries.
        """
        if category not in preferences:
            preferences[category] = []
        
        # Find existing item
        found = False
        for i, (pref_item, pref_score) in enumerate(preferences[category]):
            if pref_item == item:
                # Update score
                new_score = max(0.1, min(1.0, pref_score + weight * 0.1))
                preferences[category][i] = (pref_item, new_score)
                found = True
                break
        
        if not found and weight > 0:
            # Add new preference
            preferences[category].append((item, min(0.8, weight * 0.1 + 0.5)))
        
        # Re-sort
        preferences[category] = sorted(preferences[category], key=lambda x: x[1], reverse=True)
        
        # Keep top 20 items
        preferences[category] = preferences[category][:20]