"""
Feedback Learning System

System for learning from user feedback to improve recommendation accuracy
over time. Processes both explicit and implicit feedback signals.
"""

import logging
import json
import numpy as np
from typing import Dict, List, Any, Optional, Tuple
from datetime import datetime, timedelta
from collections import defaultdict, deque
import math


class FeedbackLearningSystem:
    """
    System that learns from user feedback to improve recommendation accuracy.
    
    Features:
    - Explicit feedback processing (likes, ratings, bookmarks)
    - Implicit feedback analysis (click-through, dwell time)
    - Adaptive learning rates based on user behavior
    - Feedback quality assessment
    - Real-time preference updates
    - A/B testing support for recommendation algorithms
    """
    
    def __init__(self, config: Dict[str, Any], database_manager=None):
        """Build the feedback learning system from a config mapping.

        Args:
            config: Configuration dictionary
            database_manager: Database connection manager
        """
        self.config = config
        self.db = database_manager
        self.logger = logging.getLogger(__name__)

        # Core learning knobs — each one overridable through `config`.
        self.learning_rate = config.get('learning_rate', 0.01)
        self.feedback_decay_days = config.get('feedback_decay_days', 90)
        self.min_feedback_count = config.get('min_feedback_count', 5)
        self.confidence_threshold = config.get('confidence_threshold', 0.3)

        # Signed weight per feedback signal; negatives express disapproval.
        default_weights = {
            'explicit_like': 1.0,
            'explicit_dislike': -1.0,
            'bookmark': 1.5,
            'unbookmark': -0.8,
            'rating_1': -2.0,
            'rating_2': -1.0,
            'rating_3': 0.0,
            'rating_4': 1.0,
            'rating_5': 2.0,
            'click_through': 0.3,
            'dwell_time_short': -0.2,
            'dwell_time_medium': 0.2,
            'dwell_time_long': 0.5,
            'download': 1.2,
            'share': 0.8,
            'comment': 1.0,
        }
        self.feedback_weights = config.get('feedback_weights', default_weights)

        # Cut-offs (seconds) bucketing dwell time into short/medium/long.
        self.dwell_time_thresholds = {
            'short': config.get('dwell_time_short', 30),
            'medium': config.get('dwell_time_medium', 120),
            'long': config.get('dwell_time_long', 300),
        }

        # In-memory feedback cache, bounded by `cache_size`.
        self.feedback_cache = {}
        self.cache_size = config.get('cache_size', 1000)

        # Running counters exposed via get_learning_statistics().
        self.learning_stats = {
            'feedback_processed': 0,
            'preferences_updated': 0,
            'accuracy_improvements': 0.0,
            'total_learning_time': 0.0,
        }

        self.logger.info("Feedback Learning System initialized")
    
    def process_feedback(self, user_id: str, paper_id: str, 
                        feedback_type: str, feedback_data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Process user feedback and update preferences accordingly.
        
        Args:
            user_id: User identifier
            paper_id: Paper identifier
            feedback_type: Type of feedback (like, rating, click, etc.)
            feedback_data: Additional feedback data
            
        Returns:
            Processing results with updated preferences
        """
        try:
            start_time = datetime.utcnow()
            
            self.logger.info(f"Processing feedback: {user_id} -> {paper_id} ({feedback_type})")
            
            # Validate feedback
            if not self._validate_feedback(feedback_type, feedback_data):
                return {'success': False, 'error': 'Invalid feedback data'}
            
            # Get paper classification
            paper_classification = self._get_paper_classification(paper_id)
            if not paper_classification:
                self.logger.warning(f"No classification found for paper {paper_id}")
                return {'success': False, 'error': 'Paper classification not found'}
            
            # Process feedback based on type
            feedback_score = self._compute_feedback_score(feedback_type, feedback_data)
            
            # Update user preferences
            preference_updates = self._update_user_preferences(
                user_id, paper_classification, feedback_score, feedback_type
            )
            
            # Store feedback record
            feedback_record = self._create_feedback_record(
                user_id, paper_id, feedback_type, feedback_data, feedback_score
            )
            self._store_feedback_record(feedback_record)
            
            # Update learning statistics
            self.learning_stats['feedback_processed'] += 1
            if preference_updates.get('preferences_changed', False):
                self.learning_stats['preferences_updated'] += 1
            
            processing_time = (datetime.utcnow() - start_time).total_seconds()
            self.learning_stats['total_learning_time'] += processing_time
            
            return {
                'success': True,
                'feedback_score': feedback_score,
                'preference_updates': preference_updates,
                'processing_time': processing_time
            }
            
        except Exception as e:
            self.logger.error(f"Error processing feedback: {e}")
            return {'success': False, 'error': str(e)}
    
    def process_implicit_feedback(self, user_id: str, paper_id: str, 
                                interaction_data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Process implicit feedback from user interactions.
        
        Args:
            user_id: User identifier
            paper_id: Paper identifier  
            interaction_data: Interaction data (dwell time, clicks, etc.)
            
        Returns:
            Processing results
        """
        try:
            # Extract implicit feedback signals
            feedback_signals = []
            
            # Click-through feedback
            if interaction_data.get('clicked'):
                feedback_signals.append(('click_through', {}))
            
            # Dwell time feedback
            dwell_time = interaction_data.get('dwell_time_seconds', 0)
            if dwell_time > 0:
                if dwell_time < self.dwell_time_thresholds['short']:
                    feedback_signals.append(('dwell_time_short', {'dwell_time': dwell_time}))
                elif dwell_time < self.dwell_time_thresholds['medium']:
                    feedback_signals.append(('dwell_time_medium', {'dwell_time': dwell_time}))
                elif dwell_time >= self.dwell_time_thresholds['long']:
                    feedback_signals.append(('dwell_time_long', {'dwell_time': dwell_time}))
            
            # Download feedback
            if interaction_data.get('downloaded'):
                feedback_signals.append(('download', {}))
            
            # Share feedback
            if interaction_data.get('shared'):
                feedback_signals.append(('share', {}))
            
            # Process all feedback signals
            results = []
            for feedback_type, feedback_data in feedback_signals:
                result = self.process_feedback(user_id, paper_id, feedback_type, feedback_data)
                results.append(result)
            
            return {
                'success': True,
                'signals_processed': len(feedback_signals),
                'results': results
            }
            
        except Exception as e:
            self.logger.error(f"Error processing implicit feedback: {e}")
            return {'success': False, 'error': str(e)}
    
    def batch_process_feedback(self, feedback_batch: List[Dict[str, Any]]) -> Dict[str, Any]:
        """
        Process multiple feedback items in batch for efficiency.
        
        Args:
            feedback_batch: List of feedback dictionaries
            
        Returns:
            Batch processing results
        """
        try:
            start_time = datetime.utcnow()
            
            successful_count = 0
            failed_count = 0
            errors = []
            
            for feedback_item in feedback_batch:
                try:
                    result = self.process_feedback(
                        feedback_item['user_id'],
                        feedback_item['paper_id'],
                        feedback_item['feedback_type'],
                        feedback_item.get('feedback_data', {})
                    )
                    
                    if result.get('success'):
                        successful_count += 1
                    else:
                        failed_count += 1
                        errors.append(result.get('error'))
                        
                except Exception as e:
                    failed_count += 1
                    errors.append(str(e))
            
            processing_time = (datetime.utcnow() - start_time).total_seconds()
            
            return {
                'success': True,
                'total_feedback': len(feedback_batch),
                'successful': successful_count,
                'failed': failed_count,
                'errors': errors,
                'processing_time': processing_time
            }
            
        except Exception as e:
            self.logger.error(f"Error in batch feedback processing: {e}")
            return {'success': False, 'error': str(e)}
    
    def _validate_feedback(self, feedback_type: str, feedback_data: Dict[str, Any]) -> bool:
        """Validate feedback type and data."""
        
        # Check if feedback type is supported
        if feedback_type not in self.feedback_weights:
            self.logger.warning(f"Unsupported feedback type: {feedback_type}")
            return False
        
        # Validate rating feedback
        if feedback_type.startswith('rating_'):
            rating_value = feedback_data.get('rating')
            if rating_value is None or not (1 <= rating_value <= 5):
                return False
        
        # Validate dwell time feedback
        if 'dwell_time' in feedback_type:
            dwell_time = feedback_data.get('dwell_time')
            if dwell_time is None or dwell_time < 0:
                return False
        
        return True
    
    def _compute_feedback_score(self, feedback_type: str, 
                              feedback_data: Dict[str, Any]) -> float:
        """Compute a normalized feedback score in [-1.0, 1.0].

        Starts from the configured weight for the feedback type, adjusts it
        from the payload (actual rating value, dwell-time extremes), applies
        exponential time decay for old feedback, then clamps to [-1, 1].

        Args:
            feedback_type: One of the keys of ``self.feedback_weights``.
            feedback_data: Payload; may carry 'rating', 'dwell_time' and an
                ISO-8601 'timestamp'.

        Returns:
            Score in [-1.0, 1.0]; 0.0 base for unknown feedback types.
        """
        base_score = self.feedback_weights.get(feedback_type, 0.0)
        
        # Adjust score based on feedback data
        if feedback_type.startswith('rating_'):
            # Map a 1-5 rating onto a -2..+2 scale centred on the neutral 3.
            # (Previous code multiplied by the no-op factor 2.0/2.0.)
            rating = feedback_data.get('rating', 3)
            base_score = float(rating - 3)
        
        elif 'dwell_time' in feedback_type:
            dwell_time = feedback_data.get('dwell_time', 0)
            if feedback_type == 'dwell_time_long':
                # Bonus for dwell times far beyond the 'long' threshold.
                if dwell_time > self.dwell_time_thresholds['long'] * 2:
                    base_score *= 1.5
            elif feedback_type == 'dwell_time_short':
                # Extra penalty for bounces well below the 'short' threshold.
                if dwell_time < self.dwell_time_thresholds['short'] / 2:
                    base_score *= 1.5
        
        # Apply exponential time decay for older feedback.
        feedback_timestamp = feedback_data.get('timestamp')
        if feedback_timestamp:
            if isinstance(feedback_timestamp, str):
                try:
                    feedback_timestamp = datetime.fromisoformat(feedback_timestamp.replace('Z', '+00:00'))
                except (ValueError, TypeError):
                    # Unparseable timestamp: treat the feedback as brand new.
                    # (Was a bare `except:` that also swallowed e.g. SystemExit.)
                    feedback_timestamp = datetime.utcnow()
            
            days_old = (datetime.utcnow() - feedback_timestamp.replace(tzinfo=None)).days
            decay_factor = math.exp(-days_old / self.feedback_decay_days)
            base_score *= decay_factor
        
        # Normalize score to [-1, 1] range
        return max(-1.0, min(1.0, base_score))
    
    def _update_user_preferences(self, user_id: str, paper_classification: Dict[str, Any], 
                               feedback_score: float, feedback_type: str) -> Dict[str, Any]:
        """Fold one feedback score into the user's stored preferences.

        Walks the paper's semantic topics, standard categories and ArXiv
        categories, nudging each matching preference entry by the feedback
        score weighted by classifier confidence and an adaptive per-user
        learning rate. Persists only when something actually changed.

        Args:
            user_id: User identifier.
            paper_classification: Classification dict; 'semantic_topics' and
                'standard_categories' are lists of dicts with 'topic'/'category'
                and 'confidence' keys; 'arxiv_categories' is a list of strings.
            feedback_score: Normalized score from _compute_feedback_score.
            feedback_type: Original signal type (drives the learning rate).

        Returns:
            Summary dict with 'preferences_changed', 'learning_rate_used' and
            update counts, or {'preferences_changed': False, 'error': ...}.
        """
        try:
            # Lazy import — presumably avoids a circular import at module load;
            # confirm against the package layout.
            from ..learning.user_preference_learner import UserPreferenceLearner
            # NOTE(review): a fresh learner is constructed on every call —
            # cache it if construction is not cheap.
            preference_learner = UserPreferenceLearner(self.config, self.db)
            current_prefs = preference_learner.get_user_preferences(user_id)
            
            # Per-user rate: boosted for new users, consistent users and
            # explicit signals (see _compute_adaptive_learning_rate).
            user_learning_rate = self._compute_adaptive_learning_rate(user_id, feedback_type)
            
            preferences_changed = False
            
            # Topic preferences: scale by the classifier's topic confidence.
            for topic in paper_classification.get('semantic_topics', []):
                topic_name = topic.get('topic')
                topic_confidence = topic.get('confidence', 0.5)
                
                if topic_name:
                    # Weighted feedback score by topic confidence
                    weighted_score = feedback_score * topic_confidence * user_learning_rate
                    
                    # Update preference
                    if self._update_preference_item(current_prefs, 'topics', topic_name, weighted_score):
                        preferences_changed = True
            
            # Standard-category preferences, same confidence weighting.
            for category in paper_classification.get('standard_categories', []):
                category_name = category.get('category')
                category_confidence = category.get('confidence', 0.5)
                
                if category_name:
                    weighted_score = feedback_score * category_confidence * user_learning_rate
                    
                    if self._update_preference_item(current_prefs, 'categories', category_name, weighted_score):
                        preferences_changed = True
            
            # ArXiv categories carry no confidence; use the raw weighted score.
            for arxiv_cat in paper_classification.get('arxiv_categories', []):
                weighted_score = feedback_score * user_learning_rate
                
                if self._update_preference_item(current_prefs, 'arxiv_categories', arxiv_cat, weighted_score):
                    preferences_changed = True
            
            # Persist only when at least one entry moved significantly.
            if preferences_changed:
                # NOTE(review): reaches into the learner's private API
                # (_store_user_preferences / preference_cache) — confirm this
                # coupling is intentional.
                preference_learner._store_user_preferences(user_id, current_prefs)
                
                # Clear cache to force reload
                if user_id in preference_learner.preference_cache:
                    del preference_learner.preference_cache[user_id]
            
            return {
                'preferences_changed': preferences_changed,
                'learning_rate_used': user_learning_rate,
                'topics_updated': len(paper_classification.get('semantic_topics', [])),
                'categories_updated': len(paper_classification.get('standard_categories', []))
            }
            
        except Exception as e:
            self.logger.error(f"Error updating user preferences: {e}")
            return {'preferences_changed': False, 'error': str(e)}
    
    def _compute_adaptive_learning_rate(self, user_id: str, feedback_type: str) -> float:
        """Compute adaptive learning rate based on user's feedback history."""
        try:
            # Get user's recent feedback history
            feedback_history = self._get_user_feedback_history(user_id, days=30)
            
            if len(feedback_history) < self.min_feedback_count:
                # New user - use higher learning rate
                return min(1.0, self.learning_rate * 2.0)
            
            # Compute feedback consistency
            feedback_scores = [f['feedback_score'] for f in feedback_history]
            feedback_variance = np.var(feedback_scores) if len(feedback_scores) > 1 else 0.5
            
            # Users with consistent feedback get higher learning rates
            consistency_factor = 1.0 - min(1.0, feedback_variance)
            
            # Explicit feedback gets higher learning rate than implicit
            explicit_types = ['explicit_like', 'explicit_dislike', 'rating_', 'bookmark']
            if any(fb_type in feedback_type for fb_type in explicit_types):
                explicit_factor = 1.5
            else:
                explicit_factor = 1.0
            
            # Compute adaptive rate
            adaptive_rate = self.learning_rate * (1.0 + consistency_factor) * explicit_factor
            
            return min(1.0, adaptive_rate)
            
        except Exception as e:
            self.logger.error(f"Error computing adaptive learning rate: {e}")
            return self.learning_rate
    
    def _update_preference_item(self, preferences: Dict[str, Any], 
                              category: str, item: str, score_delta: float) -> bool:
        """Apply a score delta to one preference entry; report significant change."""
        entries = preferences.setdefault(category, [])

        for idx, (name, score) in enumerate(entries):
            if name != item:
                continue
            # Existing entry: nudge the score, clamped to [0.01, 1.0].
            updated = max(0.01, min(1.0, score + score_delta))
            entries[idx] = (name, updated)

            # Keep the list ranked best-first.
            preferences[category] = sorted(entries, key=lambda pair: pair[1], reverse=True)

            # Only count it as a change when the score moved noticeably.
            return abs(updated - score) > 0.01

        if score_delta > 0:
            # Unseen item with a positive signal: seed it near mid-scale,
            # capped at 0.8, then keep only the 20 strongest entries.
            entries.append((item, min(0.8, 0.5 + score_delta)))
            ranked = sorted(entries, key=lambda pair: pair[1], reverse=True)
            preferences[category] = ranked[:20]
            return True

        return False
    
    def _get_user_feedback_history(self, user_id: str, days: int = 30) -> List[Dict[str, Any]]:
        """Get user's recent feedback history."""
        if not self.db:
            return []
        
        try:
            cutoff_date = datetime.utcnow() - timedelta(days=days)
            
            cursor = self.db.execute("""
                SELECT feedback_type, feedback_data, feedback_score, created_at
                FROM user_feedback_logs
                WHERE user_id = %s AND created_at >= %s
                ORDER BY created_at DESC
                LIMIT 100
            """, (user_id, cutoff_date))
            
            feedback_history = []
            for row in cursor.fetchall():
                feedback_history.append({
                    'feedback_type': row[0],
                    'feedback_data': json.loads(row[1]) if row[1] else {},
                    'feedback_score': row[2],
                    'created_at': row[3]
                })
            
            return feedback_history
            
        except Exception as e:
            self.logger.error(f"Error getting user feedback history: {e}")
            return []
    
    def _create_feedback_record(self, user_id: str, paper_id: str, 
                              feedback_type: str, feedback_data: Dict[str, Any], 
                              feedback_score: float) -> Dict[str, Any]:
        """Assemble a storable feedback record, timestamped at creation time."""
        record: Dict[str, Any] = dict(
            user_id=user_id,
            paper_id=paper_id,
            feedback_type=feedback_type,
            feedback_data=feedback_data,
            feedback_score=feedback_score,
            created_at=datetime.utcnow().isoformat(),
            processed=True,
        )
        return record
    
    def _store_feedback_record(self, feedback_record: Dict[str, Any]):
        """Persist a feedback record; no-op when there is no DB connection.

        Args:
            feedback_record: Dict produced by _create_feedback_record.

        Commits on success; on any database error it logs and rolls back —
        storage failure is deliberately non-fatal to feedback processing.
        """
        if not self.db:
            return
        
        try:
            # Create table if not exists (should be part of schema)
            # NOTE(review): issuing DDL on every insert is wasteful; this
            # belongs in a migration — kept here as a safety net.
            self.db.execute("""
                CREATE TABLE IF NOT EXISTS user_feedback_logs (
                    feedback_id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
                    user_id UUID NOT NULL,
                    paper_id UUID NOT NULL,
                    feedback_type VARCHAR(50) NOT NULL,
                    feedback_data JSONB,
                    feedback_score DECIMAL(5,4) NOT NULL,
                    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                    processed BOOLEAN DEFAULT true
                )
            """)
            
            # Parameterized insert; feedback_data is serialized to JSON text.
            self.db.execute("""
                INSERT INTO user_feedback_logs 
                (user_id, paper_id, feedback_type, feedback_data, feedback_score, created_at, processed)
                VALUES (%s, %s, %s, %s, %s, %s, %s)
            """, (
                feedback_record['user_id'],
                feedback_record['paper_id'],
                feedback_record['feedback_type'],
                json.dumps(feedback_record['feedback_data']),
                feedback_record['feedback_score'],
                feedback_record['created_at'],
                feedback_record['processed']
            ))
            
            self.db.commit()
            
        except Exception as e:
            self.logger.error(f"Error storing feedback record: {e}")
            if self.db:
                self.db.rollback()
    
    def _get_paper_classification(self, paper_id: str) -> Optional[Dict[str, Any]]:
        """Get paper classification from database."""
        if not self.db:
            return None
        
        try:
            cursor = self.db.execute("""
                SELECT metadata_value FROM paper_metadata
                WHERE paper_id = %s AND metadata_key = 'ai_classification'
            """, (paper_id,))
            
            result = cursor.fetchone()
            if result:
                return json.loads(result[0])
            
            return None
            
        except Exception as e:
            self.logger.error(f"Error getting paper classification: {e}")
            return None
    
    def analyze_feedback_quality(self, user_id: str) -> Dict[str, Any]:
        """Analyze quality and consistency of user's feedback."""
        try:
            feedback_history = self._get_user_feedback_history(user_id, days=90)
            
            if not feedback_history:
                return {'error': 'No feedback history found'}
            
            # Analyze feedback patterns
            feedback_types = [f['feedback_type'] for f in feedback_history]
            feedback_scores = [f['feedback_score'] for f in feedback_history]
            
            # Calculate statistics
            analysis = {
                'total_feedback_count': len(feedback_history),
                'feedback_type_distribution': dict(zip(*np.unique(feedback_types, return_counts=True))),
                'average_feedback_score': np.mean(feedback_scores),
                'feedback_score_variance': np.var(feedback_scores),
                'feedback_consistency': 1.0 - min(1.0, np.var(feedback_scores)),
                'explicit_feedback_ratio': sum(1 for ft in feedback_types 
                                             if any(explicit in ft for explicit in ['like', 'rating', 'bookmark'])) / len(feedback_types),
                'recent_activity': len([f for f in feedback_history 
                                      if (datetime.utcnow() - datetime.fromisoformat(f['created_at'].replace('Z', '+00:00').replace('+00:00', ''))).days <= 7])
            }
            
            # Quality assessment
            quality_score = (
                min(1.0, analysis['total_feedback_count'] / 50) * 0.3 +  # Volume
                analysis['feedback_consistency'] * 0.4 +  # Consistency
                analysis['explicit_feedback_ratio'] * 0.2 +  # Explicitness
                min(1.0, analysis['recent_activity'] / 10) * 0.1  # Recency
            )
            
            analysis['feedback_quality_score'] = quality_score
            
            return analysis
            
        except Exception as e:
            self.logger.error(f"Error analyzing feedback quality: {e}")
            return {'error': str(e)}
    
    def get_learning_statistics(self) -> Dict[str, Any]:
        """Return a snapshot of the learning counters plus derived averages."""
        stats = dict(self.learning_stats)

        # Derived per-item metrics; guard against an empty run.
        processed = stats['feedback_processed']
        if processed > 0:
            stats['average_processing_time'] = stats['total_learning_time'] / processed
            stats['preference_update_rate'] = stats['preferences_updated'] / processed
        else:
            stats['average_processing_time'] = 0.0
            stats['preference_update_rate'] = 0.0

        return stats
    
    def reset_learning_statistics(self):
        """Reset learning statistics."""
        self.learning_stats = {
            'feedback_processed': 0,
            'preferences_updated': 0,
            'accuracy_improvements': 0.0,
            'total_learning_time': 0.0
        }
        
        self.logger.info("Learning statistics reset")