"""
Batch Classification Processor

Efficient batch processing system for classifying thousands of papers daily
with proper error handling, progress tracking, and performance optimization.
"""

import logging
import asyncio
import json
import time
from typing import Dict, List, Any, Optional, Callable, Iterator
from datetime import datetime, timedelta
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
from dataclasses import dataclass, asdict
import multiprocessing
from pathlib import Path

from ..core.classification_engine import AIClassificationEngine
from ..core.paper_classifier import SemanticPaperClassifier
from ..learning.user_preference_learner import UserPreferenceLearner
from ..utils.scoring import RecommendationScorer


@dataclass
class BatchProcessingResult:
    """Result of batch processing operation.

    Produced by BatchClassificationProcessor for both paper-classification
    and recommendation batches; in the latter case the paper-named fields
    count users instead (see generate_user_recommendations_batch).
    """
    # Number of items submitted in the batch.
    total_papers: int
    # Items that produced a result marked success=True.
    successful_classifications: int
    # Items that failed after all retry attempts.
    failed_classifications: int
    # Wall-clock duration of the whole batch.
    processing_time_seconds: float
    # processing_time_seconds / total_papers (0 for an empty batch).
    average_time_per_paper: float
    # One dict per failure: paper_id/user_id, error message, ISO timestamp.
    errors: List[Dict[str, Any]]
    start_time: datetime
    end_time: datetime
    # Caller-supplied identifier, or a generated "batch_<epoch>" fallback.
    batch_id: str


@dataclass
class BatchProcessingConfig:
    """Configuration for batch processing.

    Instantiated from the 'batch_processing' section of the processor's
    config dict via keyword unpacking, so keys must match field names.
    """
    # Number of papers/users processed per chunk.
    batch_size: int = 50
    # ThreadPoolExecutor worker count; 1 forces sequential processing.
    max_workers: int = 4
    # Per-paper attempts before the paper is reported as failed.
    retry_attempts: int = 3
    # Base delay for exponential backoff between retries (delay * 2**attempt).
    retry_delay_seconds: float = 1.0
    # NOTE(review): not referenced anywhere in this module — appears reserved.
    progress_callback_interval: int = 100
    # When True, periodically dump the current chunk's results to JSON.
    save_intermediate_results: bool = True
    # Intermediate save fires when processed_count is an exact multiple of this.
    intermediate_save_interval: int = 500
    # NOTE(review): not enforced by this module — presumably for a supervisor.
    memory_limit_mb: Optional[int] = 8192
    # NOTE(review): not enforced by this module (per-paper futures use a
    # hard-coded 60 s timeout instead).
    timeout_seconds: Optional[int] = 3600


class BatchClassificationProcessor:
    """
    High-performance batch processor for paper classification and recommendation generation.

    Features:
    - Parallel processing with configurable worker count
    - Memory-efficient chunked processing
    - Error handling and retry logic with exponential backoff
    - Progress tracking and callbacks
    - Intermediate result saving
    - Performance monitoring
    - Graceful degradation on failures
    """

    def __init__(self, config: Dict[str, Any], database_manager=None):
        """Initialize batch processor.

        Args:
            config: Configuration dictionary; its 'batch_processing' section
                is unpacked into BatchProcessingConfig.
            database_manager: Database connection manager. Optional — DB
                reads/writes are skipped when it is None.
        """
        self.config = config
        self.db = database_manager
        self.logger = logging.getLogger(__name__)

        # Processing configuration
        self.batch_config = BatchProcessingConfig(**config.get('batch_processing', {}))

        # Initialize AI components
        self.classification_engine = AIClassificationEngine(config, database_manager)
        self.paper_classifier = SemanticPaperClassifier(config)
        self.preference_learner = UserPreferenceLearner(config.get('learning', {}), database_manager)
        self.recommendation_scorer = RecommendationScorer(config.get('scoring', {}))

        # Cumulative counters across every batch this instance processes
        # (maintained by _update_processing_stats).
        self.processing_stats = {
            'papers_processed': 0,
            'total_processing_time': 0.0,
            'average_processing_time': 0.0,
            'classification_errors': 0,
            'recommendation_errors': 0
        }

        # Progress tracking. Callbacks receive (processed_count, total).
        self.progress_callbacks: List[Callable[[int, int], None]] = []
        self.current_batch_id = None

        self.logger.info(f"Batch Classification Processor initialized with {self.batch_config.max_workers} workers")

    def process_papers_batch(self, papers_data: List[Dict[str, Any]],
                           batch_id: Optional[str] = None,
                           progress_callback: Optional[Callable[[int, int], None]] = None) -> BatchProcessingResult:
        """
        Process a batch of papers for classification.

        Args:
            papers_data: List of paper data dictionaries
            batch_id: Optional batch identifier
            progress_callback: Optional progress callback function, invoked
                with (processed_count, total) after each chunk

        Returns:
            Batch processing results
        """
        start_time = datetime.utcnow()
        batch_id = batch_id or f"batch_{int(time.time())}"
        self.current_batch_id = batch_id

        self.logger.info(f"Starting batch processing {batch_id} with {len(papers_data)} papers")

        # Initialize result tracking
        successful_classifications = 0
        failed_classifications = 0
        errors = []

        # Register the per-call callback for the duration of this batch.
        if progress_callback:
            self.progress_callbacks.append(progress_callback)

        try:
            # Process papers in chunks to bound memory use.
            processed_count = 0
            chunk_size = self.batch_config.batch_size
            # Ceiling division, hoisted out of the loop.
            total_chunks = (len(papers_data) + chunk_size - 1) // chunk_size

            for chunk_start in range(0, len(papers_data), chunk_size):
                chunk_end = min(chunk_start + chunk_size, len(papers_data))
                chunk = papers_data[chunk_start:chunk_end]

                self.logger.info(f"Processing chunk {chunk_start // chunk_size + 1}/{total_chunks}")

                # Process chunk
                chunk_results = self._process_paper_chunk(chunk, batch_id)

                # Update counters
                for result in chunk_results:
                    if result.get('success', False):
                        successful_classifications += 1
                    else:
                        failed_classifications += 1
                        if 'error' in result:
                            errors.append({
                                'paper_id': result.get('paper_id'),
                                'error': result['error'],
                                'timestamp': datetime.utcnow().isoformat()
                            })

                processed_count += len(chunk)

                # Call progress callbacks
                self._call_progress_callbacks(processed_count, len(papers_data))

                # Save intermediate results. Note: only fires when
                # processed_count is an exact multiple of the interval.
                if (self.batch_config.save_intermediate_results and
                    processed_count % self.batch_config.intermediate_save_interval == 0):
                    self._save_intermediate_results(chunk_results, batch_id, processed_count)

                # Memory cleanup
                del chunk_results

            end_time = datetime.utcnow()
            processing_time = (end_time - start_time).total_seconds()

            # Create result summary
            result = BatchProcessingResult(
                total_papers=len(papers_data),
                successful_classifications=successful_classifications,
                failed_classifications=failed_classifications,
                processing_time_seconds=processing_time,
                average_time_per_paper=processing_time / len(papers_data) if papers_data else 0,
                errors=errors,
                start_time=start_time,
                end_time=end_time,
                batch_id=batch_id
            )

            # Update processing stats
            self._update_processing_stats(result)

            # Log results
            self._log_batch_results(result)

            return result

        except Exception as e:
            # Convert a mid-batch crash into a failure-flavored result so
            # callers always receive a BatchProcessingResult.
            self.logger.error(f"Critical error in batch processing {batch_id}: {e}")
            end_time = datetime.utcnow()
            processing_time = (end_time - start_time).total_seconds()

            return BatchProcessingResult(
                total_papers=len(papers_data),
                successful_classifications=successful_classifications,
                failed_classifications=len(papers_data) - successful_classifications,
                processing_time_seconds=processing_time,
                average_time_per_paper=processing_time / len(papers_data) if papers_data else 0,
                errors=errors + [{'error': str(e), 'timestamp': datetime.utcnow().isoformat()}],
                start_time=start_time,
                end_time=end_time,
                batch_id=batch_id
            )

        finally:
            # Cleanup: deregister the per-call callback and clear the marker.
            if progress_callback is not None and progress_callback in self.progress_callbacks:
                self.progress_callbacks.remove(progress_callback)
            self.current_batch_id = None

    def _process_paper_chunk(self, chunk: List[Dict[str, Any]],
                           batch_id: str) -> List[Dict[str, Any]]:
        """Process a chunk of papers, in parallel when max_workers > 1.

        Returns one result dict per input paper, in submission order.
        """

        if self.batch_config.max_workers == 1:
            # Sequential processing
            return [self._process_single_paper(paper, batch_id) for paper in chunk]

        # Parallel processing
        with ThreadPoolExecutor(max_workers=self.batch_config.max_workers) as executor:
            futures = []

            for paper in chunk:
                future = executor.submit(self._process_single_paper, paper, batch_id)
                futures.append(future)

            # Collect results in submission order; a timed-out or crashed
            # future becomes a failure record rather than aborting the chunk.
            results = []
            for future in futures:
                try:
                    result = future.result(timeout=60)  # 60-second timeout per paper
                    results.append(result)
                except Exception as e:
                    self.logger.error(f"Error processing paper in chunk: {e}")
                    results.append({
                        'success': False,
                        'error': str(e),
                        'paper_id': 'unknown'
                    })

            return results

    def _process_single_paper(self, paper_data: Dict[str, Any],
                            batch_id: str) -> Dict[str, Any]:
        """Process a single paper with retry logic and exponential backoff.

        Returns a dict with 'success', 'paper_id', 'batch_id' and either the
        classification payload or an 'error' message. Never raises.
        """

        paper_id = paper_data.get('paper_id', 'unknown')

        for attempt in range(self.batch_config.retry_attempts):
            try:
                start_time = time.time()

                # Classify paper
                classification_result = self.classification_engine.classify_paper(paper_data)

                # Store classification if successful
                if classification_result and not classification_result.get('error'):
                    processing_time = time.time() - start_time

                    return {
                        'success': True,
                        'paper_id': paper_id,
                        'classification_result': classification_result,
                        'processing_time': processing_time,
                        'batch_id': batch_id,
                        'attempt': attempt + 1
                    }
                else:
                    # Classification failed. The engine may have returned
                    # None, so guard before calling .get() — the previous
                    # code raised AttributeError here on a None result.
                    error_msg = (classification_result or {}).get('error', 'Unknown classification error')
                    self.logger.warning(f"Classification failed for paper {paper_id}: {error_msg}")

                    if attempt < self.batch_config.retry_attempts - 1:
                        # Exponential backoff: base * 2**attempt.
                        time.sleep(self.batch_config.retry_delay_seconds * (2 ** attempt))
                        continue
                    else:
                        return {
                            'success': False,
                            'paper_id': paper_id,
                            'error': error_msg,
                            'batch_id': batch_id,
                            'attempts': self.batch_config.retry_attempts
                        }

            except Exception as e:
                self.logger.error(f"Error processing paper {paper_id} (attempt {attempt + 1}): {e}")

                if attempt < self.batch_config.retry_attempts - 1:
                    time.sleep(self.batch_config.retry_delay_seconds * (2 ** attempt))
                    continue
                else:
                    return {
                        'success': False,
                        'paper_id': paper_id,
                        'error': str(e),
                        'batch_id': batch_id,
                        'attempts': self.batch_config.retry_attempts
                    }

        # Only reachable when retry_attempts <= 0.
        return {
            'success': False,
            'paper_id': paper_id,
            'error': 'Maximum retry attempts exceeded',
            'batch_id': batch_id
        }

    def generate_user_recommendations_batch(self, user_ids: List[str],
                                          paper_pool: Optional[List[str]] = None,
                                          batch_id: Optional[str] = None) -> BatchProcessingResult:
        """
        Generate recommendations for multiple users in batch.

        Args:
            user_ids: List of user identifiers
            paper_pool: Optional list of paper IDs to consider
            batch_id: Optional batch identifier

        Returns:
            Batch processing results (the paper-named fields count users here)
        """
        start_time = datetime.utcnow()
        batch_id = batch_id or f"recommendations_{int(time.time())}"

        self.logger.info(f"Starting batch recommendation generation {batch_id} for {len(user_ids)} users")

        successful_generations = 0
        failed_generations = 0
        errors = []

        try:
            # Process users in chunks
            for chunk_start in range(0, len(user_ids), self.batch_config.batch_size):
                chunk_end = min(chunk_start + self.batch_config.batch_size, len(user_ids))
                user_chunk = user_ids[chunk_start:chunk_end]

                # Generate recommendations for chunk
                chunk_results = self._generate_recommendations_chunk(user_chunk, paper_pool, batch_id)

                # Update counters
                for result in chunk_results:
                    if result.get('success', False):
                        successful_generations += 1
                    else:
                        failed_generations += 1
                        if 'error' in result:
                            errors.append({
                                'user_id': result.get('user_id'),
                                'error': result['error'],
                                'timestamp': datetime.utcnow().isoformat()
                            })

            end_time = datetime.utcnow()
            processing_time = (end_time - start_time).total_seconds()

            return BatchProcessingResult(
                total_papers=len(user_ids),  # Using total_papers field for users
                successful_classifications=successful_generations,
                failed_classifications=failed_generations,
                processing_time_seconds=processing_time,
                average_time_per_paper=processing_time / len(user_ids) if user_ids else 0,
                errors=errors,
                start_time=start_time,
                end_time=end_time,
                batch_id=batch_id
            )

        except Exception as e:
            self.logger.error(f"Critical error in batch recommendation generation {batch_id}: {e}")
            end_time = datetime.utcnow()
            processing_time = (end_time - start_time).total_seconds()

            return BatchProcessingResult(
                total_papers=len(user_ids),
                successful_classifications=successful_generations,
                failed_classifications=len(user_ids) - successful_generations,
                processing_time_seconds=processing_time,
                average_time_per_paper=processing_time / len(user_ids) if user_ids else 0,
                errors=errors + [{'error': str(e), 'timestamp': datetime.utcnow().isoformat()}],
                start_time=start_time,
                end_time=end_time,
                batch_id=batch_id
            )

    def _generate_recommendations_chunk(self, user_ids: List[str],
                                      paper_pool: Optional[List[str]],
                                      batch_id: str) -> List[Dict[str, Any]]:
        """Generate and optionally persist recommendations for a chunk of users.

        Per-user failures are captured as failure records; never raises.
        """

        results = []

        for user_id in user_ids:
            try:
                start_time = time.time()

                # Generate recommendations
                recommendations = self.classification_engine.get_user_recommendations(
                    user_id, limit=20, paper_pool=paper_pool
                )

                # Store recommendations in database
                if self.db and recommendations:
                    self._store_user_recommendations(user_id, recommendations, batch_id)

                processing_time = time.time() - start_time

                results.append({
                    'success': True,
                    'user_id': user_id,
                    # Engine may return None; report that as zero instead of
                    # crashing on len(None).
                    'recommendation_count': len(recommendations) if recommendations else 0,
                    'processing_time': processing_time,
                    'batch_id': batch_id
                })

            except Exception as e:
                self.logger.error(f"Error generating recommendations for user {user_id}: {e}")
                results.append({
                    'success': False,
                    'user_id': user_id,
                    'error': str(e),
                    'batch_id': batch_id
                })

        return results

    def _store_user_recommendations(self, user_id: str,
                                  recommendations: List[Dict[str, Any]],
                                  batch_id: str):
        """Store user recommendations in database.

        Purges recommendations older than 7 days, then upserts the new set
        with a 30-day expiry; rolls back on any error.
        """
        try:
            # Clear old recommendations
            self.db.execute("""
                DELETE FROM user_recommendations 
                WHERE user_id = %s AND generated_at < CURRENT_TIMESTAMP - INTERVAL '7 days'
            """, (user_id,))

            # Insert new recommendations (batch_id doubles as the
            # algorithm_version conflict key).
            for rec in recommendations:
                self.db.execute("""
                    INSERT INTO user_recommendations 
                    (user_id, paper_id, recommendation_score, recommendation_reason, 
                     algorithm_version, generated_at, expires_at)
                    VALUES (%s, %s, %s, %s, %s, %s, %s)
                    ON CONFLICT (user_id, paper_id, algorithm_version)
                    DO UPDATE SET 
                        recommendation_score = EXCLUDED.recommendation_score,
                        recommendation_reason = EXCLUDED.recommendation_reason,
                        generated_at = EXCLUDED.generated_at,
                        expires_at = EXCLUDED.expires_at
                """, (
                    user_id,
                    rec['paper_id'],
                    rec['recommendation_score'],
                    rec.get('reasoning', ''),
                    batch_id,
                    datetime.utcnow(),
                    datetime.utcnow() + timedelta(days=30)  # 30-day expiry
                ))

            self.db.commit()

        except Exception as e:
            self.logger.error(f"Error storing recommendations for user {user_id}: {e}")
            if self.db:
                self.db.rollback()

    def _call_progress_callbacks(self, current: int, total: int):
        """Call all registered progress callbacks; a failing callback is
        logged and skipped so it cannot abort the batch."""
        for callback in self.progress_callbacks:
            try:
                callback(current, total)
            except Exception as e:
                self.logger.error(f"Error in progress callback: {e}")

    def _save_intermediate_results(self, results: List[Dict[str, Any]],
                                 batch_id: str, processed_count: int):
        """Save intermediate processing results to a JSON file in the CWD.

        Note: only the most recent chunk's results are written, not the
        accumulated batch — the filename's processed_count disambiguates.
        """
        try:
            results_file = Path(f"batch_results_{batch_id}_{processed_count}.json")

            with open(results_file, 'w') as f:
                json.dump({
                    'batch_id': batch_id,
                    'processed_count': processed_count,
                    'timestamp': datetime.utcnow().isoformat(),
                    'results': results
                }, f, indent=2)

            self.logger.info(f"Saved intermediate results to {results_file}")

        except Exception as e:
            self.logger.error(f"Error saving intermediate results: {e}")

    def _update_processing_stats(self, result: BatchProcessingResult):
        """Fold one batch's results into the cumulative statistics.

        The running-average division is guarded so an empty batch
        (total_papers == 0) cannot raise ZeroDivisionError — previously an
        empty batch on a fresh instance crashed here and was misreported as
        a critical batch error.
        """
        self.processing_stats['papers_processed'] += result.total_papers
        self.processing_stats['total_processing_time'] += result.processing_time_seconds
        if self.processing_stats['papers_processed'] > 0:
            self.processing_stats['average_processing_time'] = (
                self.processing_stats['total_processing_time'] /
                self.processing_stats['papers_processed']
            )
        self.processing_stats['classification_errors'] += result.failed_classifications

    def _log_batch_results(self, result: BatchProcessingResult):
        """Log detailed batch processing results."""
        success_rate = (result.successful_classifications / result.total_papers * 100 
                       if result.total_papers > 0 else 0)

        self.logger.info(f"""
Batch Processing Complete: {result.batch_id}
========================================
Total Papers: {result.total_papers}
Successfully Processed: {result.successful_classifications}
Failed: {result.failed_classifications}
Success Rate: {success_rate:.1f}%
Total Processing Time: {result.processing_time_seconds:.2f} seconds
Average Time per Paper: {result.average_time_per_paper:.3f} seconds
Error Count: {len(result.errors)}
========================================
        """)

        # Log errors if any (capped to keep the log readable)
        if result.errors:
            self.logger.warning(f"Errors encountered in batch {result.batch_id}:")
            for error in result.errors[:10]:  # Log first 10 errors
                self.logger.warning(f"  Paper {error.get('paper_id', 'unknown')}: {error.get('error', 'Unknown error')}")

    def get_processing_stats(self) -> Dict[str, Any]:
        """Get a shallow copy of the cumulative processing statistics."""
        return self.processing_stats.copy()

    def process_daily_papers(self, cutoff_date: Optional[datetime] = None) -> BatchProcessingResult:
        """
        Process all papers submitted since cutoff date (daily processing job).

        Args:
            cutoff_date: Process papers submitted after this date (defaults to yesterday)

        Returns:
            Batch processing results

        Raises:
            Exception: re-raises any error from fetching or processing.
        """
        if cutoff_date is None:
            cutoff_date = datetime.utcnow() - timedelta(days=1)

        self.logger.info(f"Starting daily paper processing for papers since {cutoff_date}")

        # Get papers from database
        try:
            papers_data = self._get_recent_papers(cutoff_date)

            if not papers_data:
                # Nothing to do — return an empty, successful result.
                self.logger.info("No new papers found for processing")
                return BatchProcessingResult(
                    total_papers=0,
                    successful_classifications=0,
                    failed_classifications=0,
                    processing_time_seconds=0,
                    average_time_per_paper=0,
                    errors=[],
                    start_time=datetime.utcnow(),
                    end_time=datetime.utcnow(),
                    batch_id=f"daily_{cutoff_date.strftime('%Y%m%d')}"
                )

            # Process papers
            batch_id = f"daily_{cutoff_date.strftime('%Y%m%d')}"
            return self.process_papers_batch(papers_data, batch_id)

        except Exception as e:
            self.logger.error(f"Error in daily paper processing: {e}")
            raise

    def _get_recent_papers(self, cutoff_date: datetime) -> List[Dict[str, Any]]:
        """Get papers from database submitted after cutoff date.

        Authors arrive one per joined row, so rows are aggregated into one
        dict per paper with an 'authors' list. Returns [] without a DB or
        on any query error.
        """
        if not self.db:
            return []

        try:
            # Columns are qualified with the table alias: both papers and
            # paper_authors carry a paper_id column, so the previous
            # unqualified "SELECT paper_id" was rejected as ambiguous.
            cursor = self.db.execute("""
                SELECT p.paper_id, p.arxiv_id, p.title, p.abstract, p.primary_category, 
                       p.secondary_categories, p.submission_date, authors.full_name as authors
                FROM papers p
                LEFT JOIN paper_authors pa ON p.paper_id = pa.paper_id
                LEFT JOIN authors ON pa.author_id = authors.author_id
                WHERE p.submission_date > %s 
                    AND p.indexed_at IS NULL  -- Only unprocessed papers
                ORDER BY p.submission_date DESC
            """, (cutoff_date,))

            papers_dict = {}
            for row in cursor.fetchall():
                paper_id = row[0]
                if paper_id not in papers_dict:
                    papers_dict[paper_id] = {
                        'paper_id': paper_id,
                        'arxiv_id': row[1],
                        'title': row[2],
                        'abstract': row[3],
                        'primary_category': row[4],
                        'secondary_categories': row[5] or [],
                        'submission_date': row[6],
                        'authors': []
                    }

                if row[7]:  # author name
                    papers_dict[paper_id]['authors'].append(row[7])

            return list(papers_dict.values())

        except Exception as e:
            self.logger.error(f"Error getting recent papers: {e}")
            return []

    def schedule_daily_processing(self, processing_time: str = "02:00") -> bool:
        """
        Schedule daily processing job (placeholder - requires actual scheduler integration).

        Args:
            processing_time: Time to run daily processing (HH:MM format)

        Returns:
            True if scheduling successful
        """
        # This would integrate with a job scheduler like Celery, APScheduler, etc.
        self.logger.info(f"Daily processing scheduled for {processing_time}")
        return True