"""
Comprehensive logging system for the data preprocessing pipeline.

This module provides logging functionality for error tracking, processing summaries,
and performance metrics during batch processing operations.
"""

import logging
import time
import json
from typing import Dict, List, Optional, Any
from datetime import datetime, timedelta
from pathlib import Path
from dataclasses import dataclass, asdict
from core.models import ProcessingResult


@dataclass
class ProcessingMetrics:
    """Performance metrics for processing operations.

    Aggregated snapshot of a processing session, produced by
    ProcessingLogger. Durations are in seconds; success_rate is a
    percentage in [0, 100].
    """
    total_combinations: int  # number of combinations attempted (success + failure)
    successful_combinations: int  # count of results flagged success
    failed_combinations: int  # total_combinations - successful_combinations
    total_processing_time: float  # sum of per-combination times, seconds
    average_processing_time: float  # mean over all attempts, seconds (0 if none)
    fastest_processing_time: float  # min over successful attempts, seconds (0 if none)
    slowest_processing_time: float  # max over successful attempts, seconds (0 if none)
    success_rate: float  # successful / total * 100 (0 if no attempts)
    start_time: datetime  # session start (naive local time via datetime.now())
    end_time: datetime  # session end (naive local time via datetime.now())

@dataclass
class ErrorSummary:
    """Summary of a single error encountered during processing.

    One instance is recorded per failed combination; collected by
    ProcessingLogger for report generation and JSON export.
    """
    combination_id: str  # identifier of the failed combination
    combination: List[int]  # the combination values themselves
    error_message: str  # detailed message ("Unknown error" if none was provided)
    error_type: str  # error classification label (e.g. "ProcessingError")
    timestamp: datetime  # when the failure was logged (naive local time)
    processing_time: float  # seconds spent before the failure


class ProcessingLogger:
    """
    Comprehensive logging system for batch processing operations.

    Provides functionality for:
    - Error logging for failed combinations with detailed messages
    - Processing summary report generation
    - Performance metrics tracking (processing times, success rates)

    Three dedicated loggers are used: a main logger (file + console), an
    error logger (errors.log) and a performance logger (performance.log).
    Call ``cleanup()`` when done to release the file handlers.
    """

    def __init__(self, log_directory: str = "logs", log_level: int = logging.INFO):
        """
        Initialize the processing logger.

        Args:
            log_directory: Directory to store log files (created if missing)
            log_level: Logging level for the main logger (default: INFO)
        """
        self.log_directory = Path(log_directory)
        # parents=True so nested paths like "out/run1/logs" work too; the
        # previous leaf-only mkdir raised FileNotFoundError for them.
        self.log_directory.mkdir(parents=True, exist_ok=True)

        # Initialize loggers
        self._setup_loggers(log_level)

        # Processing tracking
        self.processing_results: List[ProcessingResult] = []
        self.error_summaries: List[ErrorSummary] = []
        self.start_time: Optional[datetime] = None
        self.end_time: Optional[datetime] = None

        # Durations (seconds) of successful combinations only.
        self.processing_times: List[float] = []

    def _setup_loggers(self, log_level: int) -> None:
        """
        Set up logging configuration.

        Creates the main, error and performance loggers with their file
        handlers, plus a console handler on the main logger.

        Args:
            log_level: Logging level to use for the main logger
        """
        # Create formatters
        detailed_formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
        )
        simple_formatter = logging.Formatter(
            '%(asctime)s - %(levelname)s - %(message)s'
        )

        # Main processing logger
        self.main_logger = logging.getLogger('preprocessing_pipeline')
        self.main_logger.setLevel(log_level)

        # Error logger for failed combinations
        self.error_logger = logging.getLogger('preprocessing_errors')
        self.error_logger.setLevel(logging.ERROR)

        # Performance logger
        self.performance_logger = logging.getLogger('preprocessing_performance')
        self.performance_logger.setLevel(logging.INFO)

        # These logger names are process-global: clear handlers left over from
        # a previous ProcessingLogger instance (closing them to release file
        # descriptors), and disable propagation so records are not duplicated
        # by handlers attached to the root logger.
        for logger in [self.main_logger, self.error_logger, self.performance_logger]:
            logger.propagate = False
            for handler in logger.handlers[:]:
                handler.close()
                logger.removeHandler(handler)

        # File handlers
        main_handler = logging.FileHandler(self.log_directory / 'processing.log')
        main_handler.setFormatter(detailed_formatter)
        self.main_logger.addHandler(main_handler)

        error_handler = logging.FileHandler(self.log_directory / 'errors.log')
        error_handler.setFormatter(detailed_formatter)
        self.error_logger.addHandler(error_handler)

        performance_handler = logging.FileHandler(self.log_directory / 'performance.log')
        performance_handler.setFormatter(simple_formatter)
        self.performance_logger.addHandler(performance_handler)

        # Console handler for main logger
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(simple_formatter)
        self.main_logger.addHandler(console_handler)

    def start_processing_session(self, total_combinations: int, input_file: str) -> None:
        """
        Start a new processing session, discarding any previous session state.

        Args:
            total_combinations: Total number of combinations to process
            input_file: Path to the input file being processed

        Requirements: 7.4 - log processing session details
        """
        self.start_time = datetime.now()
        # Reset the previous session's end marker as well; leaving it set
        # made later metric calculations use a stale end timestamp.
        self.end_time = None
        self.processing_results.clear()
        self.error_summaries.clear()
        self.processing_times.clear()

        self.main_logger.info("Starting processing session")
        self.main_logger.info(f"Input file: {input_file}")
        self.main_logger.info(f"Total combinations to process: {total_combinations}")
        self.main_logger.info(f"Session started at: {self.start_time}")

        self.performance_logger.info(f"SESSION_START: {total_combinations} combinations, input: {input_file}")

    def log_combination_success(self, result: ProcessingResult) -> None:
        """
        Log successful combination processing.

        Args:
            result: Processing result for the successful combination

        Requirements: 7.4 - track successful processing
        """
        self.processing_results.append(result)
        self.processing_times.append(result.processing_time)

        self.main_logger.info(
            f"Successfully processed combination {result.combination_id} "
            f"({result.combination}) in {result.processing_time:.3f}s"
        )

        self.performance_logger.info(
            f"SUCCESS: {result.combination_id}, time: {result.processing_time:.3f}s, "
            f"output: {result.output_file}"
        )

    def log_combination_error(self, result: ProcessingResult, error_type: str = "ProcessingError") -> None:
        """
        Log failed combination processing with detailed error information.

        Records the result, builds an ErrorSummary, and writes to the error,
        main and performance logs.

        Args:
            result: Processing result for the failed combination
            error_type: Type of error that occurred

        Requirements: 7.4, 6.4 - error logging for failed combinations with detailed messages
        """
        self.processing_results.append(result)

        # Create error summary
        error_summary = ErrorSummary(
            combination_id=result.combination_id,
            combination=result.combination,
            error_message=result.error_message or "Unknown error",
            error_type=error_type,
            timestamp=datetime.now(),
            processing_time=result.processing_time
        )
        self.error_summaries.append(error_summary)

        # Log error details
        self.error_logger.error(
            f"Failed to process combination {result.combination_id} ({result.combination}): "
            f"{error_summary.error_message}"
        )

        self.main_logger.error(
            f"Combination {result.combination_id} failed after {result.processing_time:.3f}s: "
            f"{error_summary.error_message}"
        )

        self.performance_logger.info(
            f"FAILURE: {result.combination_id}, time: {result.processing_time:.3f}s, "
            f"error: {error_type}"
        )

    def log_progress_update(self, current: int, total: int, percentage: float, eta: float) -> None:
        """
        Log progress updates during processing.

        Args:
            current: Current combination number
            total: Total combinations
            percentage: Completion percentage
            eta: Estimated time remaining in seconds
        """
        self.main_logger.info(
            f"Progress: {current}/{total} ({percentage:.1f}%) - ETA: {self._format_duration(eta)}"
        )

    def end_processing_session(self) -> ProcessingMetrics:
        """
        End the processing session and calculate final metrics.

        Safe to call even if start_processing_session was never invoked
        (a zero-length session is assumed instead of crashing).

        Returns:
            ProcessingMetrics: Complete processing metrics

        Requirements: 7.4, 6.4 - processing summary report generation and performance metrics
        """
        self.end_time = datetime.now()
        if self.start_time is None:
            # Defensive: without this, the duration subtraction below raised
            # TypeError (datetime - None) when no session had been started.
            self.start_time = self.end_time

        # Single source of truth for the metric math; this code used to be
        # duplicated here and in _calculate_current_metrics.
        metrics = self._calculate_current_metrics()

        # Log session summary
        session_duration = (self.end_time - self.start_time).total_seconds()

        self.main_logger.info("Processing session completed")
        self.main_logger.info(f"Session duration: {self._format_duration(session_duration)}")
        self.main_logger.info(f"Total combinations processed: {metrics.total_combinations}")
        self.main_logger.info(
            f"Successful: {metrics.successful_combinations} ({metrics.success_rate:.1f}%)"
        )
        self.main_logger.info(f"Failed: {metrics.failed_combinations}")
        self.main_logger.info(f"Average processing time: {metrics.average_processing_time:.3f}s")

        self.performance_logger.info(
            f"SESSION_END: duration: {session_duration:.1f}s, "
            f"success_rate: {metrics.success_rate:.1f}%, "
            f"avg_time: {metrics.average_processing_time:.3f}s"
        )

        return metrics

    def generate_summary_report(self, output_file: Optional[str] = None) -> str:
        """
        Generate a comprehensive processing summary report.

        Report generation is read-only: it no longer ends the session as a
        side effect when end_time is unset — call end_processing_session()
        explicitly to close a session.

        Args:
            output_file: Optional file path to save the report

        Returns:
            str: Formatted summary report

        Requirements: 7.4, 6.4 - processing summary report generation
        """
        if not self.processing_results:
            return "No processing results available for report generation."

        # Snapshot current metrics without mutating session state; for an
        # open session _calculate_current_metrics substitutes "now" for the
        # missing end time.
        metrics = self._calculate_current_metrics()

        # Build report
        report_lines = [
            "=" * 80,
            "DATA PREPROCESSING PIPELINE - PROCESSING SUMMARY REPORT",
            "=" * 80,
            "",
            "Session Information:",
            f"  Start Time: {metrics.start_time.strftime('%Y-%m-%d %H:%M:%S')}",
            f"  End Time: {metrics.end_time.strftime('%Y-%m-%d %H:%M:%S')}",
            f"  Duration: {self._format_duration((metrics.end_time - metrics.start_time).total_seconds())}",
            "",
            "Processing Statistics:",
            f"  Total Combinations: {metrics.total_combinations}",
            f"  Successful: {metrics.successful_combinations} ({metrics.success_rate:.1f}%)",
            f"  Failed: {metrics.failed_combinations}",
            "",
            "Performance Metrics:",
            f"  Total Processing Time: {self._format_duration(metrics.total_processing_time)}",
            f"  Average Processing Time: {metrics.average_processing_time:.3f}s",
            f"  Fastest Processing Time: {metrics.fastest_processing_time:.3f}s",
            f"  Slowest Processing Time: {metrics.slowest_processing_time:.3f}s",
            ""
        ]

        # Add error summary if there are failures
        if self.error_summaries:
            report_lines.extend([
                "Error Summary:",
                "-" * 40
            ])

            # Group errors by type
            error_types: Dict[str, List[ErrorSummary]] = {}
            for error in self.error_summaries:
                error_types.setdefault(error.error_type, []).append(error)

            for error_type, errors in error_types.items():
                report_lines.append(f"  {error_type}: {len(errors)} occurrences")
                for error in errors[:5]:  # Show first 5 errors of each type
                    report_lines.append(f"    - {error.combination_id}: {error.error_message}")
                if len(errors) > 5:
                    report_lines.append(f"    ... and {len(errors) - 5} more")
                report_lines.append("")

        # Add successful combinations summary
        successful_results = [r for r in self.processing_results if r.success]
        if successful_results:
            report_lines.extend([
                "Successful Combinations:",
                "-" * 40
            ])

            # Show the extremes: 5 fastest and 5 slowest successful runs
            sorted_by_time = sorted(successful_results, key=lambda x: x.processing_time)

            report_lines.append("  Fastest Processing Times:")
            for result in sorted_by_time[:5]:
                report_lines.append(f"    {result.combination_id}: {result.processing_time:.3f}s")

            report_lines.append("")
            report_lines.append("  Slowest Processing Times:")
            for result in sorted_by_time[-5:]:
                report_lines.append(f"    {result.combination_id}: {result.processing_time:.3f}s")

        report_lines.extend([
            "",
            "=" * 80,
            f"Report generated at: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
            "=" * 80
        ])

        report = "\n".join(report_lines)

        # Save to file if requested
        if output_file:
            report_path = Path(output_file)
            report_path.parent.mkdir(parents=True, exist_ok=True)
            with open(report_path, 'w', encoding='utf-8') as f:
                f.write(report)
            self.main_logger.info(f"Summary report saved to: {report_path}")

        return report

    def export_metrics_json(self, output_file: str) -> None:
        """
        Export processing metrics to JSON format.

        Args:
            output_file: Path to save the JSON metrics file

        Requirements: 7.4 - performance metrics tracking
        """
        metrics = self._calculate_current_metrics()

        # Convert to serializable format (datetimes -> ISO 8601 strings)
        metrics_dict = asdict(metrics)
        metrics_dict['start_time'] = metrics.start_time.isoformat()
        metrics_dict['end_time'] = metrics.end_time.isoformat()

        # Add detailed results.
        # NOTE(review): assumes ProcessingResult is a dataclass whose fields
        # are JSON-serializable — confirm against core.models.
        export_data = {
            'metrics': metrics_dict,
            'processing_results': [asdict(result) for result in self.processing_results],
            'error_summaries': [
                {
                    **asdict(error),
                    'timestamp': error.timestamp.isoformat()
                }
                for error in self.error_summaries
            ]
        }

        output_path = Path(output_file)
        output_path.parent.mkdir(parents=True, exist_ok=True)

        with open(output_path, 'w', encoding='utf-8') as f:
            json.dump(export_data, f, indent=2, ensure_ascii=False)

        self.main_logger.info(f"Metrics exported to JSON: {output_path}")

    def _calculate_current_metrics(self) -> ProcessingMetrics:
        """Calculate metrics based on current processing results.

        Missing session timestamps are substituted with "now" so the result
        is always well-formed. Min/max times consider successful runs only.
        """
        total_combinations = len(self.processing_results)
        successful_combinations = sum(1 for r in self.processing_results if r.success)
        failed_combinations = total_combinations - successful_combinations

        total_processing_time = sum(r.processing_time for r in self.processing_results)
        average_processing_time = total_processing_time / total_combinations if total_combinations > 0 else 0

        success_times = [r.processing_time for r in self.processing_results if r.success]
        fastest_processing_time = min(success_times) if success_times else 0
        slowest_processing_time = max(success_times) if success_times else 0

        success_rate = (successful_combinations / total_combinations * 100) if total_combinations > 0 else 0

        return ProcessingMetrics(
            total_combinations=total_combinations,
            successful_combinations=successful_combinations,
            failed_combinations=failed_combinations,
            total_processing_time=total_processing_time,
            average_processing_time=average_processing_time,
            fastest_processing_time=fastest_processing_time,
            slowest_processing_time=slowest_processing_time,
            success_rate=success_rate,
            start_time=self.start_time or datetime.now(),
            end_time=self.end_time or datetime.now()
        )

    def _format_duration(self, seconds: float) -> str:
        """
        Format duration in seconds to human-readable format.

        Args:
            seconds: Duration in seconds (non-positive values yield "0s")

        Returns:
            str: Formatted duration string, e.g. "1h 2m 3s"
        """
        if seconds <= 0:
            return "0s"

        total_seconds = int(seconds)
        hours = total_seconds // 3600
        minutes = (total_seconds % 3600) // 60
        secs = total_seconds % 60

        if hours > 0:
            return f"{hours}h {minutes}m {secs}s"
        elif minutes > 0:
            return f"{minutes}m {secs}s"
        else:
            return f"{secs}s"

    def get_error_statistics(self) -> Dict[str, int]:
        """
        Get statistics about error types.

        Returns:
            Dict[str, int]: Error type counts
        """
        error_counts: Dict[str, int] = {}
        for error in self.error_summaries:
            error_counts[error.error_type] = error_counts.get(error.error_type, 0) + 1
        return error_counts

    def get_processing_results(self) -> List[ProcessingResult]:
        """Get a shallow copy of all processing results."""
        return self.processing_results.copy()

    def get_error_summaries(self) -> List[ErrorSummary]:
        """Get a shallow copy of all error summaries."""
        return self.error_summaries.copy()

    def cleanup(self) -> None:
        """Clean up resources and close file handlers."""
        for logger in [self.main_logger, self.error_logger, self.performance_logger]:
            for handler in logger.handlers[:]:
                handler.close()
                logger.removeHandler(handler)