#!/usr/bin/env python3
"""
Data Preprocessing Pipeline - Main Application Entry Point

This module serves as the main orchestrator for the data preprocessing pipeline,
coordinating all components and providing both interactive and batch processing modes.
"""

import sys
import argparse
import logging
import os
from pathlib import Path
from typing import Optional, List

# Import core components
from core.file_manager import FileManager
from core.data_loader import DataLoader
from core.configuration_manager import ConfigurationManager
from core.combination_generator import CombinationGenerator
from core.processing_pipeline import ProcessingPipeline
from core.output_manager import OutputManager
from core.exceptions import (
    PreprocessingPipelineError,
    UserCancellationError,
    FileOperationError,
    ConfigurationError,
    ProcessingError
)

# Import UI and utilities
from ui.console_ui import ConsoleUI
from preprocessors.registry import PreprocessorRegistry
import logging
from utils.progress_tracker import ProgressTracker


class DataPreprocessingPipeline:
    """
    Main application orchestrator for the data preprocessing pipeline.
    
    This class coordinates all components and provides both interactive
    and batch processing capabilities.
    """
    
    def __init__(self, log_level: str = "INFO"):
        """
        Initialize the main application.
        
        Args:
            log_level (str): Logging level (DEBUG, INFO, WARNING, ERROR)
        
        Raises:
            PreprocessingPipelineError: If any core component fails to initialize.
        """
        # Setup logging first so component initialization failures are recorded.
        self.logger = self._setup_logging(log_level)
        self.logger.info("Initializing Data Preprocessing Pipeline")
        
        # Initialize core components
        try:
            self.file_manager = FileManager()
            self.data_loader = DataLoader()
            self.config_manager = ConfigurationManager()
            self.combination_generator = CombinationGenerator()
            self.output_manager = OutputManager()
            
            # Initialize preprocessor registry with auto-discovery
            self.preprocessor_registry = PreprocessorRegistry()
            self.preprocessor_registry.auto_discover_processors()
            
            # Initialize processing pipeline (depends on the populated registry)
            self.processing_pipeline = ProcessingPipeline(self.preprocessor_registry)
            
            self.logger.info("All components initialized successfully")
            
        except Exception as e:
            self.logger.error(f"Failed to initialize components: {e}")
            # Chain the original exception so the root cause is not lost.
            raise PreprocessingPipelineError(f"Initialization failed: {e}") from e
    
    def _setup_logging(self, log_level: str) -> logging.Logger:
        """
        Setup logging configuration.
        
        Args:
            log_level (str): Logging level name (case-insensitive). Unknown
                names fall back to INFO instead of raising AttributeError.
            
        Returns:
            logging.Logger: Configured logger
        """
        # Configure logging to both console and file.
        # NOTE: basicConfig is a no-op if the root logger already has handlers,
        # so constructing this class twice will not duplicate handlers.
        logging.basicConfig(
            level=getattr(logging, log_level.upper(), logging.INFO),
            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
            handlers=[
                logging.StreamHandler(),
                # Explicit encoding so non-ASCII messages don't depend on the
                # platform's default codec.
                logging.FileHandler('preprocessing_pipeline.log', encoding='utf-8')
            ]
        )
        
        return logging.getLogger(__name__)
    
    def run_interactive_mode(self) -> int:
        """
        Run the application in interactive console mode.
        
        Returns:
            int: Exit code (0 for success, 1 for error)
        """
        try:
            self.logger.info("Starting interactive mode")
            # Pass existing components to avoid re-initialization
            console_ui = ConsoleUI(
                file_manager=self.file_manager,
                data_loader=self.data_loader,
                config_manager=self.config_manager,
                combination_generator=self.combination_generator,
                output_manager=self.output_manager,
                processing_pipeline=self.processing_pipeline,
                preprocessor_registry=self.preprocessor_registry
            )
            console_ui.run()
            return 0
            
        except KeyboardInterrupt:
            # Ctrl+C in interactive mode is a normal exit, not an error.
            self.logger.info("Interactive mode cancelled by user")
            print("\nApplication interrupted by user.")
            return 0
            
        except Exception as e:
            self.logger.error(f"Error in interactive mode: {e}")
            print(f"Error: {e}")
            return 1
    
    def run_batch_mode(self, input_file: str, output_dir: str, 
                      layer_selections: List[List[int]]) -> int:
        """
        Run the application in batch processing mode.
        
        Args:
            input_file (str): Path to input Excel file
            output_dir (str): Output directory path (created if missing)
            layer_selections (List[List[int]]): Preprocessing method selections per layer
            
        Returns:
            int: Exit code (0 if at least one combination succeeded, 1 otherwise)
        """
        try:
            self.logger.info(f"Starting batch mode: {input_file} -> {output_dir}")
            
            # Load and validate data
            print(f"Loading data from: {input_file}")
            data = self.data_loader.load_excel(input_file)
            
            if data is None:
                raise ProcessingError("Failed to load data from Excel file")
            
            print(f"Data loaded successfully: {data.shape[0]} rows, {data.shape[1]} columns")
            
            print("Validating data structure...")
            self.data_loader.validate_data_structure(data)
            
            # Create output directory
            if not os.path.exists(output_dir):
                os.makedirs(output_dir, exist_ok=True)
                self.logger.info(f"Created output directory: {output_dir}")
            
            # Generate combinations
            print("Generating processing combinations...")
            combinations = self.combination_generator.generate_combinations(layer_selections)
            print(f"Generated {len(combinations)} combinations to process")
            
            # Initialize progress tracking
            progress_tracker = ProgressTracker(
                total_combinations=len(combinations),
                update_callback=self._batch_progress_callback
            )
            progress_tracker.start_tracking()
            
            # Process combinations
            print("Starting batch processing...")
            
            def progress_callback(current: int, total: int, combination_id: str):
                # No average exists before the first combination finishes,
                # so pass None on the first call.
                processing_time = None
                if current > 1:
                    processing_time = progress_tracker.get_average_processing_time()
                progress_tracker.update_progress(combination_id, processing_time)
            
            batch_result = self.processing_pipeline.batch_process(
                data=data,
                combinations=combinations,
                progress_callback=progress_callback
            )
            
            # Save results
            print(f"\nSaving {batch_result.successful_combinations} successful results...")
            self._save_batch_results(batch_result, output_dir)
            
            # Display summary
            self._display_batch_summary(batch_result, output_dir)
            
            return 0 if batch_result.successful_combinations > 0 else 1
            
        except Exception as e:
            self.logger.error(f"Error in batch mode: {e}")
            print(f"Batch processing failed: {e}")
            return 1
    
    def _batch_progress_callback(self, progress_info) -> None:
        """Progress callback for batch mode: rewrite a single console line in place."""
        print(f"\rProgress: {progress_info.percentage:5.1f}% "
              f"({progress_info.current_combination}/{progress_info.total_combinations}) "
              f"ETA: {self._format_time(progress_info.estimated_time_remaining)} "
              f"| {progress_info.current_combination_id}", end="", flush=True)
    
    def _save_batch_results(self, batch_result, output_dir: str) -> None:
        """
        Save batch processing results to files.
        
        Writes one output file per successful combination plus a text summary
        report. Individual save failures are logged and recorded in the
        summary, but do not abort the remaining saves.
        """
        method_names = self.preprocessor_registry.get_method_names()
        saved_files = {}
        
        for combination_id, result in batch_result.results.items():
            if result.success and result.processed_data is not None:
                try:
                    # Generate human-readable method names for the filename;
                    # fall back to "Method<id>" for unregistered IDs.
                    combination_method_names = [
                        method_names.get(mid, f"Method{mid}") 
                        for mid in result.combination
                    ]
                    
                    # Get output file path
                    output_path = self.output_manager.get_full_output_path(
                        directory=output_dir,
                        combination_id=combination_id,
                        method_names=combination_method_names
                    )
                    
                    # Save the file
                    success = self.output_manager.save_excel(result.processed_data, output_path)
                    saved_files[output_path] = success
                    
                except Exception as e:
                    self.logger.error(f"Error saving {combination_id}: {e}")
                    saved_files[f"error_{combination_id}"] = False
        
        # Generate summary report (best-effort: a failure here only warns)
        try:
            summary_report = self.output_manager.generate_summary_report(saved_files)
            summary_path = Path(output_dir) / "processing_summary.txt"
            
            # Explicit encoding: the report may contain non-ASCII characters
            # and must not depend on the platform default codec.
            with open(summary_path, 'w', encoding='utf-8') as f:
                f.write(summary_report)
            
            print(f"\nSummary report saved: {summary_path}")
            
        except Exception as e:
            self.logger.warning(f"Could not save summary report: {e}")
    
    def _display_batch_summary(self, batch_result, output_dir: str) -> None:
        """Display batch processing summary."""
        print(f"\n\n{'='*60}")
        print("BATCH PROCESSING SUMMARY")
        print(f"{'='*60}")
        print(f"Total combinations: {batch_result.total_combinations}")
        print(f"Successful: {batch_result.successful_combinations}")
        print(f"Failed: {batch_result.failed_combinations}")
        print(f"Success rate: {batch_result.success_rate:.1f}%")
        print(f"Processing time: {self._format_time(batch_result.total_processing_time)}")
        print(f"Output directory: {output_dir}")
        print(f"{'='*60}")
    
    def _format_time(self, seconds: float) -> str:
        """
        Format time duration in human-readable format.
        
        Negative or zero durations render as "0s"; fractional seconds are
        truncated.
        """
        if seconds <= 0:
            return "0s"
        
        total_seconds = int(seconds)
        hours = total_seconds // 3600
        minutes = (total_seconds % 3600) // 60
        secs = total_seconds % 60
        
        if hours > 0:
            return f"{hours}h{minutes:02d}m{secs:02d}s"
        elif minutes > 0:
            return f"{minutes}m{secs:02d}s"
        else:
            return f"{secs}s"
    
    def list_available_methods(self) -> None:
        """List all available preprocessing methods, sorted by method ID."""
        print("Available Preprocessing Methods:")
        print("-" * 40)
        
        method_names = self.preprocessor_registry.get_method_names()
        for method_id in sorted(method_names.keys()):
            print(f"  {method_id}: {method_names[method_id]}")
        
        print(f"\nTotal methods available: {len(method_names)}")
    
    def validate_system(self) -> bool:
        """
        Validate system components and configuration.
        
        Checks that at least one preprocessing method is registered, that the
        default method (ID 0) is available, and that the combination generator
        produces the expected output for a trivial two-layer input.
        
        Returns:
            bool: True if system is valid, False otherwise
        """
        try:
            print("Validating system components...")
            
            # Check preprocessor registry
            method_count = self.preprocessor_registry.get_processor_count()
            if method_count == 0:
                print("ERROR: No preprocessing methods available")
                return False
            
            print(f"✓ {method_count} preprocessing methods available")
            
            # Check if default method (0) is available
            if 0 not in self.preprocessor_registry.get_available_methods():
                print("ERROR: Default method (0) not available")
                return False
            
            print("✓ Default method (0) available")
            
            # Test basic component functionality: two layers with one method
            # each should yield exactly one combination.
            test_combinations = [[0], [0]]
            try:
                generated = self.combination_generator.generate_combinations(test_combinations)
                if len(generated) != 1:
                    print(f"ERROR: Combination generator test failed - expected 1, got {len(generated)}")
                    print(f"Generated combinations: {generated}")
                    return False
                print("✓ Combination generator working")
            except Exception as e:
                print(f"ERROR: Combination generator failed: {e}")
                return False
            
            print("✓ System validation passed")
            return True
            
        except Exception as e:
            print(f"ERROR: System validation failed: {e}")
            return False
    
    def cleanup(self) -> None:
        """Clean up resources before exit. Errors are logged, never raised."""
        try:
            # hasattr guard: __init__ may have failed before file_manager existed.
            if hasattr(self, 'file_manager'):
                self.file_manager.cleanup()
            self.logger.info("Application cleanup completed")
        except Exception as e:
            self.logger.warning(f"Cleanup error: {e}")


def parse_arguments() -> argparse.Namespace:
    """
    Parse command-line arguments.
    
    Returns:
        argparse.Namespace: Parsed arguments
    """
    parser = argparse.ArgumentParser(
        description="Data Preprocessing Pipeline - Flexible Excel Data Processing Tool",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Interactive mode (default)
  python main.py
  
  # Batch mode with simple configuration
  python main.py --batch --input data.xlsx --output results/ --layers "0" "1,2"
  
  # List available methods
  python main.py --list-methods
  
  # Validate system
  python main.py --validate
        """
    )
    
    # Declarative option table: (flag names, add_argument keyword options).
    # Order matters for --help output, so it mirrors the logical grouping:
    # mode selection, batch-mode arguments, utilities, logging, version.
    option_table = [
        (('--batch',),
         {'action': 'store_true',
          'help': 'Run in batch processing mode'}),
        (('--interactive',),
         {'action': 'store_true',
          'help': 'Run in interactive mode (default)'}),
        (('--input', '-i'),
         {'type': str,
          'help': 'Input Excel file path (required for batch mode)'}),
        (('--output', '-o'),
         {'type': str,
          'help': 'Output directory path (required for batch mode)'}),
        (('--layers', '-l'),
         {'nargs': '+',
          'help': 'Layer method selections (e.g., "0" "1,2" for 2 layers)'}),
        (('--list-methods',),
         {'action': 'store_true',
          'help': 'List available preprocessing methods and exit'}),
        (('--validate',),
         {'action': 'store_true',
          'help': 'Validate system components and exit'}),
        (('--log-level',),
         {'choices': ['DEBUG', 'INFO', 'WARNING', 'ERROR'],
          'default': 'INFO',
          'help': 'Set logging level (default: INFO)'}),
        (('--version',),
         {'action': 'version',
          'version': 'Data Preprocessing Pipeline v1.0.0'}),
    ]
    for flags, options in option_table:
        parser.add_argument(*flags, **options)
    
    return parser.parse_args()


def parse_layer_selections(layer_strings: List[str]) -> List[List[int]]:
    """
    Parse layer selection strings into method ID lists.
    
    Args:
        layer_strings (List[str]): List of comma-separated method ID strings
            (e.g. ["0", "1,2"] describes two layers).
        
    Returns:
        List[List[int]]: Parsed layer selections, one inner list per layer.
        
    Raises:
        ValueError: If any entry is not a comma-separated list of
            non-negative integers. The message names the offending layer.
    """
    layer_selections: List[List[int]] = []
    
    for i, layer_str in enumerate(layer_strings):
        try:
            # Parse comma-separated method IDs. str.split(',') always yields
            # at least one element, and int('') raises ValueError, so an
            # empty/blank layer string is rejected by the int() call below —
            # no separate emptiness check is needed.
            method_ids = []
            for method_str in layer_str.split(','):
                method_id = int(method_str.strip())
                if method_id < 0:
                    raise ValueError(f"Method ID must be non-negative: {method_id}")
                method_ids.append(method_id)
            
            layer_selections.append(method_ids)
            
        except ValueError as e:
            # Re-raise with layer context, chaining the original error so the
            # root cause survives for debugging.
            raise ValueError(f"Invalid layer {i+1} specification '{layer_str}': {e}") from e
    
    return layer_selections


def _dispatch(app, args) -> int:
    """
    Route a parsed argument namespace to the matching application mode.
    
    Args:
        app: Initialized DataPreprocessingPipeline instance.
        args: Namespace produced by parse_arguments().
    
    Returns:
        int: Exit code (0 for success, 1 for error)
    """
    # Utility commands short-circuit everything else.
    if args.list_methods:
        app.list_available_methods()
        return 0
    
    if args.validate:
        return 0 if app.validate_system() else 1
    
    if not args.batch:
        # Interactive mode is the default when --batch is absent.
        return app.run_interactive_mode()
    
    # Batch mode requires all three of --input/--output/--layers.
    for value, flag in ((args.input, "--input"),
                        (args.output, "--output"),
                        (args.layers, "--layers")):
        if not value:
            print(f"ERROR: {flag} is required for batch mode")
            return 1
    
    try:
        layer_selections = parse_layer_selections(args.layers)
    except ValueError as e:
        print(f"ERROR: {e}")
        return 1
    
    return app.run_batch_mode(args.input, args.output, layer_selections)


def main() -> int:
    """
    Main entry point for the application.
    
    Returns:
        int: Exit code (0 for success, 1 for error)
    """
    try:
        args = parse_arguments()
        app = DataPreprocessingPipeline(log_level=args.log_level)
        try:
            return _dispatch(app, args)
        finally:
            # Always release resources, whatever mode ran.
            app.cleanup()
    
    except KeyboardInterrupt:
        print("\nApplication interrupted by user.")
        return 0
    
    except Exception as e:
        print(f"Critical error: {e}")
        return 1


# Script entry point: propagate main()'s exit code to the shell.
if __name__ == "__main__":
    sys.exit(main())