"""
Output management system for the data preprocessing pipeline.

This module handles the creation of output directories, filename generation,
and file organization for processed data files.
"""

import os
import pandas as pd
from datetime import datetime
from typing import Dict, List, Optional
from pathlib import Path
import logging


class OutputManager:
    """
    Manages output file generation and organization for preprocessing results.
    
    This class handles creating organized directory structures, generating
    descriptive filenames, and resolving naming conflicts for output files.
    """
    
    # Cap on the method-name portion of a generated filename, chosen to stay
    # well under common filesystem filename limits (~255 characters).
    MAX_METHOD_STRING_LENGTH = 100
    # Highest conflict counter tried before falling back to a timestamp suffix.
    MAX_CONFLICT_COUNTER = 9999
    # Upper bound on auto-fitted Excel column widths (in characters).
    MAX_COLUMN_WIDTH = 50
    
    def __init__(self):
        """Initialize the OutputManager with the default base output directory."""
        self.base_output_dir = "preprocessing_output"
    
    def create_output_directory(self, base_name: str) -> str:
        """
        Create organized output directory structure.
        
        Creates a timestamped directory structure for organizing output files
        based on the input file name and current timestamp.
        
        Args:
            base_name (str): Base name for the output directory (typically input filename)
            
        Returns:
            str: Path to the created output directory
            
        Requirements: 6.2 - organize files in a structured directory format
        """
        # Strip any file extension so "data.csv" becomes "data".
        base_name_clean = Path(base_name).stem
        
        # Timestamp makes each run's directory unique.
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        
        # Layout: preprocessing_output/<filename>_<timestamp>/
        output_dir = Path(self.base_output_dir) / f"{base_name_clean}_{timestamp}"
        output_dir.mkdir(parents=True, exist_ok=True)
        
        return str(output_dir)
    
    def generate_filename(self, combination_id: str, method_names: List[str]) -> str:
        """
        Generate descriptive filename based on preprocessing combinations.
        
        Creates a filename that clearly indicates which preprocessing methods
        were applied in the combination.
        
        Args:
            combination_id (str): Unique identifier for the combination
            method_names (List[str]): List of preprocessing method names in order
            
        Returns:
            str: Generated filename with .xlsx extension
            
        Requirements: 6.1 - descriptive name indicating preprocessing combination
        """
        # Normalize each method name: lowercase, spaces/hyphens -> underscores.
        method_string = "_".join(
            name.replace(" ", "_").replace("-", "_").lower()
            for name in method_names
        )
        
        # Truncate to avoid exceeding filesystem filename limits.
        if len(method_string) > self.MAX_METHOD_STRING_LENGTH:
            method_string = method_string[:self.MAX_METHOD_STRING_LENGTH]
        
        # Filename shape: <combination_id>_<method1>_<method2>_....xlsx
        return f"{combination_id}_{method_string}.xlsx"
    
    def resolve_filename_conflict(self, directory: str, filename: str) -> str:
        """
        Resolve file naming conflicts by appending unique identifiers.
        
        If a file with the same name already exists, appends a counter
        to create a unique filename.
        
        Args:
            directory (str): Directory where the file will be saved
            filename (str): Original filename
            
        Returns:
            str: Unique filename that doesn't conflict with existing files
            
        Requirements: 6.3 - append unique identifier to prevent overwrites
        """
        file_path = Path(directory) / filename
        
        # Fast path: no conflict, keep the original name.
        if not file_path.exists():
            return filename
        
        name_part = file_path.stem
        extension = file_path.suffix
        
        # Probe counter-suffixed names (name_001, name_002, ...) until one is free.
        for counter in range(1, self.MAX_CONFLICT_COUNTER + 1):
            candidate = f"{name_part}_{counter:03d}{extension}"
            if not (Path(directory) / candidate).exists():
                return candidate
        
        # Fallback after exhausting counters: a sub-second timestamp is
        # effectively guaranteed to be unique.
        timestamp = datetime.now().strftime("%H%M%S%f")
        return f"{name_part}_{timestamp}{extension}"
    
    def get_full_output_path(self, directory: str, combination_id: str, method_names: List[str]) -> str:
        """
        Get the full output file path with conflict resolution.
        
        Combines directory creation, filename generation, and conflict resolution
        to provide a complete file path for saving output.
        
        Args:
            directory (str): Output directory path
            combination_id (str): Unique identifier for the combination
            method_names (List[str]): List of preprocessing method names
            
        Returns:
            str: Complete file path for the output file
        """
        filename = self.generate_filename(combination_id, method_names)
        unique_filename = self.resolve_filename_conflict(directory, filename)
        return str(Path(directory) / unique_filename)
    
    def save_excel(self, data: pd.DataFrame, file_path: str) -> bool:
        """
        Save DataFrame to Excel file with error handling and validation.
        
        Converts a pandas DataFrame to Excel format and saves it to the specified
        file path with comprehensive error handling and validation.
        
        Args:
            data (pd.DataFrame): DataFrame to save
            file_path (str): Full path where the Excel file should be saved
            
        Returns:
            bool: True if save was successful, False otherwise
            
        Requirements: 4.5, 6.4 - DataFrame to Excel conversion with error handling
        """
        try:
            # Refuse to write empty output rather than producing a useless file.
            if data is None or data.empty:
                logging.error("Cannot save empty or None DataFrame to %s", file_path)
                return False
            
            # Ensure the target directory exists.
            Path(file_path).parent.mkdir(parents=True, exist_ok=True)
            
            with pd.ExcelWriter(file_path, engine='openpyxl') as writer:
                data.to_excel(writer, sheet_name='Processed_Data', index=False)
                # Improve readability of the written sheet.
                self._auto_fit_columns(writer.sheets['Processed_Data'])
            
            # Read the file back to confirm it was written correctly.
            if not self.verify_excel_file(file_path):
                logging.error("File verification failed for %s", file_path)
                return False
            
            logging.info("Successfully saved Excel file: %s", file_path)
            return True
            
        except PermissionError as e:
            logging.error("Permission denied when saving to %s: %s", file_path, e)
            return False
        except (ValueError, OSError) as e:
            # BUG FIX: the original caught pd.errors.ExcelWriterError, which does
            # not exist in pandas and would itself raise AttributeError when this
            # clause was evaluated. Writer/engine problems surface as ValueError
            # or OSError instead.
            logging.error("Excel writer error when saving to %s: %s", file_path, e)
            return False
        except Exception as e:
            # Last-resort guard: saving must never crash the pipeline.
            logging.error("Unexpected error when saving to %s: %s", file_path, e)
            return False
    
    def _auto_fit_columns(self, worksheet) -> None:
        """
        Auto-adjust column widths of an openpyxl worksheet for readability.
        
        Width is the longest stringified cell value plus padding, capped at
        MAX_COLUMN_WIDTH. Empty (None) cells are ignored so they do not
        inflate column widths.
        
        Args:
            worksheet: openpyxl worksheet whose columns should be resized
        """
        for column in worksheet.columns:
            column_letter = column[0].column_letter
            max_length = max(
                (len(str(cell.value)) for cell in column if cell.value is not None),
                default=0,
            )
            worksheet.column_dimensions[column_letter].width = min(
                max_length + 2, self.MAX_COLUMN_WIDTH
            )
    
    def verify_excel_file(self, file_path: str) -> bool:
        """
        Verify Excel file integrity and basic structure.
        
        Performs basic integrity checks on a saved Excel file to ensure
        it was written correctly and can be read back.
        
        Args:
            file_path (str): Path to the Excel file to verify
            
        Returns:
            bool: True if file passes verification, False otherwise
            
        Requirements: 6.4 - output file verification and integrity checks
        """
        try:
            path = Path(file_path)
            
            if not path.exists():
                logging.error("File does not exist: %s", file_path)
                return False
            
            # A zero-byte file indicates a failed or interrupted write.
            if path.stat().st_size == 0:
                logging.error("File is empty: %s", file_path)
                return False
            
            # Round-trip check: a successful read proves the file is valid Excel.
            try:
                test_df = pd.read_excel(file_path, sheet_name='Processed_Data')
                
                if test_df is None:
                    logging.error("Could not read data from %s", file_path)
                    return False
                
                if len(test_df.columns) == 0:
                    logging.error("No columns found in %s", file_path)
                    return False
                
                logging.debug(
                    "File verification passed for %s: %d rows, %d columns",
                    file_path, len(test_df), len(test_df.columns)
                )
                return True
                
            except Exception as read_error:
                # pd.read_excel can raise many exception types (corrupt file,
                # missing sheet, engine errors) -- all mean verification failed.
                logging.error("Could not read Excel file %s: %s", file_path, read_error)
                return False
            
        except Exception as e:
            logging.error("Error during file verification for %s: %s", file_path, e)
            return False
    
    def generate_summary_report(self, results: Dict[str, bool]) -> str:
        """
        Generate processing summary report.
        
        Creates a comprehensive summary report of all processing results,
        including successful and failed combinations.
        
        Args:
            results (Dict[str, bool]): Dictionary mapping file paths to success status
            
        Returns:
            str: Formatted summary report
            
        Requirements: 6.4 - summary report of all generated files
        """
        successes = [path for path, ok in results.items() if ok]
        failures = [path for path, ok in results.items() if not ok]
        total_files = len(results)
        
        # Guard against division by zero when no combinations were processed.
        success_rate = (len(successes) / total_files * 100) if total_files > 0 else 0
        
        report_lines = [
            "=" * 60,
            "PREPROCESSING PIPELINE SUMMARY REPORT",
            "=" * 60,
            f"Total combinations processed: {total_files}",
            f"Successful outputs: {len(successes)}",
            f"Failed outputs: {len(failures)}",
            f"Success rate: {success_rate:.1f}%",
            "",
            "SUCCESSFUL FILES:",
            "-" * 20,
        ]
        
        report_lines.extend(
            f"{i:3d}. {file_path}" for i, file_path in enumerate(successes, start=1)
        )
        if not successes:
            report_lines.append("    No successful files generated.")
        
        if failures:
            report_lines.extend(["", "FAILED FILES:", "-" * 20])
            report_lines.extend(
                f"{i:3d}. {file_path}" for i, file_path in enumerate(failures, start=1)
            )
        
        report_lines.extend([
            "",
            "=" * 60,
            f"Report generated at: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
            "=" * 60,
        ])
        
        return "\n".join(report_lines)