"""
Data loading utilities for Excel file processing.

This module provides the DataLoader class for loading Excel files into pandas DataFrames,
validating data structure, and providing data preview functionality.
"""

import pandas as pd
import numpy as np
from typing import Optional, Tuple, List
import logging

from .exceptions import FileOperationError, DataValidationError
from .validation import InputValidator, DataQualityChecker
from .error_recovery import handle_file_operations, handle_data_operations, ErrorRecoveryManager


class DataLoader:
    """
    Handles Excel file loading and data validation operations.
    
    This class provides methods for loading Excel files into pandas DataFrames,
    validating the expected data structure, and providing data preview capabilities.
    """
    
    def __init__(self, error_recovery_manager: Optional[ErrorRecoveryManager] = None):
        """
        Initialize the DataLoader.

        Args:
            error_recovery_manager: Optional error recovery manager for handling
                errors; when omitted (or falsy) a fresh ErrorRecoveryManager
                bound to this loader's logger is created.
        """
        self.logger = logging.getLogger(__name__)
        # Fall back to a default manager when none was supplied.
        if not error_recovery_manager:
            error_recovery_manager = ErrorRecoveryManager(self.logger)
        self.error_recovery_manager = error_recovery_manager
    
    @handle_file_operations()
    def load_excel(self, file_path: str) -> pd.DataFrame:
        """
        Load Excel file into DataFrame with comprehensive error handling.

        Args:
            file_path (str): Path to the Excel file to load

        Returns:
            pd.DataFrame: Loaded DataFrame

        Raises:
            FileOperationError: If file loading fails
            DataValidationError: If loaded data is invalid
        """
        try:
            # Validate file path first
            validator = InputValidator()
            validator.validate_file_path(file_path, must_exist=True)

            # Try to load the Excel file
            df = pd.read_excel(file_path)

            # Validate basic DataFrame structure
            validator.validate_dataframe(df, allow_empty=False)

            # Log successful loading
            self.logger.info(f"Successfully loaded Excel file '{file_path}' with {len(df)} rows and {len(df.columns)} columns.")

            return df

        except (DataValidationError, FileOperationError):
            # Re-raise our custom exceptions unchanged.  This handler MUST
            # precede the broad `except Exception` below — previously it came
            # after, making it unreachable, so validator-raised errors were
            # wrapped as "Unexpected error" instead of propagating as-is.
            raise
        except FileNotFoundError:
            raise FileOperationError(
                "Excel file not found",
                file_path=file_path,
                operation="load"
            )
        except PermissionError:
            raise FileOperationError(
                "Permission denied accessing file",
                file_path=file_path,
                operation="load",
                details="Check file permissions"
            )
        except pd.errors.EmptyDataError:
            raise DataValidationError(
                "Excel file is empty or contains no data",
                validation_type="file_content",
                expected_format="Non-empty Excel file",
                actual_format="Empty file"
            )
        except Exception as e:
            # Heuristic: treat engine/parser errors as a bad Excel file format.
            if "Excel" in str(e) or "xlrd" in str(e) or "openpyxl" in str(e):
                raise FileOperationError(
                    "Invalid Excel file format",
                    file_path=file_path,
                    operation="load",
                    details=str(e)
                )
            # Wrap anything else as a generic load failure.
            raise FileOperationError(
                "Unexpected error loading Excel file",
                file_path=file_path,
                operation="load",
                details=str(e)
            )
    
    @handle_data_operations()
    def validate_data_structure(self, data: pd.DataFrame) -> bool:
        """
        Validate that the DataFrame has the expected structure for preprocessing.

        Expected structure:
        - First column: Classification IDs or labels (can be any type)
        - Remaining columns: Numerical variable values

        Args:
            data (pd.DataFrame): DataFrame to validate

        Returns:
            bool: True if validation passes

        Raises:
            DataValidationError: If validation fails
        """
        try:
            # Delegate the detailed checks to the shared InputValidator.
            InputValidator().validate_excel_data_structure(data)
            self.logger.info(f"Data structure validation passed: {len(data)} rows, {len(data.columns)} columns.")
            return True
        except DataValidationError:
            # Validation failures propagate untouched.
            raise
        except Exception as e:
            # Anything else is surfaced as a structure-validation error.
            raise DataValidationError(
                "Unexpected error during data structure validation",
                validation_type="structure",
                details=str(e)
            )
    
    def get_data_preview(self, data: pd.DataFrame, max_rows: int = 5) -> str:
        """
        Generate a formatted preview of the data structure.

        Args:
            data (pd.DataFrame): DataFrame to preview
            max_rows (int): Maximum number of rows to include in preview

        Returns:
            str: Formatted data preview string
        """
        try:
            lines = [
                "=== DATA PREVIEW ===",
                f"Shape: {data.shape[0]} rows × {data.shape[1]} columns",
                "",
            ]

            # Column layout: first column is the ID, the rest are variables.
            cols = list(data.columns)
            if cols:
                id_col, value_cols = cols[0], cols[1:]
                lines.append("Column Structure:")
                lines.append(f"  ID Column: '{id_col}' ({data[id_col].dtype})")
                lines.append(f"  Numeric Columns: {len(value_cols)}")
                # Only the first five variable columns are listed individually.
                for idx, name in enumerate(value_cols[:5], start=1):
                    lines.append(f"    {idx}. '{name}' ({data[name].dtype})")
                if len(value_cols) > 5:
                    lines.append(f"    ... and {len(value_cols) - 5} more columns")
                lines.append("")

            # A truncated tabular sample of the first max_rows rows.
            lines.append("Sample Data:")
            lines.append(data.head(max_rows).to_string(max_cols=6, max_colwidth=15))
            if len(data) > max_rows:
                lines.append(f"... and {len(data) - max_rows} more rows")

            # Per-column missing-value summary.
            lines.extend(["", "Data Quality:"])
            null_counts = data.isnull().sum()
            if null_counts.any():
                lines.append("  Missing values found:")
                for name, count in null_counts[null_counts > 0].items():
                    pct = (count / len(data)) * 100
                    lines.append(f"    '{name}': {count} ({pct:.1f}%)")
            else:
                lines.append("  No missing values detected")

            return "\n".join(lines)

        except Exception as e:
            # Best-effort: a preview must never crash the caller.
            return f"Error generating data preview: {str(e)}"
    
    def load_and_validate(self, file_path: str) -> Tuple[Optional[pd.DataFrame], bool, str]:
        """
        Load Excel file and validate its structure in one operation.

        Unlike load_excel/validate_data_structure, this convenience wrapper
        never raises for expected failures: it converts them into the
        (dataframe, is_valid, message) result tuple.

        Args:
            file_path (str): Path to the Excel file

        Returns:
            Tuple[Optional[pd.DataFrame], bool, str]: (dataframe, is_valid, message)
        """
        # load_excel raises on failure (it never returns None), so failures
        # must be caught and converted to the tuple contract here.
        try:
            data = self.load_excel(file_path)
        except (FileOperationError, DataValidationError) as e:
            return None, False, f"Failed to load Excel file: {e}"

        # validate_data_structure returns a bare bool (True) on success and
        # raises DataValidationError on failure — it does NOT return a
        # (bool, message) tuple, so it must not be unpacked.
        try:
            self.validate_data_structure(data)
        except DataValidationError as e:
            return data, False, str(e)

        return data, True, "Data loaded and validated successfully."
    
    def analyze_data_quality(self, data: pd.DataFrame) -> dict:
        """
        Perform comprehensive data quality analysis.

        Args:
            data (pd.DataFrame): DataFrame to analyze

        Returns:
            dict: Data quality analysis results, or an {"error": ...} dict
                when the analysis itself fails.
        """
        try:
            result = DataQualityChecker.analyze_data_quality(data)
        except Exception as e:
            # Log and degrade to an error payload rather than raising.
            self.logger.error(f"Error during data quality analysis: {e}")
            result = {"error": f"Data quality analysis failed: {str(e)}"}
        return result
    
    def check_data_consistency(self, data: pd.DataFrame) -> List[str]:
        """
        Check for data consistency issues.

        Args:
            data (pd.DataFrame): DataFrame to check

        Returns:
            List[str]: List of consistency issues found; a single-element
                list describing the failure if the check itself errors.
        """
        try:
            issues = DataQualityChecker.check_data_consistency(data)
        except Exception as e:
            # Log and degrade to an error entry rather than raising.
            self.logger.error(f"Error during data consistency check: {e}")
            issues = [f"Data consistency check failed: {str(e)}"]
        return issues