"""
Base Validator Interface

Defines the abstract interface for validating solver results.
"""

from abc import ABC, abstractmethod
from typing import Dict, List, Optional, Any, Union
from pathlib import Path


class ValidationResult:
    """Outcome of a single validation operation.

    Attributes:
        success: Whether the validation passed.
        score: Optional numeric quality score (None when not applicable).
        message: Human-readable summary of the outcome.
        details: Extra structured information; always a dict, never None.
    """

    def __init__(self, success: bool, score: Optional[float] = None,
                 message: str = "", details: Optional[Dict[str, Any]] = None):
        self.success = success
        self.score = score
        self.message = message
        # Only substitute a fresh dict when details is truly absent.
        # (The previous `details or {}` also discarded a caller-supplied
        # empty dict, breaking shared-mutation expectations.)
        self.details = details if details is not None else {}

    def __bool__(self) -> bool:
        """Allow truthiness checks: ``if result:`` mirrors ``success``."""
        return self.success

    def __repr__(self) -> str:
        """Unambiguous representation for debugging and log output."""
        return (f"{type(self).__name__}(success={self.success!r}, "
                f"score={self.score!r}, message={self.message!r})")


class BaseValidator(ABC):
    """
    Abstract base class for framework validators.
    
    Validators check solver results for correctness and performance.
    """
    
    def __init__(self, extension_name: str):
        """
        Initialize the validator.
        
        Args:
            extension_name: Name of the extension this validator belongs to
        """
        self.extension_name = extension_name
        
    @abstractmethod
    def can_validate(self, result_path: Path) -> bool:
        """
        Check if this validator can validate the given results.
        
        Args:
            result_path: Path to result file or directory
            
        Returns:
            True if validator can handle these results
        """
        pass
    
    @abstractmethod
    def validate_correctness(self, result_path: Path, 
                           reference_path: Optional[Path] = None,
                           tolerance: float = 1e-6) -> ValidationResult:
        """
        Validate the correctness of solver results.
        
        Args:
            result_path: Path to solver results
            reference_path: Path to reference solution (if available)
            tolerance: Numerical tolerance for comparisons
            
        Returns:
            ValidationResult indicating correctness
        """
        pass
    
    @abstractmethod
    def validate_convergence(self, result_path: Path,
                           expected_iterations: Optional[int] = None) -> ValidationResult:
        """
        Validate solver convergence behavior.
        
        Args:
            result_path: Path to solver results
            expected_iterations: Expected number of iterations
            
        Returns:
            ValidationResult indicating convergence quality
        """
        pass
    
    def validate_performance(self, result_path: Path,
                           benchmark_path: Optional[Path] = None) -> ValidationResult:
        """
        Validate solver performance metrics.
        
        Args:
            result_path: Path to solver results
            benchmark_path: Path to benchmark results (if available)
            
        Returns:
            ValidationResult indicating performance quality
        """
        return ValidationResult(success=True, message="Performance validation not implemented")
    
    def extract_metrics(self, result_path: Path) -> Dict[str, Any]:
        """
        Extract performance and quality metrics from results.
        
        Args:
            result_path: Path to solver results
            
        Returns:
            Dictionary of extracted metrics
        """
        return {}
    
    def get_validation_info(self, result_path: Path) -> Dict[str, Any]:
        """
        Get information about validation capabilities for these results.
        
        Args:
            result_path: Path to result file or directory
            
        Returns:
            Dictionary with validation information
        """
        return {
            'validator': self.__class__.__name__,
            'extension': self.extension_name,
            'result_path': str(result_path),
        }
    
    def validate_format(self, result_path: Path) -> ValidationResult:
        """
        Validate that results are in the expected format.
        
        Args:
            result_path: Path to solver results
            
        Returns:
            ValidationResult indicating format validity
        """
        if not result_path.exists():
            return ValidationResult(success=False, message=f"Result path does not exist: {result_path}")
        
        return ValidationResult(success=True, message="Format validation passed")
    
    def get_default_tolerance(self) -> float:
        """
        Get default numerical tolerance for comparisons.
        
        Returns:
            Default tolerance value
        """
        return 1e-6
    
    def compare_solutions(self, solution1_path: Path, solution2_path: Path,
                         tolerance: Optional[float] = None) -> ValidationResult:
        """
        Compare two solution files.
        
        Args:
            solution1_path: Path to first solution
            solution2_path: Path to second solution  
            tolerance: Numerical tolerance for comparison
            
        Returns:
            ValidationResult indicating similarity
        """
        if tolerance is None:
            tolerance = self.get_default_tolerance()
            
        return ValidationResult(success=True, message="Solution comparison not implemented")
    
    def generate_report(self, result_path: Path, 
                       validation_results: List[ValidationResult]) -> str:
        """
        Generate a validation report.
        
        Args:
            result_path: Path to solver results
            validation_results: List of validation results
            
        Returns:
            Formatted validation report
        """
        report_lines = [
            f"Validation Report for {result_path}",
            f"Validator: {self.__class__.__name__}",
            f"Extension: {self.extension_name}",
            "-" * 50,
        ]
        
        for i, result in enumerate(validation_results, 1):
            status = "PASS" if result.success else "FAIL"
            report_lines.append(f"{i}. {status}: {result.message}")
            if result.score is not None:
                report_lines.append(f"   Score: {result.score}")
        
        return "\n".join(report_lines)
    
    def supports_format(self, format_name: str) -> bool:
        """
        Check if validator supports a specific result format.
        
        Args:
            format_name: Name of the format (e.g., 'hdf5', 'csv', 'json')
            
        Returns:
            True if format is supported
        """
        return False
    
    def get_supported_formats(self) -> List[str]:
        """
        Get list of supported result formats.
        
        Returns:
            List of supported format names
        """
        return []
    
    def __str__(self) -> str:
        """String representation of the validator."""
        return f"{self.__class__.__name__}({self.extension_name})"
        
    def __repr__(self) -> str:
        """Detailed string representation."""
        return f"<{self.__class__.__name__}(extension='{self.extension_name}')>"