"""
Hybrid PETSc-Kokkos Validator Implementation.

This module provides the HybridValidator class for validating solver execution
results, extracting performance metrics, and verifying convergence.
"""

import re
import logging
import math
from typing import Dict, List, Optional, Any, Tuple
from pathlib import Path

from gcr_solver_manager.extensions.base_validator import (
    ValidatorInterface, ValidationConfig, ValidationResult, ValidationStatus,
    ConvergenceStatus, ComparisonResult
)
from gcr_solver_manager.extensions.base_runner import RunResult


class HybridValidator(ValidatorInterface):
    """Validator for Hybrid PETSc-Kokkos framework results.

    Parses solver stdout/stderr with regular expressions to extract
    convergence and performance metrics, then scores the run (0-100)
    against the thresholds supplied in a ValidationConfig.
    """
    
    def __init__(self):
        """Initialize the hybrid validator."""
        self.logger = logging.getLogger(__name__)
        
        # Compiled patterns for parsing solver output.  Numeric captures use a
        # permissive character class ([\d\.eE\-\+]+); consumers parse them with
        # float()/int() inside try/except so malformed matches are discarded.
        self.patterns = {
            'iteration': re.compile(r'Iteration\s*[:\s]*(\d+)', re.IGNORECASE),
            'residual': re.compile(r'Residual\s*[:\s]*([\d\.eE\-\+]+)', re.IGNORECASE),
            'final_residual': re.compile(r'Final\s*residual\s*[:\s]*([\d\.eE\-\+]+)', re.IGNORECASE),
            'convergence': re.compile(r'(converged|diverged|stagnated)', re.IGNORECASE),
            'solver_time': re.compile(r'Solver\s*time\s*[:\s]*([\d\.eE\-\+]+)\s*seconds?', re.IGNORECASE),
            'total_time': re.compile(r'Total\s*time\s*[:\s]*([\d\.eE\-\+]+)\s*seconds?', re.IGNORECASE),
            'memory_usage': re.compile(r'Memory\s*usage\s*[:\s]*([\d\.eE\-\+]+)\s*(MB|GB)', re.IGNORECASE),
            'flop_rate': re.compile(r'FLOP\s*rate\s*[:\s]*([\d\.eE\-\+]+)\s*(MFLOPS|GFLOPS)', re.IGNORECASE),
            'parallel_efficiency': re.compile(r'Parallel\s*efficiency\s*[:\s]*([\d\.eE\-\+]+)', re.IGNORECASE)
        }
        
        # Solver-specific validation criteria (heuristic expectations, not
        # hard limits; currently informational).
        self.solver_criteria = {
            'gcr': {
                'typical_convergence_rate': 0.1,  # Residual reduction per iteration
                'max_reasonable_iterations': 500,
                'memory_efficiency_threshold': 0.8
            },
            'ca-gcr': {
                'typical_convergence_rate': 0.1,
                'max_reasonable_iterations': 200,  # CA methods should converge faster
                'communication_reduction_expected': True
            },
            'gmres': {
                'typical_convergence_rate': 0.05,
                'max_reasonable_iterations': 1000,
                'restart_dependency': True
            },
            'bca-gmres': {
                'typical_convergence_rate': 0.05, 
                'max_reasonable_iterations': 400,
                'block_efficiency_expected': True
            }
        }
    
    def validate_result(self, run_result: RunResult, config: ValidationConfig) -> ValidationResult:
        """Validate solver execution results.

        Args:
            run_result: Captured execution outcome (stdout/stderr, duration,
                exit code, pre-extracted metrics).
            config: Validation thresholds and feature toggles.

        Returns:
            A populated ValidationResult; never raises — internal errors are
            recorded in ``result.errors`` with status ERROR.
        """
        self.logger.info("Validating hybrid solver results...")
        
        # Initialize validation result
        result = ValidationResult(
            status=ValidationStatus.INVALID,
            valid=False
        )
        
        try:
            # Check basic execution success
            if not run_result.success:
                result.errors.append(f"Solver execution failed: {run_result.error_message}")
                result.status = ValidationStatus.ERROR
                return result
            
            # Extract metrics from output
            extracted_metrics = self.extract_metrics(run_result)
            result.performance_metrics.update(extracted_metrics)
            
            # FIX: record wall-clock time on the result itself — the threshold
            # check and the scoring below read result.execution_time directly,
            # which was previously never assigned.
            result.execution_time = run_result.duration
            
            # Validate convergence
            if config.check_convergence:
                conv_status, conv_analysis = self._validate_convergence(run_result, config)
                result.convergence_status = conv_status
                result.convergence_achieved = (conv_status == ConvergenceStatus.CONVERGED)
                result.detailed_analysis['convergence'] = conv_analysis
                
                if 'final_residual' in conv_analysis:
                    result.final_residual = conv_analysis['final_residual']
                if 'iterations' in conv_analysis:
                    result.iterations_used = conv_analysis['iterations']
                if 'residual_history' in conv_analysis:
                    result.residual_history = conv_analysis['residual_history']
            
            # Validate accuracy
            if config.check_accuracy:
                accuracy_analysis = self._validate_accuracy(run_result, config)
                result.accuracy_metrics.update(accuracy_analysis)
                result.detailed_analysis['accuracy'] = accuracy_analysis
            
            # Validate performance
            if config.check_performance:
                perf_analysis = self._validate_performance(run_result, config)
                result.detailed_analysis['performance'] = perf_analysis
                
                # Check performance thresholds
                if config.max_execution_time and result.execution_time:
                    if result.execution_time > config.max_execution_time:
                        result.warnings.append(
                            f"Execution time ({result.execution_time:.2f}s) exceeds threshold "
                            f"({config.max_execution_time:.2f}s)"
                        )
            
            # Calculate overall validation score
            result.score = self._calculate_validation_score(result, config)
            
            # Determine overall status: errors dominate, then convergence
            # failure, then warnings, then fully valid.
            if result.errors:
                result.status = ValidationStatus.ERROR
                result.valid = False
            elif not result.convergence_achieved and config.check_convergence:
                result.status = ValidationStatus.INVALID
                result.valid = False
                result.errors.append("Solver did not converge")
            elif result.warnings:
                result.status = ValidationStatus.WARNING
                result.valid = True
            else:
                result.status = ValidationStatus.VALID
                result.valid = True
            
            self.logger.info(f"Validation completed: {result.status.value} (score: {result.score:.1f})")
            
        except Exception as e:
            # Validation must never propagate — fold any failure into the result.
            result.status = ValidationStatus.ERROR
            result.valid = False
            result.errors.append(f"Validation error: {str(e)}")
            self.logger.error(f"Validation failed: {e}", exc_info=True)
        
        return result
    
    def extract_metrics(self, run_result: RunResult) -> Dict[str, Any]:
        """Extract performance and convergence metrics.

        Scans combined stdout+stderr with the precompiled patterns; for
        repeated matches the LAST occurrence wins (solvers print running
        values and the final line is authoritative).

        Returns:
            Flat dict of metric name -> value.
        """
        self.logger.debug("Extracting metrics from solver output...")
        
        metrics = {}
        output = f"{run_result.stdout}\n{run_result.stderr}"
        
        # Basic execution metrics
        metrics['execution_time'] = run_result.duration
        metrics['exit_code'] = run_result.exit_code
        metrics['success'] = run_result.success
        
        # Extract solver-specific metrics using patterns
        for metric_name, pattern in self.patterns.items():
            matches = pattern.findall(output)
            if matches:
                try:
                    if metric_name in ['iteration']:
                        metrics[metric_name] = int(matches[-1])  # Take last match
                    elif metric_name in ['residual', 'final_residual', 'solver_time', 'total_time', 'parallel_efficiency']:
                        metrics[metric_name] = float(matches[-1])
                    elif metric_name in ['memory_usage']:
                        # Two capture groups -> (value, unit); normalize to MB.
                        value, unit = matches[-1]
                        memory_mb = float(value)
                        if unit.upper() == 'GB':
                            memory_mb *= 1024
                        metrics['memory_usage_mb'] = memory_mb
                    elif metric_name in ['flop_rate']:
                        # Two capture groups -> (value, unit); normalize to MFLOPS.
                        value, unit = matches[-1]
                        flop_rate = float(value)
                        if unit.upper() == 'GFLOPS':
                            flop_rate *= 1000
                        metrics['flop_rate_mflops'] = flop_rate
                except (ValueError, IndexError):
                    self.logger.debug(f"Failed to parse {metric_name}: {matches}")
        
        # Extract residual history
        residual_history = self._extract_residual_history(output)
        if residual_history:
            metrics['residual_history'] = residual_history
            metrics['initial_residual'] = residual_history[0]
            # FIX: fall back to the history tail for the final residual, but
            # never overwrite an explicitly reported "Final residual" line.
            metrics.setdefault('final_residual', residual_history[-1])
            metrics['convergence_factor'] = self._calculate_convergence_factor(residual_history)
        
        # Extract solver-specific metrics
        solver_type = self._detect_solver_type(output)
        if solver_type:
            metrics['solver_type'] = solver_type
            solver_metrics = self._extract_solver_specific_metrics(output, solver_type)
            metrics.update(solver_metrics)
        
        # Calculate derived metrics
        derived_metrics = self._calculate_derived_metrics(metrics)
        metrics.update(derived_metrics)
        
        return metrics
    
    def _validate_convergence(self, run_result: RunResult, config: ValidationConfig) -> Tuple[ConvergenceStatus, Dict[str, Any]]:
        """Validate solver convergence behavior.

        Resolution order: explicit converged/diverged/stagnated message,
        then residual-history analysis, then final-residual vs tolerance.

        Returns:
            (status, analysis dict with method/tolerance and any residual data).
        """
        output = f"{run_result.stdout}\n{run_result.stderr}"
        
        # Extract convergence information
        analysis = {
            'method': 'output_parsing',
            'tolerance_used': config.convergence_tolerance
        }
        
        # Check for explicit convergence messages
        conv_pattern = self.patterns['convergence']
        conv_matches = conv_pattern.findall(output)
        if conv_matches:
            last_status = conv_matches[-1].lower()
            analysis['explicit_status'] = last_status
            
            if last_status == 'converged':
                status = ConvergenceStatus.CONVERGED
            elif last_status == 'diverged':
                status = ConvergenceStatus.DIVERGED
            elif last_status == 'stagnated':
                status = ConvergenceStatus.STAGNATED
            else:
                status = ConvergenceStatus.UNKNOWN
        else:
            # Analyze residual history if available
            residual_history = self._extract_residual_history(output)
            if residual_history:
                # Delegate numerical analysis to the base-class helper.
                status, residual_analysis = self.validate_convergence(
                    residual_history, config.convergence_tolerance, config.max_iterations_allowed
                )
                analysis.update(residual_analysis)
            else:
                # Check final residual against tolerance
                final_residual = None
                residual_matches = self.patterns['final_residual'].findall(output) or \
                                self.patterns['residual'].findall(output)
                if residual_matches:
                    try:
                        final_residual = float(residual_matches[-1])
                        analysis['final_residual'] = final_residual
                        
                        if final_residual <= config.convergence_tolerance:
                            status = ConvergenceStatus.CONVERGED
                        else:
                            status = ConvergenceStatus.DIVERGED
                    except ValueError:
                        status = ConvergenceStatus.UNKNOWN
                else:
                    status = ConvergenceStatus.UNKNOWN
        
        return status, analysis
    
    def _validate_accuracy(self, run_result: RunResult, config: ValidationConfig) -> Dict[str, Any]:
        """Validate solution accuracy from reported norms and stability checks."""
        accuracy_metrics = {}
        
        # Extract norm information
        output = f"{run_result.stdout}\n{run_result.stderr}"
        
        # Look for solution norms; pair each metric key with its pattern so
        # the association cannot drift out of sync.
        norm_specs = [
            ('l2_norm', re.compile(r'L2\s*norm\s*[:\s]*([\d\.eE\-\+]+)', re.IGNORECASE)),
            ('inf_norm', re.compile(r'Infinity\s*norm\s*[:\s]*([\d\.eE\-\+]+)', re.IGNORECASE)),
            ('solution_norm', re.compile(r'Solution\s*norm\s*[:\s]*([\d\.eE\-\+]+)', re.IGNORECASE))
        ]
        
        for norm_name, pattern in norm_specs:
            matches = pattern.findall(output)
            if matches:
                try:
                    accuracy_metrics[norm_name] = float(matches[-1])
                except ValueError:
                    pass  # malformed numeric capture; skip this norm
        
        # Check against reference solution if provided
        if config.reference_solution and config.reference_solution.exists():
            reference_accuracy = self._compare_with_reference(run_result, config.reference_solution)
            accuracy_metrics.update(reference_accuracy)
        
        # Validate numerical stability
        stability_analysis = self._check_numerical_stability(output)
        accuracy_metrics.update(stability_analysis)
        
        return accuracy_metrics
    
    def _validate_performance(self, run_result: RunResult, config: ValidationConfig) -> Dict[str, Any]:
        """Validate performance characteristics (timing, memory, parallelism)."""
        perf_analysis = {}
        
        # Basic timing analysis
        perf_analysis['total_execution_time'] = run_result.duration
        
        # Extract solver timing if available (prefer dedicated solver time).
        solver_time = None
        for metric_name in ['solver_time', 'total_time']:
            if metric_name in run_result.metrics:
                solver_time = run_result.metrics[metric_name]
                break
        
        # FIX: explicit None check — a legitimate 0.0 solver time is falsy and
        # was previously dropped; also guard the division for zero-duration runs.
        if solver_time is not None:
            perf_analysis['solver_time'] = solver_time
            perf_analysis['overhead_time'] = run_result.duration - solver_time
            if run_result.duration:
                perf_analysis['solver_efficiency'] = solver_time / run_result.duration
        
        # Memory usage analysis
        if 'memory_usage_mb' in run_result.metrics:
            memory_mb = run_result.metrics['memory_usage_mb']
            perf_analysis['memory_usage_mb'] = memory_mb
            
            # Compare with theoretical requirements if available
            if 'problem_size' in run_result.metrics:
                theoretical_memory = self._estimate_memory_requirements(run_result.metrics)
                if theoretical_memory:
                    perf_analysis['memory_efficiency'] = theoretical_memory / memory_mb
        
        # Parallel efficiency analysis
        if 'parallel_efficiency' in run_result.metrics:
            perf_analysis['parallel_efficiency'] = run_result.metrics['parallel_efficiency']
        
        # FLOP rate analysis
        if 'flop_rate_mflops' in run_result.metrics:
            perf_analysis['flop_rate_mflops'] = run_result.metrics['flop_rate_mflops']
        
        return perf_analysis
    
    def _extract_residual_history(self, output: str) -> List[float]:
        """Extract residual history from solver output.

        Tries progressively looser formats; the first pattern that yields
        parseable floats wins.
        """
        residuals = []
        
        # Pattern for iteration lines with residuals
        iter_patterns = [
            re.compile(r'(?:Iteration|Iter)\s*(\d+).*?(?:Residual|Res).*?([\d\.eE\-\+]+)', re.IGNORECASE),
            re.compile(r'(\d+)\s+([\d\.eE\-\+]+)', re.MULTILINE),  # Simple numeric format
            re.compile(r'KSP Residual norm\s+([\d\.eE\-\+]+)', re.IGNORECASE)  # PETSc format
        ]
        
        for pattern in iter_patterns:
            matches = pattern.findall(output)
            if matches:
                try:
                    # FIX: findall returns tuples only for multi-group patterns;
                    # the old len(...) == 2 test misread a two-character residual
                    # string as an (iteration, residual) pair.
                    if isinstance(matches[0], tuple):  # (iteration, residual) pairs
                        residuals = [float(match[1]) for match in matches]
                    else:  # Single residual values
                        residuals = [float(match) for match in matches]
                    break
                except (ValueError, IndexError):
                    continue
        
        return residuals
    
    def _calculate_convergence_factor(self, residual_history: List[float]) -> Optional[float]:
        """Calculate average convergence factor from residual history.

        Returns the geometric mean of consecutive residual ratios, or None
        when fewer than two positive residuals are available.
        """
        if len(residual_history) < 2:
            return None
        
        try:
            # Collect per-step reduction factors, skipping non-positive values
            # that would poison the geometric mean.
            factors = []
            for i in range(1, len(residual_history)):
                if residual_history[i] > 0 and residual_history[i-1] > 0:
                    factor = residual_history[i] / residual_history[i-1]
                    factors.append(factor)
            
            if factors:
                # Geometric mean
                product = 1.0
                for factor in factors:
                    product *= factor
                return product ** (1.0 / len(factors))
        
        except (ValueError, ZeroDivisionError):
            pass
        
        return None
    
    def _detect_solver_type(self, output: str) -> Optional[str]:
        """Detect solver type from output; first matching pattern wins."""
        solver_patterns = {
            'gcr': re.compile(r'GCR|Generalized Conjugate Residual', re.IGNORECASE),
            'ca-gcr': re.compile(r'CA-GCR|Communication.Avoiding.GCR', re.IGNORECASE),
            'gmres': re.compile(r'GMRES|Generalized Minimal Residual', re.IGNORECASE),
            'bca-gmres': re.compile(r'BCA-GMRES|Block.Communication.Avoiding.GMRES', re.IGNORECASE)
        }
        
        for solver_type, pattern in solver_patterns.items():
            if pattern.search(output):
                return solver_type
        
        return None
    
    def _extract_solver_specific_metrics(self, output: str, solver_type: str) -> Dict[str, Any]:
        """Extract metrics specific to the detected solver family."""
        metrics = {}
        
        if solver_type in ['ca-gcr', 'bca-gmres']:
            # Communication-avoiding metrics
            comm_pattern = re.compile(r'Communication\s*rounds\s*[:\s]*(\d+)', re.IGNORECASE)
            matches = comm_pattern.findall(output)
            if matches:
                metrics['communication_rounds'] = int(matches[-1])
        
        if solver_type in ['gmres', 'bca-gmres']:
            # Restart information
            restart_pattern = re.compile(r'Restart\s*[:\s]*(\d+)', re.IGNORECASE)
            matches = restart_pattern.findall(output)
            if matches:
                metrics['restart_value'] = int(matches[-1])
        
        if 'bca' in solver_type:
            # Block size information
            block_pattern = re.compile(r'Block\s*size\s*[:\s]*(\d+)', re.IGNORECASE)
            matches = block_pattern.findall(output)
            if matches:
                metrics['block_size'] = int(matches[-1])
        
        return metrics
    
    def _calculate_derived_metrics(self, metrics: Dict[str, Any]) -> Dict[str, Any]:
        """Calculate derived performance metrics from already-extracted values."""
        derived = {}
        
        # Convergence rate: orders of magnitude of residual reduction per iteration.
        if 'residual_history' in metrics:
            residuals = metrics['residual_history']
            if len(residuals) >= 2:
                initial_residual = residuals[0]
                final_residual = residuals[-1]
                iterations = len(residuals) - 1
                
                if initial_residual > 0 and final_residual > 0:
                    reduction_factor = final_residual / initial_residual
                    if reduction_factor > 0:
                        derived['convergence_rate'] = -math.log10(reduction_factor) / iterations
        
        # Time per iteration
        if 'solver_time' in metrics and 'iteration' in metrics:
            if metrics['iteration'] > 0:
                derived['time_per_iteration'] = metrics['solver_time'] / metrics['iteration']
        
        # Memory per process (if MPI info available)
        if 'memory_usage_mb' in metrics:
            # Would need MPI process count for accurate calculation
            derived['memory_per_process_mb'] = metrics['memory_usage_mb']
        
        return derived
    
    def _calculate_validation_score(self, result: ValidationResult, config: ValidationConfig) -> float:
        """Calculate overall validation score (0-100).

        Weighting: convergence 40, accuracy 30, performance 20, stability 10.
        """
        score = 0.0
        max_score = 100.0
        
        # Convergence score (40 points)
        if result.convergence_achieved:
            score += 40.0
        elif result.convergence_status == ConvergenceStatus.STAGNATED:
            score += 20.0  # Partial credit for stagnation
        
        # Accuracy score (30 points)
        if result.accuracy_metrics:
            # Base score for having accuracy metrics
            score += 15.0
            
            # Additional score based on solution quality
            if 'solution_error' in result.accuracy_metrics:
                error = result.accuracy_metrics['solution_error']
                if error < config.solution_accuracy_threshold:
                    score += 15.0
                else:
                    # Partial credit decays with orders of magnitude above threshold.
                    partial_credit = max(0, 15.0 * (1.0 - math.log10(error / config.solution_accuracy_threshold)))
                    score += min(15.0, partial_credit)
        
        # Performance score (20 points)
        if result.execution_time:
            # Base score for successful execution
            score += 10.0
            
            # Additional score for performance
            if config.max_execution_time and result.execution_time <= config.max_execution_time:
                score += 10.0
            else:
                # Partial credit based on performance
                if config.max_execution_time:
                    ratio = min(1.0, config.max_execution_time / result.execution_time)
                    score += 10.0 * ratio
                else:
                    score += 5.0  # Default partial credit
        
        # Stability score (10 points)
        if not result.errors:
            score += 5.0
        if len(result.warnings) <= 2:  # Few warnings are acceptable
            score += 5.0
        elif len(result.warnings) <= 5:
            score += 2.5
        
        return min(max_score, score)
    
    def _compare_with_reference(self, run_result: RunResult, reference_path: Path) -> Dict[str, Any]:
        """Compare results with reference solution.

        Placeholder: records only availability and path until the hybrid
        framework's solution file format is specified.
        """
        comparison = {
            'reference_available': True,
            'reference_path': str(reference_path)
        }
        
        # This would be implemented based on the specific file format
        # used by the hybrid framework for solution output
        
        return comparison
    
    def _check_numerical_stability(self, output: str) -> Dict[str, Any]:
        """Check for numerical stability issues (NaN/Inf in solver output)."""
        stability = {}
        
        # Look for NaN or infinity values
        nan_pattern = re.compile(r'\bnan\b|\binf\b|\binfinity\b', re.IGNORECASE)
        if nan_pattern.search(output):
            stability['numerical_issues'] = True
            stability['issue_type'] = 'NaN or Inf detected'
        else:
            stability['numerical_issues'] = False
        
        return stability
    
    def _estimate_memory_requirements(self, metrics: Dict[str, Any]) -> Optional[float]:
        """Estimate theoretical memory requirements.

        Placeholder: returns None until problem-size-based modelling exists.
        """
        return None
    
    def supports_framework(self, framework_name: str) -> bool:
        """Check if validator supports a specific framework (case-insensitive)."""
        supported_frameworks = ['hybrid', 'petsc-kokkos', 'hybrid-petsc-kokkos']
        return framework_name.lower() in supported_frameworks