"""
PETSc-Only Validator Implementation.

This validator checks PETSc solver results for convergence and accuracy.
"""

import logging
import math
import re
from typing import Any, Dict, List, Optional

from gcr_solver_manager.extensions.base_validator import (
    ValidatorInterface, ValidationConfig, ValidationResult, ComparisonResult
)
from gcr_solver_manager.extensions.base_runner import RunResult

logger = logging.getLogger(__name__)


class PETScOnlyValidator(ValidatorInterface):
    """Validator for PETSc-Only framework."""
    
    def validate_result(self, run_result: RunResult, config: ValidationConfig) -> ValidationResult:
        """Validate a PETSc solver execution against the supplied criteria.

        Checks execution success, convergence, accuracy, and performance,
        plus PETSc-specific sanity checks, accumulating warnings and errors
        along the way.

        Args:
            run_result: Completed solver run to validate.
            config: Validation criteria (tolerance, convergence checking, ...).

        Returns:
            A ValidationResult summarizing validity, convergence, metrics,
            warnings, and errors.
        """
        warning_list: List[str] = []
        error_list: List[str] = []

        try:
            # A failed process cannot be validated any further.
            if not run_result.success:
                error_list.append(
                    f"PETSc solver execution failed with exit code {run_result.exit_code}"
                )
                return ValidationResult(
                    valid=False,
                    convergence_achieved=False,
                    accuracy_metrics={},
                    performance_metrics={},
                    warnings=warning_list,
                    errors=error_list,
                )

            # Pull solver metrics out of the captured output.
            solver_metrics = self.extract_metrics(run_result)

            # Convergence failure is an error when required, a warning otherwise.
            converged = self._check_convergence(solver_metrics, config)
            if not converged:
                if config.check_convergence:
                    error_list.append("PETSc solver did not converge within specified criteria")
                else:
                    warning_list.append("PETSc solver convergence not verified")

            accuracy = self._compute_accuracy_metrics(solver_metrics, config)
            performance = self._compute_performance_metrics(run_result, solver_metrics)

            # May append additional warnings/errors in place.
            self._validate_petsc_specific(solver_metrics, warning_list, error_list)

            # Valid only when execution succeeded, convergence is satisfied
            # (or was not required), and no errors were recorded.
            convergence_ok = converged or not config.check_convergence
            overall_valid = run_result.success and convergence_ok and not error_list

            return ValidationResult(
                valid=overall_valid,
                convergence_achieved=converged,
                accuracy_metrics=accuracy,
                performance_metrics=performance,
                warnings=warning_list,
                errors=error_list,
            )

        except Exception as e:
            # Defensive boundary: report validation failure instead of propagating.
            error_list.append(f"PETSc validation failed: {e}")
            return ValidationResult(
                valid=False,
                convergence_achieved=False,
                accuracy_metrics={},
                performance_metrics={},
                warnings=warning_list,
                errors=error_list,
            )
    
    def extract_metrics(self, run_result: RunResult) -> Dict[str, Any]:
        """Extract PETSc-specific performance and convergence metrics."""
        metrics = {}
        
        try:
            # Start with any metrics from the runner
            if run_result.metrics:
                metrics.update(run_result.metrics)
            
            # Combine stdout and stderr for analysis
            output = run_result.stdout + "\n" + run_result.stderr
            lines = output.split('\n')
            
            # Extract detailed PETSc information
            for line in lines:
                line_stripped = line.strip()
                line_lower = line_stripped.lower()
                
                # KSP convergence information
                if 'ksp object' in line_lower:
                    metrics['ksp_configured'] = True
                
                # Extract solver type
                if 'type:' in line_lower and any(solver in line_lower for solver in ['gcr', 'gmres', 'cg', 'bicg']):
                    for solver in ['gcr', 'gmres', 'cg', 'bicg']:
                        if solver in line_lower:
                            metrics['actual_solver_type'] = solver
                            break
                
                # Matrix information
                if 'mat object' in line_lower:
                    metrics['matrix_configured'] = True
                
                if 'rows=' in line_lower and 'cols=' in line_lower:
                    # Extract matrix dimensions
                    row_match = re.search(r'rows=(\d+)', line_lower)
                    col_match = re.search(r'cols=(\d+)', line_lower)
                    if row_match:
                        metrics['matrix_rows'] = int(row_match.group(1))
                    if col_match:
                        metrics['matrix_cols'] = int(col_match.group(1))
                
                # Preconditioning information
                if 'pc object' in line_lower:
                    metrics['preconditioner_configured'] = True
                
                # Memory usage (if reported)
                if 'memory' in line_lower and 'used' in line_lower:
                    memory_match = re.search(r'([0-9.]+)\s*[kmg]?b', line_lower)
                    if memory_match:
                        metrics['memory_used'] = memory_match.group(1)
                
                # Timing information from PETSc logs
                if 'time =' in line_lower or 'elapsed time' in line_lower:
                    time_match = re.search(r'([0-9.]+)\s*sec', line_lower)
                    if time_match:
                        metrics['petsc_timing'] = float(time_match.group(1))
                
                # Error detection
                if 'error' in line_lower and 'petsc' in line_lower:
                    metrics['petsc_error'] = line_stripped
            
            # Add execution metrics
            metrics['total_execution_time'] = run_result.duration
            metrics['exit_code'] = run_result.exit_code
            
            # Compute derived metrics
            if 'initial_residual_norm' in metrics and 'final_residual_norm' in metrics:
                initial = metrics['initial_residual_norm']
                final = metrics['final_residual_norm']
                if initial > 0:
                    metrics['residual_reduction_factor'] = final / initial
                    metrics['residual_reduction_orders'] = -1 * (final / initial).log10() if final > 0 else float('inf')
                    
        except Exception as e:
            logger.debug(f"Failed to extract PETSc metrics: {e}")
        
        return metrics
    
    def compare_results(self, result1: RunResult, result2: RunResult) -> ComparisonResult:
        """Compare results from two PETSc runs.

        Compares iteration counts, final residual norms, execution times,
        convergence status, and solver types, and judges the runs "similar"
        when residuals, timing, convergence, and solver type all agree
        within fixed thresholds.

        Args:
            result1: First run to compare.
            result2: Second run to compare.

        Returns:
            A ComparisonResult with similarity verdict, accuracy/performance
            differences, and a detailed per-metric difference map.
        """
        try:
            metrics1 = self.extract_metrics(result1)
            metrics2 = self.extract_metrics(result2)

            differences = {}

            # Compare KSP iterations
            if 'ksp_iterations' in metrics1 and 'ksp_iterations' in metrics2:
                iter1 = metrics1['ksp_iterations']
                iter2 = metrics2['ksp_iterations']
                differences['iteration_count_diff'] = abs(iter1 - iter2)
                differences['iteration_count_relative_diff'] = abs(iter1 - iter2) / max(iter1, iter2) if max(iter1, iter2) > 0 else 0

            # Compare final residual norms (inf if either run lacks one)
            accuracy_diff = float('inf')
            if 'final_residual_norm' in metrics1 and 'final_residual_norm' in metrics2:
                norm1 = metrics1['final_residual_norm']
                norm2 = metrics2['final_residual_norm']
                accuracy_diff = abs(norm1 - norm2) / max(norm1, norm2) if max(norm1, norm2) > 0 else 0
                differences['residual_norm_relative_diff'] = accuracy_diff

            # Compare execution times.
            # BUG FIX: guard the divisor — two zero-duration runs previously
            # raised ZeroDivisionError and degraded the whole comparison to
            # the error path.
            max_duration = max(result1.duration, result2.duration)
            perf_diff = abs(result1.duration - result2.duration) / max_duration if max_duration > 0 else 0.0
            differences['execution_time_relative_diff'] = perf_diff

            # Compare convergence status
            conv1 = metrics1.get('converged', False)
            conv2 = metrics2.get('converged', False)
            differences['convergence_status_same'] = conv1 == conv2

            # Compare solver types (should be same for meaningful comparison)
            solver1 = metrics1.get('actual_solver_type', 'unknown')
            solver2 = metrics2.get('actual_solver_type', 'unknown')
            differences['solver_type_same'] = solver1 == solver2

            # Determine similarity for PETSc results
            similar = (
                accuracy_diff < 1e-6 and    # Very small residual norm difference
                perf_diff < 0.3 and         # Less than 30% performance difference
                conv1 == conv2 and          # Same convergence status
                solver1 == solver2          # Same solver type
            )

            return ComparisonResult(
                similar=similar,
                accuracy_difference=accuracy_diff,
                performance_difference=perf_diff,
                differences=differences
            )

        except Exception as e:
            logger.error(f"Failed to compare PETSc results: {e}")
            return ComparisonResult(
                similar=False,
                accuracy_difference=float('inf'),
                performance_difference=float('inf'),
                differences={'error': str(e)}
            )
    
    def _check_convergence(self, metrics: Dict[str, Any], config: ValidationConfig) -> bool:
        """Check if PETSc solver converged based on metrics and configuration."""
        # Check explicit convergence flag from PETSc
        if 'converged' in metrics:
            return metrics['converged']
        
        # Check convergence reason
        if 'convergence_reason' in metrics:
            reason = metrics['convergence_reason']
            return 'CONVERGED' in reason
        
        # Check final residual norm against tolerance
        if 'final_residual_norm' in metrics:
            final_norm = metrics['final_residual_norm']
            return final_norm < config.tolerance
        
        # Check if we have iteration count within bounds
        if 'ksp_iterations' in metrics:
            iterations = metrics['ksp_iterations']
            return iterations < config.max_iterations
        
        # If no explicit convergence information, assume converged if execution succeeded
        return True
    
    def _compute_accuracy_metrics(self, metrics: Dict[str, Any], config: ValidationConfig) -> Dict[str, float]:
        """Compute PETSc-specific accuracy metrics."""
        accuracy_metrics = {}
        
        # Basic residual information
        if 'final_residual_norm' in metrics:
            accuracy_metrics['final_residual_norm'] = metrics['final_residual_norm']
        
        if 'initial_residual_norm' in metrics:
            accuracy_metrics['initial_residual_norm'] = metrics['initial_residual_norm']
        
        # Residual reduction metrics
        if 'residual_reduction_factor' in metrics:
            accuracy_metrics['residual_reduction_factor'] = metrics['residual_reduction_factor']
        
        if 'residual_reduction_orders' in metrics:
            accuracy_metrics['residual_reduction_orders'] = metrics['residual_reduction_orders']
        
        # Compare against tolerance
        if 'final_residual_norm' in metrics:
            final_norm = metrics['final_residual_norm']
            accuracy_metrics['tolerance_ratio'] = final_norm / config.tolerance
        
        return accuracy_metrics
    
    def _compute_performance_metrics(self, run_result: RunResult, metrics: Dict[str, Any]) -> Dict[str, float]:
        """Compute PETSc-specific performance metrics."""
        performance_metrics = {}
        
        # Basic timing
        performance_metrics['total_execution_time'] = run_result.duration
        
        if 'petsc_timing' in metrics:
            performance_metrics['petsc_solver_time'] = metrics['petsc_timing']
        
        if 'solver_time' in metrics:
            performance_metrics['reported_solver_time'] = metrics['solver_time']
        
        # Iteration-based metrics
        if 'ksp_iterations' in metrics:
            iterations = metrics['ksp_iterations']
            performance_metrics['ksp_iterations'] = iterations
            
            if run_result.duration > 0:
                performance_metrics['time_per_iteration'] = run_result.duration / iterations
        
        # Matrix size information
        if 'matrix_rows' in metrics:
            performance_metrics['matrix_rows'] = metrics['matrix_rows']
        
        if 'matrix_cols' in metrics:
            performance_metrics['matrix_cols'] = metrics['matrix_cols']
        
        return performance_metrics
    
    def _validate_petsc_specific(self, metrics: Dict[str, Any], warnings: List[str], errors: List[str]) -> None:
        """Perform PETSc-specific validation checks."""
        # Check if KSP was properly configured
        if not metrics.get('ksp_configured', False):
            warnings.append("No KSP configuration detected in output")
        
        # Check if matrix was properly set up
        if not metrics.get('matrix_configured', False):
            warnings.append("No matrix configuration detected in output")
        
        # Check for PETSc errors
        if 'petsc_error' in metrics:
            errors.append(f"PETSc error detected: {metrics['petsc_error']}")
        
        # Check for reasonable iteration count
        if 'ksp_iterations' in metrics:
            iterations = metrics['ksp_iterations']
            if iterations <= 0:
                errors.append("Invalid iteration count: solver did not iterate")
            elif iterations == 1:
                warnings.append("Solver converged in 1 iteration - check problem setup")
        
        # Check matrix dimensions
        if 'matrix_rows' in metrics and 'matrix_cols' in metrics:
            rows = metrics['matrix_rows']
            cols = metrics['matrix_cols']
            if rows != cols:
                warnings.append(f"Non-square matrix detected ({rows}x{cols})")
            if rows <= 0 or cols <= 0:
                errors.append("Invalid matrix dimensions")