"""
Original GCR-NCCL Validator Implementation.

This validator checks solver results for convergence and accuracy.
"""

import logging
import math
import re
from typing import Dict, List, Optional, Any

from gcr_solver_manager.extensions.base_validator import (
    ValidatorInterface, ValidationConfig, ValidationResult, ComparisonResult
)
from gcr_solver_manager.extensions.base_runner import RunResult

logger = logging.getLogger(__name__)


class OriginalGCRValidator(ValidatorInterface):
    """Validator for Original GCR-NCCL framework.

    Scrapes convergence, accuracy and timing information out of a solver
    run's stdout/stderr and converts it into the framework's
    ``ValidationResult`` / ``ComparisonResult`` structures.
    """

    # Patterns used while scanning solver output, compiled once at class
    # load instead of being re-parsed for every output line.
    _NUMBER_RE = re.compile(r'([0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?)')
    # Time lines are matched without an exponent, mirroring the solver's
    # plain-decimal time format.
    _SIMPLE_NUMBER_RE = re.compile(r'([0-9]*\.?[0-9]+)')
    _ITERATIONS_RE = re.compile(r'iteration[s]?\s*:?\s*(\d+)')

    def validate_result(self, run_result: RunResult, config: ValidationConfig) -> ValidationResult:
        """Validate solver execution results.

        Args:
            run_result: Raw outcome of a solver run (exit code, output, timing).
            config: Validation thresholds and flags (tolerance, max iterations,
                whether convergence is mandatory).

        Returns:
            ValidationResult summarizing validity, convergence status and
            accuracy/performance metrics. Never raises: unexpected failures
            are reported through the result's ``errors`` list.
        """
        warnings: List[str] = []
        errors: List[str] = []

        try:
            # A failed run is invalid outright; its output cannot be trusted.
            if not run_result.success:
                errors.append(f"Solver execution failed with exit code {run_result.exit_code}")
                return ValidationResult(
                    valid=False,
                    convergence_achieved=False,
                    accuracy_metrics={},
                    performance_metrics={},
                    warnings=warnings,
                    errors=errors
                )

            # Extract metrics from solver output.
            metrics = self.extract_metrics(run_result)

            # Missing convergence is an error only when the config demands a
            # convergence check; otherwise it is merely flagged as unverified.
            convergence_achieved = self._check_convergence(metrics, config)
            if not convergence_achieved:
                if config.check_convergence:
                    errors.append("Solver did not converge within specified tolerance")
                else:
                    warnings.append("Solver convergence not verified")

            accuracy_metrics = self._compute_accuracy_metrics(metrics, config)
            performance_metrics = self._compute_performance_metrics(run_result, metrics)

            # Overall verdict: successful run, convergence satisfied (or not
            # required), and no errors accumulated above.
            valid = (
                run_result.success and
                (convergence_achieved or not config.check_convergence) and
                len(errors) == 0
            )

            return ValidationResult(
                valid=valid,
                convergence_achieved=convergence_achieved,
                accuracy_metrics=accuracy_metrics,
                performance_metrics=performance_metrics,
                warnings=warnings,
                errors=errors
            )

        except Exception as e:
            # Validation must never crash the caller; surface the failure as
            # an error in the returned result instead.
            errors.append(f"Validation failed: {e}")
            return ValidationResult(
                valid=False,
                convergence_achieved=False,
                accuracy_metrics={},
                performance_metrics={},
                warnings=warnings,
                errors=errors
            )

    def extract_metrics(self, run_result: RunResult) -> Dict[str, Any]:
        """Extract performance and convergence metrics from solver output.

        Scans each line of combined stdout/stderr for residual norms,
        iteration counts, convergence status and solver-reported timing.
        Parsing is best-effort: on any parsing failure the metrics gathered
        so far are returned and the exception is only logged at debug level.

        Args:
            run_result: Run outcome whose stdout/stderr will be parsed.

        Returns:
            Dict of metric name -> value. Always includes
            ``execution_duration`` and ``exit_code``; runner-supplied metrics
            (``run_result.metrics``) override parsed values of the same name.
        """
        metrics: Dict[str, Any] = {}

        try:
            # Solvers may log to either stream, so analyze both together.
            output = run_result.stdout + "\n" + run_result.stderr

            for raw_line in output.split('\n'):
                line = raw_line.strip()
                # Hoisted: the original lowercased the line once per keyword
                # test; do it a single time per line.
                lowered = line.lower()

                # Final residual norm, e.g. "Final residual norm: 1.2e-08".
                if 'final' in lowered and 'residual' in lowered and 'norm' in lowered:
                    match = self._NUMBER_RE.search(line)
                    if match:
                        metrics['final_residual_norm'] = float(match.group(1))

                # Initial residual norm.
                if 'initial' in lowered and 'residual' in lowered and 'norm' in lowered:
                    match = self._NUMBER_RE.search(line)
                    if match:
                        metrics['initial_residual_norm'] = float(match.group(1))

                # Iteration count, e.g. "Iterations: 42".
                if 'iteration' in lowered and ':' in line:
                    match = self._ITERATIONS_RE.search(lowered)
                    if match:
                        metrics['iterations'] = int(match.group(1))

                # Explicit convergence status line; "not converged" or
                # "convergence failed" count as a negative result.
                if 'converged' in lowered:
                    metrics['converged'] = not ('not' in lowered or 'failed' in lowered)

                # Execution time, if reported by the solver itself.
                if 'time' in lowered and ('sec' in lowered or 'second' in lowered):
                    match = self._SIMPLE_NUMBER_RE.search(line)
                    if match:
                        metrics['solver_time'] = float(match.group(1))

            # Basic metrics available regardless of output parsing.
            metrics['execution_duration'] = run_result.duration
            metrics['exit_code'] = run_result.exit_code

            # Runner-extracted metrics take precedence over parsed ones.
            if run_result.metrics:
                metrics.update(run_result.metrics)

        except Exception as e:
            # Best-effort: keep whatever was gathered before the failure.
            logger.debug(f"Failed to extract metrics: {e}")

        return metrics

    def compare_results(self, result1: RunResult, result2: RunResult) -> ComparisonResult:
        """Compare results from different runs.

        Results are considered similar when residual norms agree to within
        1e-6 relative difference, execution time differs by less than 20%,
        and both runs report the same convergence status.

        Args:
            result1: First run to compare.
            result2: Second run to compare.

        Returns:
            ComparisonResult with per-metric differences; on internal failure
            a "not similar" result carrying the error string is returned.
        """
        try:
            metrics1 = self.extract_metrics(result1)
            metrics2 = self.extract_metrics(result2)

            differences: Dict[str, Any] = {}

            # Relative difference of final residual norms; infinite when
            # either run did not report one.
            if 'final_residual_norm' in metrics1 and 'final_residual_norm' in metrics2:
                norm1 = metrics1['final_residual_norm']
                norm2 = metrics2['final_residual_norm']
                accuracy_diff = abs(norm1 - norm2) / max(norm1, norm2) if max(norm1, norm2) > 0 else 0
                differences['residual_norm_relative_diff'] = accuracy_diff
            else:
                accuracy_diff = float('inf')

            # Relative execution-time difference. Guard against two
            # zero-duration runs (the original divided unconditionally and
            # could raise ZeroDivisionError here).
            max_duration = max(result1.duration, result2.duration)
            perf_diff = (
                abs(result1.duration - result2.duration) / max_duration
                if max_duration > 0 else 0.0
            )
            differences['execution_time_relative_diff'] = perf_diff

            # Absolute iteration-count difference, when both are known.
            if 'iterations' in metrics1 and 'iterations' in metrics2:
                differences['iteration_count_diff'] = abs(metrics1['iterations'] - metrics2['iterations'])

            # Convergence status agreement (unreported status counts as False).
            conv1 = metrics1.get('converged', False)
            conv2 = metrics2.get('converged', False)
            differences['convergence_status_same'] = conv1 == conv2

            # Similar iff all three criteria hold.
            similar = (
                accuracy_diff < 1e-6 and  # Very small residual norm difference
                perf_diff < 0.2 and       # Less than 20% performance difference
                conv1 == conv2            # Same convergence status
            )

            return ComparisonResult(
                similar=similar,
                accuracy_difference=accuracy_diff,
                performance_difference=perf_diff,
                differences=differences
            )

        except Exception as e:
            logger.error(f"Failed to compare results: {e}")
            return ComparisonResult(
                similar=False,
                accuracy_difference=float('inf'),
                performance_difference=float('inf'),
                differences={'error': str(e)}
            )

    def _check_convergence(self, metrics: Dict[str, Any], config: ValidationConfig) -> bool:
        """Check if solver converged based on metrics and configuration.

        Checks are tried in order of reliability: an explicit solver-reported
        flag, then the final residual norm against ``config.tolerance``, then
        the iteration count against ``config.max_iterations``.
        """
        # Explicit convergence flag reported by the solver wins.
        if 'converged' in metrics:
            return metrics['converged']

        # Final residual norm below tolerance implies convergence.
        if 'final_residual_norm' in metrics:
            return metrics['final_residual_norm'] < config.tolerance

        # NOTE(review): stopping before max_iterations is only a heuristic
        # proxy for convergence (the solver may have stalled) — kept as-is.
        if 'iterations' in metrics:
            return metrics['iterations'] < config.max_iterations

        # With no convergence information at all, assume convergence since
        # the run itself succeeded.
        return True

    def _compute_accuracy_metrics(self, metrics: Dict[str, Any], config: ValidationConfig) -> Dict[str, float]:
        """Compute accuracy-related metrics.

        Returns final residual norm and, when both initial and final norms
        are available, the residual reduction ratio and its order of
        magnitude (``residual_reduction_log``).
        """
        accuracy_metrics: Dict[str, float] = {}

        if 'final_residual_norm' in metrics:
            accuracy_metrics['final_residual_norm'] = metrics['final_residual_norm']

        if 'initial_residual_norm' in metrics and 'final_residual_norm' in metrics:
            initial = metrics['initial_residual_norm']
            final = metrics['final_residual_norm']
            if initial > 0:
                accuracy_metrics['residual_reduction'] = final / initial
                # Orders of magnitude of residual reduction. The original
                # called .log10() on a float, which raises AttributeError;
                # math.log10 is the correct form.
                accuracy_metrics['residual_reduction_log'] = (
                    -math.log10(final / initial) if final > 0 else float('inf')
                )

        return accuracy_metrics

    def _compute_performance_metrics(self, run_result: RunResult, metrics: Dict[str, Any]) -> Dict[str, float]:
        """Compute performance-related metrics.

        Always reports total execution time; adds solver-reported time,
        iteration count, and time per iteration when available.
        """
        performance_metrics: Dict[str, float] = {}

        performance_metrics['total_execution_time'] = run_result.duration

        if 'solver_time' in metrics:
            performance_metrics['solver_time'] = metrics['solver_time']

        if 'iterations' in metrics:
            iterations = metrics['iterations']
            performance_metrics['iterations'] = iterations
            # Guard iterations > 0 as well: a parsed count of 0 would have
            # caused ZeroDivisionError in the original.
            if run_result.duration > 0 and iterations > 0:
                performance_metrics['time_per_iteration'] = run_result.duration / iterations

        return performance_metrics