"""
Solver comparison module for comparing results across different implementations.
"""

import json
import logging
import math
import re
import sqlite3
from datetime import datetime, timedelta
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple

logger = logging.getLogger(__name__)


class SolverComparison:
    """
    Compare solver results across different implementations (CPU, CUDA, HIP).
    
    This class provides functionality to:
    - Compare final norms between implementations
    - Analyze convergence rates across implementations
    - Compare performance metrics
    - Generate comparison reports
    """
    
    def __init__(self, db_path: Optional[str] = None):
        """
        Initialize the solver comparison system.
        
        Args:
            db_path: Path to the SQLite database. If None, uses the default
                location ("gcr_logs.db" in the current working directory).
        """
        if db_path is None:
            # Use same default as SimpleDatabase
            db_path = "gcr_logs.db"
        
        self.db_path = str(Path(db_path).resolve())
        self._ensure_database_exists()
    
    def _ensure_database_exists(self) -> None:
        """Best-effort check that the database file exists and is queryable.
        
        Never raises: problems are only logged so construction always
        succeeds; later queries will simply return no data.
        """
        try:
            if not Path(self.db_path).exists():
                logger.warning(f"Database not found at {self.db_path}")
                return
            
            # Probe the expected schema with a trivial query.
            with sqlite3.connect(self.db_path, timeout=5) as conn:
                conn.execute("SELECT COUNT(*) FROM runs LIMIT 1")
        except Exception as e:
            logger.error(f"Database connection failed: {e}")
    
    def compare_implementations(
        self,
        solver: str,
        implementations: List[str],
        days: int = 7,
        metric: str = 'final_norm',
        tolerance: float = 1e-10
    ) -> Dict[str, Any]:
        """
        Compare solver results across different implementations.
        
        Args:
            solver: Solver type ('gcr', 'ca-gcr', 'gmres', 'bca-gmres')
            implementations: List of implementations to compare ('cpu', 'cuda', 'hip')
            days: Number of days to look back for data
            metric: Comparison metric ('final_norm', 'convergence_rate', 'performance')
            tolerance: Tolerance for considering values as equal
            
        Returns:
            Dictionary containing comparison results and statistics. Empty
            dict when fewer than two implementations have data, the metric
            is unknown, or an error occurs (errors are logged, not raised).
        """
        try:
            # Collect recent successful runs for each requested implementation.
            cutoff_date = datetime.now() - timedelta(days=days)
            
            implementation_data: Dict[str, List[Dict[str, Any]]] = {}
            for impl in implementations:
                data = self._get_implementation_data(solver, impl, cutoff_date)
                if data:
                    implementation_data[impl] = data
                    logger.info(f"Found {len(data)} runs for {solver}-{impl}")
                else:
                    logger.warning(f"No data found for {solver}-{impl}")
            
            # Pairwise comparison needs at least two populated implementations.
            if len(implementation_data) < 2:
                logger.warning("Need at least 2 implementations with data for comparison")
                return {}
            
            # Dispatch to the metric-specific comparison.
            if metric == 'final_norm':
                return self._compare_final_norms(implementation_data, tolerance)
            elif metric == 'convergence_rate':
                return self._compare_convergence_rates(implementation_data, tolerance)
            elif metric == 'performance':
                return self._compare_performance(implementation_data, tolerance)
            else:
                logger.error(f"Unknown comparison metric: {metric}")
                return {}
                
        except Exception as e:
            logger.error(f"Comparison failed: {e}")
            return {}
    
    def _get_implementation_data(
        self, 
        solver: str, 
        implementation: str, 
        cutoff_date: datetime
    ) -> List[Dict[str, Any]]:
        """Fetch successful test runs for one solver/implementation pair.
        
        Args:
            solver: Solver type stored in the ``solver`` column.
            implementation: Backend stored in the ``gpu`` column.
            cutoff_date: Only runs created at or after this time are returned.
            
        Returns:
            List of run dicts (newest first) with 'config' and 'results'
            JSON columns already parsed; empty list on any database error.
        """
        try:
            # timeout=5 matches _ensure_database_exists so a locked database
            # fails fast instead of hanging on the default timeout.
            with sqlite3.connect(self.db_path, timeout=5) as conn:
                conn.row_factory = sqlite3.Row
                cursor = conn.cursor()
                
                # Query for test runs matching the criteria, newest first —
                # downstream code relies on index 0 being the most recent run.
                query = """
                    SELECT * FROM runs 
                    WHERE type = 'test' 
                    AND solver = ? 
                    AND gpu = ? 
                    AND status = 'success'
                    AND created_at >= ?
                    ORDER BY created_at DESC
                """
                
                cursor.execute(query, (solver, implementation, cutoff_date.isoformat()))
                rows = cursor.fetchall()
                
                results = []
                for row in rows:
                    try:
                        # Parse the JSON columns; treat NULL as an empty dict.
                        results_data = json.loads(row['results']) if row['results'] else {}
                        config_data = json.loads(row['config']) if row['config'] else {}
                        
                        run_data = {
                            'id': row['id'],
                            'solver': row['solver'],
                            'gpu': row['gpu'],
                            'status': row['status'],
                            'created_at': row['created_at'],
                            'log_path': row['log_path'],
                            'config': config_data,
                            'results': results_data
                        }
                        results.append(run_data)
                        
                    except json.JSONDecodeError as e:
                        # Skip corrupt rows rather than failing the whole query.
                        logger.warning(f"Failed to parse JSON for run {row['id']}: {e}")
                        continue
                
                return results
                
        except Exception as e:
            logger.error(f"Failed to get implementation data: {e}")
            return []
    
    def _compare_final_norms(
        self, 
        implementation_data: Dict[str, List[Dict[str, Any]]], 
        tolerance: float
    ) -> Dict[str, Any]:
        """Compare the most recent final-norm value between implementation pairs.
        
        A pair is "consistent" when the absolute difference of their most
        recent final norms is within ``tolerance``.
        """
        comparisons = []
        consistent_results = 0
        inconsistent_results = 0
        differences = []
        
        implementations = list(implementation_data.keys())
        
        # Extract norms once per implementation; reused for both the pairwise
        # comparison and the per-implementation stats below.
        norms_by_impl = {
            impl: self._extract_final_norms(data)
            for impl, data in implementation_data.items()
        }
        
        # Compare each unordered pair of implementations.
        for i in range(len(implementations)):
            for j in range(i + 1, len(implementations)):
                impl1, impl2 = implementations[i], implementations[j]
                
                norms1 = norms_by_impl[impl1]
                norms2 = norms_by_impl[impl2]
                
                if not norms1 or not norms2:
                    continue
                
                # Runs are ordered newest first, so index 0 is most recent.
                norm1, norm2 = norms1[0], norms2[0]
                
                difference = abs(norm1 - norm2)
                differences.append(difference)
                
                if difference <= tolerance:
                    status = "consistent"
                    consistent_results += 1
                else:
                    status = "inconsistent"
                    inconsistent_results += 1
                
                comparisons.append({
                    'implementations': (impl1, impl2),
                    'values': (norm1, norm2),
                    'difference': difference,
                    'status': status,
                    'tolerance': tolerance
                })
        
        # Calculate summary statistics
        summary = {
            'total_comparisons': len(comparisons),
            'consistent_results': consistent_results,
            'inconsistent_results': inconsistent_results,
            'tolerance': tolerance,
            'implementations': implementations
        }
        
        if differences:
            summary['max_difference'] = max(differences)
            summary['avg_difference'] = sum(differences) / len(differences)
            summary['min_difference'] = min(differences)
        
        return {
            'metric': 'final_norm',
            'summary': summary,
            'comparisons': comparisons,
            'implementation_stats': {
                impl: {
                    'total_runs': len(data),
                    'recent_norms': norms_by_impl[impl][:3]  # Show 3 most recent
                }
                for impl, data in implementation_data.items()
            }
        }
    
    def _compare_convergence_rates(
        self, 
        implementation_data: Dict[str, List[Dict[str, Any]]], 
        tolerance: float
    ) -> Dict[str, Any]:
        """Compare convergence rates between implementation pairs.
        
        Unlike final norms, rates are judged with a *relative* tolerance
        scaled by the larger magnitude (floored at 1.0).
        """
        comparisons = []
        consistent_results = 0
        inconsistent_results = 0
        differences = []
        
        implementations = list(implementation_data.keys())
        
        # Extract rates once per implementation.
        rates_by_impl = {
            impl: self._extract_convergence_rates(data)
            for impl, data in implementation_data.items()
        }
        
        for i in range(len(implementations)):
            for j in range(i + 1, len(implementations)):
                impl1, impl2 = implementations[i], implementations[j]
                
                rates1 = rates_by_impl[impl1]
                rates2 = rates_by_impl[impl2]
                
                if not rates1 or not rates2:
                    continue
                
                # Most recent rate for each implementation.
                rate1, rate2 = rates1[0], rates2[0]
                
                difference = abs(rate1 - rate2)
                differences.append(difference)
                
                # Use relative tolerance for convergence rates
                rel_tolerance = tolerance * max(abs(rate1), abs(rate2), 1.0)
                
                if difference <= rel_tolerance:
                    status = "consistent"
                    consistent_results += 1
                else:
                    status = "inconsistent"
                    inconsistent_results += 1
                
                comparisons.append({
                    'implementations': (impl1, impl2),
                    'values': (rate1, rate2),
                    'difference': difference,
                    'status': status,
                    'tolerance': rel_tolerance
                })
        
        summary = {
            'total_comparisons': len(comparisons),
            'consistent_results': consistent_results,
            'inconsistent_results': inconsistent_results,
            'tolerance': tolerance,
            'implementations': implementations
        }
        
        if differences:
            summary['max_difference'] = max(differences)
            summary['avg_difference'] = sum(differences) / len(differences)
        
        return {
            'metric': 'convergence_rate',
            'summary': summary,
            'comparisons': comparisons
        }
    
    def _compare_performance(
        self, 
        implementation_data: Dict[str, List[Dict[str, Any]]], 
        tolerance: float
    ) -> Dict[str, Any]:
        """Compare average runtimes between implementations.
        
        Prefers 'total_time' from each run's results, falling back to
        'solver_time'. ``tolerance`` is accepted for interface symmetry
        with the other comparison methods but is not used here.
        """
        performance_stats = {}
        
        for impl, data in implementation_data.items():
            times = []
            for run in data:
                # Extract timing information from results
                results = run.get('results', {})
                total_time = results.get('total_time')
                solver_time = results.get('solver_time')
                
                if total_time is not None:
                    times.append(total_time)
                elif solver_time is not None:
                    times.append(solver_time)
            
            if times:
                performance_stats[impl] = {
                    'avg_time': sum(times) / len(times),
                    'min_time': min(times),
                    'max_time': max(times),
                    'runs': len(times),
                    'times': times[:5]  # Show first 5 times
                }
        
        # Compare average performance pairwise.
        comparisons = []
        implementations = list(performance_stats.keys())
        
        for i in range(len(implementations)):
            for j in range(i + 1, len(implementations)):
                impl1, impl2 = implementations[i], implementations[j]
                
                time1 = performance_stats[impl1]['avg_time']
                time2 = performance_stats[impl2]['avg_time']
                
                # speedup > 1 means impl2 is faster than impl1.
                speedup = time1 / time2 if time2 > 0 else float('inf')
                
                comparisons.append({
                    'implementations': (impl1, impl2),
                    'avg_times': (time1, time2),
                    'speedup': speedup,
                    'faster_implementation': impl2 if speedup > 1.0 else impl1
                })
        
        return {
            'metric': 'performance',
            'performance_stats': performance_stats,
            'comparisons': comparisons,
            'summary': {
                'implementations': implementations,
                'total_comparisons': len(comparisons)
            }
        }
    
    def _extract_final_norms(self, runs_data: List[Dict[str, Any]]) -> List[float]:
        """Extract final norm values from run results.
        
        Tries several result keys in priority order, then falls back to
        scraping the run's log file. Runs with no usable norm are skipped,
        so the returned list may be shorter than ``runs_data``.
        """
        norms = []
        for run in runs_data:
            results = run.get('results', {})
            
            # Try different possible keys for final norm
            final_norm = None
            for key in ['final_norm', 'final_residual', 'norm', 'residual_norm']:
                if key in results:
                    try:
                        final_norm = float(results[key])
                        break
                    except (ValueError, TypeError):
                        continue
            
            # Try to extract from log if not in results
            if final_norm is None and run.get('log_path'):
                final_norm = self._extract_norm_from_log(run['log_path'])
            
            if final_norm is not None:
                norms.append(final_norm)
        
        return norms
    
    def _extract_convergence_rates(self, runs_data: List[Dict[str, Any]]) -> List[float]:
        """Extract (or derive) convergence rates from run results.
        
        Uses an explicitly reported 'convergence_rate' when present;
        otherwise derives ``-ln(final_norm / initial_norm) / iterations``.
        Runs with neither are skipped.
        """
        rates = []
        for run in runs_data:
            results = run.get('results', {})
            
            # Prefer an explicitly reported convergence rate.
            rate = results.get('convergence_rate')
            if rate is not None:
                try:
                    rates.append(float(rate))
                    continue
                except (ValueError, TypeError):
                    pass
            
            # Calculate rate from iterations and final norm if available
            iterations = results.get('iterations')
            final_norm = results.get('final_norm')
            initial_norm = results.get('initial_norm', 1.0)  # Default to 1.0
            
            if iterations and final_norm and initial_norm:
                try:
                    # Average log-reduction per iteration.
                    reduction = final_norm / initial_norm
                    if reduction > 0 and iterations > 0:
                        rates.append(-math.log(reduction) / iterations)
                except (TypeError, ValueError, ZeroDivisionError):
                    # Non-numeric or degenerate fields; skip this run.
                    pass
        
        return rates
    
    def _extract_norm_from_log(self, log_path: str) -> Optional[float]:
        """Extract the final norm from a log file using regex patterns.
        
        Returns the last match found (assumed most recent), or None when
        the file is missing, unreadable, or contains no parsable norm.
        """
        try:
            if not Path(log_path).exists():
                return None
            
            with open(log_path, 'r') as f:
                content = f.read()
            
            # Common patterns for final norm in logs
            patterns = [
                r'Final norm[:\s]+([0-9.e+-]+)',
                r'final residual[:\s]+([0-9.e+-]+)',
                r'Converged.*norm[:\s]+([0-9.e+-]+)',
                r'norm.*final[:\s]+([0-9.e+-]+)',
            ]
            
            for pattern in patterns:
                matches = re.findall(pattern, content, re.IGNORECASE)
                if matches:
                    try:
                        return float(matches[-1])  # Take the last match
                    except ValueError:
                        continue
            
            return None
            
        except Exception as e:
            logger.warning(f"Failed to extract norm from log {log_path}: {e}")
            return None


# Optional numpy dependency: only np.log is used in this module, so when
# numpy is unavailable we substitute a tiny shim backed by math.log.
try:
    import numpy as np
except ImportError:
    import math

    class _NumpyFallback:
        """Minimal numpy stand-in exposing the single function this module needs."""

        @staticmethod
        def log(value):
            return math.log(value)

    np = _NumpyFallback()