"""
Solver log analysis module for GCR Solver Manager.

Integrated analysis functionality with improved auto-detection of solver types.
"""

import re
import logging
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple, Optional, Any
import matplotlib.pyplot as plt
import numpy as np

logger = logging.getLogger(__name__)


class SolverLogAnalyzer:
    """Enhanced solver log analyzer with auto-detection capabilities.

    Accepts raw log text, auto-detects whether it was produced in debug
    mode (per-step ``norm=`` lines tagged with a classname) or non-debug
    mode (per-iteration ``residual norm =`` lines), and returns a results
    dictionary including a formatted comparison table (debug) or a
    convergence summary (non-debug).
    """

    def __init__(self):
        """Initialize empty analysis state."""
        # "tag_stepN" -> {classname: norm value kept as string} (debug mode).
        self.entries: Dict[str, Dict[str, str]] = {}
        # Sorted classnames seen in the debug log.
        self.classnames: List[str] = []
        # Entry keys in first-seen order, so tables follow log order.
        self.tag_order: List[str] = []
        # Solver type -> {implementation name: classname}.
        self.solver_groups: Dict[str, Dict[str, str]] = {}
        # "SOLVER_IMPL" -> {'iterations': [...], 'residuals': [...]} (non-debug).
        self.residual_data: Dict[str, Any] = {}

    def analyze_log_content(self, log_content: str) -> Dict[str, Any]:
        """
        Analyze log content and return comprehensive results.

        Args:
            log_content: Raw log content as string

        Returns:
            Dictionary containing analysis results; the key set depends on
            the detected mode ('debug' vs 'non-debug').
        """
        # Reset state so a single analyzer instance can be reused for
        # several logs without leaking data from a previous analysis.
        self.entries = {}
        self.classnames = []
        self.tag_order = []
        self.solver_groups = {}
        self.residual_data = {}

        mode = self._detect_mode(log_content)

        if mode == 'debug':
            return self._analyze_debug_mode(log_content)
        return self._analyze_non_debug_mode(log_content)

    def _detect_mode(self, log_content: str) -> str:
        """Auto-detect if this is debug or non-debug mode.

        Requires at least two distinct debug indicators to classify the log
        as 'debug', which keeps a stray matching line from flipping the mode.
        """
        debug_patterns = [
            r'\[.*-debug\].*norm=',
            r'DEBUG - Entering',
            r'debug.*step=.*norm=',
            r'maxit=1'
        ]

        debug_count = sum(1 for pattern in debug_patterns
                         if re.search(pattern, log_content, re.IGNORECASE))

        return 'debug' if debug_count >= 2 else 'non-debug'

    def _analyze_debug_mode(self, log_content: str) -> Dict[str, Any]:
        """Analyze debug mode logs with norm data.

        Returns a dict with parsed entries, the detected classnames grouped
        by solver type, a rendered comparison table and a summary.
        """
        self.entries, self.classnames, self.tag_order = self._parse_debug_norms(log_content)
        self.solver_groups = self._group_classnames_by_solver(self.classnames)

        results = {
            'mode': 'debug',
            'total_entries': len(self.entries),
            'classnames': self.classnames,
            'solver_groups': self.solver_groups,
            'entries': self.entries,
            'comparison_table': self._generate_comparison_table(),
            'summary': self._generate_summary()
        }

        return results

    def _analyze_non_debug_mode(self, log_content: str) -> Dict[str, Any]:
        """Analyze non-debug mode logs with residual data.

        Returns a dict with the per-solver residual histories, plot-ready
        data and a convergence summary.
        """
        self.residual_data = self._parse_residual_norms(log_content)

        results = {
            'mode': 'non-debug',
            'residual_data': self.residual_data,
            'plot_data': self._prepare_plot_data(),
            'summary': self._generate_residual_summary()
        }

        return results

    def _parse_debug_norms(self, log_content: str) -> Tuple[Dict, List[str], List[str]]:
        """Parse debug mode norm data.

        Expected line shape: ``[CLASSNAME] tag step=N norm=VALUE`` where
        CLASSNAME may carry prefixes/suffixes such as ``NCCL-`` or ``-debug``.

        Returns:
            (entries, sorted classnames, entry keys in first-seen order)
        """
        entries = {}
        classnames = set()
        tag_order = []
        seen_tags = set()

        # A single pattern suffices: the classname character class combined
        # with re.IGNORECASE already matches plain names, "-debug" names and
        # "NCCL-...-debug" names alike, so the per-variant patterns that used
        # to follow this one were unreachable dead code.
        debug_pattern = re.compile(
            r'\[([A-Z\-_0-9]+)\]\s*([a-zA-Z_][a-zA-Z0-9_]*)\s*step=(-?\d+)\s*norm=([0-9.eE+-]+)',
            re.IGNORECASE
        )

        for line in log_content.split('\n'):
            match = debug_pattern.search(line)
            if match:
                classname, tag, step, norm_value = match.groups()
                key = f"{tag}_step{step}"

                # Norm values are kept as strings so the table shows them
                # exactly as logged; conversion happens only for diffs.
                if key not in entries:
                    entries[key] = {}

                entries[key][classname] = norm_value
                classnames.add(classname)

                # Maintain first-seen order of entry keys for table output.
                if key not in seen_tags:
                    tag_order.append(key)
                    seen_tags.add(key)

        return entries, sorted(classnames), tag_order

    def _group_classnames_by_solver(self, classnames: List[str]) -> Dict[str, Dict[str, str]]:
        """Group detected classnames by solver type and implementation."""
        solver_groups = defaultdict(dict)

        for classname in classnames:
            solver_type, implementation = self._identify_solver_and_implementation(classname)

            if solver_type and implementation:
                solver_groups[solver_type][implementation] = classname
            else:
                # Fallback: create a group even if we can't fully identify;
                # the classname itself stands in as the implementation key.
                fallback_solver = self._extract_primary_solver_name(classname)
                solver_groups[fallback_solver][classname] = classname

        return dict(solver_groups)

    def _identify_solver_and_implementation(self, classname: str) -> Tuple[Optional[str], Optional[str]]:
        """Identify solver type and implementation from a classname.

        Returns:
            (solver_type, implementation); solver_type is None when the name
            matches no known solver family.
        """
        # Normalize for matching: uppercase, strip transport/build affixes.
        clean_name = classname.upper()
        clean_name = re.sub(r'^(NCCL-|CPU-|HIP-|CUDA-)', '', clean_name)
        clean_name = re.sub(r'(-DEBUG|-RELEASE)$', '', clean_name)

        # Solver type detection (order matters - check most specific first,
        # otherwise e.g. "BCA-GMRES" would be swallowed by the "GMRES" test).
        solver_type = None
        if 'BCA-GMRES' in clean_name or 'BCA_GMRES' in clean_name:
            solver_type = 'BCA-GMRES'
        elif 'CA-GCR' in clean_name or 'CA_GCR' in clean_name or 'CAGCR' in clean_name:
            solver_type = 'CA-GCR'
        elif 'GMRES' in clean_name:
            solver_type = 'GMRES'
        elif 'GCR' in clean_name:
            solver_type = 'GCR'

        # Implementation detection on the ORIGINAL (un-stripped) name.
        implementation = None
        original_upper = classname.upper()

        if 'NCCL-' in original_upper or 'CUDA' in original_upper:
            implementation = 'CUDA'
        elif 'HIP' in original_upper:
            implementation = 'HIP'
        elif 'CPU' in original_upper or original_upper.startswith('C-') or (
            not any(gpu in original_upper for gpu in ['CUDA', 'HIP', 'NCCL'])):
            # No GPU marker anywhere -> assume a CPU implementation.
            implementation = 'CPU'
        else:
            # Only reachable for names containing 'NCCL' without the
            # 'NCCL-' prefix (e.g. a suffix form); treat those as CUDA.
            if 'NCCL' in original_upper:
                implementation = 'CUDA'
            else:
                implementation = 'Unknown'

        return solver_type, implementation

    def _extract_primary_solver_name(self, classname: str) -> str:
        """Extract a coarse solver-family name as a grouping fallback."""
        # Remove common prefixes/suffixes and extract core name.
        clean = re.sub(r'^(NCCL-|CPU-|HIP-|CUDA-)', '', classname, flags=re.IGNORECASE)
        clean = re.sub(r'(-debug|-release)$', '', clean, flags=re.IGNORECASE)

        # GMRES is checked first so GMRES variants don't fall into a
        # GCR bucket (no overlap today, but the order is deliberate).
        if 'GMRES' in clean.upper():
            return 'GMRES-family'
        elif 'GCR' in clean.upper():
            return 'GCR-family'
        else:
            return clean or 'Unknown'

    def _parse_residual_norms(self, log_content: str) -> Dict[str, Any]:
        """Parse residual norm data from non-debug logs.

        Tracks the currently active implementation (C/CUDA/HIP) from marker
        lines; residual lines seen before any marker are dropped because
        they cannot be attributed to an implementation.
        """
        residual_data = {}

        # The numeric part requires digits before/after the optional
        # exponent so a malformed token like "1e" can never match and
        # crash float() below.
        residual_pattern = re.compile(
            r'\[(GMRES|GCR|CA-GCR|BCA-GMRES)\] Iteration (\d+): '
            r'residual norm = ([0-9]*\.?[0-9]+(?:[Ee][\+\-]?[0-9]+)?)',
            re.IGNORECASE
        )

        current_implementation = None

        for line in log_content.split('\n'):
            # Detect implementation changes from marker lines.
            if 'Simplified C implementation' in line:
                current_implementation = 'C'
            elif 'DEBUG - Entering cu_solve_helmholts' in line or 'CUDA' in line:
                current_implementation = 'CUDA'
            elif 'HIP' in line:
                current_implementation = 'HIP'

            # Extract residual norms for the active implementation.
            match = residual_pattern.search(line)
            if match and current_implementation:
                solver = match.group(1)
                iteration = int(match.group(2))
                residual = float(match.group(3))

                key = f"{solver}_{current_implementation}"
                if key not in residual_data:
                    residual_data[key] = {'iterations': [], 'residuals': []}

                residual_data[key]['iterations'].append(iteration)
                residual_data[key]['residuals'].append(residual)

        return residual_data

    def _generate_comparison_table(self) -> List[str]:
        """Generate a formatted comparison table, one section per solver."""
        if not self.entries or not self.solver_groups:
            return ["No comparison data available"]

        table_lines = []

        for solver_type, implementations in self.solver_groups.items():
            table_lines.append(f"\n{'='*80}")
            table_lines.append(f"SOLVER: {solver_type}")
            table_lines.append('='*80)

            # Table header.
            impl_names = list(implementations.keys())
            classnames = list(implementations.values())

            header = f"{'Tag':<40}"
            for impl in impl_names:
                header += f" {impl:<15}"
            # Only show the diff column when it can actually be filled in:
            # rows below compute a diff only for exactly two implementations,
            # so advertising it for 3+ would leave a permanently empty column.
            if len(impl_names) == 2:
                header += f" {'Diff %':<15}"
            table_lines.append(header)
            table_lines.append('-' * len(header))

            # Table rows (capped at 20 per solver to keep reports readable).
            displayed_count = 0
            for tag in self.tag_order:
                if displayed_count >= 20:  # Limit display
                    table_lines.append(f"... and {len(self.tag_order) - displayed_count} more entries")
                    break

                if tag in self.entries:
                    values = []
                    row = f"{tag:<40}"

                    for classname in classnames:
                        if classname in self.entries[tag]:
                            val = self.entries[tag][classname]
                            values.append(val)
                            row += f" {val:<15}"
                        else:
                            row += f" {'-':<15}"

                    # Percentage diff is only meaningful pairwise.
                    if len(values) == 2 and len(impl_names) == 2:
                        diff = self._calculate_diff_percent(values[0], values[1])
                        row += f" {diff:<15}"

                    table_lines.append(row)
                    displayed_count += 1

            table_lines.append(f"\nTotal entries: {len([tag for tag in self.tag_order if tag in self.entries])}")

        return table_lines

    def _calculate_diff_percent(self, val1: str, val2: str) -> str:
        """Calculate the percentage difference (v2-v1)/v1*100 as a string.

        Returns "inf" when only v1 is zero and "nan" when either value is
        not parseable as a float.
        """
        try:
            v1 = float(val1)
            v2 = float(val2)

            if v1 == 0 and v2 == 0:
                return "0.00000"
            elif v1 == 0:
                return "inf"
            else:
                return f"{((v2 - v1) / v1 * 100):.5f}"
        except (ValueError, ZeroDivisionError):
            return "nan"

    def _generate_summary(self) -> Dict[str, Any]:
        """Generate a summary of the debug-mode analysis."""
        return {
            'total_entries': len(self.entries),
            'solver_types': len(self.solver_groups),
            'classnames_detected': len(self.classnames),
            'solver_breakdown': {k: len(v) for k, v in self.solver_groups.items()}
        }

    def _prepare_plot_data(self) -> Dict[str, Any]:
        """Prepare data for plotting residual convergence.

        Currently the raw residual data is already plot-ready, so this is
        a pass-through kept as an extension point.
        """
        return self.residual_data

    def _generate_residual_summary(self) -> Dict[str, Any]:
        """Generate per-solver summary (final residual, convergence rate)."""
        summary = {}
        for key, data in self.residual_data.items():
            if data['residuals']:
                summary[key] = {
                    'final_residual': data['residuals'][-1],
                    'iterations': len(data['residuals']),
                    'convergence_rate': self._calculate_convergence_rate(data['residuals'])
                }
        return summary

    def _calculate_convergence_rate(self, residuals: List[float]) -> float:
        """Calculate the average ratio between consecutive residuals.

        Values < 1 mean convergence; returns 0.0 with fewer than two
        residuals and 1.0 when no valid (positive) consecutive pair exists.
        """
        if len(residuals) < 2:
            return 0.0

        rates = []
        for i in range(1, len(residuals)):
            # Skip non-positive residuals to avoid dividing by zero or
            # producing meaningless negative ratios.
            if residuals[i-1] > 0 and residuals[i] > 0:
                rate = residuals[i] / residuals[i-1]
                rates.append(rate)

        return sum(rates) / len(rates) if rates else 1.0

    def generate_report(self, results: Dict[str, Any]) -> str:
        """Generate a formatted text report from analysis results.

        Args:
            results: A dict as returned by analyze_log_content.

        Returns:
            A multi-line, human-readable report string.
        """
        report_lines = []

        if results['mode'] == 'debug':
            report_lines.append(f"Log analysis: Type={results['mode']}")
            report_lines.append("Processing debug mode norm data...")
            report_lines.append("")

            # Show detected classnames with per-classname entry counts.
            report_lines.append("Detected classnames:")
            for classname in results['classnames']:
                count = sum(1 for entry in results['entries'].values() if classname in entry)
                report_lines.append(f"{classname} ({count} entries)")
            report_lines.append("")

            # Show solver groups.
            if results['solver_groups']:
                for solver_type, implementations in results['solver_groups'].items():
                    report_lines.append(f"✅ Identified {solver_type} solver:")
                    for impl, classname in implementations.items():
                        report_lines.append(f"   - {impl}: {classname}")
                report_lines.append("")
            else:
                report_lines.append("⚠️  Could not identify solver types from classnames.")
                report_lines.append("")

            # Add comparison table.
            report_lines.extend(results['comparison_table'])

        else:  # non-debug mode
            report_lines.append(f"Log analysis: Type={results['mode']}")
            report_lines.append("Processing residual convergence data...")

            for solver_key, data in results['residual_data'].items():
                report_lines.append(f"\n{solver_key}:")
                report_lines.append(f"  Iterations: {len(data['residuals'])}")
                if data['residuals']:
                    report_lines.append(f"  Final residual: {data['residuals'][-1]:.6e}")

        return "\n".join(report_lines)


def analyze_log_file(file_path: str) -> Dict[str, Any]:
    """
    Convenience function to analyze a log file.

    Args:
        file_path: Path to the log file

    Returns:
        Analysis results dictionary with 'file_path' and 'report' keys
        added; on failure, a dictionary with 'error' and 'file_path' keys.
    """
    analyzer = SolverLogAnalyzer()

    try:
        # Explicit encoding avoids platform-dependent defaults; solver logs
        # can contain stray bytes, so replace rather than crash on them.
        with open(file_path, 'r', encoding='utf-8', errors='replace') as f:
            content = f.read()

        results = analyzer.analyze_log_content(content)
        results['file_path'] = file_path
        results['report'] = analyzer.generate_report(results)

        return results

    except Exception as e:
        # Boundary handler: log with traceback and return an error payload
        # instead of raising, so callers can surface the failure gracefully.
        logger.exception(f"Failed to analyze log file {file_path}: {e}")
        return {'error': str(e), 'file_path': file_path}


def analyze_log_content(content: str) -> Dict[str, Any]:
    """
    Convenience function to analyze log content directly.

    Builds a throwaway SolverLogAnalyzer, runs the analysis, and attaches
    the rendered text report under the 'report' key.

    Args:
        content: Log content as string

    Returns:
        Analysis results dictionary
    """
    analyzer = SolverLogAnalyzer()
    analysis = analyzer.analyze_log_content(content)
    analysis['report'] = analyzer.generate_report(analysis)
    return analysis