#!/usr/bin/env python3
"""
EnKF stdout Analysis Tool
Automatically extracts and analyzes key information from EnKF stdout files
Based on the analysis patterns from EnKF_Run_Analysis_Report.md
"""

import re
import sys
from datetime import datetime
from collections import defaultdict
from typing import Dict, List, Tuple, Optional, Any
import json
import os
import copy

class EnKFAnalyzer:
    def __init__(self, stdout_file: str):
        """Remember the target stdout file and prepare empty result buckets."""
        self.stdout_file = stdout_file
        self.content = []  # raw file lines, populated by read_file()
        # One bucket per analysis category. Insertion order is preserved in
        # the JSON output, so it must stay in this layout.
        self.analysis = dict(
            execution_status={},
            configuration={},
            grid_info={},
            ensemble_info={},
            observations={},
            innovation_stats={'prior': {}, 'posterior': {}},
            core_update={},
            increments={},
            performance={},
            issues=[],
            diagnosis={},
        )
        
    def read_file(self):
        """Read the stdout file content"""
        try:
            with open(self.stdout_file, 'r') as f:
                self.content = f.readlines()
        except FileNotFoundError:
            print(f"Error: File {self.stdout_file} not found")
            sys.exit(1)
            
    def analyze(self):
        """Run every extraction pass, then the issue scan and diagnosis.

        Order matters: diagnose_problems() reads the results produced by
        the extract_* passes and identify_issues().
        """
        passes = (
            self.extract_execution_status,
            self.extract_configuration,
            self.extract_grid_info,
            self.extract_ensemble_info,
            self.extract_observations,
            self.extract_innovation_stats,
            self.extract_core_update,
            self.extract_increments,
            self.extract_performance,
            self.identify_issues,
            self.diagnose_problems,
        )
        for step in passes:
            step()
        
    def extract_execution_status(self):
        """Determine whether the run finished; capture exit code and wall time."""
        status = self.analysis['execution_status']
        exit_re = re.compile(r'exit.*?(\d+)', re.IGNORECASE)
        wall_re = re.compile(r'wall time.*?(\d+\.?\d*)')

        for line in self.content:
            lowered = line.lower()

            # Exit-code lines: completion only if the code is zero.
            if "Exit code" in line or "exit" in lowered:
                m = exit_re.search(line)
                if m:
                    code = int(m.group(1))
                    status['exit_code'] = code
                    status['completed'] = (code == 0)

            # Explicit normal-termination markers override nothing — they
            # simply mark the run as completed.
            if "program ended normally" in line or "ENDING DATE-TIME" in line:
                status['completed'] = True

            # Wall-clock timing, first number after "wall time".
            if "wall time" in lowered:
                m = wall_re.search(line)
                if m:
                    status['wall_time_seconds'] = float(m.group(1))
                    
    def extract_configuration(self):
        """Extract EnKF configuration: analysis time, ensemble size, MPI tasks, model type."""
        config = self.analysis['configuration']

        for i, line in enumerate(self.content):
            # Analysis time - look for multiple patterns
            if "analysis time" in line:
                match = re.search(r'(\d{10})', line)  # YYYYMMDDHH format
                if match:
                    time_str = match.group(1)
                    year = time_str[0:4]
                    month = time_str[4:6]
                    day = time_str[6:8]
                    hour = time_str[8:10]
                    config['analysis_time'] = f"{year}-{month}-{day} {hour}:00 UTC"
                else:
                    # Fall back to an already-formatted "YYYY-MM-DD HH" stamp.
                    match = re.search(r'(\d{4})-(\d{2})-(\d{2})\s+(\d{2})', line)
                    if match:
                        config['analysis_time'] = f"{match.group(1)}-{match.group(2)}-{match.group(3)} {match.group(4)}:00 UTC"

            # Ensemble size - a "<N> members" statement, unless the previous
            # line already announced the count.
            # BUG FIX: the original guard was an unparenthesized conditional
            # expression ("A and B if i > 0 else True"), which evaluated to
            # True for i == 0 regardless of the line contents; the parentheses
            # below express the actual intent.
            if "members" in line and (
                i == 0 or "number of ensemble members" not in self.content[i - 1]
            ):
                match = re.search(r'(\d+)\s+members', line)
                if match:
                    config['ensemble_size'] = int(match.group(1))

            # Namelist parameter NANALS overrides the prose statement.
            if "NANALS=" in line:
                match = re.search(r'NANALS=(\d+)', line)
                if match:
                    config['ensemble_size'] = int(match.group(1))

            # MPI tasks - "running on N processors"
            if "running on" in line and "processors" in line:
                match = re.search(r'running on\s+(\d+)\s+processors', line)
                if match:
                    config['mpi_tasks'] = int(match.group(1))

            # Model type from the REGIONAL namelist flag.
            if "REGIONAL=" in line:
                if "REGIONAL=T" in line:
                    config['model_type'] = 'WRF-ARW'
                    config['regional'] = True
                else:
                    config['model_type'] = 'Global'
                    config['regional'] = False

            # Model-core-specific indicators refine the model type.
            if "wrf_nmm_regional" in line:
                config['model_type'] = 'WRF-NMM'
            elif "wrf_mass_regional" in line or "arw" in line.lower():
                config['model_type'] = 'WRF-ARW'
                
    def extract_grid_info(self):
        """Collect grid dimensions and any file/namelist dimension disagreement."""
        grid = self.analysis['grid_info']
        # Namelist token -> result key for the three grid dimensions.
        namelist_dims = (('NLONS=', 'nx'), ('NLATS=', 'ny'), ('NLEVS=', 'nz'))

        for line in self.content:
            # Grid dimensions declared in the namelist.
            for token, key in namelist_dims:
                if token in line:
                    m = re.search(token + r'(\d+)', line)
                    if m:
                        grid[key] = int(m.group(1))

            # Control-variable level count printed at initialization
            # (taken from the trailing number on the line).
            if "Control levels:" in line:
                m = re.search(r'(\d+)\s*$', line)
                if m:
                    grid['control_vector_size'] = int(m.group(1))

            # Dimensions reported when the input file and the namelist disagree.
            if "nlons ingested from file" in line:
                m = re.search(r'nlons ingested from file\s*=\s*(\d+)', line)
                if m:
                    grid['file_nx'] = int(m.group(1))

            if "nlons specified in namelist" in line:
                m = re.search(r'nlons specified in namelist\s*=\s*(\d+)', line)
                if m:
                    grid['namelist_nx'] = int(m.group(1))
                    
    def extract_ensemble_info(self):
        """Gather ensemble spread per variable/region and flag collapsed spread."""
        ensemble = self.analysis['ensemble_info']
        ensemble['spread_by_variable'] = {}
        ensemble['identical_members'] = False

        # Innovation-stat rows: the value captured in group 3 (next-to-last
        # scientific-notation column) is taken as sqrt(S).
        spread_re = re.compile(
            r'(NH|TR|GL|SH)\s+all\s+(\w+)\s+\d+.*?([\d.]+E[+-]\d+)\s+([\d.]+E[+-]\d+)$'
        )

        for line in self.content:
            # Table header lines carry no data.
            if "sqrt(S)" in line and "region" in line:
                continue

            m = spread_re.search(line)
            if not m:
                continue
            region, var_type = m.group(1), m.group(2)
            sqrt_s = float(m.group(3))
            ensemble['spread_by_variable'].setdefault(var_type, {})[region] = sqrt_s

        # Near-zero spread everywhere means the members are effectively
        # identical — the filter has no signal to work with.
        all_spreads = [
            value
            for regions in ensemble['spread_by_variable'].values()
            for value in regions.values()
        ]
        if all_spreads and max(all_spreads) < 1e-5:
            ensemble['identical_members'] = True
                
    def extract_observations(self):
        """Extract observation counts: total, per-category, per-type, used/skipped."""
        obs = self.analysis['observations']
        obs['types_configured'] = {}

        for i, line in enumerate(self.content):
            # Total observation count, e.g. "total number of obs = 12345".
            # BUG FIX: the guard requires '=' in the line, but the previous
            # pattern (r'total number of obs\s*(\d+)') could never match
            # across that separator, so obs['total'] was never set; the
            # optional '=' below fixes it.
            if "total number of obs" in line and "=" in line:
                match = re.search(r'total number of obs\s*=?\s*(\d+)', line)
                if match:
                    obs['total'] = int(match.group(1))

            # Counts per category reported from the diagnostic files.
            if "nobs_conv, nobs_oz, nobs_sat" in line:
                match = re.search(r'=\s*(\d+)\s+(\d+)\s+(\d+)', line)
                if match:
                    obs['conv_count'] = int(match.group(1))
                    obs['ozone_count'] = int(match.group(2))
                    obs['radiance_count'] = int(match.group(3))

            # Per-type read/keep table printed below the convinfo header.
            if "columns below obtype,nread, nkeep" in line and i + 1 < len(self.content):
                j = i + 1
                # The table ends at the first blank line.
                while j < len(self.content) and self.content[j].strip():
                    parts = self.content[j].split()
                    if len(parts) >= 3:
                        try:
                            obs_type = parts[0]
                            nread = int(parts[1])
                            nkeep = int(parts[2])
                            obs['types_configured'][obs_type] = {
                                'read': nread,
                                'keep': nkeep
                            }
                        except ValueError:
                            # Non-numeric row: we have run past the table.
                            break
                    j += 1

            # Summary of observations used vs skipped.
            if "obs skipped" in line and "used" in line:
                match = re.search(r'(\d+)\s+out of\s+(\d+)\s+obs skipped,\s+(\d+)\s+used', line)
                if match:
                    obs['skipped'] = int(match.group(1))
                    obs['total_processed'] = int(match.group(2))
                    obs['used'] = int(match.group(3))
                    
    def extract_innovation_stats(self):
        """Parse the prior and posterior innovation-statistics tables."""
        # Row columns: region, obtype, nobs, bias, innov stdev,
        # sqrt(S+R), sqrt(S), sqrt(R)
        row_re = re.compile(
            r'(NH|TR|GL|SH)\s+all\s+(\w+)\s+(\d+)\s+'
            r'([\d.-]+E[+-]\d+)\s+([\d.-]+E[+-]\d+)\s+'
            r'([\d.-]+E[+-]\d+)\s+([\d.-]+E[+-]\d+)\s+([\d.-]+E[+-]\d+)'
        )
        # Which table we are currently inside; None before any header.
        section = None

        for line in self.content:
            if "innovation statistics for prior" in line:
                section = 'prior'
                continue
            if "innovation statistics for posterior" in line:
                section = 'posterior'
                continue
            if section is None:
                continue

            m = row_re.search(line)
            if not m:
                continue

            stats_dict = self.analysis['innovation_stats'][section]
            per_region = stats_dict.setdefault(m.group(2), {})
            per_region[m.group(1)] = {
                'nobs': int(m.group(3)),
                'bias': float(m.group(4)),
                'innov_stdev': float(m.group(5)),
                'sqrt_s_plus_r': float(m.group(6)),
                'sqrt_s': float(m.group(7)),
                'sqrt_r': float(m.group(8)),
            }
                    
    def extract_core_update(self):
        """Record the update method and per-iteration localization/obs stats."""
        update = self.analysis['core_update']
        update['iterations'] = []

        covl_re = re.compile(r'mean covl_fact\s*=\s*([\d.E+-]+|NaN)')
        obs_re = re.compile(r'(\d+)\s+out of\s+(\d+)\s+obs skipped,\s+(\d+)\s+used')

        for line in self.content:
            lowered = line.lower()
            # LETKF wins whenever mentioned; plain EnKF is only recorded
            # while no method has been identified yet.
            if "letkf_update" in lowered:
                update['update_method'] = 'LETKF'
            elif "enkf_update" in lowered and 'update_method' not in update:
                update['update_method'] = 'EnKF'

            # Each "mean covl_fact" line marks one update iteration.
            if "mean covl_fact" in line:
                m = covl_re.search(line)
                if m:
                    covl_value = m.group(1)
                    iteration = {
                        'covl_fact': covl_value,
                        'is_nan': covl_value == 'NaN',
                    }
                    # Observation usage may appear on the same line.
                    obs_m = obs_re.search(line)
                    if obs_m:
                        iteration['obs_skipped'] = int(obs_m.group(1))
                        iteration['obs_total'] = int(obs_m.group(2))
                        iteration['obs_used'] = int(obs_m.group(3))
                    update['iterations'].append(iteration)
                    
    def extract_increments(self):
        """Capture min/max (and optionally mean) analysis increments per variable."""
        inc = self.analysis['increments']

        # Several stdout formats exist; the first pattern matching a line wins.
        # 3-group patterns yield min/max; 4-group patterns yield mean/min/max.
        patterns = (
            re.compile(r'ens\.\s*mean\s*anal\.\s*increment\s*min/max\s*(\w+)\s+([\d.E+-]+)\s+([\d.E+-]+)'),
            re.compile(r'increment\s+(\w+)\s+([\d.E+-]+)\s+([\d.E+-]+)\s+([\d.E+-]+)'),
            re.compile(r'(\w+)\s+increment:\s*mean\s*=\s*([\d.E+-]+)\s*min\s*=\s*([\d.E+-]+)\s*max\s*=\s*([\d.E+-]+)'),
        )

        for line in self.content:
            for pattern in patterns:
                m = pattern.search(line)
                if m is None:
                    continue
                groups = m.groups()
                if len(groups) == 3:
                    inc[groups[0]] = {
                        'min': float(groups[1]),
                        'max': float(groups[2]),
                    }
                elif len(groups) == 4:
                    inc[groups[0]] = {
                        'mean': float(groups[1]),
                        'min': float(groups[2]),
                        'max': float(groups[3]),
                    }
                break
                    
    def extract_performance(self):
        """Pull wall-clock time and peak memory usage out of the log."""
        perf = self.analysis['performance']
        wall_re = re.compile(r'wall time.*?(\d+\.?\d*)')
        first_int_re = re.compile(r'(\d+)')

        for line in self.content:
            # Wall-clock timing.
            if "wall time" in line.lower():
                m = wall_re.search(line)
                if m:
                    perf['wall_time_seconds'] = float(m.group(1))

            # Peak RSS, in kilobytes.
            # NOTE(review): this grabs the first integer on the line and
            # assumes it is the RSS value — confirm against the actual
            # getrusage log format.
            if "maximum resident set size" in line:
                m = first_int_re.search(line)
                if m:
                    perf['memory_kb'] = int(m.group(1))
                    
    def identify_issues(self):
        """Scan for fatal errors, NaNs, all-obs-skipped failures, and warnings."""
        issues = self.analysis['issues']
        skipped_re = re.compile(r'(\d+)\s+out of\s+(\d+)\s+obs skipped')

        for line in self.content:
            stripped = line.strip()

            # Fatal grid-dimension mismatch abort.
            if "Failed in subroutine getgridinfo" in line:
                issues.append({
                    'type': 'fatal_error',
                    'category': 'grid_mismatch',
                    'message': stripped,
                })

            # Any NaN anywhere in the output is suspicious.
            if "NaN" in line:
                issues.append({
                    'type': 'numerical_error',
                    'category': 'nan_detected',
                    'message': stripped,
                })

            # Skipped == total means assimilation did nothing.
            if "obs skipped" in line:
                m = skipped_re.search(line)
                if m and m.group(1) == m.group(2):
                    issues.append({
                        'type': 'data_assimilation_failure',
                        'category': 'all_obs_skipped',
                        'message': f"All {m.group(1)} observations were skipped",
                    })

            # Generic warnings.
            if "WARNING" in line:
                issues.append({
                    'type': 'warning',
                    'message': stripped,
                })
                
    def diagnose_problems(self):
        """Translate the extracted facts into problems and likely root causes."""
        diagnosis = self.analysis['diagnosis']
        problems = diagnosis['problems'] = []
        causes = diagnosis['root_causes'] = []

        ensemble = self.analysis['ensemble_info']
        collapsed = bool(ensemble.get('identical_members'))

        # Zero ensemble spread: the filter has no signal to work with.
        if collapsed:
            problems.append('Zero ensemble spread detected')
            causes.append('All ensemble members are identical')

        # Observations were processed but none assimilated.
        obs = self.analysis['observations']
        if obs.get('used') == 0 and obs.get('total_processed', 0) > 0:
            problems.append('No observations assimilated')
            if collapsed:
                causes.append(
                    'Zero spread causes division by zero in LETKF gain calculation'
                )

        # NaNs in the localization factor.
        update = self.analysis['core_update']
        iterations = update.get('iterations') or []
        if iterations:
            nan_count = sum(1 for it in iterations if it.get('is_nan'))
            if nan_count > 0:
                problems.append(f'NaN detected in {nan_count} iterations')
                causes.append(
                    'Mathematical instability in localization calculation'
                )

        # Input file grid disagrees with the namelist grid.
        grid = self.analysis['grid_info']
        if 'file_nx' in grid and 'namelist_nx' in grid:
            if grid['file_nx'] != grid['namelist_nx']:
                problems.append('Grid dimension mismatch')
                causes.append(
                    f"File has {grid['file_nx']} x-points but namelist expects {grid['namelist_nx']}"
                )
                
    def generate_report(self) -> str:
        """Generate a formatted analysis report.

        Renders every populated section of self.analysis into a sectioned
        plain-text report; call analyze() first so the data is populated.
        Returns the whole report as a single newline-joined string.
        """
        report = []
        report.append("=" * 80)
        report.append("EnKF STDOUT ANALYSIS REPORT")
        report.append("=" * 80)
        report.append("")

        # Execution Status
        report.append("EXECUTION STATUS:")
        # NOTE: `status` is also read by the OVERALL ASSESSMENT block at the
        # bottom of this method.
        status = self.analysis['execution_status']
        if status.get('completed'):
            report.append("  Status: COMPLETED")
            report.append(f"  Exit Code: {status.get('exit_code', 'N/A')}")
        else:
            report.append("  Status: FAILED OR INCOMPLETE")
        if 'wall_time_seconds' in status:
            report.append(f"  Wall Time: {status['wall_time_seconds']:.1f} seconds")
        report.append("")

        # Configuration (only keys that were actually extracted are shown)
        report.append("CONFIGURATION:")
        config = self.analysis['configuration']
        if 'analysis_time' in config:
            report.append(f"  Analysis Time: {config['analysis_time']}")
        if 'ensemble_size' in config:
            report.append(f"  Ensemble Size: {config['ensemble_size']}")
        if 'model_type' in config:
            report.append(f"  Model Type: {config['model_type']}")
        if 'mpi_tasks' in config:
            report.append(f"  MPI Tasks: {config['mpi_tasks']}")
        report.append("")

        # Grid Information
        report.append("GRID INFORMATION:")
        grid = self.analysis['grid_info']
        if all(k in grid for k in ['nx', 'ny', 'nz']):
            report.append(f"  Grid Dimensions: {grid['nx']} x {grid['ny']} x {grid['nz']}")
        # Flag a file/namelist x-dimension disagreement.
        if 'file_nx' in grid and 'namelist_nx' in grid:
            if grid['file_nx'] != grid['namelist_nx']:
                report.append(f"  ⚠️  MISMATCH: File has {grid['file_nx']} x-points, namelist expects {grid['namelist_nx']}")
        report.append("")

        # Ensemble Analysis
        report.append("ENSEMBLE ANALYSIS:")
        ensemble = self.analysis['ensemble_info']
        if ensemble.get('spread_by_variable'):
            report.append("  Ensemble Spread (sqrt(S)) by Variable:")
            for var_type, regions in ensemble['spread_by_variable'].items():
                if 'NH' in regions:  # Just show NH for brevity
                    report.append(f"    {var_type:8s}: {regions['NH']:.3e}")

            # Check for zero spread
            if ensemble.get('identical_members'):
                report.append("  ⚠️  WARNING: Near-zero ensemble spread detected!")
        report.append("")

        # Observations
        report.append("OBSERVATIONS:")
        obs = self.analysis['observations']
        if 'total' in obs:
            report.append(f"  Total Observations: {obs['total']:,}")
        if 'used' in obs and 'total_processed' in obs:
            report.append(f"  Observations Used: {obs['used']:,} of {obs['total_processed']:,}")
            report.append(f"  Observations Skipped: {obs.get('skipped', 0):,}")
            # Guard against division by zero when nothing was processed.
            usage_rate = obs['used'] / obs['total_processed'] * 100 if obs['total_processed'] > 0 else 0
            report.append(f"  Usage Rate: {usage_rate:.1f}%")
        report.append("")

        # Core Update Analysis
        report.append("CORE UPDATE ANALYSIS:")
        update = self.analysis['core_update']
        if update.get('iterations'):
            report.append(f"  Update Method: {update.get('update_method', 'Unknown')}")
            report.append(f"  Iterations Performed: {len(update['iterations'])}")

            # Check for NaN in the localization factor of any iteration
            nan_iterations = [i for i, it in enumerate(update['iterations']) if it.get('is_nan')]
            if nan_iterations:
                report.append(f"  ⚠️  NaN detected in iterations: {nan_iterations}")

            # Observation usage summary across all iterations
            total_used = sum(it.get('obs_used', 0) for it in update['iterations'])
            if total_used == 0:
                report.append("  ⚠️  NO OBSERVATIONS ASSIMILATED IN ANY ITERATION")
        report.append("")

        # Innovation Statistics Comparison (prior vs posterior, NH region only;
        # variables missing on one side default to 0.0)
        report.append("INNOVATION STATISTICS:")
        prior_stats = self.analysis['innovation_stats']['prior']
        post_stats = self.analysis['innovation_stats']['posterior']

        if prior_stats and post_stats:
            report.append("  Variable   Prior Bias    Post Bias    Prior Spread   Post Spread")
            report.append("  " + "-" * 65)
            for var_type in sorted(set(prior_stats.keys()) | set(post_stats.keys())):
                if var_type in prior_stats and 'NH' in prior_stats[var_type]:
                    prior_bias = prior_stats[var_type]['NH']['bias']
                    prior_spread = prior_stats[var_type]['NH']['sqrt_s']
                else:
                    prior_bias = prior_spread = 0.0

                if var_type in post_stats and 'NH' in post_stats[var_type]:
                    post_bias = post_stats[var_type]['NH']['bias']
                    post_spread = post_stats[var_type]['NH']['sqrt_s']
                else:
                    post_bias = post_spread = 0.0

                report.append(f"  {var_type:8s} {prior_bias:11.3e} {post_bias:11.3e} "
                            f"{prior_spread:11.3e} {post_spread:11.3e}")
        report.append("")

        # Analysis Increments (entries from min/max-only lines have no 'mean')
        if self.analysis['increments']:
            report.append("ANALYSIS INCREMENTS:")
            report.append("  Variable      Mean         Min          Max")
            report.append("  " + "-" * 45)
            for var, stats in sorted(self.analysis['increments'].items()):
                if 'mean' in stats:
                    report.append(f"  {var:10s} {stats['mean']:10.2e} {stats['min']:10.2e} {stats['max']:10.2e}")
                else:
                    report.append(f"  {var:10s}      -       {stats['min']:10.2e} {stats['max']:10.2e}")
            report.append("")

        # Problem Diagnosis
        # NOTE: `diagnosis` is also read by the OVERALL ASSESSMENT block below.
        diagnosis = self.analysis['diagnosis']
        if diagnosis['problems']:
            report.append("PROBLEM DIAGNOSIS:")
            report.append("  Detected Problems:")
            for problem in diagnosis['problems']:
                report.append(f"    • {problem}")
            if diagnosis['root_causes']:
                report.append("  Root Causes:")
                for cause in diagnosis['root_causes']:
                    report.append(f"    • {cause}")
            report.append("")

        # Issues and Warnings — summarized by type, not listed individually
        if self.analysis['issues']:
            report.append("ISSUES AND WARNINGS:")
            issue_counts = defaultdict(int)
            for issue in self.analysis['issues']:
                issue_counts[issue['type']] += 1

            for issue_type, count in sorted(issue_counts.items()):
                report.append(f"  {issue_type}: {count} occurrences")
            report.append("")

        # Overall Assessment — uses `status` and `diagnosis` bound above
        report.append("OVERALL ASSESSMENT:")
        if diagnosis['problems'] and 'No observations assimilated' in diagnosis['problems']:
            report.append("  ⚠️  EnKF completed but FAILED to assimilate any observations")
        elif status.get('completed'):
            report.append("  ✅ EnKF completed successfully")
        else:
            report.append("  ❌ EnKF failed to complete")

        report.append("")
        report.append("=" * 80)

        return '\n'.join(report)
    
    def save_json(self, output_file: str):
        """Save analysis results as JSON"""
        with open(output_file, 'w') as f:
            json.dump(self.analysis, f, indent=2)
            
    def run(self):
        """Run the complete analysis"""
        self.read_file()
        self.analyze()
        return self.generate_report()


def main():
    """CLI entry point: parse arguments, run the analyzer, emit report and JSON.

    Usage: analyze_enkf_stdout.py [<enkf_stdout_file>] [--json output.json]
    """
    argv = sys.argv[1:]
    json_file = None

    # The first non-flag positional argument is the stdout file; otherwise
    # fall back to an 'enkf_stdout' file sitting next to this script.
    if argv and not argv[0].startswith('--'):
        stdout_file = argv.pop(0)
    else:
        here = os.path.dirname(os.path.abspath(__file__))
        stdout_file = os.path.join(here, 'enkf_stdout')

    if '--json' in argv:
        try:
            json_file = argv[argv.index('--json') + 1]
        except (IndexError, ValueError):
            print("Usage: python analyze_enkf_stdout.py [<enkf_stdout_file>] [--json output.json]")
            sys.exit(1)

    analyzer = EnKFAnalyzer(stdout_file)

    try:
        print(analyzer.run())

        if json_file:
            # Drop plain warnings from the machine-readable output so it
            # stays focused on actionable issues.
            output_data = copy.deepcopy(analyzer.analysis)
            if 'issues' in output_data:
                output_data['issues'] = [
                    entry for entry in output_data['issues']
                    if entry.get('type') != 'warning'
                ]
            with open(json_file, 'w') as sink:
                json.dump(output_data, sink, indent=2)
            print(f"\nJSON analysis saved to: {json_file}")

    except Exception as e:
        print(f"Error analyzing file: {e}", file=sys.stderr)
        sys.exit(1)


# Run the CLI only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()