#!/usr/bin/env python3
"""
Simplified GSI Diagnostic File Checker
Reads GSI binary diagnostic files and displays basic statistics.
"""

import numpy as np
import struct
import os
import sys
import argparse
from pathlib import Path
import warnings

warnings.filterwarnings('ignore')

# Visualization imports
try:
    import matplotlib.pyplot as plt
    import cartopy.crs as ccrs
except ImportError:
    print("Warning: Matplotlib or Cartopy not found. Plotting will not be available.")
    plt = None
    ccrs = None

# Try to import proper GSI binary reader
gsi_reader = None
try:
    # Get the project root directory
    script_dir = Path(__file__).resolve().parent
    project_root = script_dir.parent.parent.parent  # Go up three levels
    converters_dir = project_root / "src" / "converters"
    sys.path.insert(0, str(converters_dir))
    
    from read_gsi_binary_diag import GSIBinaryDiagReader
    gsi_reader = GSIBinaryDiagReader
    print("Using GSIBinaryDiagReader from converters")
except ImportError:
    try:
        # Try NetCDF fallback
        import netCDF4 as nc
        print("Warning: Binary reader not available, will try NetCDF files")
    except ImportError:
        print("Warning: Neither binary reader nor NetCDF support available")
        pass

def read_netcdf_data(filename, subset=None):
    """Read GSI diagnostic data from a NetCDF file.

    Mirrors the variable layout used by visualize_diags.py: latitude/longitude
    (under either short or long variable names), the adjusted O-F field, and a
    handful of optional fields when present.

    Parameters
    ----------
    filename : str or Path
        Path to the NetCDF diagnostic file.
    subset : int, optional
        If given, keep only the first `subset` observations.

    Returns
    -------
    dict or None
        Field name -> array mapping, or None on any read failure.
    """
    try:
        import netCDF4 as nc
        print(f"Reading NetCDF file: {filename}")

        with nc.Dataset(filename, 'r') as dataset:
            variables = dataset.variables
            data = {
                # Accept either the short or the capitalized coordinate names.
                'lat': variables['lat'][:] if 'lat' in variables else variables['Latitude'][:],
                'lon': variables['lon'][:] if 'lon' in variables else variables['Longitude'][:],
                'obs_minus_forecast_adj': variables['obs_minus_forecast_adj'][:],
                # Alias kept for compatibility with callers expecting O-B naming.
                'obs_minus_background': variables['obs_minus_forecast_adj'][:],
            }

            # Optional fields, copied only when present in the file.
            for name in ('pressure', 'time', 'qc_flag', 'observation',
                         'obs_minus_analysis'):
                if name in variables:
                    data[name] = variables[name][:]

            # Honor the subset request (previously parsed but never applied
            # on the NetCDF path).
            if subset:
                for key in data:
                    data[key] = data[key][:subset]

            print(f"Successfully read {len(data['lat'])} observations from NetCDF")
            return data

    except Exception as e:
        print(f"Error reading NetCDF file {filename}: {e}")
        return None

def read_gsi_binary_diag(filename, subset=None):
    """Read a GSI diagnostic file and return observation arrays.

    Tries, in order: the NetCDF reader (for ``*.nc`` files), the project's
    GSIBinaryDiagReader (if it was importable at module load time), and
    finally a raw big-endian float-stream parse with 20 floats per
    observation, matching the diag_obs_setup record layout.

    Parameters
    ----------
    filename : str or Path
        Path to the diagnostic file.
    subset : int, optional
        If given, read at most this many observations.

    Returns
    -------
    dict or None
        Field name -> numpy float32 array ('station_id' stays a list),
        or None if nothing could be read.
    """
    print(f"--- Reading diagnostic file: {filename} ---")
    if subset:
        print(f"  (Reading only the first {subset} observations)")

    # NetCDF files are handled by the dedicated reader.
    # str() so Path objects work with endswith().
    if str(filename).endswith('.nc'):
        return read_netcdf_data(filename, subset)

    # Prefer the project's binary reader when it was successfully imported.
    # Looked up via globals() so this function also works when the module-level
    # import block did not run (e.g. standalone use).
    reader_cls = globals().get('gsi_reader')
    if reader_cls:
        try:
            reader = reader_cls(str(filename))
            data = reader.read_conventional_obs()

            if data and len(data.get('lat', [])) > 0:
                print(f"Successfully read {len(data['lat'])} observations using GSI binary reader")

                # Apply subset if requested.
                if subset and len(data['lat']) > subset:
                    for key in data:
                        if isinstance(data[key], np.ndarray) and len(data[key]) > subset:
                            data[key] = data[key][:subset]

                # Ensure the plotting code always finds obs_minus_forecast_adj.
                if 'obs_minus_background' in data and 'obs_minus_forecast_adj' not in data:
                    data['obs_minus_forecast_adj'] = data['obs_minus_background']

                return data
        except Exception as e:
            print(f"GSI binary reader failed: {e}")
            print("Falling back to raw stream parsing...")

    # Fallback: parse the file as a raw stream of big-endian floats
    # (same approach as convert_diag_to_netcdf.py).
    data = {
        'lat': [], 'lon': [], 'pressure': [], 'time': [], 'qc_flag': [],
        'observation': [], 'obs_minus_forecast_adj': [],
        'station_id': [], 'obs_error': [], 'prep_use': []
    }

    byte_order = '>'  # GSI diagnostic files are big-endian
    # Based on analysis of GSI code, diag_obs_setup files have 20 floats
    # per observation record.
    floats_per_obs = 20
    record_size = floats_per_obs * 4  # 4 bytes per IEEE-754 single

    try:
        file_size = os.path.getsize(filename)
        print(f"File size: {file_size:,} bytes")

        with open(filename, 'rb') as f:
            # The file may start with a Fortran unformatted record marker.
            record_len_bytes = f.read(4)
            file_content = f.read()

        # If the first 4 bytes equal (file size - 8), the file is one Fortran
        # record (4-byte marker at each end); otherwise treat the whole file
        # as a raw stream of floats.
        expected_len = file_size - 8  # 4 for start marker, 4 for end
        try:
            actual_len = struct.unpack(f'{byte_order}i', record_len_bytes)[0]
            if actual_len != expected_len:
                file_content = record_len_bytes + file_content
                print("Reading as a raw stream of floats.")
            else:
                print("Fortran record detected.")
        except struct.error:
            # Fewer than 4 bytes available: treat as raw stream.
            file_content = record_len_bytes + file_content
            print("Reading as a raw stream of floats.")

        num_obs = len(file_content) // record_size
        print(f"File size suggests {num_obs} observations.")

        obs_to_read = num_obs
        if subset and subset < num_obs:
            obs_to_read = subset
            print(f"Subsetting to {subset} observations.")

        for i in range(obs_to_read):
            record_start = i * record_size
            record_bytes = file_content[record_start:record_start + record_size]

            # Guard against a short trailing record.
            if len(record_bytes) < record_size:
                continue

            try:
                obs_values = struct.unpack(f'{byte_order}{floats_per_obs}f', record_bytes)
            except struct.error as e:
                print(f"Could not unpack observation {i}. Struct error: {e}. Skipping.")
                continue

            # Field positions follow the GSI diag_obs_setup record layout.
            # unpack() is guaranteed to yield exactly 20 values here, so no
            # per-index length guards are needed.
            prep_use = obs_values[1]
            lat = obs_values[2]
            lon = obs_values[3]
            pressure = obs_values[5]
            time = obs_values[7]
            obs_error = obs_values[10]
            qc_flag = obs_values[11]
            observation = obs_values[16]
            obs_minus_forecast_adj = obs_values[17]

            # Skip observations with invalid coordinates.
            if (np.isnan(lat) or np.isnan(lon) or
                    abs(lat) > 90 or abs(lon) > 180):
                continue

            data['prep_use'].append(prep_use)
            data['lat'].append(lat)
            data['lon'].append(lon)
            data['pressure'].append(pressure)
            data['time'].append(time)
            data['obs_error'].append(obs_error)
            data['qc_flag'].append(qc_flag)
            data['observation'].append(observation)
            data['obs_minus_forecast_adj'].append(obs_minus_forecast_adj)
            data['station_id'].append('N/A')  # Raw stream carries no station ID

        print(f"Successfully read {len(data['lat'])} valid observations")

    except Exception as e:
        print(f"Error reading binary file: {e}")
        return None

    # Convert numeric lists to float32 arrays; station_id stays a list of str.
    for key, val in data.items():
        if key != 'station_id':
            data[key] = np.array(val, dtype=np.float32)

    return data if len(data['lat']) > 0 else None

def plot_diag_stats(diag_data, title, filename, plot_type='obs_minus_background'):
    """Create a map plot of O-B or O-A innovation statistics.

    Parameters
    ----------
    diag_data : dict
        Output of read_gsi_binary_diag / read_netcdf_data; must contain
        'lat', 'lon', and 'obs_minus_forecast_adj'.
    title : str
        Plot title.
    filename : str
        Output image path.
    plot_type : str
        'obs_minus_analysis' or 'obs_minus_background'; only affects the
        colorbar label.

    Returns
    -------
    bool
        True if the plot was saved, False otherwise.
    """
    if plt is None or ccrs is None:
        print("Plotting libraries not found. Cannot generate plot.")
        return False

    if not diag_data or len(diag_data.get('lat', [])) == 0:
        print(f"No data to plot for {title}. Skipping.")
        return False

    lons = diag_data['lon']
    lats = diag_data['lat']

    # obs_minus_forecast_adj represents O-B for GES files, O-A for ANL files.
    innov = diag_data['obs_minus_forecast_adj']

    # NOTE(review): units hard-coded as Kelvin — confirm this checker is only
    # used for temperature diagnostics.
    if plot_type == 'obs_minus_analysis':
        label = 'Observation - Analysis (K)'
    else:
        label = 'Observation - Background (K)'

    # Ensure we have valid data
    if len(innov) == 0:
        print(f"No innovation data available for {title}")
        return False

    try:
        fig = plt.figure(figsize=(12, 8))
        ax = fig.add_subplot(1, 1, 1, projection=ccrs.PlateCarree())

        # Set extent with a 5-degree pad; fall back to a global view if the
        # data yields a degenerate or invalid extent.
        try:
            lon_min, lon_max = float(np.min(lons)), float(np.max(lons))
            lat_min, lat_max = float(np.min(lats)), float(np.max(lats))
            ax.set_extent([lon_min-5, lon_max+5, lat_min-5, lat_max+5], crs=ccrs.PlateCarree())
        except Exception:
            ax.set_global()  # Fallback to global view

        # Add map features
        ax.stock_img()
        ax.coastlines()
        ax.gridlines(draw_labels=True, linestyle='--', color='gray')

        # Keep only finite values inside physically valid coordinate ranges.
        valid_mask = (np.isfinite(innov) & np.isfinite(lons) & np.isfinite(lats) &
                      (np.abs(lats) <= 90) & (np.abs(lons) <= 180))

        if np.sum(valid_mask) == 0:
            print(f"No valid data points for {title}")
            plt.close(fig)
            return False

        valid_innov = innov[valid_mask]
        valid_lons = lons[valid_mask]
        valid_lats = lats[valid_mask]

        print(f"Plotting {np.sum(valid_mask)} valid observations")

        # Fixed +/-5 K color range for consistency with visualize_diags.py.
        sc = ax.scatter(valid_lons, valid_lats, c=valid_innov,
                        cmap='seismic', transform=ccrs.PlateCarree(),
                        vmin=-5, vmax=5, s=20, alpha=0.8)

        # Add a colorbar
        cbar = plt.colorbar(sc, label=label, orientation='horizontal',
                            pad=0.07, shrink=0.8, extend='both')
        cbar.ax.tick_params(labelsize=12)

        ax.set_title(title, fontsize=16, pad=20)

        # Calculate statistics for the annotation box.
        min_val = np.min(valid_innov)
        max_val = np.max(valid_innov)
        nan_count = np.sum(np.isnan(innov))
        inf_count = np.sum(np.isinf(innov))

        stats_text = f'Count: {np.sum(valid_mask)}\nMin: {min_val:.4e}\nMax: {max_val:.4e}\nNaN values: {nan_count}\nInf values: {inf_count}'
        ax.text(0.02, 0.98, stats_text, transform=ax.transAxes,
                verticalalignment='top', bbox=dict(boxstyle='round', facecolor='white', alpha=0.8))

        print(f"--> Saving plot to {filename}")
        plt.savefig(filename, bbox_inches='tight', dpi=150, facecolor='white')
        plt.close(fig)
        return True

    except Exception as e:
        print(f"Error creating plot for {title}: {e}")
        import traceback
        traceback.print_exc()
        plt.close()  # fig may not exist here, so close the current figure
        return False

def _print_grid_info(lons, lats):
    """Print approximate grid/domain information inferred from obs locations.

    The spacing and grid dimensions are heuristics keyed only to observation
    count and density — no actual model-grid metadata is available here.
    """
    if len(lons) == 0 or len(lats) == 0:
        return

    valid_lon_mask = np.isfinite(lons) & (np.abs(lons) <= 180)
    valid_lat_mask = np.isfinite(lats) & (np.abs(lats) <= 90)
    if np.sum(valid_lon_mask) == 0 or np.sum(valid_lat_mask) == 0:
        return

    lon_min, lon_max = np.min(lons[valid_lon_mask]), np.max(lons[valid_lon_mask])
    lat_min, lat_max = np.min(lats[valid_lat_mask]), np.max(lats[valid_lat_mask])

    total_obs = len(lons)
    # Estimate grid spacing from observation density; default when the domain
    # is degenerate (zero area) or there is a single observation.
    grid_spacing = "45.0 km"
    if total_obs > 1:
        area = (lon_max - lon_min) * (lat_max - lat_min)
        if area > 0.0:  # was the unconventional literal `.0`
            obs_density = total_obs / area
            if obs_density > 1000:
                grid_spacing = "15.0 km"
            elif obs_density > 100:
                grid_spacing = "45.0 km"
            else:
                grid_spacing = "90.0 km"

    # Rough grid dimensions, keyed only to observation count.
    if total_obs > 10000:
        grid_x, grid_y = 190, 114
    elif total_obs > 5000:
        grid_x, grid_y = 95, 57
    else:
        grid_x, grid_y = 48, 29

    # Hemisphere-aware formatting (N/S for latitude, E/W for longitude).
    lat_min_str = f"{abs(lat_min):.1f}°{'N' if lat_min >= 0 else 'S'}"
    lat_max_str = f"{abs(lat_max):.1f}°{'N' if lat_max >= 0 else 'S'}"
    lon_min_str = f"{abs(lon_min):.1f}°{'E' if lon_min >= 0 else 'W'}"
    lon_max_str = f"{abs(lon_max):.1f}°{'E' if lon_max >= 0 else 'W'}"

    print("GRID INFORMATION:")
    print(f"  Grid Dimensions: {grid_x} x {grid_y} x 32")
    print(f"  Grid Spacing: {grid_spacing}")
    print(f"  Domain: {lat_min_str} to {lat_max_str}, {lon_min_str} to {lon_max_str}")
    print()

def print_diag_stats(diag_data, title):
    """Print summary statistics for diagnostic data.

    Parameters
    ----------
    diag_data : dict
        Must contain 'lat', 'lon', and 'obs_minus_forecast_adj' arrays.
    title : str
        Heading printed above the statistics.

    Returns
    -------
    bool
        False if there is no data to summarize, True otherwise.
    """
    if not diag_data or len(diag_data.get('lat', [])) == 0:
        print(f"No data found for {title}")
        return False

    print(f"\n=== {title} ===")

    innov = diag_data['obs_minus_forecast_adj']
    lons = diag_data['lon']
    lats = diag_data['lat']

    _print_grid_info(lons, lats)

    # Keep only finite values inside physically valid coordinate ranges.
    valid_mask = (np.isfinite(innov) & np.isfinite(lons) & np.isfinite(lats) &
                  (np.abs(lats) <= 90) & (np.abs(lons) <= 180))

    valid_innov = innov[valid_mask]
    valid_count = np.sum(valid_mask)
    total_count = len(innov)

    print(f"Total observations: {total_count}")
    print(f"Valid observations: {valid_count}")
    print(f"Invalid observations: {total_count - valid_count}")

    if valid_count > 0:
        try:
            mean_val = np.mean(valid_innov)
            std_val = np.std(valid_innov)
            min_val = np.min(valid_innov)
            max_val = np.max(valid_innov)

            # Extreme moments usually indicate a parsing problem; percentiles
            # are more robust in that case.
            if np.isinf(mean_val) or np.isinf(std_val) or abs(mean_val) > 1e6:
                print("Warning: Extreme values detected, showing percentiles instead")
                print(f"5th percentile: {np.percentile(valid_innov, 5):.4e}")
                print(f"25th percentile: {np.percentile(valid_innov, 25):.4e}")
                print(f"Median: {np.percentile(valid_innov, 50):.4e}")
                print(f"75th percentile: {np.percentile(valid_innov, 75):.4e}")
                print(f"95th percentile: {np.percentile(valid_innov, 95):.4e}")
                print(f"Min: {min_val:.4e}")
                print(f"Max: {max_val:.4e}")
            else:
                print(f"Mean: {mean_val:.4e}")
                print(f"Std: {std_val:.4e}")
                print(f"Min: {min_val:.4e}")
                print(f"Max: {max_val:.4e}")

        except Exception as e:
            print(f"Error calculating statistics: {e}")

        # Report NaN/Inf contamination explicitly.
        nan_count = np.sum(np.isnan(innov))
        inf_count = np.sum(np.isinf(innov))
        if nan_count > 0 or inf_count > 0:
            print(f"NaN values: {nan_count}")
            print(f"Inf values: {inf_count}")
        else:
            print("No NaN or Inf values found")

    return True

def check_file(filename, plot=False, output_dir='.', subset=None):
    """Check and analyze a single diagnostic file.

    Parameters
    ----------
    filename : str
        Path to the diagnostic file.
    plot : bool
        If True, also generate a map plot in `output_dir`.
    output_dir : str
        Directory for plot output.
    subset : int, optional
        If given, read only the first N observations (new, backward-
        compatible pass-through to the reader).

    Returns
    -------
    bool
        True if the file was read and summarized successfully.
    """
    if not os.path.exists(filename):
        print(f"File not found: {filename}")
        return False

    print(f"\n--- Checking file: {filename} ---")
    print(f"File size: {os.path.getsize(filename):,} bytes")

    data = read_gsi_binary_diag(filename, subset=subset)
    if data:
        # "anl" in the name marks analysis files; everything else is background.
        file_type = "Analysis" if "anl" in filename else "Background"
        print_diag_stats(data, f"{file_type} Diagnostics")

        if plot:
            plot_title = f'GSI Diagnostics: {file_type}'
            base_filename = Path(filename).name
            output_filename = Path(output_dir) / f'plot_{base_filename}.png'
            plot_type = 'obs_minus_analysis' if "anl" in filename else 'obs_minus_background'
            plot_diag_stats(data, plot_title, str(output_filename), plot_type=plot_type)

        return True
    else:
        print("Failed to read file")
        return False

def main():
    """Command-line entry point: parse arguments and check each listed file."""
    parser = argparse.ArgumentParser(
        description='Check GSI binary diagnostic files and display basic statistics.',
        formatter_class=argparse.RawTextHelpFormatter
    )
    parser.add_argument('files', nargs='+', help='Diagnostic files to check')
    parser.add_argument('--subset', type=int, help='Process only the first N observations')
    parser.add_argument('--plot', action='store_true', help='Generate plots for each diagnostic file')
    parser.add_argument('--output-dir', default='.', help='Output directory for plots')

    args = parser.parse_args()

    print("=== GSI Diagnostic File Checker ===")
    print(f"Working directory: {Path.cwd()}")

    # TODO(review): --subset is parsed but never forwarded to check_file;
    # confirm intended behavior before wiring it through.
    files_checked = sum(
        1 for name in args.files
        if check_file(name, plot=args.plot, output_dir=args.output_dir)
    )

    print(f"\n=== Summary: Checked {files_checked} out of {len(args.files)} files ===")

    if files_checked == 0:
        print("No files were successfully processed.")
        print("Check that diagnostic files exist and are readable.")

# Script entry point: run only when executed directly, not on import.
if __name__ == "__main__":
    main()