"""
    EnsembleObservations

EnKF-specific observation processing for GSI data assimilation system.
This module handles observation operations unique to ensemble methods, including
ensemble-based observation perturbations, innovation statistics computation, 
and bias correction updates. Based on GSI/EnKF Fortran implementations from
readconvobs.f90, readsatobs.f90, and readozobs.f90.

Key Differences from Variational Methods:
1. **Observation Perturbations**: EnKF requires perturbed observations for filter consistency
2. **Ensemble Statistics**: Innovation statistics computed from ensemble forecasts
3. **Bias Correction**: Ensemble-based bias coefficient updates
4. **Quality Control**: Different QC criteria for ensemble vs variational methods
5. **Multi-platform Support**: Integrated handling of conventional, satellite, and ozone observations

Mathematical Framework:
For EnKF consistency, observations are perturbed:
```
y_i = y + ε_i,  where ε_i ~ N(0, R)
```

Innovation statistics are computed as:
```
d_i = y - H(x_f^i)  (individual innovations)
d̄ = (1/N) Σ d_i    (ensemble mean innovation)  
S = (1/(N-1)) Σ (d_i - d̄)(d_i - d̄)ᵀ  (innovation covariance)
```

Bias correction update:
```
β^{new} = β^{old} + α * (y - H(x̄_f) - β^{old})
```
"""
module EnsembleObservations

using LinearAlgebra
using Statistics
using Random
using Distributions
using HDF5
using NetCDF
using ..GSICoreAnalysis: AbstractAnalysisConfig, AbstractStateVector
using ..StateVectors: StateVector
using ..ObservationOperators: ObservationData, apply_observation_operator

export EnKFObservationConfig, EnKFObservationData, ObservationPlatform
export InnovationStatistics, BiasCorrection, QualityControl
export process_enkf_observations, perturb_observations!, compute_innovation_stats
export update_bias_correction!, enkf_quality_control!, read_diag_files
export write_enkf_diagnostics, observation_operator_ensemble
export CONVENTIONAL, SATELLITE, OZONE, GPS, RADAR

# Observation platform types
@enum ObservationPlatform begin
    CONVENTIONAL = 1  # Conventional in-situ obs (sonde, surface, aircraft, ...)
    SATELLITE = 2     # Satellite radiances (AMSU-A/B, MHS, HIRS, AIRS, IASI)
    OZONE = 3         # Ozone retrievals/profiles (OMI, SBUV, GOME, ozonesonde)
    GPS = 4           # GPS radio occultation / bending angle
    RADAR = 5         # Radar radial/Doppler winds
end

"""
    EnKFObservationConfig{T<:AbstractFloat}

Configuration for EnKF observation processing.
"""
struct EnKFObservationConfig{T<:AbstractFloat}
    # Data paths and formats
    obs_path::String              # Path to observation files
    date_string::String           # Analysis date/time string
    netcdf_diag::Bool            # Use NetCDF format (vs binary)
    
    # Perturbation settings
    perturb_observations::Bool    # Enable observation perturbations
    perturbation_seed::Int       # Random seed for reproducibility
    inflation_factor::T          # Observation error inflation factor
    
    # Platform-specific settings
    platforms::Vector{ObservationPlatform}  # Observation platforms to process
    platform_weights::Dict{ObservationPlatform, T}  # Platform reliability weights
    
    # Quality control parameters
    error_limit_factor::T        # Error limit as multiple of prescribed error
    pressure_limit_low::T        # Lower pressure limit (Pa)
    pressure_limit_high::T       # Upper pressure limit (Pa)
    gross_error_threshold::T     # Gross error detection threshold
    
    # Innovation statistics
    compute_innovation_stats::Bool  # Calculate innovation statistics
    save_innovation_diag::Bool     # Save innovation diagnostics
    
    # Bias correction
    enable_bias_correction::Bool   # Enable bias correction updates
    bias_update_rate::T           # Bias correction learning rate
    bias_predictor_order::Int     # Order of bias predictor polynomials
    
    # Parallel processing
    nprocs::Int                   # Number of MPI processes
    proc_id::Int                  # Current process ID
end

"""
    EnKFObservationData{T<:AbstractFloat}

EnKF-specific observation data structure.
"""
struct EnKFObservationData{T<:AbstractFloat}
    # Basic observation information
    nobs::Int                           # Total number of observations
    obs_values::Vector{T}              # Observation values
    obs_errors::Vector{T}              # Observation error standard deviations
    obs_locations::Matrix{T}           # Observation locations (lat, lon, pressure)
    obs_times::Vector{T}               # Observation times
    obs_types::Vector{String}          # Observation type identifiers
    obs_platforms::Vector{ObservationPlatform}  # Platform types
    
    # Quality control flags
    qc_flags::Vector{Int}              # Quality control status
    gross_error_flags::Vector{Bool}    # Gross error detection flags
    
    # Ensemble-specific data
    ensemble_forecasts::Matrix{T}      # H(x_f^i) for each member (nobs × nmembers)
    perturbed_obs::Matrix{T}          # Perturbed observations (nobs × nmembers)
    innovation_matrix::Matrix{T}       # Innovation matrix (nobs × nmembers)
    
    # Bias correction information
    bias_predictors::Matrix{T}         # Bias predictor variables (nobs × npred)
    bias_coefficients::Vector{T}       # Current bias correction coefficients
    
    # Metadata
    platform_counts::Dict{ObservationPlatform, Int}  # Observations per platform
    rejection_counts::Dict{String, Int}               # QC rejection statistics
end

"""
    InnovationStatistics{T<:AbstractFloat}

Innovation statistics for ensemble diagnostics.
"""
struct InnovationStatistics{T<:AbstractFloat}
    # Basic statistics
    mean_innovation::Vector{T}         # Mean innovation by observation type
    innovation_variance::Vector{T}     # Innovation variance by observation type
    innovation_covariance::Matrix{T}   # Full innovation covariance matrix
    
    # Ensemble spread statistics  
    ensemble_spread::Vector{T}         # Ensemble spread in observation space
    spread_skill_ratio::Vector{T}      # Spread-skill relationship
    
    # Regional statistics
    innovation_nh::Vector{T}           # Northern hemisphere innovations
    innovation_sh::Vector{T}           # Southern hemisphere innovations
    innovation_tr::Vector{T}           # Tropical innovations
    
    # Platform-specific statistics
    platform_statistics::Dict{ObservationPlatform, Dict{String, T}}
    
    # Diagnostic metrics
    chi_squared::Vector{T}             # Chi-squared statistic by type
    degrees_of_freedom::Vector{Int}    # DOF for chi-squared test
    consistency_ratio::Vector{T}       # Observation-forecast consistency
end

"""
    BiasCorrection{T<:AbstractFloat}

Bias correction system for EnKF observations.
"""
struct BiasCorrection{T<:AbstractFloat}
    # Bias coefficients by observation type and platform
    coefficients::Dict{Tuple{String, ObservationPlatform}, Vector{T}}
    
    # Predictor information
    predictor_names::Vector{String}    # Names of predictor variables
    predictor_scaling::Vector{T}       # Scaling factors for predictors
    
    # Update statistics
    update_counts::Dict{String, Int}   # Number of updates per type
    update_history::Vector{T}         # History of bias updates
    
    # Quality metrics
    bias_reduction::Dict{String, T}    # Bias reduction by observation type
    prediction_skill::Dict{String, T}  # Bias correction prediction skill
end

"""
    QualityControl{T<:AbstractFloat}

Quality control results and statistics.
"""
struct QualityControl{T<:AbstractFloat}
    # Rejection flags and counts
    rejected_obs::Vector{Bool}         # Rejected observation flags
    rejection_reasons::Vector{String}  # Reason codes for rejections
    
    # Error statistics
    normalized_departures::Vector{T}   # Normalized O-F departures
    posterior_probabilities::Vector{T} # Bayesian QC probabilities
    
    # Platform-specific QC
    platform_qc::Dict{ObservationPlatform, Vector{Bool}}
    
    # Gross error detection
    gross_error_probability::Vector{T} # Probability of gross error
    outlier_threshold::T              # Current outlier detection threshold
end

"""
    process_enkf_observations(config, ensemble_forecasts, obs_data_files)

Main entry point for EnKF observation processing.

# Arguments
- `config`: EnKFObservationConfig
- `ensemble_forecasts`: Vector of StateVector ensemble members
- `obs_data_files`: List of observation data files

# Returns
- `EnKFObservationData`: Processed observation data
- `InnovationStatistics`: Innovation diagnostics
- `BiasCorrection`: Updated bias correction
- `QualityControl`: Quality control results
"""
function process_enkf_observations(
    config::EnKFObservationConfig{T},
    ensemble_forecasts::Vector{StateVector{T}},
    obs_data_files::Vector{String}
) where T<:AbstractFloat
    
    println("Processing EnKF observations...")
    println("  Date: $(config.date_string)")
    println("  Platforms: $(config.platforms)")
    println("  Files: $(length(obs_data_files))")
    
    # Initialize containers
    all_obs_data = EnKFObservationData{T}[]
    
    # Process each observation file/platform
    for (platform, obs_file) in zip(config.platforms, obs_data_files)
        println("  Processing $(platform) observations from $(obs_file)")
        
        # Read observation data
        if config.netcdf_diag
            obs_data = read_netcdf_observations(obs_file, platform, config)
        else
            obs_data = read_binary_observations(obs_file, platform, config)
        end
        
        # Apply observation operator to ensemble
        ensemble_obs = observation_operator_ensemble(obs_data, ensemble_forecasts, platform)
        
        # Perturb observations for EnKF consistency
        if config.perturb_observations
            perturbed_obs = perturb_observations!(obs_data, ensemble_obs, config)
        else
            perturbed_obs = repeat(obs_data.obs_values, 1, length(ensemble_forecasts))
        end
        
        # Quality control
        qc_results = enkf_quality_control!(obs_data, ensemble_obs, config)
        
        # Compute innovations
        innovations = compute_innovations(obs_data, ensemble_obs, perturbed_obs)
        
        # Create processed observation data
        processed_obs = EnKFObservationData(
            length(obs_data.obs_values),
            obs_data.obs_values,
            obs_data.obs_errors,
            obs_data.obs_locations,
            obs_data.obs_times,
            obs_data.obs_types,
            fill(platform, length(obs_data.obs_values)),
            qc_results.rejected_obs,
            qc_results.gross_error_probability .> 0.5,
            ensemble_obs,
            perturbed_obs,
            innovations,
            obs_data.bias_predictors,
            obs_data.bias_coefficients,
            Dict(platform => length(obs_data.obs_values)),
            Dict("total" => count(qc_results.rejected_obs))
        )
        
        push!(all_obs_data, processed_obs)
    end
    
    # Combine observations from all platforms
    combined_obs = combine_observation_data(all_obs_data)
    
    # Compute innovation statistics
    if config.compute_innovation_stats
        innovation_stats = compute_innovation_stats(combined_obs, config)
    else
        innovation_stats = nothing
    end
    
    # Update bias correction
    bias_correction = nothing
    if config.enable_bias_correction
        bias_correction = update_bias_correction!(combined_obs, config)
    end
    
    # Final quality control results
    final_qc = aggregate_quality_control(all_obs_data)
    
    return combined_obs, innovation_stats, bias_correction, final_qc
end

"""
    read_netcdf_observations(filename, platform, config)

Read observations from NetCDF diagnostic files.
"""
function read_netcdf_observations(
    filename::String,
    platform::ObservationPlatform,
    config::EnKFObservationConfig{T}
) where T<:AbstractFloat
    
    # Platform-specific observation type mappings
    obs_type_map = Dict(
        CONVENTIONAL => ["t", "q", "ps", "uv", "gps", "spd", "pw", "dw", "rw"],
        SATELLITE => ["amsua", "amsub", "mhs", "hirs", "airs", "iasi"],
        OZONE => ["omi", "sbuv", "gome", "ozonesonde"],
        GPS => ["gpsro", "gpsbend"],
        RADAR => ["rw", "dw"]
    )
    
    obs_types = get(obs_type_map, platform, ["unknown"])
    
    # Read NetCDF data
    ncid = NetCDF.open(filename, mode=NC_NOWRITE)
    
    try
        # Read basic observation data
        obs_vals = NetCDF.readvar(ncid, "obs", Float64)
        obs_errs = NetCDF.readvar(ncid, "oberr", Float64)
        
        # Location information
        lats = NetCDF.readvar(ncid, "lat", Float64)
        lons = NetCDF.readvar(ncid, "lon", Float64)
        pressures = NetCDF.readvar(ncid, "pres", Float64)
        
        # Time information (convert to analysis time relative)
        obs_times = NetCDF.readvar(ncid, "time", Float64)
        
        # Quality control information
        qc_flags = NetCDF.readvar(ncid, "qcflag", Int32)
        
        nobs = length(obs_vals)
        locations = Matrix{T}(undef, nobs, 3)
        locations[:, 1] = T.(lats)
        locations[:, 2] = T.(lons)  
        locations[:, 3] = T.(pressures)
        
        # Initialize bias predictors (simplified)
        bias_predictors = zeros(T, nobs, config.bias_predictor_order)
        if config.bias_predictor_order > 0
            # Polynomial predictors based on location
            for i in 1:nobs
                lat_norm = lats[i] / 90.0  # Normalize latitude
                for order in 1:config.bias_predictor_order
                    bias_predictors[i, order] = lat_norm^order
                end
            end
        end
        
        # Initialize bias coefficients
        bias_coefficients = zeros(T, config.bias_predictor_order)
        
        return (
            obs_values = T.(obs_vals),
            obs_errors = T.(obs_errs),
            obs_locations = locations,
            obs_times = T.(obs_times),
            obs_types = fill(string(platform), nobs),
            qc_flags = Vector{Int}(qc_flags),
            bias_predictors = bias_predictors,
            bias_coefficients = bias_coefficients
        )
        
    finally
        NetCDF.close(ncid)
    end
end

"""
    read_binary_observations(filename, platform, config)

Read observations from binary diagnostic files.
"""
function read_binary_observations(
    filename::String,
    platform::ObservationPlatform,
    config::EnKFObservationConfig{T}
) where T<:AbstractFloat
    
    # Implementation for binary file reading
    # This is a simplified version - actual implementation would need
    # to handle GSI binary format specifics
    
    println("  Reading binary observation file: $filename")
    
    # For now, create synthetic data structure
    # In practice, this would read the binary GSI diagnostic format
    nobs = 1000  # Placeholder
    
    locations = randn(T, nobs, 3)  # lat, lon, pressure
    locations[:, 1] .*= 90    # latitude range
    locations[:, 2] .*= 180   # longitude range  
    locations[:, 3] = abs.(locations[:, 3]) .* 50000 .+ 50000  # pressure range
    
    return (
        obs_values = randn(T, nobs),
        obs_errors = abs.(randn(T, nobs)) .+ T(0.1),
        obs_locations = locations,
        obs_times = zeros(T, nobs),
        obs_types = fill(string(platform), nobs),
        qc_flags = zeros(Int, nobs),
        bias_predictors = zeros(T, nobs, config.bias_predictor_order),
        bias_coefficients = zeros(T, config.bias_predictor_order)
    )
end

"""
    observation_operator_ensemble(obs_data, ensemble_forecasts, platform)

Apply observation operator to ensemble forecasts.
"""
function observation_operator_ensemble(
    obs_data::NamedTuple,
    ensemble_forecasts::Vector{StateVector{T}},
    platform::ObservationPlatform
) where T<:AbstractFloat
    
    nobs = length(obs_data.obs_values)
    nmembers = length(ensemble_forecasts)
    ensemble_obs = Matrix{T}(undef, nobs, nmembers)
    
    # Apply observation operator to each ensemble member
    for (imem, forecast) in enumerate(ensemble_forecasts)
        obs_equivalent = apply_observation_operator(forecast, obs_data.obs_locations, platform)
        ensemble_obs[:, imem] = obs_equivalent
    end
    
    return ensemble_obs
end

"""
    perturb_observations!(obs_data, ensemble_obs, config)

Generate perturbed observations for EnKF consistency.
"""
function perturb_observations!(
    obs_data::NamedTuple,
    ensemble_obs::Matrix{T},
    config::EnKFObservationConfig{T}
) where T<:AbstractFloat
    
    nobs, nmembers = size(ensemble_obs)
    perturbed_obs = Matrix{T}(undef, nobs, nmembers)
    
    # Set random seed for reproducibility
    Random.seed!(config.perturbation_seed)
    
    # Generate independent perturbations for each ensemble member
    for imem in 1:nmembers
        for iobs in 1:nobs
            obs_error = obs_data.obs_errors[iobs] * config.inflation_factor
            perturbation = randn(T) * obs_error
            perturbed_obs[iobs, imem] = obs_data.obs_values[iobs] + perturbation
        end
    end
    
    return perturbed_obs
end

"""
    compute_innovations(obs_data, ensemble_obs, perturbed_obs)

Compute innovation matrix (O - H(x)) for ensemble.
"""
function compute_innovations(
    obs_data::NamedTuple,
    ensemble_obs::Matrix{T},
    perturbed_obs::Matrix{T}
) where T<:AbstractFloat
    
    # Innovation = Perturbed Observation - Ensemble Forecast
    return perturbed_obs - ensemble_obs
end

"""
    compute_innovation_stats(obs_data, config)

Compute comprehensive innovation statistics.
"""
function compute_innovation_stats(
    obs_data::EnKFObservationData{T},
    config::EnKFObservationConfig{T}
) where T<:AbstractFloat
    
    nobs, nmembers = size(obs_data.innovation_matrix)
    
    # Basic innovation statistics
    mean_innovation = vec(mean(obs_data.innovation_matrix, dims=2))
    innovation_variance = vec(var(obs_data.innovation_matrix, dims=2))
    
    # Innovation covariance matrix (expensive for large problems)
    innovation_covariance = cov(obs_data.innovation_matrix')
    
    # Ensemble spread in observation space
    ensemble_spread = vec(std(obs_data.ensemble_forecasts, dims=2))
    
    # Spread-skill ratio
    innovation_std = sqrt.(innovation_variance)
    spread_skill_ratio = ensemble_spread ./ (innovation_std .+ 1e-10)
    
    # Regional statistics (simplified)
    nh_indices = obs_data.obs_locations[:, 1] .>= 20.0  # Northern Hemisphere
    sh_indices = obs_data.obs_locations[:, 1] .<= -20.0  # Southern Hemisphere  
    tr_indices = .!(nh_indices .|| sh_indices)  # Tropics
    
    innovation_nh = nh_indices ? mean_innovation[nh_indices] : T[]
    innovation_sh = sh_indices ? mean_innovation[sh_indices] : T[]
    innovation_tr = tr_indices ? mean_innovation[tr_indices] : T[]
    
    # Platform-specific statistics
    platform_stats = Dict{ObservationPlatform, Dict{String, T}}()
    for platform in unique(obs_data.obs_platforms)
        platform_indices = obs_data.obs_platforms .== platform
        if any(platform_indices)
            platform_innovations = mean_innovation[platform_indices]
            platform_stats[platform] = Dict(
                "mean" => mean(platform_innovations),
                "std" => std(platform_innovations),
                "count" => count(platform_indices)
            )
        end
    end
    
    # Chi-squared diagnostic
    chi_squared = mean_innovation.^2 ./ (innovation_variance .+ 1e-10)
    degrees_of_freedom = fill(nmembers - 1, nobs)
    
    # Consistency ratio (ensemble spread vs innovation spread)
    consistency_ratio = ensemble_spread.^2 ./ (innovation_variance .+ 1e-10)
    
    return InnovationStatistics(
        mean_innovation,
        innovation_variance,
        innovation_covariance,
        ensemble_spread,
        spread_skill_ratio,
        innovation_nh,
        innovation_sh, 
        innovation_tr,
        platform_stats,
        chi_squared,
        degrees_of_freedom,
        consistency_ratio
    )
end

"""
    enkf_quality_control!(obs_data, ensemble_obs, config)

Apply EnKF-specific quality control procedures.
"""
function enkf_quality_control!(
    obs_data::NamedTuple,
    ensemble_obs::Matrix{T},
    config::EnKFObservationConfig{T}
) where T<:AbstractFloat
    
    nobs = length(obs_data.obs_values)
    rejected_obs = fill(false, nobs)
    rejection_reasons = fill("", nobs)
    normalized_departures = zeros(T, nobs)
    posterior_probabilities = fill(T(1.0), nobs)
    gross_error_probability = zeros(T, nobs)
    
    # Ensemble mean forecast
    ensemble_mean = vec(mean(ensemble_obs, dims=2))
    ensemble_std = vec(std(ensemble_obs, dims=2))
    
    for iobs in 1:nobs
        # Normalized departure (O-F)/σ_o
        departure = obs_data.obs_values[iobs] - ensemble_mean[iobs]
        obs_error = obs_data.obs_errors[iobs]
        normalized_departures[iobs] = departure / obs_error
        
        # Gross error detection using ensemble spread
        total_error = sqrt(obs_error^2 + ensemble_std[iobs]^2)
        normalized_departure_total = abs(departure) / total_error
        
        # Probability of gross error (simple Bayesian approach)
        if normalized_departure_total > config.gross_error_threshold
            gross_error_probability[iobs] = min(normalized_departure_total / 10, T(0.9))
        end
        
        # Apply various QC checks
        qc_failed = false
        reason = ""
        
        # 1. Pressure bounds check
        pressure = obs_data.obs_locations[iobs, 3]
        if pressure < config.pressure_limit_low || pressure > config.pressure_limit_high
            qc_failed = true
            reason = "pressure_bounds"
        end
        
        # 2. Error limit check
        if obs_error > config.error_limit_factor * median(obs_data.obs_errors)
            qc_failed = true
            reason = "large_error"
        end
        
        # 3. Gross error check
        if gross_error_probability[iobs] > 0.5
            qc_failed = true
            reason = "gross_error"
        end
        
        # 4. Pre-existing QC flag
        if obs_data.qc_flags[iobs] != 0
            qc_failed = true
            reason = "preqc_flag"
        end
        
        rejected_obs[iobs] = qc_failed
        rejection_reasons[iobs] = reason
        
        # Posterior probability (1 - rejection probability)
        posterior_probabilities[iobs] = qc_failed ? T(0.0) : T(1.0) - gross_error_probability[iobs]
    end
    
    # Platform-specific QC results
    platform_qc = Dict{ObservationPlatform, Vector{Bool}}()
    platform_qc[CONVENTIONAL] = rejected_obs  # Simplified
    
    return QualityControl(
        rejected_obs,
        rejection_reasons,
        normalized_departures,
        posterior_probabilities,
        platform_qc,
        gross_error_probability,
        config.gross_error_threshold
    )
end

"""
    update_bias_correction!(obs_data, config)

Update bias correction coefficients using ensemble statistics.
"""
function update_bias_correction!(
    obs_data::EnKFObservationData{T},
    config::EnKFObservationConfig{T}
) where T<:AbstractFloat
    
    # Initialize bias correction structure
    coefficients = Dict{Tuple{String, ObservationPlatform}, Vector{T}}()
    predictor_names = ["constant", "latitude", "latitude^2"]
    predictor_scaling = ones(T, length(predictor_names))
    update_counts = Dict{String, Int}()
    update_history = T[]
    bias_reduction = Dict{String, T}()
    prediction_skill = Dict{String, T}()
    
    # Update bias coefficients for each observation type/platform combination
    unique_types = unique(obs_data.obs_types)
    unique_platforms = unique(obs_data.obs_platforms)
    
    for obs_type in unique_types
        for platform in unique_platforms
            key = (obs_type, platform)
            
            # Find observations of this type/platform
            type_indices = (obs_data.obs_types .== obs_type) .& 
                          (obs_data.obs_platforms .== platform) .&
                          .!obs_data.qc_flags
            
            if count(type_indices) < 10  # Need minimum observations
                continue
            end
            
            # Extract relevant data
            innovations = obs_data.innovation_matrix[type_indices, :]
            predictors = obs_data.bias_predictors[type_indices, :]
            old_coeffs = obs_data.bias_coefficients
            
            # Ensemble mean innovation
            mean_innovation = vec(mean(innovations, dims=2))
            
            # Update bias coefficients using least squares
            if size(predictors, 2) > 0 && size(predictors, 1) > size(predictors, 2)
                try
                    # Weighted least squares update
                    weights = 1 ./ (obs_data.obs_errors[type_indices].^2 .+ 1e-10)
                    W = Diagonal(weights)
                    
                    # Solve weighted normal equations
                    XTW = predictors' * W
                    XTWX = XTW * predictors
                    XTWy = XTW * mean_innovation
                    
                    # Regularized solution (add small diagonal for stability)
                    λ = T(1e-6) * tr(XTWX) / size(XTWX, 1)
                    new_coeffs = (XTWX + λ * I) \ XTWy
                    
                    # Apply learning rate
                    α = config.bias_update_rate
                    updated_coeffs = (1 - α) * old_coeffs + α * new_coeffs
                    
                    coefficients[key] = updated_coeffs
                    update_counts[string(key)] = count(type_indices)
                    
                    # Compute bias reduction
                    old_bias = predictors * old_coeffs
                    new_bias = predictors * updated_coeffs
                    old_rms = sqrt(mean((mean_innovation - old_bias).^2))
                    new_rms = sqrt(mean((mean_innovation - new_bias).^2))
                    bias_reduction[string(key)] = max(0, (old_rms - new_rms) / old_rms)
                    
                    push!(update_history, norm(updated_coeffs - old_coeffs))
                    
                catch err
                    # Handle numerical issues gracefully
                    coefficients[key] = old_coeffs
                    update_counts[string(key)] = 0
                    bias_reduction[string(key)] = T(0)
                end
            else
                coefficients[key] = old_coeffs
            end
        end
    end
    
    return BiasCorrection(
        coefficients,
        predictor_names,
        predictor_scaling,
        update_counts,
        update_history,
        bias_reduction,
        prediction_skill
    )
end

"""
    combine_observation_data(obs_data_list)

Combine observation data from multiple platforms.
"""
function combine_observation_data(obs_data_list::Vector{EnKFObservationData{T}}) where T<:AbstractFloat
    if isempty(obs_data_list)
        error("No observation data to combine")
    end
    
    if length(obs_data_list) == 1
        return obs_data_list[1]
    end
    
    # Concatenate all fields
    nobs_total = sum(data.nobs for data in obs_data_list)
    nmembers = size(obs_data_list[1].ensemble_forecasts, 2)
    
    # Initialize combined arrays
    combined_obs_values = Vector{T}(undef, nobs_total)
    combined_obs_errors = Vector{T}(undef, nobs_total)
    combined_locations = Matrix{T}(undef, nobs_total, 3)
    combined_times = Vector{T}(undef, nobs_total)
    combined_types = Vector{String}(undef, nobs_total)
    combined_platforms = Vector{ObservationPlatform}(undef, nobs_total)
    combined_qc_flags = Vector{Int}(undef, nobs_total)
    combined_gross_error = Vector{Bool}(undef, nobs_total)
    combined_ensemble_forecasts = Matrix{T}(undef, nobs_total, nmembers)
    combined_perturbed_obs = Matrix{T}(undef, nobs_total, nmembers)
    combined_innovations = Matrix{T}(undef, nobs_total, nmembers)
    
    # Copy data from each platform
    offset = 0
    combined_platform_counts = Dict{ObservationPlatform, Int}()
    combined_rejection_counts = Dict{String, Int}()
    
    for obs_data in obs_data_list
        indices = (offset + 1):(offset + obs_data.nobs)
        
        combined_obs_values[indices] = obs_data.obs_values
        combined_obs_errors[indices] = obs_data.obs_errors
        combined_locations[indices, :] = obs_data.obs_locations
        combined_times[indices] = obs_data.obs_times
        combined_types[indices] = obs_data.obs_types
        combined_platforms[indices] = obs_data.obs_platforms
        combined_qc_flags[indices] = obs_data.qc_flags
        combined_gross_error[indices] = obs_data.gross_error_flags
        combined_ensemble_forecasts[indices, :] = obs_data.ensemble_forecasts
        combined_perturbed_obs[indices, :] = obs_data.perturbed_obs
        combined_innovations[indices, :] = obs_data.innovation_matrix
        
        # Merge dictionaries
        merge!(+, combined_platform_counts, obs_data.platform_counts)
        merge!(+, combined_rejection_counts, obs_data.rejection_counts)
        
        offset += obs_data.nobs
    end
    
    # Create combined bias predictors (simplified - use first dataset's structure)
    first_data = obs_data_list[1]
    npred = size(first_data.bias_predictors, 2)
    combined_bias_predictors = zeros(T, nobs_total, npred)
    combined_bias_coefficients = copy(first_data.bias_coefficients)
    
    return EnKFObservationData(
        nobs_total,
        combined_obs_values,
        combined_obs_errors,
        combined_locations,
        combined_times,
        combined_types,
        combined_platforms,
        combined_qc_flags,
        combined_gross_error,
        combined_ensemble_forecasts,
        combined_perturbed_obs,
        combined_innovations,
        combined_bias_predictors,
        combined_bias_coefficients,
        combined_platform_counts,
        combined_rejection_counts
    )
end

"""
    aggregate_quality_control(obs_data_list)

Aggregate quality control results from multiple platforms.
"""
function aggregate_quality_control(obs_data_list::Vector{EnKFObservationData{T}}) where T<:AbstractFloat
    nobs_total = sum(data.nobs for data in obs_data_list)
    
    rejected_obs = Vector{Bool}(undef, nobs_total)
    rejection_reasons = Vector{String}(undef, nobs_total)
    normalized_departures = Vector{T}(undef, nobs_total)
    posterior_probabilities = Vector{T}(undef, nobs_total)
    gross_error_probability = Vector{T}(undef, nobs_total)
    
    # Aggregate platform-specific QC
    platform_qc = Dict{ObservationPlatform, Vector{Bool}}()
    
    offset = 0
    for obs_data in obs_data_list
        indices = (offset + 1):(offset + obs_data.nobs)
        
        rejected_obs[indices] = obs_data.qc_flags
        rejection_reasons[indices] = fill("", obs_data.nobs)  # Simplified
        normalized_departures[indices] = zeros(T, obs_data.nobs)  # Would compute from data
        posterior_probabilities[indices] = ones(T, obs_data.nobs)  # Simplified
        gross_error_probability[indices] = T.(obs_data.gross_error_flags)
        
        # Add to platform QC
        for platform in unique(obs_data.obs_platforms)
            if haskey(platform_qc, platform)
                append!(platform_qc[platform], obs_data.qc_flags[obs_data.obs_platforms .== platform])
            else
                platform_qc[platform] = obs_data.qc_flags[obs_data.obs_platforms .== platform]
            end
        end
        
        offset += obs_data.nobs
    end
    
    return QualityControl(
        rejected_obs,
        rejection_reasons,
        normalized_departures,
        posterior_probabilities,
        platform_qc,
        gross_error_probability,
        T(3.0)  # Default threshold
    )
end

"""
    write_enkf_diagnostics(obs_data, innovation_stats, bias_correction, filename)

Write EnKF observation diagnostics to file.
"""
function write_enkf_diagnostics(
    obs_data::EnKFObservationData{T},
    innovation_stats::Union{InnovationStatistics{T}, Nothing},
    bias_correction::Union{BiasCorrection{T}, Nothing},
    filename::String
) where T<:AbstractFloat
    
    println("Writing EnKF observation diagnostics to $filename")
    
    # Write to HDF5 format for comprehensive diagnostics
    h5open(filename, "w") do file
        # Basic observation information
        write(file, "nobs", obs_data.nobs)
        write(file, "obs_values", obs_data.obs_values)
        write(file, "obs_errors", obs_data.obs_errors)
        write(file, "obs_locations", obs_data.obs_locations)
        write(file, "qc_flags", obs_data.qc_flags)
        
        # Ensemble information
        write(file, "ensemble_forecasts", obs_data.ensemble_forecasts)
        write(file, "innovation_matrix", obs_data.innovation_matrix)
        
        # Innovation statistics
        if innovation_stats !== nothing
            stats_group = create_group(file, "innovation_statistics")
            write(stats_group, "mean_innovation", innovation_stats.mean_innovation)
            write(stats_group, "innovation_variance", innovation_stats.innovation_variance)
            write(stats_group, "ensemble_spread", innovation_stats.ensemble_spread)
            write(stats_group, "spread_skill_ratio", innovation_stats.spread_skill_ratio)
            write(stats_group, "chi_squared", innovation_stats.chi_squared)
            write(stats_group, "consistency_ratio", innovation_stats.consistency_ratio)
        end
        
        # Bias correction information
        if bias_correction !== nothing
            bias_group = create_group(file, "bias_correction")
            write(bias_group, "predictor_names", bias_correction.predictor_names)
            write(bias_group, "update_history", bias_correction.update_history)
            
            # Write bias reduction by type
            for (key, value) in bias_correction.bias_reduction
                write(bias_group, "bias_reduction_$key", value)
            end
        end
        
        # Platform counts
        platform_group = create_group(file, "platform_counts")
        for (platform, count) in obs_data.platform_counts
            write(platform_group, string(platform), count)
        end
    end
    
    println("EnKF diagnostics written successfully")
end

"""
    read_diag_files(config)

Read diagnostic files and return observation file list.
"""
function read_diag_files(config::EnKFObservationConfig{T}) where T<:AbstractFloat
    obs_files = String[]
    
    for platform in config.platforms
        # Construct filename based on platform and date
        if config.netcdf_diag
            filename = joinpath(config.obs_path, "diag_$(lowercase(string(platform)))_$(config.date_string).nc")
        else
            filename = joinpath(config.obs_path, "diag_$(lowercase(string(platform)))_$(config.date_string).bin")
        end
        
        if isfile(filename)
            push!(obs_files, filename)
        else
            @warn "Observation file not found: $filename"
        end
    end
    
    return obs_files
end

end # module EnsembleObservations