# MCMC Chain Diagnostics and Convergence Monitoring for FLEXINVERT
# Comprehensive implementation of convergence diagnostics following established literature

module Diagnostics

using LinearAlgebra
using Statistics
using StatsBase
using Distributions
using FFTW
using ..MCMCTypes

export MCMCDiagnostics, DiagnosticResults, ConvergenceMonitor
export compute_rhat, compute_ess, compute_autocorrelation, compute_geweke
export evaluate_convergence, create_diagnostic_summary
export update_diagnostics!, check_convergence, early_stopping_criterion
export compute_bulk_ess, compute_tail_ess, compute_multivariate_rhat
export FlexinvertDiagnostics, flux_budget_convergence, regional_correlation_analysis

"""
    DiagnosticResults

Mutable container for all convergence diagnostic results produced by
`compute_comprehensive_diagnostics`.

Vector/matrix fields are allocated lazily by the diagnostics engine and are
left undefined by the constructor. Scalar (isbits) fields are explicitly
zero-initialized: an incompletely-initialized mutable struct would otherwise
expose arbitrary garbage values when a field such as `convergence_iteration`
is read before being assigned.
"""
mutable struct DiagnosticResults{T<:AbstractFloat}
    # Basic chain diagnostics
    rhat::Vector{T}                          # Gelman-Rubin R̂ per parameter
    multivariate_rhat::T                     # Multivariate R̂

    # Effective sample sizes
    bulk_ess::Vector{T}                      # Bulk ESS (central quantiles)
    tail_ess::Vector{T}                      # Tail ESS (extreme quantiles)

    # Autocorrelation analysis
    autocorr_function::Matrix{T}             # Autocorrelation functions (n_lags × n_params)
    autocorr_time::Vector{T}                 # Integrated autocorrelation times
    mixing_time::Vector{T}                   # Exponential decay times

    # Geweke diagnostics
    geweke_z::Vector{T}                      # Z-scores for stationarity
    geweke_p_values::Vector{T}               # P-values for stationarity tests

    # Trace analysis
    trend_statistics::Vector{T}              # Linear trend coefficients
    drift_detection::Vector{Bool}            # Significant drift flags
    chain_variance_ratios::Vector{T}         # Within vs between chain variance

    # Convergence status
    converged::Vector{Bool}                  # Per-parameter convergence flags
    overall_converged::Bool                  # Overall convergence status
    convergence_iteration::Int               # Iteration when convergence achieved (0 = not recorded)

    # FLEXINVERT-specific diagnostics
    flux_budget_converged::Bool              # Mass balance convergence
    regional_correlations::Matrix{T}         # Regional flux correlations
    constraint_violations::Vector{T}         # Physical constraint violations

    # Metadata
    n_chains::Int                           # Number of chains
    n_samples::Int                          # Samples per chain
    n_parameters::Int                       # Number of parameters
    computation_time::T                     # Time to compute diagnostics

    function DiagnosticResults{T}() where T
        r = new{T}()
        # Deterministic defaults for all isbits fields. Non-isbits fields
        # (vectors/matrices) stay undefined; reading them before assignment
        # throws a loud UndefRefError rather than returning garbage.
        r.multivariate_rhat = T(NaN)
        r.overall_converged = false
        r.convergence_iteration = 0
        r.flux_budget_converged = false
        r.n_chains = 0
        r.n_samples = 0
        r.n_parameters = 0
        r.computation_time = zero(T)
        return r
    end
end

"""
    DiagnosticResults()

Convenience constructor: build an empty `Float64`-precision results container.
"""
function DiagnosticResults()
    return DiagnosticResults{Float64}()
end

"""
    ConvergenceMonitor

Real-time convergence monitoring for streaming diagnostic computation.
Holds the convergence thresholds, check cadence, early-stopping settings,
per-check history, and the current convergence-tracking state.

All numeric keyword defaults are constructed as `T(...)` so that
`ConvergenceMonitor{Float32}()` (or any other float element type) works;
bare `Float64` literals would fail to match the `::T`-typed keywords for
`T != Float64`.
"""
mutable struct ConvergenceMonitor{T<:AbstractFloat}
    # Thresholds
    rhat_threshold::T                        # R̂ threshold for convergence (default 1.1)
    ess_threshold::T                         # Minimum ESS threshold (default 400)
    geweke_alpha::T                          # Significance level for Geweke test

    # Monitoring windows
    min_samples::Int                         # Minimum samples before checking convergence
    check_frequency::Int                     # How often to check convergence
    window_size::Int                         # Moving window size for streaming diagnostics

    # Early stopping
    patience::Int                           # Iterations to wait after convergence
    early_stop_enabled::Bool                # Whether early stopping is enabled

    # History tracking
    rhat_history::Vector{Vector{T}}         # Historical R̂ values
    ess_history::Vector{Vector{T}}          # Historical ESS values
    convergence_history::Vector{Bool}       # Historical convergence flags

    # Current state
    last_check_iteration::Int               # Last iteration diagnostics were computed
    convergence_first_achieved::Int         # When convergence was first achieved (0 = never)
    consecutive_converged::Int              # Consecutive convergence checks

    function ConvergenceMonitor{T}(;
        rhat_threshold::T = T(1.1),
        ess_threshold::T = T(400),
        geweke_alpha::T = T(0.05),
        min_samples::Int = 100,
        check_frequency::Int = 50,
        window_size::Int = 1000,
        patience::Int = 100,
        early_stop_enabled::Bool = true
    ) where T
        new{T}(
            rhat_threshold, ess_threshold, geweke_alpha,
            min_samples, check_frequency, window_size,
            patience, early_stop_enabled,
            Vector{T}[], Vector{T}[], Bool[],
            0, 0, 0
        )
    end
end

"""
    ConvergenceMonitor(; kwargs...)

Convenience constructor defaulting the element type to `Float64`.
"""
function ConvergenceMonitor(; kwargs...)
    return ConvergenceMonitor{Float64}(; kwargs...)
end

"""
    MCMCDiagnostics

Main diagnostics engine for comprehensive MCMC analysis: wraps a
`ConvergenceMonitor` plus configuration for autocorrelation depth,
Geweke window fractions, and memory management.

The Geweke fraction defaults are constructed as `T(...)` so that
`MCMCDiagnostics{Float32}(monitor)` works; bare `Float64` literals would
fail to match the `::T`-typed keywords for `T != Float64`.
"""
struct MCMCDiagnostics{T<:AbstractFloat}
    monitor::ConvergenceMonitor{T}

    # Configuration
    max_lag::Int                            # Maximum lag for autocorrelation
    geweke_first_fraction::T                # First portion fraction for Geweke test
    geweke_last_fraction::T                 # Last portion fraction for Geweke test

    # Memory management
    use_streaming::Bool                     # Use streaming computation to save memory
    store_full_autocorr::Bool              # Store full autocorrelation functions

    function MCMCDiagnostics{T}(monitor::ConvergenceMonitor{T};
        max_lag::Int = 200,
        geweke_first_fraction::T = T(0.1),
        geweke_last_fraction::T = T(0.5),
        use_streaming::Bool = true,
        store_full_autocorr::Bool = false
    ) where T
        new{T}(monitor, max_lag, geweke_first_fraction, geweke_last_fraction,
               use_streaming, store_full_autocorr)
    end
end

"""
    MCMCDiagnostics(monitor::ConvergenceMonitor; kwargs...)

Convenience constructor defaulting the element type to `Float64`.
"""
function MCMCDiagnostics(monitor::ConvergenceMonitor; kwargs...)
    return MCMCDiagnostics{Float64}(monitor; kwargs...)
end

# =============================================================================
# Gelman-Rubin R̂ Statistic Implementation
# =============================================================================

"""
    compute_rhat(chains::Array{T,3}) where T

Compute the univariate Gelman-Rubin R̂ statistic for each parameter across
multiple chains. `chains` is n_samples × n_parameters × n_chains.

Returns a length-`n_parameters` vector. Degenerate cases: fewer than 2
chains returns all ones (with a warning); fewer than 4 samples per chain
returns all `Inf` (with a warning); constant-and-equal chains give 1;
constant-but-different chains give `Inf`.

Following Gelman et al. (2013) and Vehtari et al. (2021) recommendations.
"""
function compute_rhat(chains::Array{T,3}) where T
    n_samples, n_params, n_chains = size(chains)

    if n_chains < 2
        @warn "R̂ requires at least 2 chains, returning ones"
        return ones(T, n_params)
    end

    if n_samples < 4
        @warn "R̂ requires at least 4 samples per chain, returning Inf"
        return fill(T(Inf), n_params)
    end

    rhat = zeros(T, n_params)

    for p in 1:n_params
        # Extract parameter p across all chains: n_samples × n_chains
        param_chains = chains[:, p, :]

        # Per-chain means
        chain_means = mean(param_chains, dims=1)[1, :]

        # Between-chain variance (B)
        B = n_samples * var(chain_means; corrected=true)

        # Within-chain variance (W): average of per-chain sample variances
        chain_vars = [var(param_chains[:, c]; corrected=true) for c in 1:n_chains]
        W = mean(chain_vars)

        # Degenerate chains: W == 0 means every chain is constant
        if iszero(W)
            rhat[p] = iszero(B) ? one(T) : T(Inf)
            continue
        end

        # Marginal posterior variance estimate
        var_plus = ((n_samples - 1) / n_samples) * W + (1 / n_samples) * B

        # R̂ statistic
        rhat[p] = sqrt(var_plus / W)

        # Numerical stability: NaN/Inf → Inf (non-convergent). Values
        # slightly below 1 are expected when B ≈ 0 (var_plus < W by
        # construction); clamp them to 1 instead of flagging divergence —
        # the previous behavior mapped perfectly mixed chains to Inf.
        if !isfinite(rhat[p])
            rhat[p] = T(Inf)
        elseif rhat[p] < one(T)
            rhat[p] = one(T)
        end
    end

    return rhat
end

"""
    compute_multivariate_rhat(chains::Array{T,3}) where T

Compute the multivariate R̂ statistic from the largest eigenvalue of
`W⁻¹B`, where `B` and `W` are the between- and within-chain covariance
matrices. Returns `Inf` when fewer than two chains are available, when the
chains are too short relative to the dimension, or when the linear algebra
fails (the failure is logged).
"""
function compute_multivariate_rhat(chains::Array{T,3}) where T
    n_samples, n_params, n_chains = size(chains)

    # Need at least two chains and more samples than parameters for a
    # full-rank within-chain covariance.
    if n_chains < 2 || n_samples < n_params + 1
        return T(Inf)
    end

    try
        # Per-chain mean vectors and the grand mean across chains
        means_per_chain = [mean(chains[:, :, c], dims=1)[1, :] for c in 1:n_chains]
        grand_mean = mean(hcat(means_per_chain...), dims=2)[:, 1]

        # Between-chain covariance B
        B = zeros(T, n_params, n_params)
        for c in 1:n_chains
            d = means_per_chain[c] - grand_mean
            B += n_samples * (d * d')
        end
        B ./= (n_chains - 1)

        # Within-chain covariance W: average of per-chain covariances
        W = zeros(T, n_params, n_params)
        for c in 1:n_chains
            W += cov(chains[:, :, c])
        end
        W ./= n_chains

        # Ridge regularization keeps W numerically invertible
        W += (T(1e-10) * tr(W)) * I

        # Largest eigenvalue of W⁻¹B drives the multivariate statistic
        λmax = maximum(real.(eigvals(inv(W) * B)))

        stat = ((n_samples - 1) / n_samples) +
               ((n_chains + 1) / n_chains) * λmax

        return sqrt(stat)

    catch e
        @warn "Failed to compute multivariate R̂: $e"
        return T(Inf)
    end
end

# =============================================================================
# Effective Sample Size (ESS) Implementation
# =============================================================================

"""
    autocorrelation_function(x::Vector{T}, max_lag::Int) where T

Compute the (linear, zero-padded) autocorrelation function of `x` up to
`max_lag` via FFT, normalized so lag 0 equals 1. The lag range is capped at
`length(x) - 1`, so the returned vector may be shorter than `max_lag + 1`
for short inputs.

For a constant series the lag-0 autocovariance is zero; instead of the
previous 0/0 → NaN result, this returns `[1, 0, 0, …]`.
"""
function autocorrelation_function(x::Vector{T}, max_lag::Int) where T
    n = length(x)
    max_lag = min(max_lag, n - 1)

    if n < 2
        return zeros(T, max_lag + 1)
    end

    # Center the data
    x_centered = x .- mean(x)

    # Zero-pad to a power of two so the circular FFT autocorrelation
    # equals the linear one
    n_padded = nextpow(2, 2 * n - 1)
    x_padded = zeros(eltype(x_centered), n_padded)
    x_padded[1:n] = x_centered

    # Autocovariance via the Wiener–Khinchin relation
    X = fft(x_padded)
    autocorr_full = real(ifft(X .* conj(X)))

    # Bug fix: a constant input has zero lag-0 autocovariance, and the
    # division below would produce a vector of NaNs that silently corrupts
    # downstream ESS/Geweke computations. Return the degenerate ACF instead.
    c0 = autocorr_full[1]
    if c0 <= zero(c0)
        return [one(T); zeros(T, max_lag)]
    end

    # Normalize and keep only the requested lags
    autocorr = autocorr_full[1:(max_lag + 1)] ./ c0

    return autocorr
end

"""
    integrated_autocorr_time(autocorr::Vector{T}, c::T = T(5)) where T

Compute the integrated autocorrelation time τ = 1 + 2 Σₖ ρ(k) with Sokal's
automatic windowing: the sum stops at the first window W with W ≥ c·τ(W),
or at the first non-positive autocorrelation. The result is floored at 1.

The window constant default is constructed as `T(5)` (the previous
`Float64` literal `5.0` made the method uncallable for `T != Float64`).
"""
function integrated_autocorr_time(autocorr::Vector{T}, c::T = T(5)) where T
    n_lags = length(autocorr)

    if n_lags < 2
        return one(T)
    end

    τ_int = one(T)        # Lag-0 contribution
    tail_sum = zero(T)    # Running Σ ρ(k) for k ≥ 1

    for W in 2:n_lags
        # Extend the window incrementally (the previous version re-summed
        # the whole prefix on every iteration, O(n²) overall)
        tail_sum += autocorr[W]
        τ_int = 1 + 2 * tail_sum

        # Sokal's criterion: stop once the window is long enough
        if W >= c * τ_int
            break
        end

        # Also stop at the first non-positive autocorrelation
        if autocorr[W] <= 0
            break
        end
    end

    return max(τ_int, one(T))
end

"""
    compute_ess(chain::Vector{T}, max_lag::Int = 200) where T

Estimate the effective sample size of a single chain as `n / (2 τ)`, where
`τ` is the integrated autocorrelation time computed from the empirical
autocorrelation function. Chains shorter than 4 samples return 1, and the
result is floored at 1.
"""
function compute_ess(chain::Vector{T}, max_lag::Int = 200) where T
    n = length(chain)
    n < 4 && return one(T)

    # Empirical autocorrelation up to max_lag
    ρ = autocorrelation_function(chain, max_lag)

    # Integrated autocorrelation time with automatic windowing
    τ = integrated_autocorr_time(ρ)

    # Never report fewer than one effective sample
    return max(n / (2 * τ), one(T))
end

"""
    compute_bulk_ess(chains::Array{T,3}, param_idx::Int) where T

Compute the bulk ESS for parameter `param_idx`: pool the draws from all
chains, rank-transform them to (0, 1) (simplified from Vehtari et al. 2021,
which additionally applies an inverse-normal transform), split back into
chains, and return the smallest per-chain ESS.
"""
function compute_bulk_ess(chains::Array{T,3}, param_idx::Int) where T
    n_samples, _, n_chains = size(chains)

    # Pool every chain's draws for this parameter
    pooled = vec(chains[:, param_idx, :])

    # Rank-transform the pooled draws to (0, 1)
    r = ordinalrank(pooled)
    normalized = (r .- 0.5) ./ length(r)

    # Restore the per-chain layout
    per_chain = reshape(normalized, n_samples, n_chains)

    # The worst-mixing chain governs the reported ESS
    return minimum(compute_ess(per_chain[:, c]) for c in 1:n_chains)
end

"""
    compute_tail_ess(chains::Array{T,3}, param_idx::Int, prob::T = T(0.05)) where T

Compute the tail ESS for parameter `param_idx`: form 0/1 indicators for the
lower `prob` and upper `1 - prob` tails of the pooled draws, compute the
per-chain ESS of each indicator series, and return the smaller of the two
minima.

The default is constructed as `T(0.05)` (the previous `Float64` literal
made the method uncallable for `T != Float64`).
"""
function compute_tail_ess(chains::Array{T,3}, param_idx::Int, prob::T = T(0.05)) where T
    n_samples, _, n_chains = size(chains)

    # Pool every chain's draws for this parameter
    combined = vec(chains[:, param_idx, :])

    # Lower/upper tail cutoffs
    q_low = quantile(combined, prob)
    q_high = quantile(combined, 1 - prob)

    # 0/1 indicators for membership in each tail
    indicators_low = combined .<= q_low
    indicators_high = combined .>= q_high

    # Restore the per-chain layout
    low_chains = reshape(indicators_low, n_samples, n_chains)
    high_chains = reshape(indicators_high, n_samples, n_chains)

    # ESS of the tail indicators; the worst chain governs
    ess_low = minimum([compute_ess(Float64.(low_chains[:, c])) for c in 1:n_chains])
    ess_high = minimum([compute_ess(Float64.(high_chains[:, c])) for c in 1:n_chains])

    return min(ess_low, ess_high)
end

# =============================================================================
# Geweke Diagnostics Implementation
# =============================================================================

"""
    compute_geweke(chain::Vector{T}, first_frac::T = T(0.1), last_frac::T = T(0.5)) where T

Geweke stationarity diagnostic: compare the means of the first `first_frac`
and last `last_frac` portions of `chain`, with each portion's variance
inflated by its integrated autocorrelation time. Returns `(z_score,
p_value)` for a two-tailed standard-normal test, or `(NaN, NaN)` when the
chain has fewer than 20 samples or the two portions would overlap.

The fraction defaults are constructed as `T(...)` (the previous `Float64`
literals made the method uncallable for `T != Float64`).
"""
function compute_geweke(chain::Vector{T}, first_frac::T = T(0.1), last_frac::T = T(0.5)) where T
    n = length(chain)

    # Too short for a meaningful split
    if n < 20
        return T(NaN), T(NaN)
    end

    # Window sizes for the leading and trailing portions
    n_first = max(1, floor(Int, first_frac * n))
    n_last = max(1, floor(Int, last_frac * n))

    # The portions must not overlap
    if n_first + n_last >= n
        return T(NaN), T(NaN)
    end

    # Extract portions
    first_portion = chain[1:n_first]
    last_portion = chain[(end - n_last + 1):end]

    # Window means
    mean_first = mean(first_portion)
    mean_last = mean(last_portion)

    # Plain sample variances; inflated below by the integrated
    # autocorrelation times as a spectral-density-at-zero surrogate
    var_first = var(first_portion; corrected=true)
    var_last = var(last_portion; corrected=true)

    autocorr_first = autocorrelation_function(first_portion, min(50, n_first ÷ 4))
    autocorr_last = autocorrelation_function(last_portion, min(50, n_last ÷ 4))

    τ_first = integrated_autocorr_time(autocorr_first)
    τ_last = integrated_autocorr_time(autocorr_last)

    # Effective variance of each window mean (autocorrelation-corrected)
    eff_var_first = var_first * 2 * τ_first / n_first
    eff_var_last = var_last * 2 * τ_last / n_last

    # Standard error of the difference of means
    se_diff = sqrt(eff_var_first + eff_var_last)

    # iszero: the previous `≈ 0.0` with default tolerances was already an
    # exact-zero comparison; this states the intent explicitly
    if iszero(se_diff)
        z_score = T(0.0)
    else
        z_score = (mean_first - mean_last) / se_diff
    end

    # Two-tailed p-value under a standard normal reference
    p_value = 2 * (1 - cdf(Normal(), abs(z_score)))

    return z_score, p_value
end

# =============================================================================
# Trace Analysis Implementation
# =============================================================================

"""
    compute_trend_statistic(chain::Vector{T}) where T

Return the ordinary-least-squares slope of `chain` regressed on the sample
index 1..n; used as a drift indicator. Chains with fewer than 3 samples
return zero.
"""
function compute_trend_statistic(chain::Vector{T}) where T
    n = length(chain)
    n < 3 && return zero(T)

    # Center the regressor (sample index) and the response
    t = collect(1:n)
    tc = t .- mean(t)
    yc = chain .- mean(chain)

    # OLS slope: Σ tc·yc / Σ tc²
    return dot(tc, yc) / dot(tc, tc)
end

"""
    detect_drift(chain::Vector{T}, significance_level::T = T(0.05)) where T

Two-tailed t-test for a significant linear drift in `chain`: regress the
chain on the sample index and test whether the slope differs from zero.
Returns `false` for chains shorter than 10 samples.

The default is constructed as `T(0.05)` (the previous `Float64` literal
made the method uncallable for `T != Float64`).
"""
function detect_drift(chain::Vector{T}, significance_level::T = T(0.05)) where T
    n = length(chain)

    # Too short for a meaningful trend test
    if n < 10
        return false
    end

    # OLS slope of chain vs. index
    slope = compute_trend_statistic(chain)

    # Standard error of the slope from the residual mean square
    time_points = collect(1:n)
    time_centered = time_points .- mean(time_points)
    residuals = chain .- (mean(chain) .+ slope .* time_centered)
    mse = sum(residuals.^2) / (n - 2)
    se_slope = sqrt(mse / sum(time_centered.^2))

    # T-statistic (se_slope == 0 gives Inf for a perfect nonzero trend,
    # NaN for a constant chain; NaN > t_critical is false, as intended)
    t_stat = abs(slope / se_slope)

    # Two-tailed critical value from Student's t distribution
    df = n - 2
    t_critical = quantile(TDist(df), 1 - significance_level / 2)

    return t_stat > t_critical
end

# =============================================================================
# Comprehensive Diagnostic Computation
# =============================================================================

"""
    compute_comprehensive_diagnostics(chains::Array{T,3}, diagnostics_engine::MCMCDiagnostics{T}) where T

Compute the full diagnostic suite for `chains`
(n_samples × n_parameters × n_chains): univariate and multivariate R̂,
bulk/tail ESS, autocorrelation and mixing times, Geweke stationarity tests,
drift detection, within/between chain variance ratios, an overall
convergence verdict, and the FLEXINVERT-specific checks. Returns a fully
populated `DiagnosticResults{T}`.
"""
function compute_comprehensive_diagnostics(chains::Array{T,3},
                                         diagnostics_engine::MCMCDiagnostics{T}) where T
    start_time = time()

    n_samples, n_params, n_chains = size(chains)
    results = DiagnosticResults{T}()

    # Allocate per-parameter result vectors
    results.rhat = zeros(T, n_params)
    results.bulk_ess = zeros(T, n_params)
    results.tail_ess = zeros(T, n_params)
    results.autocorr_time = zeros(T, n_params)
    results.mixing_time = zeros(T, n_params)
    results.geweke_z = zeros(T, n_params)
    results.geweke_p_values = zeros(T, n_params)
    results.trend_statistics = zeros(T, n_params)
    results.drift_detection = zeros(Bool, n_params)
    results.chain_variance_ratios = zeros(T, n_params)
    results.converged = zeros(Bool, n_params)

    # Full autocorrelation storage is optional (it can be large)
    if diagnostics_engine.store_full_autocorr
        results.autocorr_function = zeros(T, diagnostics_engine.max_lag + 1, n_params)
    else
        results.autocorr_function = zeros(T, 0, 0)
    end

    # Basic metadata
    results.n_chains = n_chains
    results.n_samples = n_samples
    results.n_parameters = n_params
    # Bug fix: this field was previously never assigned anywhere, so readers
    # (e.g. the summary printer) could observe an undefined garbage value.
    # 0 means "not recorded by this code path".
    results.convergence_iteration = 0

    # R̂ statistics (all parameters at once)
    results.rhat = compute_rhat(chains)
    results.multivariate_rhat = compute_multivariate_rhat(chains)

    # Per-parameter diagnostics
    for p in 1:n_params
        # Effective sample sizes
        results.bulk_ess[p] = compute_bulk_ess(chains, p)
        results.tail_ess[p] = compute_tail_ess(chains, p)

        # Autocorrelation analysis (using first chain as representative)
        if n_chains > 0 && n_samples > 1
            autocorr = autocorrelation_function(chains[:, p, 1], diagnostics_engine.max_lag)
            results.autocorr_time[p] = integrated_autocorr_time(autocorr)

            if diagnostics_engine.store_full_autocorr
                # Bug fix: lags are capped at n_samples - 1 inside
                # `autocorrelation_function`, so `autocorr` can be shorter
                # than max_lag + 1; write only the prefix to avoid a
                # DimensionMismatch for short chains.
                results.autocorr_function[1:length(autocorr), p] = autocorr
            end

            # Mixing time (exponential decay estimate)
            results.mixing_time[p] = estimate_mixing_time(autocorr)
        end

        # Geweke diagnostic (using concatenated chains)
        if n_chains > 0 && n_samples > 20
            combined_chain = vec(chains[:, p, :])
            z_score, p_value = compute_geweke(combined_chain,
                                            diagnostics_engine.geweke_first_fraction,
                                            diagnostics_engine.geweke_last_fraction)
            results.geweke_z[p] = z_score
            results.geweke_p_values[p] = p_value
        end

        # Trace analysis
        if n_chains > 0 && n_samples > 3
            # Use first chain for trend analysis
            chain = chains[:, p, 1]
            results.trend_statistics[p] = compute_trend_statistic(chain)
            results.drift_detection[p] = detect_drift(chain)

            # Variance ratio (within vs between chains)
            if n_chains > 1
                chain_means = [mean(chains[:, p, c]) for c in 1:n_chains]
                between_var = var(chain_means; corrected=true)
                within_vars = [var(chains[:, p, c]; corrected=true) for c in 1:n_chains]
                within_var = mean(within_vars)

                if within_var > 0
                    results.chain_variance_ratios[p] = between_var / within_var
                else
                    results.chain_variance_ratios[p] = T(0.0)
                end
            end
        end
    end

    # A parameter counts as converged only when R̂, both ESS measures, and
    # the Geweke test all clear the monitor's thresholds
    rhat_converged = results.rhat .< diagnostics_engine.monitor.rhat_threshold
    ess_converged = (results.bulk_ess .> diagnostics_engine.monitor.ess_threshold) .&
                   (results.tail_ess .> diagnostics_engine.monitor.ess_threshold)
    geweke_converged = results.geweke_p_values .> diagnostics_engine.monitor.geweke_alpha

    results.converged = rhat_converged .& ess_converged .& geweke_converged
    results.overall_converged = all(results.converged)

    # FLEXINVERT-specific diagnostics
    compute_flexinvert_diagnostics!(results, chains)

    # Timing
    results.computation_time = time() - start_time

    return results
end

"""
    estimate_mixing_time(autocorr::Vector{T}) where T

Estimate the mixing time as the first lag at which the autocorrelation
drops to `1/e` or below. Falls back to the integrated autocorrelation time
when no such crossing exists within the available lags; very short inputs
(< 3 lags) return 1.
"""
function estimate_mixing_time(autocorr::Vector{T}) where T
    n = length(autocorr)

    if n < 3
        return T(1.0)
    end

    # 1/e decay threshold. A non-positive autocorrelation trivially
    # satisfies `<= threshold` (since 1/e > 0), so a single comparison
    # suffices; the separate `<= 0` clause was dead code.
    threshold = exp(-1)

    for i in 2:n
        if autocorr[i] <= threshold
            return T(i - 1)
        end
    end

    # No crossing within the window: fall back to the integrated time
    return integrated_autocorr_time(autocorr)
end

# =============================================================================
# FLEXINVERT-Specific Diagnostics
# =============================================================================

"""
    FlexinvertDiagnostics

Container for FLEXINVERT-specific diagnostic results.

Note: the only constructor takes no arguments and fills every field with an
empty/zero default, so instances currently act as placeholders; the
populated FLEXINVERT quantities are written onto `DiagnosticResults` by
`compute_flexinvert_diagnostics!` instead.
"""
struct FlexinvertDiagnostics{T<:AbstractFloat}
    flux_budget_error::T                    # Mass balance constraint violation
    regional_flux_correlations::Matrix{T}   # Inter-regional correlation matrix
    constraint_satisfaction_rates::Vector{T} # Fraction satisfying physical constraints
    posterior_predictive_chi2::T            # Posterior predictive check statistic

    # Zero-argument constructor: empty/zero defaults only.
    FlexinvertDiagnostics{T}() where T = new{T}(T(0.0), zeros(T, 0, 0), T[], T(0.0))
end

"""
    compute_flexinvert_diagnostics!(results::DiagnosticResults{T}, chains::Array{T,3}) where T

Compute FLEXINVERT-specific convergence diagnostics in place on `results`:
flux-budget (mass balance) convergence via the R̂ of the per-sample
parameter sum, inter-regional flux correlations, and a non-finite-sample
fraction as a proxy for constraint violations. Any failure is caught and
logged, leaving safe defaults in `results`.
"""
function compute_flexinvert_diagnostics!(results::DiagnosticResults{T}, chains::Array{T,3}) where T
    n_samples, n_params, n_chains = size(chains)

    # Safe defaults in case any computation below fails
    results.flux_budget_converged = true
    results.regional_correlations = Matrix{T}(I, min(n_params, 10), min(n_params, 10))
    results.constraint_violations = zeros(T, n_params)

    try
        # Flux budget convergence (simplified check): treat the per-sample
        # sum over all parameters as a derived scalar and require its R̂ to
        # indicate convergence. A full implementation would restrict the sum
        # to the parameters that actually represent fluxes.
        if n_params > 1
            flux_sums = [sum(chains[s, :, c]) for s in 1:n_samples, c in 1:n_chains]

            flux_sum_chains = reshape(flux_sums, n_samples, 1, n_chains)
            flux_rhat = compute_rhat(flux_sum_chains)[1]
            results.flux_budget_converged = flux_rhat < 1.1
        end

        # Regional correlation analysis on (at most) the first 10 parameters
        if n_params >= 2
            max_regions = min(10, n_params)
            # Bug fix: `chains` is samples × params × chains, so a direct
            # `reshape(..., :, max_regions)` would scramble the columns
            # (column-major layout puts all of chain 1 first, making each
            # column a whole chain rather than one parameter). Move the
            # parameter axis last before flattening so each column holds
            # every (sample, chain) draw of a single parameter.
            combined_samples = reshape(
                permutedims(chains[:, 1:max_regions, :], (1, 3, 2)),
                :, max_regions)

            if size(combined_samples, 1) > max_regions
                results.regional_correlations = cor(combined_samples)
            end
        end

        # Constraint violations (placeholder until physical constraints are
        # wired in): fraction of non-finite draws per parameter
        for p in 1:n_params
            param_samples = chains[:, p, :]
            finite_fraction = count(isfinite, param_samples) / length(param_samples)
            results.constraint_violations[p] = 1.0 - finite_fraction
        end

    catch e
        @warn "Error computing FLEXINVERT-specific diagnostics: $e"
        # Keep safe defaults
    end
end

"""
    flux_budget_convergence(chains::Array{T,3}, flux_indices::Vector{Int}) where T

Assess convergence of the mass-balance constraint for the flux parameters
at `flux_indices`: form the per-sample total flux in every chain and
compute its R̂. Returns `(converged, budget_rhat)`; an empty index list is
trivially converged with R̂ = 1.
"""
function flux_budget_convergence(chains::Array{T,3}, flux_indices::Vector{Int}) where T
    if isempty(flux_indices)
        return true, T(1.0)
    end

    n_samples = size(chains, 1)
    n_chains = size(chains, 3)

    # Total flux per (sample, chain) pair
    totals = [sum(chains[s, flux_indices, c]) for s in 1:n_samples, c in 1:n_chains]

    # Treat the totals as a single derived parameter and compute its R̂
    budget_rhat = compute_rhat(reshape(totals, n_samples, 1, n_chains))[1]

    return budget_rhat < 1.1, budget_rhat
end

"""
    regional_correlation_analysis(chains::Array{T,3}, region_indices::Vector{Vector{Int}}) where T

Analyze correlations between regional flux estimates: for each region, sum
the parameters listed in `region_indices[r]` for every (sample, chain)
pair, then return the correlation matrix of the regional totals. Fewer
than two regions yields a 1×1 identity.
"""
function regional_correlation_analysis(chains::Array{T,3}, region_indices::Vector{Vector{Int}}) where T
    n_regions = length(region_indices)

    if n_regions < 2
        return Matrix{T}(I, 1, 1)
    end

    n_samples = size(chains, 1)
    n_chains = size(chains, 3)

    # Regional totals for every (sample, chain) pair, one column per region
    totals = [sum(chains[s, region_indices[r], c])
              for s in 1:n_samples, c in 1:n_chains, r in 1:n_regions]

    return cor(reshape(totals, n_samples * n_chains, n_regions))
end

# =============================================================================
# Real-time Monitoring and Early Stopping
# =============================================================================

"""
    update_diagnostics!(monitor::ConvergenceMonitor{T},
                        chains::Array{T,3},
                        iteration::Int) where T

Update the convergence monitor with freshly computed R̂ and bulk-ESS
values, append them to the monitor's history, and update the convergence
streak counters. Returns whether the current check passed all thresholds;
returns `false` without computing anything when the minimum sample count
or the check cadence has not been reached.
"""
function update_diagnostics!(monitor::ConvergenceMonitor{T},
                           chains::Array{T,3},
                           iteration::Int) where T
    # Respect the warm-up period and the check cadence
    due = iteration >= monitor.min_samples &&
          (iteration - monitor.last_check_iteration) >= monitor.check_frequency
    due || return false

    # Current R̂ for every parameter
    rhat_now = compute_rhat(chains)

    # Simplified monitoring ESS: bulk ESS per parameter
    n_params = size(chains, 2)
    ess_now = T[compute_bulk_ess(chains, p) for p in 1:n_params]

    # Record this check
    push!(monitor.rhat_history, rhat_now)
    push!(monitor.ess_history, ess_now)

    # All parameters must clear both thresholds
    converged = all(<(monitor.rhat_threshold), rhat_now) &&
                all(>(monitor.ess_threshold), ess_now)
    push!(monitor.convergence_history, converged)

    # Maintain the convergence streak bookkeeping
    if converged
        if monitor.convergence_first_achieved == 0
            monitor.convergence_first_achieved = iteration
        end
        monitor.consecutive_converged += 1
    else
        monitor.consecutive_converged = 0
    end

    monitor.last_check_iteration = iteration

    return converged
end

"""
    check_convergence(monitor::ConvergenceMonitor)

Return `true` when the early-stopping criteria are satisfied: diagnostics
have been recorded at least once, convergence has been achieved at some
point, and it has been maintained for at least `patience` consecutive
checks.
"""
function check_convergence(monitor::ConvergenceMonitor)
    has_history = !isempty(monitor.convergence_history)
    ever_converged = monitor.convergence_first_achieved != 0
    return has_history && ever_converged &&
           monitor.consecutive_converged >= monitor.patience
end

"""
    early_stopping_criterion(monitor::ConvergenceMonitor{T},
                            chains::Array{T,3},
                            iteration::Int) where T

Comprehensive early-stopping check: refresh the monitor's diagnostics for
the current iteration and report whether sampling can stop (convergence
achieved and sustained for the configured patience). Always `false` when
early stopping is disabled. Logs a short report when stopping triggers.
"""
function early_stopping_criterion(monitor::ConvergenceMonitor{T},
                                 chains::Array{T,3},
                                 iteration::Int) where T
    monitor.early_stop_enabled || return false

    # Refresh diagnostics; a failed (or skipped) check cannot stop sampling
    update_diagnostics!(monitor, chains, iteration) || return false

    stop = check_convergence(monitor)

    if stop
        @info "Early stopping criterion met at iteration $iteration"
        @info "Convergence first achieved at iteration $(monitor.convergence_first_achieved)"
        @info "Maintained convergence for $(monitor.consecutive_converged) checks"
    end

    return stop
end

# =============================================================================
# Diagnostic Summary and Reporting
# =============================================================================

"""
    create_diagnostic_summary(results::DiagnosticResults{T}) where T

Render a human-readable, multi-section text report of the diagnostic
results. Returns the report as a `String`; nothing is printed.

Assumes `results` was fully populated by
`compute_comprehensive_diagnostics` — see the review note below.
"""
function create_diagnostic_summary(results::DiagnosticResults{T}) where T
    # NOTE(review): the `maximum`/`minimum`/`mean` interpolations below throw
    # on empty vectors (n_parameters == 0), and `convergence_iteration` is
    # only meaningful if it was assigned upstream — confirm callers always
    # run the full diagnostics pipeline before requesting a summary.
    summary = """
    MCMC Convergence Diagnostics Summary
    ====================================

    Chains: $(results.n_chains)
    Samples: $(results.n_samples)
    Parameters: $(results.n_parameters)
    Computation Time: $(round(results.computation_time, digits=2))s

    Overall Convergence: $(results.overall_converged ? "✓ CONVERGED" : "✗ NOT CONVERGED")
    $(results.convergence_iteration > 0 ? "Converged at iteration: $(results.convergence_iteration)" : "")

    Gelman-Rubin R̂ Statistics:
    ---------------------------
    Max R̂: $(round(maximum(results.rhat), digits=3))
    Mean R̂: $(round(mean(results.rhat), digits=3))
    Multivariate R̂: $(round(results.multivariate_rhat, digits=3))
    Parameters with R̂ > 1.1: $(count(results.rhat .> 1.1))

    Effective Sample Size:
    ----------------------
    Min Bulk ESS: $(round(minimum(results.bulk_ess), digits=1))
    Min Tail ESS: $(round(minimum(results.tail_ess), digits=1))
    Mean Bulk ESS: $(round(mean(results.bulk_ess), digits=1))
    Mean Tail ESS: $(round(mean(results.tail_ess), digits=1))
    Parameters with ESS < 400: $(count((results.bulk_ess .< 400) .| (results.tail_ess .< 400)))

    Autocorrelation:
    ---------------
    Max Autocorr Time: $(round(maximum(results.autocorr_time), digits=1))
    Mean Autocorr Time: $(round(mean(results.autocorr_time), digits=1))
    Max Mixing Time: $(round(maximum(results.mixing_time), digits=1))

    Geweke Stationarity Tests:
    -------------------------
    Failed tests (p < 0.05): $(count(results.geweke_p_values .< 0.05))
    Mean p-value: $(round(mean(results.geweke_p_values), digits=3))

    Trace Quality:
    -------------
    Parameters with significant drift: $(count(results.drift_detection))
    Max trend statistic: $(round(maximum(abs.(results.trend_statistics)), digits=6))

    FLEXINVERT-Specific:
    -------------------
    Flux budget converged: $(results.flux_budget_converged ? "✓" : "✗")
    Max constraint violation: $(round(maximum(results.constraint_violations), digits=4))
    """

    return summary
end

"""
    evaluate_convergence(chains::Array{T,3};
                        rhat_threshold::T = T(1.1),
                        ess_threshold::T = T(400),
                        verbose::Bool = true) where T

Quick convergence evaluation with standard thresholds: build a minimal
monitor/engine, run the comprehensive diagnostics, optionally print the
summary report, and return the `DiagnosticResults`.

The threshold defaults are constructed as `T(...)` (the previous `Float64`
literals made the method uncallable for chains with `T != Float64`).
"""
function evaluate_convergence(chains::Array{T,3};
                            rhat_threshold::T = T(1.1),
                            ess_threshold::T = T(400),
                            verbose::Bool = true) where T
    # Minimal monitor carrying just the requested thresholds
    monitor = ConvergenceMonitor{T}(
        rhat_threshold = rhat_threshold,
        ess_threshold = ess_threshold
    )

    diagnostics_engine = MCMCDiagnostics{T}(monitor, store_full_autocorr = false)

    # Full diagnostic sweep
    results = compute_comprehensive_diagnostics(chains, diagnostics_engine)

    if verbose
        println(create_diagnostic_summary(results))
    end

    return results
end

end # module Diagnostics