# MCMC (Markov Chain Monte Carlo) Implementation for FLEXINVERT.jl
# Advanced Bayesian inference using adaptive sampling methods

module MCMC

using ..CoreTypes: Domain, State, Observations
using ..Covariance: CovarianceMatrix
using ..Transformations
using ..ForwardModel: forward_model, calculate_model_concentrations
using LinearAlgebra
using Random
using Statistics
using Printf
using ProgressMeter
using Base.Threads

export MCMCConfig, MCMCResult, run_mcmc, AdaptiveProposal, DelayedRejection

"""
    MCMCConfig

Configuration settings for MCMC inversion.

# Fields
- `n_samples::Int`: Number of post-burn-in MCMC samples to generate
- `n_burnin::Int`: Number of burn-in samples to discard
- `n_chains::Int`: Number of chains to run
- `thin::Int`: Thinning interval for saved samples
- `proposal_method::Symbol`: Proposal method (:random_walk, :pcn, :mala, :adaptive)
- `adapt_interval::Int`: Interval for proposal adaptation
- `target_acceptance::Float64`: Target acceptance rate for adaptive proposals
- `step_size::Float64`: Initial proposal step size
- `adaptation_rate::Float64`: Rate at which the step size is adapted
- `max_adaptations::Int`: Maximum number of adaptation events
- `seed::Int`: Random seed for reproducibility
- `save_chains::Bool`: Whether to save individual chains
- `save_samples::Bool`: Whether to store samples in memory
- `diagnostics::Bool`: Whether to compute convergence diagnostics
- `delayed_rejection::Bool`: Enable delayed rejection (not read by `run_mcmc` in this file)
- `parallel_tempering::Bool`: Enable parallel tempering (not read by `run_mcmc` in this file)
- `temperature_schedule::Vector{Float64}`: Temperatures for parallel tempering
"""
Base.@kwdef mutable struct MCMCConfig
    # Basic MCMC parameters
    n_samples::Int = 1000
    n_burnin::Int = 200
    n_chains::Int = 1
    thin::Int = 1

    # Proposal method configuration
    proposal_method::Symbol = :adaptive
    adapt_interval::Int = 100
    target_acceptance::Float64 = 0.25

    # Algorithm-specific parameters
    step_size::Float64 = 0.1
    adaptation_rate::Float64 = 0.1
    max_adaptations::Int = 50

    # Reproducibility and output
    seed::Int = 12345
    save_chains::Bool = true
    save_samples::Bool = true
    diagnostics::Bool = true

    # Advanced options (declared but not yet consumed by the samplers below)
    delayed_rejection::Bool = false
    parallel_tempering::Bool = false
    temperature_schedule::Vector{Float64} = [1.0]
end

"""
    MCMCResult

Container for MCMC inversion results.

# Fields
- `samples::Matrix{Float64}`: Saved samples (n_saved × n_params); empty when
  sample saving is disabled
- `log_likelihoods::Vector{Float64}`: Log likelihood values for saved samples
- `log_priors::Vector{Float64}`: Log prior values for saved samples
- `acceptance_rates::Vector{Float64}`: Acceptance rate per chain
- `gelman_rubin::Float64`: Gelman-Rubin convergence diagnostic (NaN for a single chain)
- `ess::Vector{Float64}`: Effective sample size per parameter
- `autocorrelation_times::Matrix{Float64}`: Integrated autocorrelation times (n_params × 1)
- `chain_statistics::Dict`: Additional chain statistics
- `convergence_diagnostics::Dict`: Convergence diagnostic results
- `n_chains::Int`: Number of chains run
- `n_saved::Int`: Number of saved samples per chain
- `n_params::Int`: Number of control-space parameters
"""
struct MCMCResult
    samples::Matrix{Float64}
    log_likelihoods::Vector{Float64}
    log_priors::Vector{Float64}
    acceptance_rates::Vector{Float64}
    gelman_rubin::Float64
    ess::Vector{Float64}
    autocorrelation_times::Matrix{Float64}
    chain_statistics::Dict
    convergence_diagnostics::Dict
    n_chains::Int
    n_saved::Int
    n_params::Int
end

"""
    AbstractProposal

Abstract supertype for MCMC proposal distributions. Concrete subtypes
implement `propose(proposal, current_state, iteration)`.
"""
abstract type AbstractProposal end

"""
    RandomWalkProposal

Random walk Metropolis proposal with adaptive step size.

Declared `mutable` because `adapt_proposal!` rescales `step_size` and bumps
`n_adaptations` in place; with an immutable struct those assignments would
throw `setfield!` errors at runtime.

# Fields
- `step_size::Float64`: scalar multiplier applied to each perturbation
- `cov_matrix::Matrix{Float64}`: proposal scaling matrix (identity by default)
- `adaptation_rate::Float64`: adaptation rate parameter
- `n_adaptations::Int`: number of adaptation events performed so far
- `max_adaptations::Int`: cap on adaptation events
"""
mutable struct RandomWalkProposal <: AbstractProposal
    step_size::Float64
    cov_matrix::Matrix{Float64}
    adaptation_rate::Float64
    n_adaptations::Int
    max_adaptations::Int
end

"""
    RandomWalkProposal(n_params; step_size=0.1, adaptation_rate=0.1, max_adaptations=50)

Construct a random-walk proposal with identity scaling for `n_params` parameters.
"""
function RandomWalkProposal(n_params::Int; step_size=0.1, adaptation_rate=0.1, max_adaptations=50)
    # Identity scaling by default; adapt_proposal! tunes step_size in place later.
    cov_matrix = Matrix{Float64}(I, n_params, n_params)
    return RandomWalkProposal(step_size, cov_matrix, adaptation_rate, 0, max_adaptations)
end

"""
    PCNProposal

Preconditioned Crank-Nicolson proposal for improved efficiency.

# Fields
- `beta::Float64`: pCN mixing parameter; larger values take bolder moves
- `cov_matrix::Matrix{Float64}`: operator applied to the white-noise draw in
  `propose`. NOTE(review): for a proposal with covariance C this should be a
  square root of C (e.g. a Cholesky factor), but `initialize_proposal` passes
  `covariance.full` directly — confirm which is intended to be stored.
"""
struct PCNProposal <: AbstractProposal
    beta::Float64
    cov_matrix::Matrix{Float64}
end

# Keyword-friendly outer constructor; stores the matrix as given (no factorization).
function PCNProposal(cov_matrix::Matrix{Float64}; beta=0.8)
    return PCNProposal(beta, cov_matrix)
end

"""
    MALAProposal

Metropolis-Adjusted Langevin Algorithm proposal using gradient information.

Declared `mutable` because `adapt_proposal!` rescales `step_size` in place;
with an immutable struct that assignment would throw a `setfield!` error.

# Fields
- `step_size::Float64`: Langevin discretization step size
- `adaptation_rate::Float64`: adaptation rate parameter
"""
mutable struct MALAProposal <: AbstractProposal
    step_size::Float64
    adaptation_rate::Float64
end

"""
    MALAProposal(; step_size=0.1, adaptation_rate=0.1)

Construct a MALA proposal with the given tuning parameters.
"""
function MALAProposal(; step_size=0.1, adaptation_rate=0.1)
    return MALAProposal(step_size, adaptation_rate)
end

"""
    AdaptiveProposal

Adaptive proposal that automatically tunes based on acceptance history.

Wraps a base proposal; `propose` rescales the base proposal's step size
toward `target_acceptance` using the recorded accept/reject outcomes.

NOTE(review): `acceptance_history` grows without bound over a run (one entry
per iteration), and `min_step_size`/`max_step_size` are stored here but
`adapt_proposal!` uses hard-coded clamp bounds — confirm which should win.
"""
mutable struct AdaptiveProposal <: AbstractProposal
    base_proposal::Union{RandomWalkProposal, PCNProposal, MALAProposal}
    acceptance_history::Vector{Bool}  # one entry per MCMC iteration (true = accepted)
    target_acceptance::Float64        # desired acceptance rate (e.g. 0.25)
    adaptation_counter::Int           # number of adaptation events performed
    min_step_size::Float64            # intended lower bound for adapted step sizes
    max_step_size::Float64            # intended upper bound for adapted step sizes
end

"""
    AdaptiveProposal(base_proposal, target_acceptance=0.25)

Construct an adaptive wrapper with an empty history and step bounds [1e-6, 10.0].
"""
function AdaptiveProposal(base_proposal, target_acceptance=0.25)
    return AdaptiveProposal(
        base_proposal,
        Bool[],
        target_acceptance,
        0,
        1e-6,
        10.0
    )
end

"""
    propose(proposal::AbstractProposal, current_state::Vector{Float64}, iteration::Int)

Generate a proposed state from the current state.
"""
function propose(proposal::RandomWalkProposal, current_state::Vector{Float64}, iteration::Int)
    # Scaled Gaussian step: x' = x + h * (M * z) with z ~ N(0, I).
    noise = randn(length(current_state))
    step = proposal.cov_matrix * noise
    return current_state + proposal.step_size * step
end

function propose(proposal::PCNProposal, current_state::Vector{Float64}, iteration::Int)
    # pCN update: x' = sqrt(1 - beta^2) * x + beta * M * xi, with xi ~ N(0, I).
    # NOTE(review): for the move to target a N(0, C) reference measure, M must
    # be a square root of C, and the MH acceptance should use the likelihood
    # ratio only; the sampler below uses the full posterior ratio — verify intent.
    n = length(current_state)
    xi = randn(n)
    sqrt_term = sqrt(1 - proposal.beta^2)
    return sqrt_term * current_state + proposal.beta * (proposal.cov_matrix * xi)
end

function propose(proposal::MALAProposal, current_state::Vector{Float64}, iteration::Int)
    # Simplified MALA: the drift should be 0.5 * h^2 * grad(log posterior), but
    # no gradient is available here, so the drift is a zero vector and the move
    # reduces to a Gaussian random walk with standard deviation `step_size`.
    dim = length(current_state)
    drift = zeros(dim)  # placeholder for the log-posterior gradient
    noise = proposal.step_size * randn(dim)
    return current_state + 0.5 * proposal.step_size^2 * drift + noise
end

function propose(proposal::AdaptiveProposal, current_state::Vector{Float64}, iteration::Int)
    # Delegate the actual move to the wrapped base proposal.
    proposed_state = propose(proposal.base_proposal, current_state, iteration)

    # Adapt once per 100-iteration window. The original adapted on every call
    # once the history reached 100 entries, which compounds the multiplicative
    # factor each iteration and drives the step size straight to its bounds.
    history = proposal.acceptance_history
    if length(history) >= 100 && length(history) % 100 == 0
        recent_acceptance = mean(history[end-99:end])

        # Only adapt base proposals that support it (PCNProposal has no
        # adapt_proposal! method; calling it would raise a MethodError).
        if hasmethod(adapt_proposal!, Tuple{typeof(proposal.base_proposal), Float64})
            if recent_acceptance < proposal.target_acceptance * 0.9
                # Acceptance too low: steps are too large, shrink them.
                # (The original did the opposite, increasing the step size
                # when acceptance was low — an inverted adaptation rule.)
                adapt_proposal!(proposal.base_proposal, 0.9)
            elseif recent_acceptance > proposal.target_acceptance * 1.1
                # Acceptance too high: steps are too timid, grow them.
                adapt_proposal!(proposal.base_proposal, 1.1)
            end
        end

        proposal.adaptation_counter += 1
    end

    return proposed_state
end

"""
    adapt_proposal!(proposal, factor)

Adapt proposal parameters based on acceptance rate.

Multiplies the step size by `factor`, clamped to [1e-6, 10.0].

NOTE(review): this mutates `step_size` and `n_adaptations`, so it requires
`RandomWalkProposal` to be a `mutable struct`; on an immutable struct the
assignments throw at runtime — confirm against the struct definition.
"""
function adapt_proposal!(proposal::RandomWalkProposal, factor::Float64)
    # Clamp keeps the step size within fixed bounds regardless of factor.
    proposal.step_size = clamp(proposal.step_size * factor, 1e-6, 10.0)
    proposal.n_adaptations += 1
end

# MALAProposal variant: rescales only the step size (no adaptation counter).
# NOTE(review): requires MALAProposal to be a mutable struct; assigning to
# step_size on an immutable struct throws at runtime — check the definition.
function adapt_proposal!(proposal::MALAProposal, factor::Float64)
    proposal.step_size = clamp(proposal.step_size * factor, 1e-6, 10.0)
end

"""
    log_posterior(state::Vector{Float64}, observations::Observations,
                  domain::Domain, covariance::CovarianceMatrix)

Evaluate the log posterior density at `state` (control space).

The state is mapped to physical space, pushed through the forward model, and
scored with a diagonal Gaussian likelihood plus a standard-normal prior.

Returns the tuple `(log_posterior, log_likelihood, log_prior)`.
"""
function log_posterior(state::Vector{Float64}, observations::Observations,
                      domain::Domain, covariance::CovarianceMatrix)

    # Map control-space coefficients to physical space and run the forward model.
    physical_state = convert_to_physical(state, covariance)
    predicted = calculate_model_concentrations(observations, physical_state, domain)

    # Observation errors: prefer total errors, fall back to measurement errors.
    sigma = isempty(observations.total_errors) ? observations.measurement_errors :
            observations.total_errors

    misfit = observations.concentrations - predicted
    n_obs = length(observations.concentrations)

    # Diagonal Gaussian log likelihood (including normalization constants).
    log_likelihood = -0.5 * sum(abs2, misfit ./ sigma) -
                     0.5 * n_obs * log(2π) - sum(log.(sigma))

    # Standard-normal prior in control space.
    log_prior = -0.5 * dot(state, state) - 0.5 * length(state) * log(2π)

    return log_likelihood + log_prior, log_likelihood, log_prior
end

"""
    run_mcmc(observations::Observations, domain::Domain, covariance::CovarianceMatrix,
             config::MCMCConfig; initial_state=nothing)

Run MCMC inversion with specified configuration.

Seeds the global RNG from `config.seed`, builds the proposal distribution, and
dispatches to the single- or multi-chain driver. Returns an `MCMCResult`.
"""
function run_mcmc(observations::Observations, domain::Domain, covariance::CovarianceMatrix,
                  config::MCMCConfig; initial_state=nothing)

    # Seed the global RNG so runs are reproducible.
    Random.seed!(config.seed)

    # Problem dimensions.
    n_params = covariance.n_modes
    n_total_samples = config.n_samples + config.n_burnin
    n_saved = config.n_samples ÷ config.thin

    @info "Starting MCMC inversion"
    @info "Parameters: $n_params, Chains: $(config.n_chains)"
    @info "Total samples: $n_total_samples, Saved samples: $n_saved"

    # Default start is the prior mean (origin of the control space).
    start_state = initial_state === nothing ? zeros(n_params) : initial_state

    proposal = initialize_proposal(config, n_params, covariance)

    # One chain runs directly; several chains go through the multi-chain driver.
    return config.n_chains == 1 ?
        run_single_chain(observations, domain, covariance, config, proposal, start_state) :
        run_multiple_chains(observations, domain, covariance, config, proposal, start_state)
end

"""
    run_single_chain(observations, domain, covariance, config, proposal, initial_state)

Run a single MCMC chain.

Performs `n_samples + n_burnin` Metropolis-Hastings iterations from
`initial_state`, saving every `thin`-th post-burn-in state when
`config.save_samples` is set, then attaches single-chain diagnostics.
Returns an `MCMCResult` for this one chain.

NOTE(review): the reported acceptance rate is computed over ALL iterations,
including burn-in — confirm whether post-burn-in-only is wanted.
"""
function run_single_chain(observations::Observations, domain::Domain, covariance::CovarianceMatrix,
                         config::MCMCConfig, proposal::AbstractProposal, initial_state::Vector{Float64})

    n_params = length(initial_state)
    n_total_samples = config.n_samples + config.n_burnin
    n_saved = config.n_samples ÷ config.thin

    # Allocate storage (empty arrays when sample saving is disabled).
    if config.save_samples
        samples = zeros(n_saved, n_params)
        log_likelihoods = zeros(n_saved)
        log_priors = zeros(n_saved)
    else
        samples = zeros(0, n_params)
        log_likelihoods = zeros(0)
        log_priors = zeros(0)
    end

    # Initialize the chain at the starting state and score it once.
    current_state = copy(initial_state)
    current_log_post, current_log_like, current_log_prior = log_posterior(current_state, observations, domain, covariance)

    # Acceptance bookkeeping (counts include burn-in iterations).
    n_accepted = 0
    acceptance_history = Bool[]

    @info "Running single MCMC chain with $n_total_samples iterations"

    # Main Metropolis-Hastings loop.
    saved_idx = 1
    @showprogress 1 "MCMC Progress" for iteration in 1:n_total_samples
        # Generate a candidate state from the proposal distribution.
        proposed_state = propose(proposal, current_state, iteration)

        # Score the candidate.
        proposed_log_post, proposed_log_like, proposed_log_prior = log_posterior(proposed_state, observations, domain, covariance)

        # Log MH acceptance ratio (symmetric-proposal form: posterior ratio only).
        log_alpha = proposed_log_post - current_log_post

        # Accept/reject: log(u) < log_alpha with u ~ Uniform(0,1).
        if log(rand()) < log_alpha
            current_state = proposed_state
            current_log_post = proposed_log_post
            current_log_like = proposed_log_like
            current_log_prior = proposed_log_prior
            n_accepted += 1
            push!(acceptance_history, true)
        else
            push!(acceptance_history, false)
        end

        # Save the (possibly repeated) current state after burn-in, thinned.
        if iteration > config.n_burnin && (iteration - config.n_burnin) % config.thin == 0
            # saved_idx guard covers the case where thin does not divide n_samples.
            if config.save_samples && saved_idx <= n_saved
                samples[saved_idx, :] = current_state
                log_likelihoods[saved_idx] = current_log_like
                log_priors[saved_idx] = current_log_prior
                saved_idx += 1
            end
        end

        # Feed this iteration's outcome to the adaptive proposal's history.
        if isa(proposal, AdaptiveProposal)
            push!(proposal.acceptance_history, acceptance_history[end])
        end
    end

    # Overall acceptance rate (includes burn-in; see docstring note).
    acceptance_rate = n_accepted / n_total_samples

    # Single-chain convergence diagnostics (ESS, autocorrelation times).
    diagnostics = compute_diagnostics(samples, acceptance_rate, config)

    @info "MCMC completed successfully"
    @info "Final acceptance rate: $(round(acceptance_rate, digits=3))"
    @info "Effective sample size: $(round(diagnostics[:mean_ess], digits=1))"

    return MCMCResult(
        samples,
        log_likelihoods,
        log_priors,
        [acceptance_rate],
        get(diagnostics, :gelman_rubin, NaN),
        get(diagnostics, :ess, zeros(n_params)),
        get(diagnostics, :autocorrelation_times, zeros(n_params, 1)),
        Dict(:acceptance_rate => acceptance_rate, :n_accepted => n_accepted),
        diagnostics,
        1,
        n_saved,
        n_params
    )
end

"""
    run_multiple_chains(observations, domain, covariance, config, proposal, initial_state)

Run multiple MCMC chains sequentially.

Each chain gets its own seed (`config.seed + chain_id - 1`) and a randomly
perturbed copy of `initial_state`. Per-chain outputs are stacked for
multi-chain diagnostics (Gelman-Rubin, pooled ESS); the returned `MCMCResult`
carries the first chain's samples as the primary sample set.
"""
function run_multiple_chains(observations::Observations, domain::Domain, covariance::CovarianceMatrix,
                           config::MCMCConfig, proposal::AbstractProposal, initial_state::Vector{Float64})

    n_chains = config.n_chains
    n_params = length(initial_state)
    n_saved = config.n_samples ÷ config.thin

    @info "Running $n_chains parallel MCMC chains"

    # Per-chain storage; samples are (n_saved, n_params, n_chains).
    all_samples = zeros(n_saved, n_params, n_chains)
    all_log_likelihoods = zeros(n_saved, n_chains)
    all_log_priors = zeros(n_saved, n_chains)
    acceptance_rates = zeros(n_chains)

    # Run chains sequentially (could be parallelized in future).
    chain_results = []

    for chain_id in 1:n_chains
        @info "Running chain $chain_id/$n_chains"

        # Give each chain a distinct, reproducible seed. The original stored
        # the per-chain seed in the config but never applied it (only
        # `run_mcmc` seeds the RNG), so the per-chain seed was dead code.
        chain_config = deepcopy(config)
        chain_config.seed = config.seed + chain_id - 1
        Random.seed!(chain_config.seed)

        # Jitter the starting point so chains explore independently.
        chain_initial_state = initial_state + 0.1 * randn(n_params)

        # Run the chain with its own copy of the (possibly stateful) proposal.
        result = run_single_chain(observations, domain, covariance, chain_config,
                                deepcopy(proposal), chain_initial_state)

        # Store this chain's outputs.
        if config.save_samples
            all_samples[:, :, chain_id] = result.samples
            all_log_likelihoods[:, chain_id] = result.log_likelihoods
            all_log_priors[:, chain_id] = result.log_priors
        end

        acceptance_rates[chain_id] = result.acceptance_rates[1]
        push!(chain_results, result)

        @info "Chain $chain_id completed. Acceptance rate: $(round(acceptance_rates[chain_id], digits=3))"
    end

    # Report the first chain as the primary sample set.
    combined_samples = all_samples[:, :, 1]
    combined_log_likelihoods = all_log_likelihoods[:, 1]
    combined_log_priors = all_log_priors[:, 1]

    # Cross-chain convergence diagnostics.
    multi_chain_diagnostics = compute_multi_chain_diagnostics(all_samples, acceptance_rates, config)

    @info "All chains completed successfully"
    @info "Mean acceptance rate: $(round(mean(acceptance_rates), digits=3))"
    @info "Gelman-Rubin statistic: $(round(multi_chain_diagnostics[:gelman_rubin], digits=3))"

    return MCMCResult(
        combined_samples,
        combined_log_likelihoods,
        combined_log_priors,
        acceptance_rates,
        multi_chain_diagnostics[:gelman_rubin],
        multi_chain_diagnostics[:ess],
        multi_chain_diagnostics[:autocorrelation_times],
        Dict(
            :chain_acceptance_rates => acceptance_rates,
            :mean_acceptance_rate => mean(acceptance_rates),
            :individual_chain_stats => chain_results
        ),
        multi_chain_diagnostics,
        n_chains,
        n_saved,
        n_params
    )
end

"""
    initialize_proposal(config::MCMCConfig, n_params::Int, covariance::CovarianceMatrix)

Initialize proposal distribution based on configuration.

Builds the base proposal selected by `config.proposal_method` (unknown
methods, including `:adaptive`, fall back to a random walk); the `:adaptive`
method additionally wraps the base proposal in an `AdaptiveProposal`.
"""
function initialize_proposal(config::MCMCConfig, n_params::Int, covariance::CovarianceMatrix)
    method = config.proposal_method

    base_proposal = if method == :random_walk
        RandomWalkProposal(n_params; step_size=config.step_size)
    elseif method == :pcn
        PCNProposal(covariance.full; beta=0.8)
    elseif method == :mala
        MALAProposal(step_size=config.step_size)
    else
        # Default (also used as the base for :adaptive): random walk.
        RandomWalkProposal(n_params; step_size=config.step_size)
    end

    return method == :adaptive ?
        AdaptiveProposal(base_proposal, config.target_acceptance) : base_proposal
end

"""
    compute_diagnostics(samples::Matrix{Float64}, acceptance_rate::Float64, config::MCMCConfig)

Compute MCMC convergence diagnostics.

For each parameter, estimates the integrated autocorrelation time by summing
ACF lags until the ACF drops below 0.05 (or a quarter of the chain length is
reached), and derives the effective sample size as `n / tau`.
"""
function compute_diagnostics(samples::Matrix{Float64}, acceptance_rate::Float64, config::MCMCConfig)
    n_samples, n_params = size(samples)
    diagnostics = Dict()

    # Degenerate case: nothing was saved.
    if n_samples == 0
        diagnostics[:ess] = zeros(n_params)
        diagnostics[:mean_ess] = 0.0
        diagnostics[:autocorrelation_times] = zeros(n_params, 1)
        return diagnostics
    end

    ess = zeros(n_params)
    tau = zeros(n_params, 1)

    for j in 1:n_params
        acf = autocorrelation_function(samples[:, j])

        # Integrated autocorrelation time: tau = 1 + 2 * sum of early ACF lags.
        tau_int = 1.0
        last_lag = min(length(acf) - 1, n_samples ÷ 4)
        for lag in 1:last_lag
            acf[lag+1] < 0.05 && break
            tau_int += 2 * acf[lag+1]
        end

        tau[j] = tau_int
        ess[j] = n_samples / tau_int
    end

    diagnostics[:ess] = ess
    diagnostics[:mean_ess] = mean(ess)
    diagnostics[:autocorrelation_times] = tau
    diagnostics[:acceptance_rate] = acceptance_rate

    return diagnostics
end

"""
    compute_multi_chain_diagnostics(all_samples::Array{Float64, 3}, acceptance_rates::Vector{Float64}, config::MCMCConfig)

Compute multi-chain convergence diagnostics.

Returns a Dict with the maximum per-parameter Gelman-Rubin statistic
(`:gelman_rubin`, NaN for fewer than 2 chains), a per-chain effective sample
size (`:ess`), and pooled autocorrelation times. `all_samples` has shape
(n_saved, n_params, n_chains).
"""
function compute_multi_chain_diagnostics(all_samples::Array{Float64, 3}, acceptance_rates::Vector{Float64}, config::MCMCConfig)
    n_saved, n_params, n_chains = size(all_samples)
    diagnostics = Dict()

    # Gelman-Rubin needs at least two chains.
    if n_chains < 2
        diagnostics[:gelman_rubin] = NaN
        diagnostics[:ess] = zeros(n_params)
        diagnostics[:autocorrelation_times] = zeros(n_params, 1)
        return diagnostics
    end

    gr_values = zeros(n_params)

    for param_idx in 1:n_params
        # n_saved × n_chains slice for this parameter.
        param_samples = all_samples[:, param_idx, :]

        chain_means = mean(param_samples, dims=1)[:]
        chain_vars = var(param_samples, dims=1)[:]

        # Between-chain variance B = n * Var(chain means). The original
        # additionally divided by n_chains, understating B by a factor of m.
        B = n_saved * var(chain_means)
        # Within-chain variance W = mean of per-chain sample variances.
        W = mean(chain_vars)

        # Pooled variance estimate and potential scale reduction factor.
        V_hat = ((n_saved - 1) / n_saved) * W + (1 / n_saved) * B
        gr_values[param_idx] = sqrt(V_hat / W)
    end

    diagnostics[:gelman_rubin] = maximum(gr_values)

    # Pool the chains for an ESS estimate. A plain reshape of the
    # (n_saved, n_params, n_chains) array to (n_saved*n_chains, n_params)
    # interleaves parameters and chains under column-major layout, so move
    # the chain axis next to the sample axis first.
    pooled = reshape(permutedims(all_samples, (1, 3, 2)), n_saved * n_chains, n_params)
    single_chain_diagnostics = compute_diagnostics(pooled, mean(acceptance_rates), config)

    diagnostics[:ess] = single_chain_diagnostics[:ess] / n_chains
    diagnostics[:autocorrelation_times] = single_chain_diagnostics[:autocorrelation_times]

    return diagnostics
end

"""
    autocorrelation_function(series::Vector{Float64})

Compute the normalized autocorrelation function of a time series.

Returns a vector `acf` where `acf[k+1]` is the lag-`k` autocorrelation using
the (n - lag) per-lag normalization, so `acf[1] == 1` exactly. Lags are capped
at `min(n - 1, 1000)`. Returns an empty vector for empty input and `[1.0]`
for a constant (zero-variance) series.
"""
function autocorrelation_function(series::Vector{Float64})
    n = length(series)
    if n == 0
        return Float64[]
    end

    # Center the series on its mean.
    series_centered = series .- mean(series)

    # Population (1/n) variance. The original used the sample (1/(n-1))
    # variance, which made the lag-0 value (n-1)/n instead of exactly 1.
    var_series = sum(abs2, series_centered) / n
    if var_series == 0
        return [1.0]  # constant series: ACF defined as a lag-0 spike
    end

    max_lag = min(n - 1, 1000)  # limit maximum lag for efficiency
    acf = zeros(max_lag + 1)

    # max_lag <= n - 1, so every lag has at least one overlapping pair
    # (the original's `if lag < n` guard was always true and is removed).
    for lag in 0:max_lag
        correlation = sum(series_centered[1:n-lag] .* series_centered[1+lag:n])
        acf[lag+1] = correlation / ((n - lag) * var_series)
    end

    return acf
end

"""
    convert_to_physical(control_state::Vector{Float64}, covariance::CovarianceMatrix)

Convert control space state to physical space.

Thin delegation to `Transformations.phi2chi`; the covariance object supplies
the transform. NOTE(review): the semantics (scaling, basis) live entirely in
`phi2chi` — see the Transformations module for the actual mapping.
"""
function convert_to_physical(control_state::Vector{Float64}, covariance::CovarianceMatrix)
    return Transformations.phi2chi(control_state, covariance)
end

"""
    DelayedRejection

Delayed rejection MCMC for improved acceptance rates.

Holds a primary proposal and a more conservative secondary proposal intended
for retry stages (up to `max_stages`). NOTE(review): this type is not
referenced by `run_mcmc` in this file yet.
"""
mutable struct DelayedRejection
    primary_proposal::AbstractProposal
    secondary_proposal::AbstractProposal
    max_stages::Int
end

"""
    DelayedRejection(primary_proposal, secondary_proposal=nothing; max_stages=2)

Construct a delayed-rejection scheme; when no secondary proposal is given, a
more conservative copy of the primary proposal is derived automatically.
"""
function DelayedRejection(primary_proposal, secondary_proposal=nothing; max_stages=2)
    if secondary_proposal === nothing
        if isa(primary_proposal, RandomWalkProposal)
            # Build a fresh proposal with half the step size instead of
            # mutating a deepcopy: assigning to step_size would throw if
            # RandomWalkProposal is an immutable struct.
            secondary_proposal = RandomWalkProposal(
                primary_proposal.step_size * 0.5,
                copy(primary_proposal.cov_matrix),
                primary_proposal.adaptation_rate,
                primary_proposal.n_adaptations,
                primary_proposal.max_adaptations,
            )
        else
            # Non-random-walk proposals are copied unchanged, as before.
            secondary_proposal = deepcopy(primary_proposal)
        end
    end

    return DelayedRejection(primary_proposal, secondary_proposal, max_stages)
end

end # module
