# Core MCMC sampling algorithms for FLEXINVERT
# Includes Metropolis-Hastings, pCN, MALA, and block updates

module Sampler

using LinearAlgebra
using Random
using Statistics
using Distributions
using ..MCMCTypes
using ..Posterior
using ..MALA

export MCMCSampler, sample!, metropolis_hastings_step!, pcn_step!, mala_step!
export block_gibbs_step!, adapt_step_size!, initialize_chain!

"""
    MCMCSampler

Coordinates MCMC sampling by dispatching to the configured proposal mechanism
(Metropolis-Hastings, pCN, MALA, or block Gibbs).
"""
struct MCMCSampler{E<:LogPosteriorEvaluator}
    evaluator::E                        # Evaluates the log-posterior for a state
    config::MCMCConfiguration          # Sampling configuration
    rng::AbstractRNG                   # Source of randomness for all proposals

    # Acceptance rates recorded at each adaptation window
    adaptation_history::Vector{Float64}

    # Proposal state for MALA; `nothing` for every other proposal type
    mala_proposal::Union{MALAProposal, Nothing}

    function MCMCSampler(evaluator::E, config::MCMCConfiguration, rng::AbstractRNG = MersenneTwister()) where E
        # Build the MALA-specific proposal state only when it will actually be used
        mala = config.proposal_type == MCMCTypes.MALA ?
            MALAProposal(MALAOptions(
                step_size = config.mala_step_size,
                adapt_step_size = config.adapt_step_size,
                target_acceptance = config.target_acceptance,
                block_structure = config.block_structure
            )) :
            nothing

        return new{E}(evaluator, config, rng, Float64[], mala)
    end
end

"""
    initialize_chain!(sampler::MCMCSampler, state::MCMCState, x_init::Vector{Float64})

Seed the chain at `x_init`, evaluate the starting log-posterior, and reset
acceptance counters. If the starting point is non-finite, retry from randomly
perturbed copies of `x_init` (up to 100 attempts) before erroring out.
"""
function initialize_chain!(sampler::MCMCSampler, state::MCMCState, x_init::Vector{Float64})
    state.x_chi .= x_init
    evaluate_log_posterior!(sampler.evaluator, state)
    reset_acceptance!(state)

    @info "Chain initialized with log-posterior: $(state.log_posterior)"

    # Finite starting point: nothing more to do
    if isfinite(state.log_posterior)
        return state
    end

    @warn "Initial state has non-finite log-posterior: $(state.log_posterior)"
    @warn "Attempting to find valid starting point..."

    for attempt in 1:100
        # Always perturb around the requested start, not the last failed point
        state.x_chi .= x_init .+ 0.1 .* randn(sampler.rng, length(x_init))
        evaluate_log_posterior!(sampler.evaluator, state)

        if isfinite(state.log_posterior)
            @info "Found valid starting point after $attempt attempts"
            return state
        end
    end

    error("Could not find valid starting point after 100 attempts")
end

"""
    sample!(sampler::MCMCSampler, state::MCMCState)

Advance the chain by one step, dispatching on the configured proposal type.
Returns `true` if the proposal was accepted, `false` otherwise.
"""
function sample!(sampler::MCMCSampler, state::MCMCState)
    kind = sampler.config.proposal_type

    kind == MCMCTypes.MetropolisHastings && return metropolis_hastings_step!(sampler, state)
    kind == MCMCTypes.CrankNicolson && return pcn_step!(sampler, state)
    kind == MCMCTypes.MALA && return mala_step!(sampler, state)
    kind == MCMCTypes.BlockGibbs && return block_gibbs_step!(sampler, state)

    error("Unknown proposal type: $kind")
end

"""
    metropolis_hastings_step!(sampler::MCMCSampler, state::MCMCState)

Standard Metropolis-Hastings step with an isotropic Gaussian random-walk
proposal scaled by `config.step_size`. Returns `true` on acceptance.
"""
function metropolis_hastings_step!(sampler::MCMCSampler, state::MCMCState)
    # Snapshot the current position and its log-posterior for possible restore
    x_old = copy(state.x_chi)
    lp_old = state.log_posterior

    # Random-walk proposal, written in place
    σ = sampler.config.step_size
    state.x_chi .= x_old .+ σ .* randn(sampler.rng, length(x_old))

    lp_new = evaluate_log_posterior!(sampler.evaluator, state)

    # Metropolis acceptance probability min(1, π(x')/π(x))
    accept = rand(sampler.rng) < min(1.0, exp(lp_new - lp_old))

    if accept
        update_acceptance!(state, true)
    else
        # Roll back the position and re-evaluate so cached fields are consistent
        state.x_chi .= x_old
        state.log_posterior = lp_old
        evaluate_log_posterior!(sampler.evaluator, state)
        update_acceptance!(state, false)
    end

    return accept
end

"""
    pcn_step!(sampler::MCMCSampler, state::MCMCState)

Preconditioned Crank-Nicolson step that preserves the Gaussian prior exactly.
Particularly effective for high-dimensional problems.

The proposal x' = √(1-β²) x + β ξ is reversible with respect to the N(0, I)
prior in chi space, so the prior and proposal densities cancel in the
Metropolis ratio and the acceptance probability reduces to the likelihood
ratio alone. Returns `true` on acceptance.
"""
function pcn_step!(sampler::MCMCSampler, state::MCMCState)
    β = sampler.config.pcn_beta

    # Store current state: posterior for restoration on reject, likelihood
    # for the acceptance ratio
    x_current = copy(state.x_chi)
    log_post_current = state.log_posterior
    log_lik_current = state.log_likelihood

    # pCN proposal: x' = √(1-β²) x + β ξ, where ξ ~ N(0,I)
    # This preserves the N(0,I) distribution in chi space
    ξ = randn(sampler.rng, length(x_current))
    state.x_chi .= sqrt(1 - β^2) .* x_current .+ β .* ξ

    # Evaluate proposal (refreshes log_likelihood / log_prior / log_posterior)
    evaluate_log_posterior!(sampler.evaluator, state)

    # BUG FIX: the previous version subtracted the same (post-evaluation, i.e.
    # the *proposal's*) `state.log_prior` from both terms, which algebraically
    # reduced to the full posterior ratio and destroyed pCN's prior-preserving
    # property. The correct pCN acceptance uses only the likelihood ratio.
    log_alpha = state.log_likelihood - log_lik_current
    alpha = min(1.0, exp(log_alpha))

    # Accept or reject
    if rand(sampler.rng) < alpha
        update_acceptance!(state, true)
        return true
    else
        # Reject proposal - restore current state and re-evaluate so every
        # cached field (likelihood, prior, x_phys) is consistent again
        state.x_chi .= x_current
        state.log_posterior = log_post_current
        evaluate_log_posterior!(sampler.evaluator, state)
        update_acceptance!(state, false)
        return false
    end
end

"""
    mala_step!(sampler::MCMCSampler, state::MCMCState)

Metropolis-adjusted Langevin algorithm step using gradient information.
Validates preconditions, then delegates to the full implementation in the
`MALA` module. Returns `true` on acceptance.
"""
function mala_step!(sampler::MCMCSampler, state::MCMCState)
    proposal = sampler.mala_proposal

    # Guard clauses: both conditions are invariants set up by the constructor
    proposal === nothing &&
        error("MALA proposal not initialized - this should not happen")
    sampler.evaluator.compute_gradient ||
        error("MALA requires gradient computation to be enabled")

    return MALA.mala_step!(state, sampler.evaluator, proposal, sampler.rng)
end

"""
    block_gibbs_step!(sampler::MCMCSampler, state::MCMCState)

Block-wise Metropolis-within-Gibbs update for grouped parameters. Each call
updates one parameter block, cycling through `config.block_structure`; with
no blocks configured it falls back to a full-dimensional MH step.
"""
function block_gibbs_step!(sampler::MCMCSampler, state::MCMCState)
    blocks = sampler.config.block_structure
    isempty(blocks) && return metropolis_hastings_step!(sampler, state)

    # Advance to the next block, wrapping around at the end
    state.current_block = mod1(state.current_block + 1, length(blocks))
    idx = blocks[state.current_block]

    # Snapshot for possible restore
    x_old = copy(state.x_chi)
    lp_old = state.log_posterior

    # Perturb only the coordinates belonging to the active block
    state.x_chi[idx] .+= sampler.config.step_size .* randn(sampler.rng, length(idx))

    lp_new = evaluate_log_posterior!(sampler.evaluator, state)

    # Standard Metropolis accept/reject on the full posterior
    if rand(sampler.rng) < min(1.0, exp(lp_new - lp_old))
        update_acceptance!(state, true)
        return true
    else
        # Roll back and re-evaluate so cached fields are consistent
        state.x_chi .= x_old
        state.log_posterior = lp_old
        evaluate_log_posterior!(sampler.evaluator, state)
        update_acceptance!(state, false)
        return false
    end
end

"""
    adapt_step_size!(sampler::MCMCSampler, state::MCMCState, iteration::Int)

Adapt step size during burn-in to achieve target acceptance rate. Runs once
per adaptation window, records the window's acceptance rate, and resets the
acceptance counters afterwards.
"""
function adapt_step_size!(sampler::MCMCSampler, state::MCMCState, iteration::Int)
    cfg = sampler.config

    # Adaptation disabled, or still inside the first window
    if !cfg.adapt_step_size || iteration <= cfg.adaptation_window
        return
    end

    # Only act at window boundaries
    iteration % cfg.adaptation_window == 0 || return

    current_acceptance = acceptance_rate(state)

    # Robbins-Monro schedule: adaptation magnitude shrinks as windows accumulate
    adaptation_rate = min(0.1, 1.0 / sqrt(iteration / cfg.adaptation_window))

    # Increase the step when accepting too often, decrease it otherwise
    factor = current_acceptance > cfg.target_acceptance ? exp(adaptation_rate) : exp(-adaptation_rate)

    if cfg.proposal_type == MCMCTypes.CrankNicolson
        # For pCN the tunable is the β mixing parameter
        new_beta = clamp(cfg.pcn_beta * factor, 0.01, 0.99)
        @info "Adapting pCN β: $(cfg.pcn_beta) → $new_beta (acceptance: $current_acceptance)"
        # NOTE(review): `config` is immutable here, so the adapted value is
        # logged but never applied — confirm whether adaptation should mutate
        # the configuration or be stored on the sampler instead
    elseif cfg.proposal_type == MCMCTypes.MALA
        if sampler.mala_proposal !== nothing
            # The comprehensive MALA implementation adapts its own step size
            @info "MALA using internal adaptation (current acceptance: $current_acceptance)"
        else
            # Fallback if something went wrong during construction
            new_step = cfg.mala_step_size * factor
            @info "Adapting MALA step size (fallback): $(cfg.mala_step_size) → $new_step (acceptance: $current_acceptance)"
        end
    else
        # Random-walk proposals: scale the step size
        new_step = cfg.step_size * factor
        @info "Adapting MH step size: $(cfg.step_size) → $new_step (acceptance: $current_acceptance)"
    end

    push!(sampler.adaptation_history, current_acceptance)

    # Fresh statistics for the next window
    reset_acceptance!(state)
end

"""
    run_chain!(sampler::MCMCSampler, x_init::Vector{Float64})

Run a complete MCMC chain starting from `x_init` and return
`(samples, state)`, where `samples` is the thinned post-burn-in collection of
`PosteriorSample`s and `state` is the final chain state.
"""
function run_chain!(sampler::MCMCSampler, x_init::Vector{Float64})
    # Initialize state
    state = MCMCState(length(x_init))
    initialize_chain!(sampler, state, x_init)

    # Storage for samples; pre-size for the expected thinned count
    samples = PosteriorSample{Float64}[]
    n_burnin = sampler.config.n_burnin
    n_thin = sampler.config.n_thin
    n_total = n_burnin + sampler.config.n_samples
    sizehint!(samples, fld(sampler.config.n_samples, n_thin))

    @info "Starting MCMC chain: $(n_total) total iterations ($(n_burnin) burn-in)"

    # Main sampling loop
    for iter in 1:n_total
        # Perform sampling step (acceptance is tracked inside `state`)
        sample!(sampler, state)

        # Adapt step size during burn-in only
        if iter <= n_burnin
            adapt_step_size!(sampler, state, iter)
        end

        # Store sample (after burn-in, with thinning).
        # Named `post_sample` to avoid shadowing `sample` exported by Distributions.
        if iter > n_burnin && (iter - n_burnin) % n_thin == 0
            post_sample = PosteriorSample(
                copy(state.x_phys),
                copy(state.x_chi),
                state.log_posterior,
                state.log_likelihood,
                state.log_prior,
                deepcopy(state.hyperparams),
                iter
            )
            push!(samples, post_sample)
        end

        # Periodic progress reporting
        if iter % 1000 == 0
            acc_rate = acceptance_rate(state)
            @info "Iteration $iter: log-posterior = $(round(state.log_posterior, digits=2)), acceptance = $(round(acc_rate, digits=3))"
        end
    end

    final_acceptance = acceptance_rate(state)
    @info "Chain completed. Final acceptance rate: $(round(final_acceptance, digits=3))"

    return samples, state
end

end # module Sampler
