# Example demonstrating MALA implementation for FLEXINVERT
# Shows integration with existing infrastructure and advanced features

using LinearAlgebra
using Random
using Statistics
using Plots

# Add the src directory to the load path
push!(LOAD_PATH, joinpath(@__DIR__, "..", "src"))

using MCMCTypes
using Posterior
using MALA
using Transformations

"""
Example: High-dimensional Gaussian posterior with MALA sampling.

This example demonstrates:
1. Setting up a MALA proposal with proper configuration
2. Integration with gradient computation
3. Adaptive step size tuning
4. Block-wise MALA updates
5. Performance comparison with standard Metropolis-Hastings
"""

"""
    setup_test_problem(n_dim::Int = 100)

Build a synthetic linear-Gaussian inverse problem: a spatially correlated
prior, a random (dense) observation operator, and noisy synthetic data
generated from a known true state.

The global RNG is seeded (`Random.seed!(42)`) so the problem is reproducible.

# Arguments
- `n_dim`: number of state parameters (must be ≥ 1).

# Returns
Named tuple `(μ_prior, Σ_prior, H, y_obs, R, x_true, n_dim, n_obs)` where
`n_obs = n_dim ÷ 3` (fewer observations than parameters, so the problem is
under-determined).

# Throws
- `ArgumentError` if `n_dim < 1`.
"""
function setup_test_problem(n_dim::Int = 100)
    n_dim >= 1 || throw(ArgumentError("n_dim must be positive, got $n_dim"))
    Random.seed!(42)

    # Exponential correlation structure — simulates the spatial correlations
    # typical of atmospheric-inversion prior covariances.
    correlation_length = 20.0
    Σ_prior = [exp(-abs(i - j) / correlation_length) for i = 1:n_dim, j = 1:n_dim]

    # Small diagonal jitter so the Cholesky factorization below is stable.
    Σ_prior += 0.01 * I

    # Prior mean (background emissions)
    μ_prior = ones(n_dim) * 0.5

    # Simulated "observations" - sparse and noisy
    n_obs = n_dim ÷ 3              # `÷` already returns an Int
    H = randn(n_obs, n_dim) * 0.1  # Observation operator (transport matrix)

    # True state for synthetic data, drawn from the (scaled) prior.
    x_true = μ_prior + cholesky(Σ_prior).L * randn(n_dim) * 0.3

    # Synthetic observations with additive Gaussian noise.
    σ_obs = 0.1
    y_obs = H * x_true + σ_obs * randn(n_obs)

    # Observation error covariance (diagonal, homoscedastic).
    R = σ_obs^2 * I(n_obs)

    return (
        μ_prior = μ_prior,
        Σ_prior = Σ_prior,
        H = H,
        y_obs = y_obs,
        R = R,
        x_true = x_true,
        n_dim = n_dim,
        n_obs = n_obs
    )
end

"""
    create_evaluator(problem)

Build a `LogPosteriorEvaluator` for the linear-Gaussian test problem,
wiring in the forward model `x -> H * x`, the analytic posterior gradient,
and a Gaussian likelihood constructed from the observation covariance.
"""
function create_evaluator(problem)
    H = problem.H

    # Forward model: predicted observations for state x (linear case).
    fwd(x) = H * x

    # Analytic gradient of the log-posterior:
    #   ∇ log p(x|y) = H' R⁻¹ (y - Hx) - Σ⁻¹ (x - μ)
    grad(x) = H' * (problem.R \ (problem.y_obs - H * x)) -
              problem.Σ_prior \ (x - problem.μ_prior)

    # Gaussian likelihood built from the observation-error covariance.
    gaussian_lik = Posterior.construct_gaussian_likelihood(problem.R)

    return LogPosteriorEvaluator(
        fwd,
        grad,
        nothing,  # no parameter transformation in this example
        problem.μ_prior,
        inv(problem.Σ_prior),
        gaussian_lik,
        problem.y_obs;
        compute_gradient = true
    )
end

"""
    run_mala_sampling(evaluator, problem; n_samples = 5000, n_burnin = 1000)

Run a single MALA chain with adaptive step-size tuning.

# Arguments
- `evaluator`: log-posterior evaluator (used via `evaluate_log_posterior!`
  and by `MALA.mala_step!`).
- `problem`: named tuple from `setup_test_problem`; uses `n_dim` and `μ_prior`.

# Keywords
- `n_samples`: number of post-burn-in samples to retain.
- `n_burnin`: burn-in iterations (discarded; step size adapts during them).

# Returns
Named tuple `(samples, log_posteriors, acceptance_rates, step_sizes,
diagnostics)` where `diagnostics` is the result of `MALA.mala_diagnostics`.
"""
function run_mala_sampling(evaluator, problem; n_samples = 5000, n_burnin = 1000)
    println("Setting up MALA sampler...")

    # Configure MALA with adaptive step size
    mala_options = MALAOptions(
        step_size = 0.01,
        min_step_size = 1e-5,
        max_step_size = 0.1,
        adapt_step_size = true,
        target_acceptance = 0.574,  # asymptotically optimal acceptance for MALA
        adaptation_window = 100,
        gradient_clip_threshold = 50.0,
        fallback_to_random_walk = true
    )

    # Create MALA proposal
    mala = MALAProposal(mala_options)

    # Initialize MCMC state near the prior mean with a small random offset.
    state = MCMCState(problem.n_dim)
    state.x_chi .= problem.μ_prior + 0.1 * randn(problem.n_dim)

    # Evaluate initial state
    evaluate_log_posterior!(evaluator, state)
    println("Initial log-posterior: $(state.log_posterior)")

    # Storage for samples and diagnostic traces.
    samples = Vector{Vector{Float64}}()
    log_posteriors = Float64[]
    acceptance_rates = Float64[]
    step_sizes = Float64[]

    rng = MersenneTwister(123)  # fixed seed for reproducibility

    println("Running MALA sampling...")
    total_iterations = n_burnin + n_samples

    for iter in 1:total_iterations
        # MALA step; the returned accept/reject flag is intentionally
        # ignored because acceptance is tracked inside `state`.
        MALA.mala_step!(state, evaluator, mala, rng)

        # Record diagnostics every 100 iterations.
        if iter % 100 == 0
            push!(acceptance_rates, acceptance_rate(state))
            push!(step_sizes, mala.state.current_step_size)

            if iter <= n_burnin
                println("Burn-in iter $iter: acc_rate = $(round(acceptance_rate(state), digits=3)), " *
                       "step_size = $(round(mala.state.current_step_size, digits=5)), " *
                       "log_post = $(round(state.log_posterior, digits=2))")
            elseif iter % 500 == 0
                println("Sampling iter $(iter-n_burnin): acc_rate = $(round(acceptance_rate(state), digits=3)), " *
                       "log_post = $(round(state.log_posterior, digits=2))")
            end

            # Reset acceptance counters during burn-in so each 100-iteration
            # window reports a fresh acceptance rate for adaptation.
            if iter <= n_burnin
                reset_acceptance!(state)
            end
        end

        # Store samples after burn-in
        if iter > n_burnin
            push!(samples, copy(state.x_chi))
            push!(log_posteriors, state.log_posterior)
        end
    end

    # Final diagnostics
    final_diagnostics = MALA.mala_diagnostics(mala)
    println("\nFinal MALA Diagnostics:")
    println("  Total acceptance rate: $(round(final_diagnostics.total_acceptance_rate, digits=3))")
    println("  Final step size: $(round(final_diagnostics.current_step_size, digits=5))")
    println("  Gradient failures: $(final_diagnostics.gradient_failures)")
    println("  Clipped gradients: $(final_diagnostics.clipped_gradients)")
    println("  Fallback steps: $(final_diagnostics.fallback_steps)")

    return (
        samples = samples,
        log_posteriors = log_posteriors,
        acceptance_rates = acceptance_rates,
        step_sizes = step_sizes,
        diagnostics = final_diagnostics
    )
end

"""
    run_block_mala_sampling(evaluator, problem; n_samples = 5000, n_burnin = 1000, block_size = 20)

Run block-wise MALA: the parameter vector is partitioned into contiguous
blocks and each block is updated in turn with its own fixed step size.

# Arguments
- `evaluator`: log-posterior evaluator with gradient support.
- `problem`: named tuple from `setup_test_problem`; uses `n_dim` and `μ_prior`.

# Keywords
- `n_samples`: number of post-burn-in samples to retain.
- `n_burnin`: burn-in iterations (discarded).
- `block_size`: target number of parameters per block.

# Returns
Named tuple `(samples, log_posteriors, block_acceptances)` where
`block_acceptances[i]` is the trace of acceptance rates for block `i`.
"""
function run_block_mala_sampling(evaluator, problem; n_samples = 5000, n_burnin = 1000, block_size = 20)
    println("\nSetting up Block MALA sampler...")

    # Partition the state vector into blocks of roughly `block_size`.
    blocks = MALA.create_mala_blocks(problem.n_dim, block_size)
    println("Created $(length(blocks)) blocks of approximate size $block_size")

    # Per-block step sizes; global adaptation is disabled below.
    block_step_sizes = fill(0.02, length(blocks))

    mala_options = MALAOptions(
        step_size = 0.01,  # Not used for block updates
        block_structure = blocks,
        block_step_sizes = block_step_sizes,
        adapt_step_size = false,  # Disable global adaptation for blocks
        gradient_clip_threshold = 50.0,
        fallback_to_random_walk = true
    )

    mala = MALAProposal(mala_options)

    # Initialize state near the prior mean.
    state = MCMCState(problem.n_dim)
    state.x_chi .= problem.μ_prior + 0.1 * randn(problem.n_dim)
    evaluate_log_posterior!(evaluator, state)

    # Storage
    samples = Vector{Vector{Float64}}()
    log_posteriors = Float64[]
    block_acceptances = [Float64[] for _ in 1:length(blocks)]

    rng = MersenneTwister(456)  # fixed seed for reproducibility
    total_iterations = n_burnin + n_samples

    println("Running Block MALA sampling...")

    for iter in 1:total_iterations
        # Sweep over blocks in order; the accept/reject return value is
        # ignored because per-block acceptance is tracked in `mala.state`.
        for block_idx in eachindex(blocks)
            MALA.mala_block_step!(state, evaluator, mala, block_idx, rng)

            # Record per-block acceptance every 100 sweeps.
            if iter % 100 == 0
                block_state = mala.state.block_states[block_idx]
                acceptance = block_state.n_proposals > 0 ?
                            block_state.n_accepted / block_state.n_proposals : 0.0
                push!(block_acceptances[block_idx], acceptance)
            end
        end

        # Progress reporting
        if iter % 500 == 0
            avg_block_acceptance = mean([
                bs.n_proposals > 0 ? bs.n_accepted / bs.n_proposals : 0.0
                for bs in mala.state.block_states
            ])

            if iter <= n_burnin
                println("Block burn-in iter $iter: avg_acc = $(round(avg_block_acceptance, digits=3)), " *
                       "log_post = $(round(state.log_posterior, digits=2))")
            else
                println("Block sampling iter $(iter-n_burnin): avg_acc = $(round(avg_block_acceptance, digits=3)), " *
                       "log_post = $(round(state.log_posterior, digits=2))")
            end
        end

        # Store samples after burn-in
        if iter > n_burnin
            push!(samples, copy(state.x_chi))
            push!(log_posteriors, state.log_posterior)
        end
    end

    # Block-wise diagnostics
    println("\nBlock MALA Diagnostics:")
    for (i, block_state) in enumerate(mala.state.block_states)
        acceptance = block_state.n_proposals > 0 ?
                    block_state.n_accepted / block_state.n_proposals : 0.0
        println("  Block $i: acceptance = $(round(acceptance, digits=3)), " *
               "size = $(length(blocks[i]))")
    end

    return (
        samples = samples,
        log_posteriors = log_posteriors,
        block_acceptances = block_acceptances
    )
end

"""
    analyze_results(problem, mala_results, block_results = nothing)

Summarize posterior estimates from MALA samples against the known truth,
print quality/uncertainty/efficiency diagnostics, and attempt (best-effort)
trace and comparison plots.

# Arguments
- `problem`: named tuple with at least `x_true`.
- `mala_results`: named tuple with `samples`, `log_posteriors`, `step_sizes`,
  and `diagnostics` (as returned by `run_mala_sampling`).
- `block_results`: optional block-MALA results; accepted for API symmetry
  but not currently used in the analysis.

# Returns
Named tuple `(posterior_mean, posterior_std, bias, rmse)`.

# Throws
- `ArgumentError` if `mala_results.samples` is empty.
"""
function analyze_results(problem, mala_results, block_results = nothing)
    isempty(mala_results.samples) &&
        throw(ArgumentError("mala_results.samples is empty; nothing to analyze"))

    println("\n" * "="^60)
    println("SAMPLING RESULTS ANALYSIS")
    println("="^60)

    # Stack samples into an (n_samples × n_dim) matrix.
    # `reduce(hcat, ...)` avoids splatting a potentially very long list.
    mala_samples_matrix = reduce(hcat, mala_results.samples)'

    # Posterior statistics (per-parameter mean and standard deviation).
    posterior_mean = mean(mala_samples_matrix, dims=1)[1, :]
    posterior_std = std(mala_samples_matrix, dims=1)[1, :]

    # Compare with true values
    bias = posterior_mean - problem.x_true
    rmse = sqrt(mean(bias.^2))

    println("Posterior Estimation Quality:")
    println("  RMSE vs true state: $(round(rmse, digits=4))")
    println("  Mean absolute bias: $(round(mean(abs.(bias)), digits=4))")
    println("  Max absolute bias: $(round(maximum(abs.(bias)), digits=4))")

    # Posterior uncertainty
    println("\nPosterior Uncertainty:")
    println("  Mean posterior std: $(round(mean(posterior_std), digits=4))")
    println("  Min posterior std: $(round(minimum(posterior_std), digits=4))")
    println("  Max posterior std: $(round(maximum(posterior_std), digits=4))")

    # Sampling efficiency
    println("\nSampling Efficiency:")
    println("  Effective sample size: $(length(mala_results.samples))")
    println("  Final acceptance rate: $(round(mala_results.diagnostics.total_acceptance_rate, digits=3))")

    # Create diagnostic plots; any failure (e.g. no plotting backend) is
    # caught and reported rather than aborting the analysis.
    try
        # Trace plot for first few parameters
        p1 = plot(mala_samples_matrix[:, 1:min(3, size(mala_samples_matrix, 2))],
                 title="MALA Trace Plot (first 3 params)",
                 label=["Param 1" "Param 2" "Param 3"],
                 xlabel="Iteration", ylabel="Value")

        # Log-posterior trace
        p2 = plot(mala_results.log_posteriors,
                 title="Log-Posterior Trace",
                 xlabel="Iteration", ylabel="Log-Posterior",
                 label="MALA")

        # Step size adaptation
        p3 = plot(mala_results.step_sizes,
                 title="MALA Step Size Adaptation",
                 xlabel="Adaptation Window", ylabel="Step Size",
                 label="Step Size")

        # Parameter comparison with truth
        comparison_indices = 1:min(20, length(problem.x_true))
        p4 = scatter(comparison_indices, problem.x_true[comparison_indices],
                    label="True", alpha=0.7, markersize=4)
        scatter!(p4, comparison_indices, posterior_mean[comparison_indices],
                label="MALA Posterior Mean", alpha=0.7, markersize=4)
        plot!(p4, title="Parameter Estimates vs Truth (first 20)",
             xlabel="Parameter Index", ylabel="Value")

        plot_combined = plot(p1, p2, p3, p4, layout=(2,2), size=(800, 600))

        println("\nDiagnostic plots created (if plotting backend available)")
        display(plot_combined)

    catch e
        println("\nNote: Plotting failed - may need to install Plots.jl or set backend")
        @debug "Plotting error: $e"
    end

    return (
        posterior_mean = posterior_mean,
        posterior_std = posterior_std,
        bias = bias,
        rmse = rmse
    )
end

"""
    main()

Entry point for the example: build the test problem, run standard and
block-wise MALA, analyze the chains, and return all intermediate results.
"""
function main()
    println("FLEXINVERT MALA Example")
    println("="^40)

    # Use a smaller problem than the default so the example runs quickly.
    prob = setup_test_problem(50)
    eval_obj = create_evaluator(prob)

    println("Problem setup:")
    println("  Dimensions: $(prob.n_dim)")
    println("  Observations: $(prob.n_obs)")
    println("  Prior mean: $(round(mean(prob.μ_prior), digits=3))")

    # Standard (global) MALA chain with adaptive step size.
    standard = run_mala_sampling(eval_obj, prob, n_samples=2000, n_burnin=500)

    # Block-wise MALA chain with fixed per-block step sizes.
    blocked = run_block_mala_sampling(eval_obj, prob,
                                      n_samples=2000, n_burnin=500, block_size=10)

    # Compare posterior estimates against the known truth.
    summary = analyze_results(prob, standard, blocked)

    println("\nExample completed successfully!")
    return (prob, standard, blocked, summary)
end

# Run `main()` only when this file is executed as a script
# (skipped when the file is `include`d from another script or the REPL).
if abspath(PROGRAM_FILE) == @__FILE__
    main()
end