# MCMC integration with existing FLEXINVERT infrastructure
# Bridges MCMC sampling with forward models, covariance systems, and transformations

module MCMCIntegration

using LinearAlgebra
using Random
using Statistics
using Distributed

# Import FLEXINVERT modules
using ..Settings: Config, Files
using ..CoreTypes: Domain
using ..ObservationsCore: Observations
using ..StateVector: InversionState, initialize_state
using ..PriorCovariance: CovarianceMatrix
using ..ForwardModel: forward_model, PriorFluxes, FluxField
using ..Transformations: validate_transforms, phi2chi, chi2phi, chi2phi!, phi2chi!,
                           full_precision_matrix
using ..MCMCTypes: MCMCState, MCMCConfiguration, ProposalType,
                    MetropolisHastings, CrankNicolson, MALA, BlockGibbs,
                    MCMCResult, PosteriorSample, ChainDiagnostics, HyperParameters,
                    acceptance_rate, reset_acceptance!
using ..Posterior: LogPosteriorEvaluator, construct_gaussian_likelihood,
                   construct_student_t_likelihood, evaluate_log_posterior!
using ..Sampler: MCMCSampler, adapt_step_size!, initialize_chain!, sample!

export run_mcmc_inversion, create_mcmc_evaluator, mcmc_configuration_from_config
export save_mcmc_results, load_mcmc_checkpoint

"""
    mcmc_configuration_from_config(cfg::Config) -> MCMCConfiguration

Convert FLEXINVERT Config to MCMCConfiguration.
"""
function mcmc_configuration_from_config(cfg::Config)
    # Map proposal string to enum
    proposal_type = if cfg.mcmc_proposal == "mh"
        MetropolisHastings
    elseif cfg.mcmc_proposal == "pcn"
        CrankNicolson
    elseif cfg.mcmc_proposal == "mala"
        MALA
    elseif cfg.mcmc_proposal == "block"
        BlockGibbs
    else
        error("Unknown MCMC proposal type: $(cfg.mcmc_proposal)")
    end

    return MCMCConfiguration(
        n_samples = cfg.mcmc_samples,
        n_burnin = cfg.mcmc_burnin,
        n_thin = cfg.mcmc_thin,
        proposal_type = proposal_type,
        step_size = cfg.mcmc_step_size,
        block_structure = Vector{Int}[],  # Will be set up later based on problem structure
        adapt_step_size = cfg.mcmc_adapt_step,
        target_acceptance = cfg.mcmc_target_accept,
        adaptation_window = cfg.mcmc_adapt_window,
        pcn_beta = cfg.mcmc_pcn_beta,
        mala_step_size = cfg.mcmc_mala_step,
        sample_hyperparams = cfg.mcmc_sample_hyperparams,
        hyper_update_freq = cfg.mcmc_hyper_freq,
        compute_diagnostics = cfg.mcmc_diagnostics,
        save_samples = cfg.mcmc_save_samples,
        diagnostic_freq = cfg.mcmc_diagnostic_freq,
        n_chains = cfg.mcmc_chains
    )
end

"""
    create_mcmc_evaluator(state::InversionState, prior_fluxes::PriorFluxes,
                         obs::Observations, covariance::CovarianceMatrix,
                         cfg::Config, files::Files, domain::Domain) -> LogPosteriorEvaluator

Create LogPosteriorEvaluator from existing FLEXINVERT components.
"""
function create_mcmc_evaluator(
    state::InversionState,
    prior_fluxes::PriorFluxes,
    obs::Observations,
    covariance::CovarianceMatrix,
    cfg::Config,
    files::Files,
    domain::Domain,
    footprints
)
    @info "Creating MCMC log-posterior evaluator"
    @info "Number of observations: $(length(obs.concentrations))"
    @info "Number of control variables: $(state.n_control)"

    # Build numerically safe observation-error vector
    measurement_errors = copy(obs.measurement_errors)
    max_error = maximum(measurement_errors)
    error_floor = max(1e-6, isnan(max_error) || max_error == 0.0 ? 1e-6 : max_error * 1e-6)
    safe_measurement_errors = max.(measurement_errors, error_floor)

    # Create forward model function
    forward_func = x_phys -> begin
        # Temporarily update state for forward model evaluation
        original_state = copy(state)

        # Set the physical space state
        if length(x_phys) == state.n_physical
            # Set physical state directly and convert to control space
            state.chi .= x_phys
            # Remove prior and transform to control space
            chi_deviation = x_phys .- state.chi_prior
            # Convert to control space using covariance
            if covariance !== nothing
                chi2phi!(state.phi, chi_deviation, covariance)
            else
                state.phi .= chi_deviation
            end
        else
            error("Input dimension mismatch: expected $(state.n_physical), got $(length(x_phys))")
        end

        try
            # The forward model still needs to be modified to use the state
            # For now, we create modified fluxes based on the state
            modified_fluxes = apply_state_to_fluxes(prior_fluxes, state, domain)

            # Run forward model using existing FLEXINVERT infrastructure
            y_model = forward_model(cfg, domain, modified_fluxes, obs;
                                     footprints=footprints, files=files)
            return y_model
        finally
            # Restore original state
            copyto!(state, original_state)
        end
    end

    # Create gradient function (optional for gradient-based methods like MALA)
    gradient_func = if cfg.mcmc_proposal == "mala"
        x_phys -> begin
            # Temporarily update state for gradient computation
            original_state = copy(state)

            # Set the physical space state (similar to forward_func)
            if length(x_phys) == state.n_physical
                state.chi .= x_phys
                # Remove prior and transform to control space
                chi_deviation = x_phys .- state.chi_prior
                if covariance !== nothing
                    chi2phi!(state.phi, chi_deviation, covariance)
                else
                    state.phi .= chi_deviation
                end
            else
                error("Input dimension mismatch: expected $(state.n_physical), got $(length(x_phys))")
            end

            try
                # Apply state to fluxes and compute forward model
                modified_fluxes = apply_state_to_fluxes(prior_fluxes, state, domain)
                y_mod = forward_model(cfg, domain, modified_fluxes, obs;
                                       footprints=footprints, files=files)

                # Compute gradient of log-likelihood (observation term only)
                # g_obs = H^T * R^{-1} * (y - Hx)
                residual = obs.concentrations .- y_mod
                # Apply adjoint using weighted residuals
                obs_gradient_physical = zeros(Float64, state.n_physical)

                # Simple adjoint implementation - could be improved with proper H^T
                # For now, assume identity mapping for first n_box elements
                for i in 1:min(length(residual), domain.nbox, state.n_physical)
                    obs_gradient_physical[i] = residual[i] / safe_measurement_errors[i]^2
                end

                return obs_gradient_physical

            finally
                # Restore original state
                copyto!(state, original_state)
            end
        end
    else
        nothing
    end

    # Set up transformation between chi and physical spaces
    transform = nothing

    # Prior mean in physical space
    x_prior = copy(state.chi_prior)  # Use actual prior values

    # Prior precision matrix (inverse covariance)
    B_inv = covariance !== nothing ? full_precision_matrix(covariance) : Matrix{Float64}(I, state.n_physical, state.n_physical)

    # Construct likelihood model
    likelihood = if cfg.mcmc_likelihood == "gaussian"
        # Use observation errors
        R = Diagonal(safe_measurement_errors.^2)
        construct_gaussian_likelihood(R; ridge=error_floor^2)
    elseif cfg.mcmc_likelihood == "student_t"
        # Robust Student-t likelihood
        R = Diagonal(safe_measurement_errors.^2)
        construct_student_t_likelihood(R, cfg.mcmc_student_nu; ridge=error_floor^2)
    else
        error("Unknown likelihood type: $(cfg.mcmc_likelihood)")
    end

    # Create evaluator
    evaluator = LogPosteriorEvaluator(
        forward_func,
        gradient_func,
        transform,
        x_prior,
        B_inv,
        likelihood,
        obs.concentrations;
        log_normal_prior = cfg.lognormal,
        compute_gradient = (cfg.mcmc_proposal == "mala")
    )

    @info "MCMC evaluator created successfully"
    @info "Likelihood type: $(cfg.mcmc_likelihood)"
    @info "Transform available: $(transform !== nothing)"
    @info "Gradient computation: $(evaluator.compute_gradient)"

    return evaluator
end

"""
    run_mcmc_inversion(files::Files, config::Config, domain::Domain,
                      observations::Observations, prior_fluxes::PriorFluxes,
                      covariance::CovarianceMatrix) -> MCMCResult

High-level interface for running MCMC inversion using existing FLEXINVERT infrastructure.
"""
function run_mcmc_inversion(
    files::Files,
    config::Config,
    domain::Domain,
    observations::Observations,
    prior_fluxes::PriorFluxes,
    covariance::CovarianceMatrix;
    footprints=nothing
)
    @info "Starting MCMC inversion"
    @info "Method: $(config.method)"
    @info "Proposal: $(config.mcmc_proposal)"
    @info "Chains: $(config.mcmc_chains)"
    @info "Samples per chain: $(config.mcmc_samples)"

    # Validate that transformations are set up correctly
    if covariance !== nothing
        validate_transforms(covariance)
    end

    # Initialize inversion state
    state = initialize_state(config, domain, covariance)
    @info "Initialized state with $(state.n_control) control variables"

    # Create MCMC configuration
    mcmc_config = mcmc_configuration_from_config(config)

    # Set up block structure for block proposals
    if config.mcmc_proposal == "block"
        # Create simple blocks based on problem structure
        n_vars = state.n_control
        block_size = max(10, n_vars ÷ 10)  # Aim for ~10 blocks
        blocks = [collect(i:min(i+block_size-1, n_vars)) for i in 1:block_size:n_vars]
        mcmc_config = MCMCConfiguration(
            mcmc_config;
            block_structure = blocks
        )
        @info "Created $(length(blocks)) blocks for block sampling"
    end

    # Create log-posterior evaluator
    evaluator = create_mcmc_evaluator(state, prior_fluxes, observations, covariance, config, files, domain, footprints)

    # Initialize MCMC sampler
    sampler = MCMCSampler(evaluator, mcmc_config)

    # Run MCMC sampling
    @info "Running MCMC sampling..."
    start_time = time()

    result = if mcmc_config.n_chains > 1
        # Parallel chain sampling
        run_parallel_chains(sampler, mcmc_config)
    else
        # Single chain sampling
        run_single_chain(sampler, mcmc_config)
    end

    sampling_time = time() - start_time
    @info "MCMC sampling completed in $(round(sampling_time, digits=2)) seconds"

    # Save results if requested
    if !isempty(files.path_output)
        save_mcmc_results(result, files.path_output, config)
    end

    # Print summary statistics
    print_mcmc_summary(result)

    return result
end

"""
    run_single_chain(sampler::MCMCSampler, config::MCMCConfiguration) -> MCMCResult

Run MCMC sampling with a single chain.
"""
function run_single_chain(sampler::MCMCSampler, config::MCMCConfiguration)
    start_time = time()

    n_vars = sampler.evaluator.n_params
    state = MCMCState(n_vars)
    if sampler.evaluator.transform !== nothing
        x_init = zeros(Float64, n_vars)
    else
        x_init = copy(sampler.evaluator.x_prior)
    end
    initialize_chain!(sampler, state, x_init)

    @info "Initial log-posterior: $(state.log_posterior)"

    total_iterations = config.n_samples + config.n_burnin
    samples = PosteriorSample{Float64}[]

    for iter in 1:total_iterations
        sample!(sampler, state)

        if !isfinite(state.log_posterior)
            @warn "Non-finite log-posterior encountered at iteration $iter; resetting acceptance window"
            reset_acceptance!(state)
            continue
        end

        if iter <= config.n_burnin && config.adapt_step_size
            adapt_step_size!(sampler, state, iter)
        end

        if iter > config.n_burnin && (iter - config.n_burnin) % config.n_thin == 0
            push!(samples, PosteriorSample(
                copy(state.x_phys),
                copy(state.x_chi),
                state.log_posterior,
                state.log_likelihood,
                state.log_prior,
                deepcopy(state.hyperparams),
                iter
            ))
        end

        if iter % 1000 == 0
            acc_rate = acceptance_rate(state)
            @info "Iteration $iter: log-posterior = $(round(state.log_posterior, digits=2)), acceptance = $(round(acc_rate, digits=3))"
        end
    end

    n_params = sampler.evaluator.n_params
    if !isempty(samples)
        sample_matrix = hcat((s.x_phys for s in samples)...)
        posterior_mean = vec(mean(sample_matrix; dims=2))
        posterior_std = vec(std(sample_matrix; dims=2, corrected=false))

        credible_intervals = zeros(Float64, 2, n_params)
        for j in 1:n_params
            param_samples = view(sample_matrix, j, :)
            credible_intervals[1, j] = quantile(param_samples, 0.025)
            credible_intervals[2, j] = quantile(param_samples, 0.975)
        end
    else
        posterior_mean = copy(state.x_phys)
        posterior_std = zeros(Float64, n_params)
        credible_intervals = repeat(state.x_phys', 2, 1)
    end

    diagnostics = ChainDiagnostics()
    diagnostics.acceptance_rate = acceptance_rate(state)

    sampling_time = time() - start_time

    return MCMCResult(
        samples,
        [state],
        posterior_mean,
        posterior_std,
        credible_intervals,
        diagnostics,
        1,
        total_iterations,
        length(samples),
        sampling_time,
        config
    )
end

"""
    run_parallel_chains(sampler::MCMCSampler, config::MCMCConfiguration) -> MCMCResult

Run MCMC sampling with multiple parallel chains.
"""
function run_parallel_chains(sampler::MCMCSampler, config::MCMCConfiguration)
    @info "Running $(config.n_chains) parallel MCMC chains"

    # For now, implement sequential version
    # In practice, this would use Distributed.jl for parallel execution
    chain_results = []

    for chain in 1:config.n_chains
        @info "Running chain $chain/$(config.n_chains)"

        # Add some randomness to initial conditions
        chain_sampler = deepcopy(sampler)
        result = run_single_chain(chain_sampler, config)
        push!(chain_results, result)
    end

    # Combine results from all chains
    all_samples = vcat([r.samples for r in chain_results]...)
    final_states = [r.final_states[1] for r in chain_results]

    if !isempty(all_samples)
        sample_matrix = hcat((s.x_phys for s in all_samples)...)
        posterior_mean = vec(mean(sample_matrix; dims=2))
        posterior_std = vec(std(sample_matrix; dims=2, corrected=false))

        credible_intervals = zeros(Float64, 2, length(posterior_mean))
        for j in 1:length(posterior_mean)
            param_samples = view(sample_matrix, j, :)
            credible_intervals[1, j] = quantile(param_samples, 0.025)
            credible_intervals[2, j] = quantile(param_samples, 0.975)
        end
    else
        n_params = sampler.evaluator.n_params
        posterior_mean = zeros(Float64, n_params)
        posterior_std = zeros(Float64, n_params)
        credible_intervals = zeros(Float64, 2, n_params)
    end

    # Combined diagnostics
    diagnostics = ChainDiagnostics()
    diagnostics.acceptance_rate = mean([acceptance_rate(state) for state in final_states])

    # Compute R-hat diagnostic if multiple chains
    if config.n_chains > 1
        diagnostics.rhat = compute_rhat(chain_results)
    end

    return MCMCResult(
        all_samples,
        final_states,
        posterior_mean,
        posterior_std,
        credible_intervals,
        diagnostics,
        config.n_chains,
        config.n_samples + config.n_burnin,
        length(all_samples),
        sum([r.sampling_time for r in chain_results]),
        config
    )
end

"""
    save_mcmc_results(result::MCMCResult, output_path::String, config::Config)

Save MCMC results in various formats.
"""
function save_mcmc_results(result::MCMCResult, output_path::String, config::Config)
    @info "Saving MCMC results to $output_path"

    # Create output directory if it doesn't exist
    mkpath(output_path)

    # Save posterior mean and standard deviation
    mean_file = joinpath(output_path, "posterior_mean.txt")
    open(mean_file, "w") do io
        println(io, "# Posterior mean")
        for (i, val) in enumerate(result.posterior_mean)
            println(io, "$i $val")
        end
    end

    std_file = joinpath(output_path, "posterior_std.txt")
    open(std_file, "w") do io
        println(io, "# Posterior standard deviation")
        for (i, val) in enumerate(result.posterior_std)
            println(io, "$i $val")
        end
    end

    # Save credible intervals
    ci_file = joinpath(output_path, "credible_intervals.txt")
    open(ci_file, "w") do io
        println(io, "# 95% credible intervals (lower, upper)")
        for i in 1:size(result.credible_intervals, 2)
            println(io, "$i $(result.credible_intervals[1,i]) $(result.credible_intervals[2,i])")
        end
    end

    # Save diagnostics
    diag_file = joinpath(output_path, "diagnostics.txt")
    open(diag_file, "w") do io
        println(io, "# MCMC Diagnostics")
        println(io, "acceptance_rate: $(result.diagnostics.acceptance_rate)")
        if !isempty(result.diagnostics.rhat)
            println(io, "rhat_max: $(maximum(result.diagnostics.rhat))")
            println(io, "rhat_mean: $(mean(result.diagnostics.rhat))")
        end
        println(io, "n_chains: $(result.n_chains)")
        println(io, "n_samples_total: $(result.n_samples_total)")
        println(io, "n_samples_kept: $(result.n_samples_kept)")
        println(io, "sampling_time: $(result.sampling_time)")
    end

    # Save individual samples if requested
    if config.mcmc_save_samples
        samples_file = joinpath(output_path, "mcmc_samples.txt")
        open(samples_file, "w") do io
            println(io, "# MCMC samples: iteration log_posterior log_likelihood log_prior x1 x2 ...")
            for sample in result.samples
                print(io, "$(sample.iteration) $(sample.log_posterior) ")
                print(io, "$(sample.log_likelihood) $(sample.log_prior)")
                for val in sample.x_phys
                    print(io, " $val")
                end
                println(io)
            end
        end
        @info "Saved $(length(result.samples)) individual samples"
    end

    @info "MCMC results saved successfully"
end

"""
    print_mcmc_summary(result::MCMCResult)

Print summary of MCMC results.
"""
function print_mcmc_summary(result::MCMCResult)
    @info "=== MCMC Inversion Summary ==="
    @info "Chains: $(result.n_chains)"
    @info "Total samples: $(result.n_samples_total) per chain"
    @info "Kept samples: $(result.n_samples_kept) total"
    @info "Sampling time: $(round(result.sampling_time, digits=2)) seconds"
    @info "Acceptance rate: $(round(result.diagnostics.acceptance_rate, digits=3))"

    if !isempty(result.diagnostics.rhat)
        max_rhat = maximum(result.diagnostics.rhat)
        @info "Max R̂: $(round(max_rhat, digits=3)) $(max_rhat < 1.1 ? "(Good)" : "(Poor convergence)")"
    end

    # Show some parameter summaries
    n_show = min(5, length(result.posterior_mean))
    @info "First $n_show parameters (mean ± std):"
    for i in 1:n_show
        mean_val = round(result.posterior_mean[i], digits=4)
        std_val = round(result.posterior_std[i], digits=4)
        ci_lower = round(result.credible_intervals[1, i], digits=4)
        ci_upper = round(result.credible_intervals[2, i], digits=4)
        @info "  x[$i]: $mean_val ± $std_val, 95% CI: [$ci_lower, $ci_upper]"
    end
end

# Helper functions

"""
    compute_rhat(chain_results::Vector) -> Vector{Float64}

Gelman-Rubin potential scale reduction factor R̂, computed per parameter from
the kept samples of each chain. Chains are truncated to the shortest common
length. Falls back to all-ones (perfect convergence) when fewer than two
chains or fewer than two common draws are available, or when the within-chain
variance is zero for a parameter.
"""
function compute_rhat(chain_results::Vector)
    n_params = length(chain_results[1].posterior_mean)

    # Per-chain sample trajectories in physical space
    chains = [[s.x_phys for s in r.samples] for r in chain_results]
    m = length(chains)
    n = minimum(length(c) for c in chains)

    # Need at least two chains with at least two draws each
    if m < 2 || n < 2
        return ones(n_params)
    end

    rhat = ones(Float64, n_params)
    for j in 1:n_params
        per_chain = [[c[i][j] for i in 1:n] for c in chains]
        chain_means = [mean(v) for v in per_chain]
        W = mean(var(v) for v in per_chain)  # within-chain variance
        B = n * var(chain_means)             # between-chain variance
        if W > 0
            # R̂ = sqrt( ((n-1)/n * W + B/n) / W )
            rhat[j] = sqrt(((n - 1) / n * W + B / n) / W)
        end
    end
    return rhat
end

# Additional helper functions for state copying
"""
    Base.copyto!(dest::MCMCState, src::MCMCState) -> MCMCState

Copy all chain state from `src` into `dest` in place and return `dest`.
Array fields are copied element-wise (no aliasing with `src`); `hyperparams`
is deep-copied so the two states never share hyperparameter storage.
"""
function Base.copyto!(dest::MCMCState, src::MCMCState)
    dest.x_chi .= src.x_chi            # whitened-space position
    dest.x_phys .= src.x_phys          # physical-space position
    dest.log_posterior = src.log_posterior
    dest.log_likelihood = src.log_likelihood
    dest.log_prior = src.log_prior
    dest.gradient .= src.gradient
    dest.n_proposed = src.n_proposed   # acceptance-rate bookkeeping
    dest.n_accepted = src.n_accepted
    dest.hyperparams = deepcopy(src.hyperparams)
    dest.current_block = src.current_block
    return dest
end

# Helper function to apply state vector to flux fields
"""
    apply_state_to_fluxes(prior_fluxes::PriorFluxes, state::InversionState, domain::Domain) -> PriorFluxes

Apply the state vector as flux increments to the prior fluxes.

The first `domain.nbox` entries of `state.chi` are interpreted as NEE flux
increments and added to the first time slice of the prior NEE field, mapping
box index to the grid with the longitude index varying fastest. Returns a new
`PriorFluxes` with the modified NEE field; the input fluxes are not mutated.
When no NEE field exists or the state is shorter than `domain.nbox`, the
prior fluxes are returned unchanged.
"""
function apply_state_to_fluxes(prior_fluxes::PriorFluxes, state::InversionState, domain::Domain)
    # NOTE(review): simplified mapping -- assumes state.chi holds physical-space
    # NEE increments on the box grid; temporal structure beyond the first time
    # slice is not yet handled.
    if prior_fluxes.nee !== nothing && length(state.chi) >= domain.nbox
        modified_nee_data = copy(prior_fluxes.nee.data)

        # Hoisted grid dimensions (loop invariants)
        nlon = size(modified_nee_data, 1)
        nlat = size(modified_nee_data, 2)
        time_idx = 1  # increments applied to the first time slice only

        for i in 1:min(domain.nbox, length(state.chi))
            # Box index -> (lon, lat) indices: longitude varies fastest
            lon_idx = ((i - 1) % nlon) + 1
            lat_idx = ((i - 1) ÷ nlon) + 1

            # lon_idx is in 1:nlon by construction; only the latitude index
            # can run off the grid when nbox exceeds nlon*nlat.
            if lat_idx <= nlat
                modified_nee_data[lon_idx, lat_idx, time_idx] += state.chi[i]
            end
        end

        # Rebuild the NEE field around the modified data, keeping all metadata
        modified_nee = FluxField(
            prior_fluxes.nee.name,
            modified_nee_data,
            prior_fluxes.nee.lon,
            prior_fluxes.nee.lat,
            prior_fluxes.nee.time,
            prior_fluxes.nee.units,
            prior_fluxes.nee.scaling_factor,
            prior_fluxes.nee.time_units
        )

        return PriorFluxes(
            modified_nee,
            prior_fluxes.fossil_fuel,
            prior_fluxes.ocean,
            prior_fluxes.background,
            prior_fluxes.nee_global,
            prior_fluxes.fossil_fuel_global,
            prior_fluxes.ocean_global
        )
    else
        # Return original fluxes if no modification possible
        @debug "Unable to apply state to fluxes - using original prior fluxes"
        return prior_fluxes
    end
end

end # module MCMCIntegration
