# Hierarchical parameter sampling for FLEXINVERT MCMC
#
# This module provides comprehensive hierarchical parameter sampling for FLEXINVERT
# Bayesian atmospheric inversion. It handles sampling of hyperparameters including:
#
# - Observation error scale factors (per observation class/station type)
# - Prior covariance scaling parameters
# - Spatial correlation length scales
# - CAR (Conditional Autoregressive) model parameters for spatial residuals
# - Student-t degrees of freedom for robust likelihood
#
# Features:
# - Conjugate Gibbs sampling for variance parameters
# - Metropolis updates for non-conjugate cases with proper transforms
# - Block updates for correlated hyperparameters with adaptive proposals
# - Integration with existing MCMCTypes and Posterior infrastructure
# - Comprehensive bounds checking and validation
# - Built-in distribution sampling without external dependencies
#
# Usage:
# 1. Create hyperparameter specifications using HyperParameterSpec
# 2. Initialize HyperParameterSampler with specs and optional blocks
# 3. Call sample_hyperparameters! during MCMC sampling iterations
# 4. Monitor acceptance rates and adapt proposals as needed

module Hierarchical

using LinearAlgebra
using Random
using Statistics
using ..MCMCTypes
using ..Posterior

# Manual implementation of special functions

export HyperParameterSampler, HyperParameterType, HyperParameterBlock
export ObservationErrorScale, PriorCovarianceScale, SpatialCorrelationLength
export CARPrecision, CARCorrelation, StudentTDF, TransportModelError
export sample_hyperparameters!, initialize_hyperparameter_sampler
export update_hyperparameter_priors!, validate_hyperparameters

"""
    HyperParameterType

Enumeration of hyperparameter types for FLEXINVERT applications.

The type tag is used in two places: `sample_conjugate_parameter!` selects
the Gibbs update routine from it, and `update_state_hyperparameters!`
routes accepted values into the matching `state.hyperparams` field.
"""
@enum HyperParameterType begin
    ObservationErrorScale     # Station-class specific observation error inflation
    PriorCovarianceScale     # Regional flux prior uncertainty scaling
    SpatialCorrelationLength # Spatial correlation length scales
    CARPrecision             # CAR model precision parameters
    CARCorrelation           # CAR model correlation parameters
    StudentTDF               # Student-t degrees of freedom
    TransportModelError      # Transport model error parameters
    FootprintAggregationError # Footprint aggregation error parameters
end

"""
    AbstractHyperParameterPrior

Abstract base type for hyperparameter prior distributions.

Concrete subtypes in this module: `InverseGammaPrior`, `GammaPrior`,
`LogNormalPrior`, `UniformPrior`, `BetaPrior`. Densities are evaluated
by `evaluate_log_prior`.
"""
abstract type AbstractHyperParameterPrior end

"""
    InverseGammaPrior(shape, scale)

Inverse-gamma prior for variance parameters (conjugate with Gaussian
likelihoods).

# Arguments
- `shape`: α parameter, must be positive
- `scale`: β parameter, must be positive

# Throws
- `ArgumentError` if either parameter is non-positive.
"""
struct InverseGammaPrior <: AbstractHyperParameterPrior
    shape::Float64      # α parameter
    scale::Float64      # β parameter

    function InverseGammaPrior(shape::Float64, scale::Float64)
        # Validate with ArgumentError: @assert is for internal invariants and
        # may be disabled at higher optimization levels.
        shape > 0 || throw(ArgumentError("shape parameter must be positive, got $shape"))
        scale > 0 || throw(ArgumentError("scale parameter must be positive, got $scale"))
        new(shape, scale)
    end
end

"""
    GammaPrior(shape, rate)

Gamma prior for precision parameters (rate parameterization).

# Arguments
- `shape`: α parameter, must be positive
- `rate`: β parameter (rate, not scale), must be positive

# Throws
- `ArgumentError` if either parameter is non-positive.
"""
struct GammaPrior <: AbstractHyperParameterPrior
    shape::Float64      # α parameter
    rate::Float64       # β parameter (rate parameterization)

    function GammaPrior(shape::Float64, rate::Float64)
        # ArgumentError instead of @assert: assertions are not input validation.
        shape > 0 || throw(ArgumentError("shape parameter must be positive, got $shape"))
        rate > 0 || throw(ArgumentError("rate parameter must be positive, got $rate"))
        new(shape, rate)
    end
end

"""
    LogNormalPrior(μ, σ)

Log-normal prior for positive-constrained parameters.

# Arguments
- `μ`: mean on the log scale
- `σ`: standard deviation on the log scale, must be positive

# Throws
- `ArgumentError` if `σ` is non-positive.
"""
struct LogNormalPrior <: AbstractHyperParameterPrior
    μ::Float64          # Log-scale mean
    σ::Float64          # Log-scale standard deviation

    function LogNormalPrior(μ::Float64, σ::Float64)
        # ArgumentError instead of @assert: assertions are not input validation.
        σ > 0 || throw(ArgumentError("standard deviation must be positive, got $σ"))
        new(μ, σ)
    end
end

"""
    UniformPrior(lower, upper)

Uniform prior on the bounded interval `[lower, upper]`.

# Throws
- `ArgumentError` if `lower >= upper`.
"""
struct UniformPrior <: AbstractHyperParameterPrior
    lower::Float64      # Lower bound
    upper::Float64      # Upper bound

    function UniformPrior(lower::Float64, upper::Float64)
        # ArgumentError instead of @assert: assertions are not input validation.
        lower < upper ||
            throw(ArgumentError("lower bound ($lower) must be less than upper bound ($upper)"))
        new(lower, upper)
    end
end

"""
    BetaPrior(α, β)

Beta prior for correlation parameters on [0, 1].

# Arguments
- `α`: first shape parameter, must be positive
- `β`: second shape parameter, must be positive

# Throws
- `ArgumentError` if either shape parameter is non-positive.
"""
struct BetaPrior <: AbstractHyperParameterPrior
    α::Float64          # First shape parameter
    β::Float64          # Second shape parameter

    function BetaPrior(α::Float64, β::Float64)
        # ArgumentError instead of @assert: assertions are not input validation.
        α > 0 || throw(ArgumentError("alpha parameter must be positive, got $α"))
        β > 0 || throw(ArgumentError("beta parameter must be positive, got $β"))
        new(α, β)
    end
end

"""
    HyperParameterSpec

Specification for a single hyperparameter: its prior, hard bounds,
initial value, whether it admits a conjugate (Gibbs) update, and the
transform (`:none`, `:log`, `:logit`) used for Metropolis proposals.

# Throws
- `ArgumentError` for invalid bounds, an initial value outside the
  bounds, or an unknown transform symbol.
"""
struct HyperParameterSpec
    name::Symbol                           # Parameter name
    type::HyperParameterType              # Parameter type
    prior::AbstractHyperParameterPrior    # Prior distribution
    bounds::Tuple{Float64, Float64}       # Hard bounds (lower, upper)
    initial_value::Float64                # Initial value
    conjugate::Bool                       # Whether updates are conjugate
    transform::Symbol                     # Transform: :none, :log, :logit

    function HyperParameterSpec(name::Symbol, type::HyperParameterType,
                               prior::AbstractHyperParameterPrior,
                               bounds::Tuple{Float64, Float64},
                               initial_value::Float64;
                               conjugate::Bool = false,
                               transform::Symbol = :none)
        # Validate with ArgumentError: @assert is for internal invariants and
        # may be compiled out, so it must not guard user-supplied input.
        bounds[1] < bounds[2] ||
            throw(ArgumentError("invalid bounds $bounds: lower must be < upper"))
        bounds[1] <= initial_value <= bounds[2] ||
            throw(ArgumentError("initial value $initial_value outside bounds $bounds"))
        # Tuple membership avoids allocating a Vector on every construction.
        transform in (:none, :log, :logit) ||
            throw(ArgumentError("invalid transform :$transform (expected :none, :log, or :logit)"))
        new(name, type, prior, bounds, initial_value, conjugate, transform)
    end
end

"""
    HyperParameterBlock

Block of related hyperparameters that are proposed and accepted jointly.

The proposal covariance defaults to `0.01 * I`. When `adaptive` is true,
accepted block samples are folded into the covariance by
`update_block_covariance!`; `target_acceptance` records the desired
acceptance rate for tuning.

# Throws
- `DimensionMismatch` if a supplied `proposal_cov` does not match the
  number of parameters in the block.
"""
struct HyperParameterBlock
    name::Symbol                                # Block name
    parameters::Vector{Symbol}                  # Parameter names in block
    joint_prior::Union{Nothing, AbstractHyperParameterPrior}   # Joint prior (optional)
    proposal_cov::Matrix{Float64}              # Proposal covariance matrix
    adaptive::Bool                             # Adaptive proposal covariance
    target_acceptance::Float64                 # Target acceptance rate

    function HyperParameterBlock(name::Symbol, parameters::Vector{Symbol};
                                joint_prior::Union{Nothing, AbstractHyperParameterPrior} = nothing,
                                proposal_cov::Union{Nothing, Matrix{Float64}} = nothing,
                                adaptive::Bool = true,
                                target_acceptance::Float64 = 0.44)
        n_params = length(parameters)
        if proposal_cov === nothing
            # Materialize a dense Matrix{Float64} directly; the previous
            # `0.01 * I(n)` produced a Diagonal and rebound the keyword with
            # a different type, relying on implicit conversion inside `new`.
            cov = Matrix(0.01 * I, n_params, n_params)
        else
            # DimensionMismatch (not @assert) so the check survives optimized
            # builds and reports a standard exception type.
            size(proposal_cov) == (n_params, n_params) ||
                throw(DimensionMismatch("proposal covariance must be $(n_params)×$(n_params), got $(size(proposal_cov))"))
            cov = proposal_cov
        end
        new(name, parameters, joint_prior, cov, adaptive, target_acceptance)
    end
end

"""
    HyperParameterSampler

Main sampler for hierarchical hyperparameters.

Holds the parameter specifications and blocks, the current values with
their log-prior evaluations, acceptance bookkeeping for individual
parameters and blocks, per-block adaptation state, and the RNG used for
all proposals.
"""
mutable struct HyperParameterSampler
    # Parameter specifications
    specs::Dict{Symbol, HyperParameterSpec}     # Individual parameter specs
    blocks::Vector{HyperParameterBlock}         # Parameter blocks

    # Current values and statistics
    current_values::Dict{Symbol, Float64}       # Current parameter values
    log_priors::Dict{Symbol, Float64}           # Current log-prior values

    # Adaptation tracking
    acceptance_counts::Dict{Symbol, Int}        # Acceptances per parameter
    proposal_counts::Dict{Symbol, Int}          # Proposals per parameter
    block_acceptance_counts::Dict{Symbol, Int}  # Acceptances per block
    block_proposal_counts::Dict{Symbol, Int}    # Proposals per block

    # Adaptive proposal covariances for blocks
    block_means::Dict{Symbol, Vector{Float64}}      # Running means for adaptation
    block_covariances::Dict{Symbol, Matrix{Float64}} # Running covariances
    adaptation_counts::Dict{Symbol, Int}             # Adaptation sample counts

    # Sampling frequency control
    update_frequency::Int                       # Update every N iterations
    last_update::Int                            # Iteration of last update

    # Random number generator
    rng::AbstractRNG

    function HyperParameterSampler(specs::Dict{Symbol, HyperParameterSpec},
                                  blocks::Vector{HyperParameterBlock} = HyperParameterBlock[];
                                  update_frequency::Int = 10,
                                  rng::AbstractRNG = MersenneTwister())
        # Seed current values and their log-prior densities from the specs.
        values = Dict{Symbol, Float64}(
            name => spec.initial_value for (name, spec) in specs)
        priors = Dict{Symbol, Float64}(
            name => evaluate_log_prior(spec.prior, spec.initial_value, spec.transform)
            for (name, spec) in specs)

        # Fresh zeroed counter dictionary for a set of keys.
        zero_counts(ks) = Dict{Symbol, Int}(k => 0 for k in ks)
        param_names = collect(keys(specs))
        block_names = [b.name for b in blocks]

        # Per-block adaptation state: running mean starts at zero; the
        # running covariance starts from the block's configured proposal
        # covariance (copied so adaptation never mutates the block).
        means = Dict{Symbol, Vector{Float64}}(
            b.name => zeros(length(b.parameters)) for b in blocks)
        covs = Dict{Symbol, Matrix{Float64}}(
            b.name => copy(b.proposal_cov) for b in blocks)

        new(specs, blocks, values, priors,
            zero_counts(param_names), zero_counts(param_names),
            zero_counts(block_names), zero_counts(block_names),
            means, covs, zero_counts(block_names),
            update_frequency, 0, rng)
    end
end

"""
    sample_hyperparameters!(sampler::HyperParameterSampler, state::MCMCState,
                            evaluator::LogPosteriorEvaluator, iteration::Int)

Main hyperparameter update entry point, called during MCMC sampling.

Runs, in order: conjugate (Gibbs) updates, joint block updates, then
individual Metropolis updates for parameters not covered by any block.
Updates are throttled to every `sampler.update_frequency` iterations.
Returns `true` if any update was accepted.
"""
function sample_hyperparameters!(sampler::HyperParameterSampler,
                                 state::MCMCState,
                                 evaluator::LogPosteriorEvaluator,
                                 iteration::Int)
    # Throttle: skip unless `update_frequency` iterations have elapsed.
    if iteration - sampler.last_update < sampler.update_frequency
        return false
    end
    sampler.last_update = iteration

    accepted_any = false

    # 1) Conjugate (Gibbs) updates for parameters flagged as conjugate.
    for (name, spec) in sampler.specs
        spec.conjugate || continue
        accepted_any |= sample_conjugate_parameter!(sampler, name, state, evaluator)
    end

    # 2) Joint Metropolis updates for each parameter block.
    for block in sampler.blocks
        accepted_any |= sample_parameter_block!(sampler, block, state, evaluator)
    end

    # 3) Individual Metropolis updates for the remaining non-conjugate,
    #    non-blocked parameters.
    for (name, spec) in sampler.specs
        if !spec.conjugate && !is_in_any_block(name, sampler.blocks)
            accepted_any |= sample_metropolis_parameter!(sampler, name, state, evaluator)
        end
    end

    # Push accepted values into the MCMC state.
    if accepted_any
        update_state_hyperparameters!(sampler, state)
    end

    return accepted_any
end

"""
    sample_conjugate_parameter!(sampler, name, state, evaluator)

Dispatch a conjugate (Gibbs) update according to the parameter's type tag.

Returns `true` when a supported update was performed; warns and returns
`false` for types without a conjugate implementation.
"""
function sample_conjugate_parameter!(sampler::HyperParameterSampler,
                                    name::Symbol,
                                    state::MCMCState,
                                    evaluator::LogPosteriorEvaluator)
    ptype = sampler.specs[name].type
    if ptype == ObservationErrorScale
        return sample_observation_error_scale!(sampler, name, state, evaluator)
    elseif ptype == PriorCovarianceScale
        return sample_prior_covariance_scale!(sampler, name, state, evaluator)
    end
    @warn "Conjugate sampling not implemented for parameter type $(ptype)"
    return false
end

"""
    sample_observation_error_scale!(sampler, name, state, evaluator)

Gibbs update of an observation-error scale factor under an inverse-gamma
prior.

With the model R = σ²R₀, the conditional posterior for σ² is
inverse-gamma with shape α + n/2 and scale β + SSR/2, where SSR is the
squared residual norm. The draw is clamped to the spec's bounds and
always counted as accepted.
"""
function sample_observation_error_scale!(sampler::HyperParameterSampler,
                                        name::Symbol,
                                        state::MCMCState,
                                        evaluator::LogPosteriorEvaluator)
    spec = sampler.specs[name]
    @assert isa(spec.prior, InverseGammaPrior) "Observation error scale requires InverseGamma prior"

    # Residuals of the current state under the forward model.
    r = evaluator.forward_model(state.x_phys) - evaluator.y_obs
    n = length(r)
    # Plain squared norm; a full treatment would weight by R₀⁻¹ and restrict
    # to the observations belonging to this parameter's class.
    ssr = dot(r, r)

    # Conjugate inverse-gamma posterior parameters.
    α = spec.prior.shape + 0.5 * n
    β = spec.prior.scale + 0.5 * ssr

    # InverseGamma(α, β) draw via the reciprocal of a Gamma(α, 1/β) draw,
    # then enforce the hard bounds.
    draw = 1.0 / rand_gamma(sampler.rng, α, 1.0 / β)
    draw = clamp(draw, spec.bounds[1], spec.bounds[2])

    # Record the new value; Gibbs draws are accepted by construction.
    sampler.current_values[name] = draw
    sampler.log_priors[name] = evaluate_log_prior(spec.prior, draw, spec.transform)
    sampler.acceptance_counts[name] += 1
    sampler.proposal_counts[name] += 1

    return true
end

"""
    sample_prior_covariance_scale!(sampler, name, state, evaluator)

Gibbs update of the prior covariance scale under an inverse-gamma prior.

With the model B = σ²B₀, the conditional posterior for σ² is
inverse-gamma with shape α + k/2 and scale β + q/2, where q is the
quadratic form of the prior deviation under B₀⁻¹. The draw is clamped to
the spec's bounds and always counted as accepted.
"""
function sample_prior_covariance_scale!(sampler::HyperParameterSampler,
                                       name::Symbol,
                                       state::MCMCState,
                                       evaluator::LogPosteriorEvaluator)
    spec = sampler.specs[name]
    @assert isa(spec.prior, InverseGammaPrior) "Prior covariance scale requires InverseGamma prior"

    # Quadratic form (x - x_prior)' B₀⁻¹ (x - x_prior).
    dev = state.x_phys - evaluator.x_prior
    q = dot(dev, evaluator.B_inv * dev)

    # Conjugate inverse-gamma posterior parameters.
    k = length(state.x_phys)
    α = spec.prior.shape + 0.5 * k
    β = spec.prior.scale + 0.5 * q

    # InverseGamma(α, β) draw via reciprocal Gamma, clamped to the bounds.
    draw = 1.0 / rand_gamma(sampler.rng, α, 1.0 / β)
    draw = clamp(draw, spec.bounds[1], spec.bounds[2])

    # Record the new value; Gibbs draws are accepted by construction.
    sampler.current_values[name] = draw
    sampler.log_priors[name] = evaluate_log_prior(spec.prior, draw, spec.transform)
    sampler.acceptance_counts[name] += 1
    sampler.proposal_counts[name] += 1

    return true
end

"""
    sample_parameter_block!(sampler, block, state, evaluator)

Sample a block of related hyperparameters jointly.

Proposes a correlated multivariate-Gaussian step in unconstrained space
(via the Cholesky factor of the block's adaptive proposal covariance),
maps the proposal back to the constrained scale, and accepts or rejects
with a Metropolis ratio combining likelihood, prior, and transform
Jacobian terms. On rejection, the sampler values and the MCMC state are
restored. Returns `true` iff the proposal was accepted.
"""
function sample_parameter_block!(sampler::HyperParameterSampler,
                                block::HyperParameterBlock,
                                state::MCMCState,
                                evaluator::LogPosteriorEvaluator)
    # Get current parameter values
    current_values = [sampler.current_values[name] for name in block.parameters]
    current_log_posterior = state.log_posterior

    # Transform to unconstrained space if needed
    current_unconstrained = transform_to_unconstrained(current_values, block.parameters, sampler.specs)

    # Generate proposal using multivariate normal
    # NOTE(review): `cholesky` throws if the covariance loses positive
    # definiteness; `update_block_covariance!` adds a 1e-6 diagonal jitter,
    # but a user-supplied initial covariance is used as-is — confirm PD.
    n_params = length(current_unconstrained)
    L = cholesky(sampler.block_covariances[block.name]).L
    proposal_unconstrained = current_unconstrained + L * randn(sampler.rng, n_params)

    # Transform back to constrained space
    proposal_values = transform_from_unconstrained(proposal_unconstrained, block.parameters, sampler.specs)

    # Check bounds
    # Out-of-bounds proposals count as proposed-but-rejected so acceptance
    # statistics remain meaningful.
    if !all_in_bounds(proposal_values, block.parameters, sampler.specs)
        sampler.block_proposal_counts[block.name] += 1
        return false
    end

    # Store current hyperparameter values
    # (shallow dict copies suffice: the values are Float64)
    old_values = copy(sampler.current_values)
    old_log_priors = copy(sampler.log_priors)

    # Update hyperparameter values
    for (i, name) in enumerate(block.parameters)
        sampler.current_values[name] = proposal_values[i]
        spec = sampler.specs[name]
        sampler.log_priors[name] = evaluate_log_prior(spec.prior, proposal_values[i], spec.transform)
    end

    # Update state hyperparameters and re-evaluate posterior
    update_state_hyperparameters!(sampler, state)
    proposal_log_posterior = evaluate_log_posterior!(evaluator, state)

    # Compute acceptance probability
    log_prior_ratio = sum(sampler.log_priors[name] - old_log_priors[name] for name in block.parameters)
    # NOTE(review): the `state.log_prior` terms below cancel algebraically,
    # so this reduces to proposal_log_posterior - current_log_posterior —
    # confirm whether subtracting the flux-prior term was actually intended.
    log_likelihood_ratio = (proposal_log_posterior - state.log_prior) - (current_log_posterior - state.log_prior)

    # Add Jacobian correction for transforms
    log_jacobian_ratio = compute_jacobian_ratio(current_values, proposal_values, block.parameters, sampler.specs)

    log_alpha = log_likelihood_ratio + log_prior_ratio + log_jacobian_ratio
    alpha = min(1.0, exp(log_alpha))

    # Accept or reject
    sampler.block_proposal_counts[block.name] += 1

    if rand(sampler.rng) < alpha
        # Accept proposal
        sampler.block_acceptance_counts[block.name] += 1

        # Update adaptive proposal covariance
        # NOTE(review): adaptation folds in accepted samples only; standard
        # adaptive Metropolis updates on every draw — confirm intended.
        if block.adaptive
            update_block_covariance!(sampler, block, proposal_unconstrained)
        end

        return true
    else
        # Reject proposal - restore values
        sampler.current_values = old_values
        sampler.log_priors = old_log_priors
        update_state_hyperparameters!(sampler, state)
        state.log_posterior = current_log_posterior

        return false
    end
end

"""
    sample_metropolis_parameter!(sampler, name, state, evaluator)

Sample an individual hyperparameter with a random-walk Metropolis step.

The proposal is drawn in the parameter's transform space (see
`generate_proposal`); out-of-bounds proposals are rejected immediately.
On rejection the previous value and `state.log_posterior` are restored.
Returns `true` iff the proposal was accepted.
"""
function sample_metropolis_parameter!(sampler::HyperParameterSampler,
                                     name::Symbol,
                                     state::MCMCState,
                                     evaluator::LogPosteriorEvaluator)
    spec = sampler.specs[name]
    current_value = sampler.current_values[name]
    current_log_posterior = state.log_posterior
    current_log_prior = sampler.log_priors[name]

    # Generate proposal based on parameter type
    proposal_value = generate_proposal(current_value, spec, sampler.rng)

    # Check bounds
    # Counted as a proposal so acceptance-rate statistics stay meaningful.
    if !(spec.bounds[1] <= proposal_value <= spec.bounds[2])
        sampler.proposal_counts[name] += 1
        return false
    end

    # Update hyperparameter
    sampler.current_values[name] = proposal_value
    sampler.log_priors[name] = evaluate_log_prior(spec.prior, proposal_value, spec.transform)

    # Update state and re-evaluate posterior
    update_state_hyperparameters!(sampler, state)
    proposal_log_posterior = evaluate_log_posterior!(evaluator, state)

    # Compute acceptance probability
    log_prior_ratio = sampler.log_priors[name] - current_log_prior
    # NOTE(review): the `state.log_prior` terms cancel algebraically, so this
    # reduces to proposal_log_posterior - current_log_posterior.
    log_likelihood_ratio = (proposal_log_posterior - state.log_prior) - (current_log_posterior - state.log_prior)

    # Add proposal ratio if needed
    # (currently always 0 — all proposals here are symmetric in their space)
    log_proposal_ratio = compute_proposal_ratio(current_value, proposal_value, spec)

    log_alpha = log_likelihood_ratio + log_prior_ratio + log_proposal_ratio
    alpha = min(1.0, exp(log_alpha))

    # Accept or reject
    sampler.proposal_counts[name] += 1

    if rand(sampler.rng) < alpha
        sampler.acceptance_counts[name] += 1
        return true
    else
        # Reject proposal - restore values
        sampler.current_values[name] = current_value
        sampler.log_priors[name] = current_log_prior
        update_state_hyperparameters!(sampler, state)
        state.log_posterior = current_log_posterior
        return false
    end
end

"""
    generate_proposal(current_value, spec, rng)

Draw a random-walk proposal for one hyperparameter in the space implied
by its transform.

Uses a Gaussian step of fixed size 0.1 in log or logit space, or 10% of
the bounds range for untransformed parameters.
"""
function generate_proposal(current_value::Float64, spec::HyperParameterSpec, rng::AbstractRNG)
    if spec.transform == :log
        # Gaussian step in log space (log-normal proposal).
        return exp(log(current_value) + 0.1 * randn(rng))
    elseif spec.transform == :logit
        # Gaussian step in logit space of the bounds-normalized value.
        lo, hi = spec.bounds
        z = logit((current_value - lo) / (hi - lo)) + 0.1 * randn(rng)
        return lo + logistic(z) * (hi - lo)
    end
    # Gaussian step scaled to 10% of the allowed range.
    return current_value + 0.1 * (spec.bounds[2] - spec.bounds[1]) * randn(rng)
end

"""
    evaluate_log_prior(prior, value, transform)

Evaluate the log-prior density of `value` under `prior`, with an
optional change-of-variables correction for the sampling transform.

The density functions (`logpdf_invgamma`, `logpdf_gamma`, …) are this
module's manual implementations, avoiding a Distributions.jl dependency.
"""
function evaluate_log_prior(prior::AbstractHyperParameterPrior, value::Float64, transform::Symbol)
    # Branch on the concrete prior type. (Multiple dispatch would be more
    # idiomatic, but the isa-chain keeps all cases in one place.)
    if isa(prior, InverseGammaPrior)
        log_pdf = logpdf_invgamma(value, prior.shape, prior.scale)
    elseif isa(prior, GammaPrior)
        log_pdf = logpdf_gamma(value, prior.shape, prior.rate)
    elseif isa(prior, LogNormalPrior)
        log_pdf = logpdf_lognormal(value, prior.μ, prior.σ)
    elseif isa(prior, UniformPrior)
        log_pdf = logpdf_uniform(value, prior.lower, prior.upper)
    elseif isa(prior, BetaPrior)
        log_pdf = logpdf_beta(value, prior.α, prior.β)
    else
        error("Unknown prior type: $(typeof(prior))")
    end

    # Add Jacobian correction for transforms
    # NOTE(review): subtracting log(value) here implies a particular
    # convention about which space the prior density lives in — confirm
    # the sign against how the acceptance ratios consume this result.
    # Also note the :logit branch applies no analogous correction, which
    # is inconsistent with the :log branch — verify intended.
    if transform == :log && value > 0
        log_pdf -= log(value)  # Jacobian for log transform
    elseif transform == :logit
        # Jacobian for logit transform would be added here
    end

    return log_pdf
end

"""
    update_state_hyperparameters!(sampler, state)

Copy the sampler's current hyperparameter values into `state.hyperparams`.

Observation-error scale factors are gathered into a vector sorted by
parameter name (Dict iteration order is unspecified, so sorting makes
the ordering deterministic); scalar hyperparameters are routed to their
fields by type tag.
"""
function update_state_hyperparameters!(sampler::HyperParameterSampler, state::MCMCState)
    # Collect the *names* of observation-error-scale parameters.
    # (The previous `findall` over `keys(specs)` did not yield key symbols
    # and could not be used to index `current_values`.)
    obs_error_names = [name for (name, spec) in sampler.specs
                       if spec.type == ObservationErrorScale]
    if !isempty(obs_error_names)
        sort!(obs_error_names)  # deterministic ordering independent of Dict
        state.hyperparams.obs_error_scale = [sampler.current_values[name]
                                             for name in obs_error_names]
    end

    # Route scalar hyperparameters to their state fields by type tag.
    for (name, spec) in sampler.specs
        if spec.type == PriorCovarianceScale
            state.hyperparams.prior_scale = sampler.current_values[name]
        elseif spec.type == SpatialCorrelationLength
            state.hyperparams.correlation_length = sampler.current_values[name]
        elseif spec.type == CARPrecision
            state.hyperparams.car_precision = sampler.current_values[name]
        elseif spec.type == CARCorrelation
            state.hyperparams.car_correlation = sampler.current_values[name]
        elseif spec.type == StudentTDF
            state.hyperparams.nu_obs = sampler.current_values[name]
        end
    end
    return nothing
end

"""
    update_block_covariance!(sampler, block, new_sample)

Fold an unconstrained-space sample into the block's adaptive proposal
covariance via a running-mean / running-covariance recursion, adding a
small diagonal jitter (1e-6) to keep the matrix positive definite.
"""
function update_block_covariance!(sampler::HyperParameterSampler,
                                 block::HyperParameterBlock,
                                 new_sample::Vector{Float64})
    n = sampler.adaptation_counts[block.name] + 1
    sampler.adaptation_counts[block.name] = n

    # Running mean: μₙ = μₙ₋₁ + (x - μₙ₋₁) / n
    μ_prev = sampler.block_means[block.name]
    μ = μ_prev + (new_sample - μ_prev) / n
    sampler.block_means[block.name] = μ

    # Running covariance (Welford-style recursion), seeded from the block's
    # configured proposal covariance rather than zero.
    if n > 1
        Σ_prev = sampler.block_covariances[block.name]
        d_prev = new_sample - μ_prev
        d_new = new_sample - μ
        Σ = Σ_prev + (d_prev * d_new' - Σ_prev) / n

        # Diagonal jitter keeps the later Cholesky factorization well-posed.
        sampler.block_covariances[block.name] = Σ + 1e-6 * I(length(new_sample))
    end
end

"""
    initialize_hyperparameter_sampler(config::MCMCConfiguration)

Build a `HyperParameterSampler` with the default FLEXINVERT
hyperparameter set: per-station-class observation error scales, prior
covariance scale, spatial correlation length, CAR parameters (as a
joint block), and Student-t degrees of freedom. If
`config.sample_hyperparams` is false the sampler is empty.
"""
function initialize_hyperparameter_sampler(config::MCMCConfiguration)
    specs = Dict{Symbol, HyperParameterSpec}()
    blocks = HyperParameterBlock[]

    if config.sample_hyperparams
        # One observation-error scale per station class (conjugate update).
        for class_name in (:surface, :tower, :aircraft)
            name = Symbol("obs_error_$(class_name)")
            specs[name] = HyperParameterSpec(
                name, ObservationErrorScale,
                InverseGammaPrior(2.0, 1.0),  # weakly informative
                (0.1, 10.0),                  # scale-factor bounds
                1.0,                          # initial value
                conjugate = true,
                transform = :log)
        end

        # Prior covariance scale (conjugate update).
        specs[:prior_scale] = HyperParameterSpec(
            :prior_scale, PriorCovarianceScale,
            InverseGammaPrior(2.0, 1.0),
            (0.1, 10.0), 1.0,
            conjugate = true, transform = :log)

        # Spatial correlation length: mean 100 km, CV ≈ 50%, bounded
        # between 10 km and 1000 km.
        specs[:correlation_length] = HyperParameterSpec(
            :correlation_length, SpatialCorrelationLength,
            LogNormalPrior(log(100.0), 0.5),
            (10.0, 1000.0), 100.0,
            conjugate = false, transform = :log)

        # CAR model precision and correlation parameters.
        specs[:car_precision] = HyperParameterSpec(
            :car_precision, CARPrecision,
            GammaPrior(2.0, 1.0),
            (0.1, 10.0), 1.0,
            conjugate = false, transform = :log)

        specs[:car_correlation] = HyperParameterSpec(
            :car_correlation, CARCorrelation,
            BetaPrior(2.0, 2.0),  # centered on 0.5
            (0.01, 0.99), 0.5,
            conjugate = false, transform = :logit)

        # Student-t degrees of freedom: Gamma(2, 0.5) prior (mean 4),
        # bounded from heavy (2.1) to near-Gaussian (30) tails.
        specs[:nu_obs] = HyperParameterSpec(
            :nu_obs, StudentTDF,
            GammaPrior(2.0, 0.5),
            (2.1, 30.0), 4.0,
            conjugate = false, transform = :log)

        # The CAR parameters are strongly coupled, so update them jointly.
        push!(blocks, HyperParameterBlock(
            :car_params,
            [:car_precision, :car_correlation],
            adaptive = true,
            target_acceptance = 0.44))
    end

    return HyperParameterSampler(specs, blocks,
                                update_frequency = config.hyper_update_freq)
end

# Utility functions

"""
    is_in_any_block(name, blocks)

Return `true` if parameter `name` appears in any of the given blocks.
"""
function is_in_any_block(name::Symbol, blocks::Vector{HyperParameterBlock})
    return any(block -> name in block.parameters, blocks)
end

"""
    all_in_bounds(values, names, specs)

Return `true` if every value lies within the hard bounds of the
correspondingly-named spec.
"""
function all_in_bounds(values::Vector{Float64}, names::Vector{Symbol},
                      specs::Dict{Symbol, HyperParameterSpec})
    for (i, name) in enumerate(names)
        lo, hi = specs[name].bounds
        (lo <= values[i] <= hi) || return false
    end
    return true
end

"""
    transform_to_unconstrained(values, names, specs)

Map constrained parameter values to unconstrained space according to
each spec's transform: log for `:log`, logit of the bounds-normalized
value for `:logit`, identity otherwise.
"""
function transform_to_unconstrained(values::Vector{Float64}, names::Vector{Symbol},
                                   specs::Dict{Symbol, HyperParameterSpec})
    return map(eachindex(values)) do i
        spec = specs[names[i]]
        if spec.transform == :log
            log(values[i])
        elseif spec.transform == :logit
            lo, hi = spec.bounds
            logit((values[i] - lo) / (hi - lo))
        else
            values[i]
        end
    end
end

"""
    transform_from_unconstrained(unconstrained, names, specs)

Inverse of `transform_to_unconstrained`: exp for `:log`, bounds-scaled
logistic for `:logit`, identity otherwise.
"""
function transform_from_unconstrained(unconstrained::Vector{Float64}, names::Vector{Symbol},
                                     specs::Dict{Symbol, HyperParameterSpec})
    return map(eachindex(unconstrained)) do i
        spec = specs[names[i]]
        if spec.transform == :log
            exp(unconstrained[i])
        elseif spec.transform == :logit
            lo, hi = spec.bounds
            lo + logistic(unconstrained[i]) * (hi - lo)
        else
            unconstrained[i]
        end
    end
end

"""
    compute_jacobian_ratio(old_values, new_values, names, specs)

Log-Jacobian ratio of the constrained↔unconstrained transforms for a
Metropolis step proposed in unconstrained space.
"""
function compute_jacobian_ratio(old_values::Vector{Float64}, new_values::Vector{Float64},
                               names::Vector{Symbol}, specs::Dict{Symbol, HyperParameterSpec})
    total = 0.0
    for (i, name) in enumerate(names)
        spec = specs[name]
        if spec.transform == :log
            # d(exp u)/du = exp(u) = value  →  log(new) - log(old)
            total += log(new_values[i]) - log(old_values[i])
        elseif spec.transform == :logit
            # d(inv-logit)/du = p(1-p); the constant bounds width cancels
            # in the ratio. (`width` avoids shadowing Base.range.)
            width = spec.bounds[2] - spec.bounds[1]
            p_old = (old_values[i] - spec.bounds[1]) / width
            p_new = (new_values[i] - spec.bounds[1]) / width
            total += log(p_new * (1 - p_new)) - log(p_old * (1 - p_old))
        end
    end
    return total
end

"""
    compute_proposal_ratio(old_value, new_value, spec)

Log proposal-density ratio q(old|new)/q(new|old) for a Metropolis step.

All proposals used here (Gaussian random walks, possibly in log or logit
coordinates) are symmetric in their sampling space, so the ratio is
always zero; the hook exists for future asymmetric proposals.
"""
function compute_proposal_ratio(old_value::Float64, new_value::Float64, spec::HyperParameterSpec)
    return 0.0
end

"""
    logit(p)

Logit (log-odds) function: `log(p / (1 - p))`.

Accepts any `Real` (previously restricted to `Float64`). Finite for
`p` strictly inside (0, 1); the endpoints map to `-Inf` / `+Inf`.
"""
logit(p::Real) = log(p / (1 - p))

"""
    logistic(x)

Logistic (inverse-logit) function: `1 / (1 + exp(-x))`.

Accepts any `Real` (previously restricted to `Float64`). Saturates to
0.0 / 1.0 for large-magnitude arguments without producing NaN.
"""
logistic(x::Real) = 1 / (1 + exp(-x))

"""
    validate_hyperparameters(hyperparams::HyperParameters)

Validate hyperparameter values for physical consistency.

Checks positivity of observation error scales, prior scale, correlation
length and CAR precision; that the CAR correlation lies in (0, 1); and
that the Student-t degrees of freedom exceed 2 (finite variance).
Emits a warning and returns `false` at the first failed check.
"""
function validate_hyperparameters(hyperparams::HyperParameters)
    # Each entry pairs a failure condition with its warning message;
    # checks run in the same order as the individual tests they replace.
    checks = (
        (any(scale <= 0 for scale in hyperparams.obs_error_scale),
         "Non-positive observation error scale detected"),
        (hyperparams.prior_scale <= 0,
         "Non-positive prior scale detected"),
        (hyperparams.correlation_length <= 0,
         "Non-positive correlation length detected"),
        (hyperparams.car_precision <= 0,
         "Non-positive CAR precision detected"),
        (!(0 < hyperparams.car_correlation < 1),
         "CAR correlation parameter outside (0,1)"),
        (hyperparams.nu_obs <= 2,
         "Student-t degrees of freedom too small (need > 2 for finite variance)"),
    )
    for (failed, message) in checks
        if failed
            @warn message
            return false
        end
    end
    return true
end

"""
    get_acceptance_rates(sampler::HyperParameterSampler)

Return a `Dict{Symbol, Float64}` of Metropolis acceptance rates, keyed by
hyperparameter name for individual updates and by block name for block
updates. A parameter or block with zero recorded proposals reports a rate
of `0.0`.
"""
function get_acceptance_rates(sampler::HyperParameterSampler)
    rates = Dict{Symbol, Float64}()

    # Per-parameter rates.
    for name in keys(sampler.specs)
        n = sampler.proposal_counts[name]
        rates[name] = n > 0 ? sampler.acceptance_counts[name] / n : 0.0
    end

    # Per-block rates.
    for block in sampler.blocks
        n = sampler.block_proposal_counts[block.name]
        rates[block.name] = n > 0 ? sampler.block_acceptance_counts[block.name] / n : 0.0
    end

    return rates
end

# Manual implementations of distribution sampling functions
# (to avoid dependency on Distributions.jl)

"""
    rand_gamma(rng, shape, scale)

Draw a single Gamma(`shape`, `scale`) variate without Distributions.jl.
Uses the Marsaglia–Tsang squeeze method for `shape ≥ 1` and the
shape-boosting transformation for `shape < 1`; the unit-scale draw is then
multiplied by `scale`.
"""
function rand_gamma(rng::AbstractRNG, shape::Float64, scale::Float64)
    unit_draw = shape < 1.0 ? rand_gamma_small_shape(rng, shape) :
                              rand_gamma_marsaglia(rng, shape)
    return scale * unit_draw
end

# Marsaglia–Tsang (2000) rejection sampler for Gamma(shape, 1), shape ≥ 1.
# Draws z ~ N(0,1), forms v = (1 + c·z)^3, and accepts d·v either via the
# fast squeeze test or the exact log test; otherwise retries.
function rand_gamma_marsaglia(rng::AbstractRNG, shape::Float64)
    d = shape - 1.0/3.0
    c = 1.0 / sqrt(9.0 * d)

    while true
        z = randn(rng)
        v = (1.0 + c * z)^3
        v > 0.0 || continue
        u = rand(rng)
        # Cheap squeeze first, then the exact acceptance test.
        if u < 1.0 - 0.0331 * z^4
            return d * v
        end
        if log(u) < 0.5 * z^2 + d * (1.0 - v + log(v))
            return d * v
        end
    end
end

# Shape-boosting transformation for Gamma(shape, 1) with shape < 1:
# if Y ~ Gamma(shape + 1) and U ~ Uniform(0,1) independently, then
# Y · U^(1/shape) ~ Gamma(shape).
function rand_gamma_small_shape(rng::AbstractRNG, shape::Float64)
    boosted = rand_gamma_marsaglia(rng, shape + 1.0)
    u = rand(rng)
    return boosted * u^(1.0 / shape)
end

"""
    rand_beta(rng, α, β)

Draw one Beta(α, β) variate via the Gamma-ratio construction:
if X ~ Gamma(α, 1) and Y ~ Gamma(β, 1) independently, then
X / (X + Y) ~ Beta(α, β).
"""
function rand_beta(rng::AbstractRNG, α::Float64, β::Float64)
    gx = rand_gamma(rng, α, 1.0)
    gy = rand_gamma(rng, β, 1.0)
    return gx / (gx + gy)
end

"""
    rand_lognormal(rng, μ, σ)

Draw one log-normal variate: `exp(μ + σ·Z)` with `Z ~ N(0, 1)`, so the
underlying normal has mean `μ` and standard deviation `σ` in log space.
"""
function rand_lognormal(rng::AbstractRNG, μ::Float64, σ::Float64)
    z = randn(rng)
    return exp(μ + σ * z)
end

"""
    logpdf_gamma(x, shape, rate)

Log-density of the Gamma distribution in the rate parameterization:
`shape·log(rate) - log Γ(shape) + (shape-1)·log(x) - rate·x`.
Returns `-Inf` off the support (`x ≤ 0`).
"""
function logpdf_gamma(x::Float64, shape::Float64, rate::Float64)
    # Guard the support; NaN inputs fall through to the arithmetic.
    x <= 0.0 && return -Inf
    return shape * log(rate) - loggamma(shape) + (shape - 1.0) * log(x) - rate * x
end

"""
    logpdf_invgamma(x, shape, scale)

Log-density of the Inverse-Gamma distribution:
`shape·log(scale) - log Γ(shape) - (shape+1)·log(x) - scale/x`.
Returns `-Inf` off the support (`x ≤ 0`).
"""
function logpdf_invgamma(x::Float64, shape::Float64, scale::Float64)
    # Guard the support; NaN inputs fall through to the arithmetic.
    x <= 0.0 && return -Inf
    return shape * log(scale) - loggamma(shape) - (shape + 1.0) * log(x) - scale / x
end

"""
    logpdf_lognormal(x, μ, σ)

Log-density of the log-normal distribution with log-space mean `μ` and
log-space standard deviation `σ`. Returns `-Inf` off the support (`x ≤ 0`).
"""
function logpdf_lognormal(x::Float64, μ::Float64, σ::Float64)
    # Guard the support; NaN inputs fall through to the arithmetic.
    x <= 0.0 && return -Inf
    lx = log(x)
    z = (lx - μ) / σ
    return -lx - 0.5 * log(2π * σ^2) - 0.5 * z^2
end

"""
    logpdf_beta(x, α, β)

Log-density of the Beta(α, β) distribution:
`log B(α, β)⁻¹ + (α-1)·log(x) + (β-1)·log(1-x)`, where the log of the
normalizing constant is expressed through `loggamma`. Returns `-Inf`
outside the open support `(0, 1)`.
"""
function logpdf_beta(x::Float64, α::Float64, β::Float64)
    # Guard the open support; NaN inputs fall through to the arithmetic.
    (x <= 0.0 || x >= 1.0) && return -Inf
    log_norm = loggamma(α + β) - loggamma(α) - loggamma(β)
    return log_norm + (α - 1.0) * log(x) + (β - 1.0) * log(1.0 - x)
end

"""
    logpdf_uniform(x, a, b)

Log-density of the continuous Uniform(a, b) distribution: `-log(b - a)`
inside the closed interval `[a, b]`, `-Inf` outside it.
"""
function logpdf_uniform(x::Float64, a::Float64, b::Float64)
    return a <= x <= b ? -log(b - a) : -Inf
end

"""
    loggamma(x)

Compute `log(Γ(x))` for `x > 0` using the Lanczos approximation
(g = 7, 9-term coefficient set), accurate to roughly 1e-13 on the positive
real axis. Returns `Inf` for `x ≤ 0` (callers treat non-positive arguments
as out of support).

This replaces the previous piecewise series/Stirling scheme, whose
mid-range branch was badly inaccurate (e.g. it returned ≈ -0.04 for
`loggamma(3.0)` instead of `log(2) ≈ 0.693`).
"""
function loggamma(x::Float64)
    if x <= 0.0
        return Inf
    elseif x < 0.5
        # Recurrence Γ(x+1) = x·Γ(x) keeps the Lanczos formula in its
        # well-conditioned region x ≥ 0.5.
        return loggamma(x + 1.0) - log(x)
    else
        # Lanczos coefficients for g = 7 (Godfrey / Numerical Recipes).
        coeffs = (676.5203681218851, -1259.1392167224028, 771.32342877765313,
                  -176.61502916214059, 12.507343278686905, -0.13857109526572012,
                  9.9843695780195716e-6, 1.5056327351493116e-7)
        acc = 0.99999999999980993
        for (i, c) in enumerate(coeffs)
            acc += c / (x - 1.0 + i)
        end
        t = x + 6.5  # t = x + g - 0.5
        return 0.5 * log(2π) + (x - 0.5) * log(t) - t + log(acc)
    end
end

# Compute log(Γ(x)) for moderate arguments (callers pass 1 ≤ x < 12).
#
# The previous version used a Stirling-like formula in (x-1) with a sign
# error in its last term (0.5*log(2π/(x-1)) instead of 0.5*log(2π*(x-1)))
# and was grossly inaccurate mid-range (e.g. ≈ -0.04 for loggamma(3)).
# This version shifts the argument upward via
#     log Γ(x) = log Γ(x + k) - Σ_{i=0}^{k-1} log(x + i)
# until the Stirling series is accurate, then applies Stirling's expansion
# with 1/x correction terms. Precondition: x > 0.
function _loggamma_series(x::Float64)
    shift = 0.0
    z = x
    while z < 12.0
        shift += log(z)
        z += 1.0
    end
    # Stirling series: (z-1/2)ln z - z + ln(2π)/2 + 1/(12z) - 1/(360z³) + 1/(1260z⁵)
    inv = 1.0 / z
    inv2 = inv * inv
    correction = inv * (1.0 / 12.0 - inv2 * (1.0 / 360.0 - inv2 / 1260.0))
    return (z - 0.5) * log(z) - z + 0.5 * log(2π) + correction - shift
end

# Stirling's asymptotic expansion of log(Γ(x)) for large x (callers use
# x ≥ 12), including the 1/(12x) - 1/(360x³) + 1/(1260x⁵) correction terms.
# The leading-order formula alone is off by ≈ 1/(12x) (≈ 7e-3 at x = 12),
# far too coarse for log-density ratios; with the corrections the absolute
# error at x = 12 is below 1e-10.
function _loggamma_stirling(x::Float64)
    inv = 1.0 / x
    inv2 = inv * inv
    correction = inv * (1.0 / 12.0 - inv2 * (1.0 / 360.0 - inv2 / 1260.0))
    return (x - 0.5) * log(x) - x + 0.5 * log(2π) + correction
end

end # module Hierarchical