# Log-posterior evaluation for FLEXINVERT Bayesian inversion
# Reuses existing simulate and gradient infrastructure

module Posterior

using LinearAlgebra
using ..MCMCTypes
# using ..Transformations: chi2phi, phi2chi  # Optional transformation module

# A manual loggamma implementation is defined at the bottom of this module

export LogPosteriorEvaluator, evaluate_log_posterior!, evaluate_log_likelihood!
export evaluate_log_prior!, StudentTLikelihood, GaussianLikelihood

"""
    AbstractLikelihood

Abstract supertype for observation-error likelihood models.

Concrete subtypes (`GaussianLikelihood`, `StudentTLikelihood`) must provide
a method `evaluate_likelihood(likelihood, residuals)` returning the
log-density of a residual vector `y_model - y_obs`.
"""
abstract type AbstractLikelihood end

"""
    GaussianLikelihood

Standard Gaussian likelihood with covariance matrix R.

Stores the precomputed inverse and log-determinant of R so repeated
density evaluations avoid refactorising. Construct via
`construct_gaussian_likelihood`.
"""
struct GaussianLikelihood <: AbstractLikelihood
    R_inv::Matrix{Float64}              # Inverse observation error covariance
    log_det_R::Float64                  # Log determinant of R
end

"""
    StudentTLikelihood

Robust Student-t likelihood for handling outliers and model discrepancies.

Heavier tails than the Gaussian down-weight observations with large
residuals. Construct via `construct_student_t_likelihood`, which
precomputes the normalization constant from `nu` and the scale matrix.
"""
struct StudentTLikelihood <: AbstractLikelihood
    R_inv::Matrix{Float64}              # Inverse scale matrix
    nu::Float64                         # Degrees of freedom
    log_normalization::Float64          # Log normalization constant (precomputed from nu and |R|)
end

"""
    LogPosteriorEvaluator

Evaluator for log-posterior density in FLEXINVERT context.
Encapsulates the forward model, prior covariance, and observation error model.

The likelihood is carried as a type parameter (`L<:AbstractLikelihood`)
rather than an abstractly-typed field, so that `evaluate_likelihood`
dispatches statically and the field is not boxed.
"""
struct LogPosteriorEvaluator{F,G,T,L<:AbstractLikelihood}
    # Forward model and gradient functions
    forward_model::F                    # Function (x_phys) -> y_model
    gradient_func::G                    # Function (x_phys) -> grad_obs
    transform::T                        # Transformation object (chi2phi, phi2chi), or `nothing`

    # Prior specification
    x_prior::Vector{Float64}            # Prior mean (physical space)
    B_inv::Matrix{Float64}              # Inverse prior covariance

    # Likelihood specification
    likelihood::L                       # Concrete likelihood model
    y_obs::Vector{Float64}              # Observations

    # Problem dimensions
    n_params::Int                       # Number of parameters
    n_obs::Int                          # Number of observations

    # Options
    log_normal_prior::Bool              # Whether using log-normal prior
    compute_gradient::Bool              # Whether to compute gradients
end

"""
    LogPosteriorEvaluator(forward_model, gradient_func, transform,
                          x_prior, B_inv, likelihood, y_obs;
                          log_normal_prior=false, compute_gradient=false)

Convenience constructor: derives the problem dimensions from `x_prior`
and `y_obs` and forwards all arguments to the default constructor.
"""
function LogPosteriorEvaluator(
    forward_model, gradient_func, transform,
    x_prior::Vector{Float64},
    B_inv::Matrix{Float64},
    likelihood::AbstractLikelihood,
    y_obs::Vector{Float64};
    log_normal_prior::Bool = false,
    compute_gradient::Bool = false
)
    return LogPosteriorEvaluator(
        forward_model, gradient_func, transform,
        x_prior, B_inv, likelihood, y_obs,
        length(x_prior), length(y_obs),
        log_normal_prior, compute_gradient
    )
end

"""
    evaluate_log_posterior!(evaluator, state::MCMCState)

Evaluate the log-posterior at the current state and cache the result.

Refreshes `state.x_phys` from `state.x_chi` (through the transform when
one is configured), stores the log-likelihood, log-prior, and their sum
on the state, and returns the log-posterior. A non-finite likelihood or
prior forces the log-posterior to `-Inf` so the state is rejected.
"""
function evaluate_log_posterior!(evaluator::LogPosteriorEvaluator, state::MCMCState)
    # Map the sampling-space coordinates into physical space first.
    if isnothing(evaluator.transform)
        state.x_phys .= state.x_chi
    else
        state.x_phys .= evaluator.transform.chi2phi(state.x_chi)
    end

    state.log_likelihood = evaluate_log_likelihood!(evaluator, state)
    state.log_prior = evaluate_log_prior!(evaluator, state)

    # Guard against NaN/Inf leaking into the acceptance ratio.
    both_finite = isfinite(state.log_likelihood) && isfinite(state.log_prior)
    state.log_posterior = both_finite ? state.log_likelihood + state.log_prior : -Inf

    return state.log_posterior
end

"""
    evaluate_log_likelihood!(evaluator, state::MCMCState)

Run the forward model at the current physical-space state and return the
log-likelihood of the resulting residuals (`y_model - y_obs`) under the
evaluator's likelihood model.
"""
function evaluate_log_likelihood!(evaluator::LogPosteriorEvaluator, state::MCMCState)
    predicted = evaluator.forward_model(state.x_phys)
    misfit = predicted - evaluator.y_obs
    return evaluate_likelihood(evaluator.likelihood, misfit)
end

"""
    evaluate_log_prior!(evaluator, state::MCMCState)

Evaluate the (unnormalised) Gaussian log-prior -½ devᵀ B⁻¹ dev.

With `log_normal_prior` the deviation is `x_phys` itself — the state is
assumed already log-transformed and centred on the prior median —
otherwise it is `x_phys - x_prior`.
"""
function evaluate_log_prior!(evaluator::LogPosteriorEvaluator, state::MCMCState)
    dev = evaluator.log_normal_prior ? state.x_phys : state.x_phys - evaluator.x_prior
    return -0.5 * dot(dev, evaluator.B_inv * dev)
end

"""
    evaluate_likelihood(likelihood::GaussianLikelihood, residuals)

Gaussian log-density of the residual vector:
-½ [ rᵀ R⁻¹ r + log|R| + p·log(2π) ] with p = length(r).
"""
function evaluate_likelihood(likelihood::GaussianLikelihood, residuals::Vector{Float64})
    weighted = likelihood.R_inv * residuals
    mahalanobis = dot(residuals, weighted)
    p = length(residuals)
    return -0.5 * (mahalanobis + likelihood.log_det_R + p * log(2π))
end

"""
    evaluate_likelihood(likelihood::StudentTLikelihood, residuals)

Multivariate Student-t log-density of the residual vector:

    log Γ((ν+p)/2) - log Γ(ν/2) - (p/2)log(πν) - ½log|Σ|
        - ((ν+p)/2) log(1 + rᵀ Σ⁻¹ r / ν)

The first four (constant) terms are precomputed in
`likelihood.log_normalization`.
"""
function evaluate_likelihood(likelihood::StudentTLikelihood, residuals::Vector{Float64})
    p = length(residuals)
    quadratic_form = dot(residuals, likelihood.R_inv * residuals)

    # log1p is more accurate than log(1 + x) when the scaled quadratic
    # form is near zero (near-perfect fits with large ν).
    log_term = log1p(quadratic_form / likelihood.nu)
    return likelihood.log_normalization - 0.5 * (likelihood.nu + p) * log_term
end

"""
    construct_gaussian_likelihood(R::AbstractMatrix{<:Real}; ridge=0.0)

Construct Gaussian likelihood from an observation-error covariance matrix.
Accepts dense, diagonal, or symmetric matrix representations.

A nonzero `ridge` adds λI before factorisation; `safe_cholesky` escalates
the ridge automatically if the matrix is numerically indefinite. The
stored `R_inv` and `log_det_R` both refer to the (possibly regularised)
matrix that was actually factorised, so they remain mutually consistent.
"""
function construct_gaussian_likelihood(R::AbstractMatrix{<:Real}; ridge::Float64 = 0.0)
    R_sym = Symmetric(Matrix{Float64}(R))
    ch, _ = safe_cholesky(R_sym; ridge=ridge)

    log_det_R = logdet(ch)   # cheap: from the triangular factor's diagonal
    R_inv = inv(ch)          # dense inverse via the factorisation

    return GaussianLikelihood(R_inv, log_det_R)
end

"""
    construct_student_t_likelihood(R::AbstractMatrix{<:Real}, nu::Float64; ridge=0.0)

Construct Student-t likelihood from scale matrix and degrees of freedom.

Precomputes the inverse scale matrix and the log normalization constant
log Γ((ν+n)/2) - log Γ(ν/2) - (n/2)log(πν) - ½log|R|. A nonzero `ridge`
regularises a numerically indefinite scale matrix (see `safe_cholesky`).

Throws `ArgumentError` if `nu` is not positive.
"""
function construct_student_t_likelihood(R::AbstractMatrix{<:Real}, nu::Float64; ridge::Float64 = 0.0)
    nu > 0 || throw(ArgumentError("degrees of freedom nu must be positive, got $nu"))

    R_sym = Symmetric(Matrix{Float64}(R))
    ch, _ = safe_cholesky(R_sym; ridge=ridge)
    n = size(R_sym, 1)

    R_inv = inv(ch)

    # Normalization constant of the multivariate t density.
    log_det_R = logdet(ch)
    log_gamma_ratio = loggamma((nu + n) / 2) - loggamma(nu / 2)
    log_normalization = log_gamma_ratio - (n / 2) * log(π * nu) - 0.5 * log_det_R

    return StudentTLikelihood(R_inv, nu, log_normalization)
end

"""
    safe_cholesky(R_sym; ridge=0.0, max_attempts=5)

Cholesky-factorise a symmetric matrix, escalating a diagonal ridge when
the matrix is numerically indefinite.

Returns `(factorisation, ridge_used)`. On a `PosDefException` the ridge
is set to `sqrt(eps(Float64))` (if currently zero) or multiplied by 10,
and the factorisation of `R + λI` is retried, up to `max_attempts`
times; the last `PosDefException` is then rethrown. Any other exception
propagates immediately — it signals a real bug, not indefiniteness.
"""
function safe_cholesky(R_sym::Symmetric{Float64, Matrix{Float64}}; ridge::Float64 = 0.0, max_attempts::Int = 5)
    max_attempts >= 1 || throw(ArgumentError("max_attempts must be >= 1, got $max_attempts"))

    λ = ridge
    last_error = nothing
    for attempt in 1:max_attempts
        try
            # λ == 0 factorises the matrix as-is; otherwise R + λI.
            return (λ > 0 ? cholesky(R_sym + λ * I) : cholesky(R_sym)), λ
        catch err
            # Only retry genuine indefiniteness; anything else is fatal.
            err isa PosDefException || rethrow()
            last_error = err
            λ = λ > 0 ? λ * 10 : sqrt(eps(Float64))
        end
    end
    throw(last_error)
end

"""
    compute_gradient!(evaluator, state::MCMCState)

Compute gradient of log-posterior (if evaluator is configured for it),
writing the result into `state.gradient` and returning it.

If `evaluator.compute_gradient` is false, `state.gradient` is returned
unchanged. Otherwise the observation-space gradient from
`evaluator.gradient_func` is pulled back through the transform adjoint
(when a transform is configured) and the prior gradient is subtracted.
"""
function compute_gradient!(evaluator::LogPosteriorEvaluator, state::MCMCState)
    if !evaluator.compute_gradient
        # Gradients disabled: hand back the stored buffer untouched.
        return state.gradient
    end

    # Compute observation space gradient using existing FLEXINVERT infrastructure
    grad_obs = evaluator.gradient_func(state.x_phys)

    # Transform to parameter space if needed
    if evaluator.transform !== nothing
        # Use adjoint of transformation
        state.gradient .= evaluator.transform.phi2chi_adjoint(grad_obs)
    else
        state.gradient .= grad_obs
    end

    # Add prior gradient
    if evaluator.log_normal_prior
        # For log-normal: ∇ log p(z) = -B^{-1} z
        # NOTE(review): this prior term is formed from x_phys even though
        # state.gradient may have just been mapped into chi space by the
        # adjoint above — confirm the transform is identity-like here, or
        # that the prior gradient also needs the adjoint pullback.
        state.gradient .-= evaluator.B_inv * state.x_phys
    else
        # For Gaussian: ∇ log p(x) = -B^{-1} (x - x_b)
        # (same chi-vs-phys caveat as the log-normal branch above)
        prior_grad = evaluator.B_inv * (state.x_phys - evaluator.x_prior)
        state.gradient .-= prior_grad
    end

    return state.gradient
end

"""
    update_hyperparameters!(evaluator, hyperparams::HyperParameters, state::MCMCState)

Update hyperparameters in the evaluator (for hierarchical sampling).

Currently a partial implementation: stores a deep copy of `hyperparams`
on the state and re-evaluates the log-posterior. The evaluator's own
likelihood and prior are NOT yet rebuilt from the new hyperparameters
(see comments below). Returns the recomputed log-posterior value.
"""
function update_hyperparameters!(
    evaluator::LogPosteriorEvaluator,
    hyperparams::HyperParameters,
    state::MCMCState
)
    # This would update the likelihood and prior based on new hyperparameters
    # Implementation depends on specific hyperparameter structure
    # For now, we'll update the state's hyperparameters
    state.hyperparams = deepcopy(hyperparams)

    # Re-evaluate log-posterior with new parameters
    evaluate_log_posterior!(evaluator, state)
end

"""
    loggamma(x::Float64)

Compute log Γ(x) for positive real `x`; returns `Inf` for `x <= 0`.

Uses the recurrence log Γ(x) = log Γ(x+1) - log x to shift the argument
above 10, then a Stirling series with three correction terms, giving
roughly 1e-10 absolute accuracy. (The previous bare Stirling formula was
off by ~0.15 at x = 0.5, which matters for `loggamma(nu/2)` with small
degrees of freedom in the Student-t normalization.)
"""
function loggamma(x::Float64)
    x <= 0.0 && return Inf

    # Shift x upward until the Stirling series converges quickly.
    shift = 0.0
    while x < 10.0
        shift += log(x)
        x += 1.0
    end

    # Stirling series:
    # (x - ½)log x - x + ½log(2π) + 1/(12x) - 1/(360x³) + 1/(1260x⁵)
    inv_x = 1.0 / x
    inv_x2 = inv_x * inv_x
    series = inv_x * (1.0 / 12.0 - inv_x2 * (1.0 / 360.0 - inv_x2 / 1260.0))

    return (x - 0.5) * log(x) - x + 0.5 * log(2π) + series - shift
end

end # module Posterior
