"""
    CostFunctions

Module implementing cost function evaluation and gradient computation for GSI variational
data assimilation. This module is ported from the GSI Fortran `jfunc.f90` module and
implements the core mathematical framework for 3D-Var, 4D-Var, and hybrid ensemble methods.

The cost function to be minimized is:
```
J(x) = Jᵦ(x) + Jₒ(x) + Jc(x)
```

Where:
- `Jᵦ(x) = ½(x-xₑ)ᵀB⁻¹(x-xₑ)`: Background term
- `Jₒ(x) = ½(H(x)-y)ᵀR⁻¹(H(x)-y)`: Observation term  
- `Jc(x)`: Constraint terms (balance, bias correction)

For hybrid methods:
- `B = (1-α)Bₛₜₐₜᵢc + αBₑₙₛₑₘᵦₗₑ`
"""
module CostFunctions

using LinearAlgebra
using ..GSICoreAnalysis: AbstractAnalysisConfig, AbstractControlVector, AbstractStateVector

export CostFunction, HybridCostFunction
export evaluate_cost, compute_gradient, evaluate_background_term, evaluate_observation_term
export create_sampling_operator, setup_diagonal_covariances!, create_observation_indices
export state_vector_to_array, array_to_state_vector!

"""
    AbstractCostFunction{T<:AbstractFloat}

Abstract base type for cost function implementations.
"""
abstract type AbstractCostFunction{T<:AbstractFloat} end

"""
    CostFunction{T<:AbstractFloat}

Standard GSI cost function implementation.

# Fields
- `config::AbstractAnalysisConfig`: Analysis configuration
- `background_covariance::AbstractMatrix{T}`: Background error covariance B
- `observation_covariance::AbstractMatrix{T}`: Observation error covariance R
- `observation_operator::Function`: Observation operator H(x)
- `observation_operator_adjoint::Union{Function,Nothing}`: Adjoint observation operator Hᵀ(y)
- `observation_indices::Union{Vector{Int},Nothing}`: Observation location indices in state space
- `background_state::AbstractVector{T}`: Background state xₑ
- `observations::AbstractVector{T}`: Observation vector y
- `observation_errors::Union{Vector{T},Nothing}`: Observation error standard deviations
- `observation_locations::Union{Matrix{T},Nothing}`: Observation locations (nobs × 3: lon, lat, lev)
- `current_cost::T`: Current cost function value
- `current_gradient_norm::T`: Current gradient norm
"""
mutable struct CostFunction{T<:AbstractFloat} <: AbstractCostFunction{T}
    config::AbstractAnalysisConfig
    background_covariance::AbstractMatrix{T}
    observation_covariance::AbstractMatrix{T}
    observation_operator::Function
    observation_operator_adjoint::Union{Function,Nothing}
    observation_indices::Union{Vector{Int},Nothing}
    background_state::AbstractVector{T}
    observations::AbstractVector{T}
    observation_errors::Union{Vector{T},Nothing}
    observation_locations::Union{Matrix{T},Nothing}
    current_cost::T
    current_gradient_norm::T

    function CostFunction{T}(config::AbstractAnalysisConfig) where T
        # Initialize with placeholder matrices - would be properly constructed
        # For a simple state vector (T, u, v, q, ps) with grid (nx, ny, nz):
        # State size = 4*(nx*ny*nz) + (nx*ny) = 4*3D_fields + 1*2D_field
        nx, ny, nz = config.grid_size
        n_3d_fields = 4  # Temperature, u_wind, v_wind, humidity
        n_2d_fields = 1  # Surface pressure
        n_state = n_3d_fields * (nx * ny * nz) + n_2d_fields * (nx * ny)
        n_obs = n_state ÷ 10  # Simplified

        new{T}(
            config,
            Matrix{T}(I, n_state, n_state),  # Background covariance (identity placeholder)
            Matrix{T}(I, n_obs, n_obs),      # Observation covariance (identity placeholder)
            identity,                         # Observation operator (identity placeholder)
            nothing,                          # Observation operator adjoint (optional)
            nothing,                          # Observation indices (optional)
            zeros(T, n_state),                # Background state
            zeros(T, n_obs),                  # Observations
            nothing,                          # Observation errors (optional)
            nothing,                          # Observation locations (optional)
            zero(T),                          # Current cost
            zero(T)                           # Current gradient norm
        )
    end
end

"""
    HybridCostFunction{T<:AbstractFloat}

Hybrid ensemble-variational cost function implementation.

Extends standard cost function with ensemble-based background error covariance:
```
B = (1-α)Bₛₜₐₜᵢc + αBₑₙₛₑₘᵦₗₑ
```

# Additional Fields
- `static_covariance::AbstractMatrix{T}`: Static background error covariance
- `ensemble_perturbations::AbstractMatrix{T}`: Ensemble perturbations matrix
- `hybrid_coefficient::T`: Hybrid weighting parameter α
- `localization_matrix::AbstractMatrix{T}`: Covariance localization
"""
mutable struct HybridCostFunction{T<:AbstractFloat} <: AbstractCostFunction{T}
    config::AbstractAnalysisConfig
    static_covariance::AbstractMatrix{T}
    ensemble_perturbations::AbstractMatrix{T}
    observation_covariance::AbstractMatrix{T}
    observation_operator::Function
    background_state::AbstractVector{T}
    observations::AbstractVector{T}
    hybrid_coefficient::T
    localization_matrix::AbstractMatrix{T}
    current_cost::T
    current_gradient_norm::T
    
    function HybridCostFunction{T}(config::AbstractAnalysisConfig) where T
        n_grid = prod(config.grid_size)
        n_obs = n_grid ÷ 10
        n_ens = config.ensemble_size
        
        new{T}(
            config,
            Matrix{T}(I, n_grid, n_grid),      # Static covariance
            randn(T, n_grid, n_ens),           # Ensemble perturbations
            Matrix{T}(I, n_obs, n_obs),        # Observation covariance
            identity,                          # Observation operator
            zeros(T, n_grid),                  # Background state
            zeros(T, n_obs),                   # Observations
            T(config.hybrid_coeff),            # Hybrid coefficient
            Matrix{T}(I, n_grid, n_grid),      # Localization matrix
            zero(T),                           # Current cost
            zero(T)                            # Current gradient norm
        )
    end
end

# Convenience constructors: select the floating-point parameter from the
# configuration's `precision` field.
function CostFunction(config::AbstractAnalysisConfig)
    return CostFunction{config.precision}(config)
end

function HybridCostFunction(config::AbstractAnalysisConfig)
    return HybridCostFunction{config.precision}(config)
end

"""
    evaluate_background_term(cf::CostFunction{T}, state::AbstractVector{T}) where T

Evaluate background term of cost function: Jᵦ(x) = ½(x-xₑ)ᵀB⁻¹(x-xₑ).
"""
function evaluate_background_term(cf::CostFunction{T}, state::AbstractVector{T}) where T
    departure = state - cf.background_state
    
    # For efficiency, assume B is factorized as B = LLᵀ
    # Then B⁻¹ = (Lᵀ)⁻¹L⁻¹
    # This would use the actual background error covariance structure
    
    # Simplified: assume B⁻¹ is available directly
    B_inv_departure = cf.background_covariance \ departure
    
    return T(0.5) * dot(departure, B_inv_departure)
end

"""
    evaluate_background_term(cf::HybridCostFunction{T}, state::AbstractVector{T}) where T

Evaluate hybrid background term with ensemble covariance.
"""
function evaluate_background_term(cf::HybridCostFunction{T}, state::AbstractVector{T}) where T
    departure = state - cf.background_state
    
    # Hybrid covariance: B = (1-α)Bₛₜₐₜᵢc + αBₑₙₛₑₘᵦₗₑ
    α = cf.hybrid_coefficient
    
    # Static component: (1-α)Bₛₜₐₜᵢc⁻¹
    static_term = (1 - α) * dot(departure, cf.static_covariance \ departure)
    
    # Ensemble component: αBₑₙₛₑₘᵦₗₑ⁻¹
    # For ensemble covariance: Bₑₙₛₑₘᵦₗₑ = (1/(n-1)) * X * Xᵀ
    # where X are the ensemble perturbations
    X = cf.ensemble_perturbations
    n_ens = size(X, 2)
    
    # Apply localization
    localized_departure = cf.localization_matrix * departure
    
    # Ensemble inverse: (n-1) * (XᵀX)⁻¹ * Xᵀ
    XtX = X' * X
    XtX_inv = inv(XtX + (n_ens - 1) / α * I)  # Add inflation
    ensemble_term = α * (n_ens - 1) * dot(localized_departure, X * (XtX_inv * (X' * localized_departure)))
    
    return T(0.5) * (static_term + ensemble_term)
end

"""
    evaluate_observation_term(cf::AbstractCostFunction{T}, state::AbstractVector{T}) where T

Evaluate observation term: Jₒ(x) = ½(H(x)-y)ᵀR⁻¹(H(x)-y).
"""
function evaluate_observation_term(cf::AbstractCostFunction{T}, state::AbstractVector{T}) where T
    # Apply observation operator
    predicted_obs = cf.observation_operator(state)
    
    # Compute innovation (observation minus prediction)
    innovation = cf.observations - predicted_obs
    
    # Apply observation error covariance inverse
    R_inv_innovation = cf.observation_covariance \ innovation
    
    return T(0.5) * dot(innovation, R_inv_innovation)
end

"""
    evaluate_cost(cf::AbstractCostFunction{T}, state::AbstractVector{T}) where T

Evaluate total cost function: J(x) = Jᵦ(x) + Jₒ(x).
"""
function evaluate_cost(cf::AbstractCostFunction{T}, state::AbstractVector{T}) where T
    background_cost = evaluate_background_term(cf, state)
    observation_cost = evaluate_observation_term(cf, state)
    
    total_cost = background_cost + observation_cost
    cf.current_cost = total_cost
    
    return total_cost
end

"""
    compute_gradient_background(cf::CostFunction{T}, state::AbstractVector{T}) where T

Compute background term gradient: ∇Jᵦ(x) = B⁻¹(x-xₑ).
"""
function compute_gradient_background(cf::CostFunction{T}, state::AbstractVector{T}) where T
    departure = state - cf.background_state
    return cf.background_covariance \ departure
end

"""
    compute_gradient_background(cf::HybridCostFunction{T}, state::AbstractVector{T}) where T

Compute hybrid background term gradient.
"""
function compute_gradient_background(cf::HybridCostFunction{T}, state::AbstractVector{T}) where T
    departure = state - cf.background_state
    α = cf.hybrid_coefficient
    
    # Static component gradient
    static_grad = (1 - α) * (cf.static_covariance \ departure)
    
    # Ensemble component gradient
    X = cf.ensemble_perturbations
    n_ens = size(X, 2)
    
    localized_departure = cf.localization_matrix * departure
    XtX = X' * X
    XtX_inv = inv(XtX + (n_ens - 1) / α * I)
    
    ensemble_grad = α * (n_ens - 1) * (cf.localization_matrix' * 
                   (X * (XtX_inv * (X' * localized_departure))))
    
    return static_grad + ensemble_grad
end

"""
    compute_gradient_observation(cf::AbstractCostFunction{T}, state::AbstractVector{T}) where T

Compute observation term gradient: ∇Jₒ(x) = HᵀR⁻¹(H(x)-y).
"""
function compute_gradient_observation(cf::AbstractCostFunction{T}, state::AbstractVector{T}) where T
    # Apply observation operator H(x)
    predicted_obs = cf.observation_operator(state)

    # Compute innovation: H(x) - y (sign is positive for gradient)
    innovation = predicted_obs - cf.observations

    # Apply observation error covariance inverse: R⁻¹(H(x) - y)
    R_inv_innovation = cf.observation_covariance \ innovation

    # Apply adjoint of observation operator: Hᵀ[R⁻¹(H(x) - y)]
    # For a linear operator H represented as a matrix, the adjoint is Hᵀ
    # For identity or diagonal H, the adjoint is the same as the forward operator

    # Check if observation_operator has an adjoint method
    if hasfield(typeof(cf), :observation_operator_adjoint)
        # Use explicit adjoint operator if provided
        gradient_obs = cf.observation_operator_adjoint(R_inv_innovation)
    else
        # For simplified case: assume observation operator is a sampling/interpolation
        # The adjoint distributes the residual back to the state space
        # If H is a matrix, then Hᵀ v is computed

        # If observation_operator is a function handle, we need to approximate the adjoint
        # For linear interpolation, the adjoint is the transpose of the interpolation matrix

        # Simplified approach: if H is represented implicitly through observation_locations
        # and observation_indices, use those to distribute the gradient
        if hasfield(typeof(cf), :observation_indices) && !isnothing(cf.observation_indices)
            # Scatter the residual to state space locations
            gradient_obs = zeros(T, length(state))
            for (i, idx) in enumerate(cf.observation_indices)
                gradient_obs[idx] += R_inv_innovation[i]
            end
        else
            # Fallback: assume observation operator can be approximated
            # This will need to be customized based on the actual operator structure
            # For now, create a gradient of appropriate size
            gradient_obs = zeros(T, length(state))

            # If we have a simple sampling operator where observations are a subset of state
            # distribute evenly (this is a placeholder that should be replaced)
            n_state = length(state)
            n_obs = length(R_inv_innovation)

            if n_obs <= n_state
                # Simple distribution: scatter observations into state space
                step = max(1, n_state ÷ n_obs)
                for i in 1:n_obs
                    idx = min(n_state, (i-1) * step + 1)
                    gradient_obs[idx] += R_inv_innovation[i]
                end
            else
                # More observations than state elements: aggregate
                step = max(1, n_obs ÷ n_state)
                for i in 1:n_state
                    obs_start = (i-1) * step + 1
                    obs_end = min(n_obs, i * step)
                    gradient_obs[i] = sum(R_inv_innovation[obs_start:obs_end]) / (obs_end - obs_start + 1)
                end
            end
        end
    end

    return gradient_obs
end

"""
    compute_gradient(cf::AbstractCostFunction{T}, state::AbstractVector{T}) where T

Compute total gradient: ∇J(x) = ∇Jᵦ(x) + ∇Jₒ(x).
"""
function compute_gradient(cf::AbstractCostFunction{T}, state::AbstractVector{T}) where T
    grad_background = compute_gradient_background(cf, state)
    grad_observation = compute_gradient_observation(cf, state)
    
    total_gradient = grad_background + grad_observation
    cf.current_gradient_norm = norm(total_gradient)
    
    return total_gradient
end

"""
    initialize_background_covariance!(cf::AbstractCostFunction{T}, B::AbstractMatrix{T}) where T

Initialize background error covariance matrix.
"""
function initialize_background_covariance!(cf::CostFunction{T}, B::AbstractMatrix{T}) where T
    cf.background_covariance = B
    return cf
end

function initialize_background_covariance!(cf::HybridCostFunction{T}, B_static::AbstractMatrix{T}) where T
    cf.static_covariance = B_static
    return cf
end

"""
    initialize_ensemble!(cf::HybridCostFunction{T}, perturbations::AbstractMatrix{T}) where T

Initialize ensemble perturbations for hybrid method.
"""
function initialize_ensemble!(cf::HybridCostFunction{T}, perturbations::AbstractMatrix{T}) where T
    cf.ensemble_perturbations = perturbations
    return cf
end

"""
    set_observations!(cf::AbstractCostFunction{T}, obs::AbstractVector{T}, H::Function) where T

Set observation vector and observation operator.
"""
function set_observations!(cf::AbstractCostFunction{T}, obs::AbstractVector{T}, H::Function) where T
    cf.observations = obs
    cf.observation_operator = H
    return cf
end

"""
    set_background!(cf::AbstractCostFunction{T}, bg::AbstractVector{T}) where T

Set background state vector.
"""
function set_background!(cf::AbstractCostFunction{T}, bg::AbstractVector{T}) where T
    cf.background_state = bg
    return cf
end

"""
    create_sampling_operator(obs_indices::Vector{Int}, n_state::Int)

Create a simple sampling observation operator that selects state elements at specified indices.

# Arguments
- `obs_indices::Vector{Int}`: Indices of state vector to sample for observations
- `n_state::Int`: Total size of state vector

# Returns
- Tuple of (H_forward, H_adjoint) functions
"""
function create_sampling_operator(obs_indices::Vector{Int}, n_state::Int)
    # Forward operator: sample state at observation locations
    H_forward = function(state::AbstractVector{T}) where T
        return state[obs_indices]
    end

    # Adjoint operator: scatter observations back to state space
    H_adjoint = function(obs_residual::AbstractVector{T}) where T
        gradient = zeros(T, n_state)
        for (i, idx) in enumerate(obs_indices)
            gradient[idx] += obs_residual[i]
        end
        return gradient
    end

    return (H_forward, H_adjoint)
end

"""
    setup_diagonal_covariances!(cf::CostFunction{T},
                                background_variance::T,
                                observation_variance::T) where T

Set up diagonal background and observation error covariance matrices.

# Arguments
- `cf::CostFunction{T}`: Cost function to configure
- `background_variance::T`: Variance for background errors (diagonal elements)
- `observation_variance::T`: Variance for observation errors (diagonal elements)
"""
function setup_diagonal_covariances!(cf::CostFunction{T},
                                    background_variance::T,
                                    observation_variance::T) where T
    n_state = length(cf.background_state)
    n_obs = length(cf.observations)

    # Create diagonal matrices (scaled identity)
    cf.background_covariance = Matrix{T}(background_variance * I, n_state, n_state)
    cf.observation_covariance = Matrix{T}(observation_variance * I, n_obs, n_obs)

    return cf
end

"""
    setup_diagonal_covariances!(cf::CostFunction{T},
                                background_variances::Vector{T},
                                observation_variances::Vector{T}) where T

Set up diagonal background and observation error covariance matrices with variable diagonal elements.

# Arguments
- `cf::CostFunction{T}`: Cost function to configure
- `background_variances::Vector{T}`: Variances for background errors (one per state element)
- `observation_variances::Vector{T}`: Variances for observation errors (one per observation)
"""
function setup_diagonal_covariances!(cf::CostFunction{T},
                                    background_variances::Vector{T},
                                    observation_variances::Vector{T}) where T
    n_state = length(cf.background_state)
    n_obs = length(cf.observations)

    @assert length(background_variances) == n_state "Background variance size mismatch"
    @assert length(observation_variances) == n_obs "Observation variance size mismatch"

    # Create diagonal matrices
    cf.background_covariance = Matrix(Diagonal(background_variances))
    cf.observation_covariance = Matrix(Diagonal(observation_variances))

    return cf
end

"""
    create_observation_indices(n_state::Int, n_obs::Int, sampling_strategy::Symbol = :uniform)

Create observation indices that sample from the state space.

# Arguments
- `n_state::Int`: Total size of state vector
- `n_obs::Int`: Number of observations to create
- `sampling_strategy::Symbol`: How to sample (:uniform, :random)

# Returns
- Vector of observation indices
"""
function create_observation_indices(n_state::Int, n_obs::Int, sampling_strategy::Symbol = :uniform)
    if sampling_strategy == :uniform
        # Uniform sampling across state space
        if n_obs >= n_state
            # More obs than state: sample with potential repeats
            return rand(1:n_state, n_obs)
        else
            # Sample uniformly spaced indices
            step = n_state / n_obs
            return [min(n_state, Int(round((i-1) * step + 1))) for i in 1:n_obs]
        end
    elseif sampling_strategy == :random
        # Random sampling
        return sort(rand(1:n_state, n_obs))
    else
        error("Unknown sampling strategy: $sampling_strategy")
    end
end

"""
    state_vector_to_array(state_vector::AbstractStateVector)

Convert a StateVector to a flat array for use in cost function evaluation.

# Arguments
- `state_vector::AbstractStateVector`: State vector to convert

# Returns
- Vector{T} containing flattened state variables
"""
function state_vector_to_array(state_vector::AbstractStateVector)
    # Concatenate all state variable fields into a single array
    # This assumes StateVector has fields like temperature, u_wind, v_wind, humidity, pressure

    # Get all numeric fields from state vector
    result = Float64[]

    for field_name in fieldnames(typeof(state_vector))
        field_value = getfield(state_vector, field_name)

        # Skip non-array fields (like config)
        if field_value isa AbstractArray{<:Number}
            append!(result, vec(field_value))
        end
    end

    return result
end

"""
    array_to_state_vector!(state_vector::AbstractStateVector, array::AbstractVector)

Update a StateVector from a flat array.

# Arguments
- `state_vector::AbstractStateVector`: State vector to update (modified in place)
- `array::AbstractVector`: Flat array containing state values

# Returns
- Updated state vector
"""
function array_to_state_vector!(state_vector::AbstractStateVector, array::AbstractVector)
    idx = 1

    for field_name in fieldnames(typeof(state_vector))
        field_value = getfield(state_vector, field_name)

        # Update array fields
        if field_value isa AbstractArray{<:Number}
            field_size = length(field_value)
            if idx + field_size - 1 <= length(array)
                field_value[:] = array[idx:idx+field_size-1]
                idx += field_size
            end
        end
    end

    return state_vector
end

end # module CostFunctions