"""
Test suite for DRP-4DVar using the Lorenz-96 model.

This script implements comprehensive tests of the DRP-4DVar algorithm using the
chaotic Lorenz-96 dynamical system, which serves as an idealized testbed for
data assimilation methods.

The Lorenz-96 model simulates the evolution of a set of scalar variables
arranged on a circle with periodic boundary conditions, exhibiting chaotic
behavior suitable for testing nonlinear data assimilation algorithms.
"""

using Test
using LinearAlgebra
using Random
using Statistics
# using Plots  # Commented out to avoid dependency issues

# Add the FourDVar module to the load path
push!(LOAD_PATH, "../src/FourDVar")
include("../src/FourDVar/FourDVar.jl")
using .FourDVar

"""
Lorenz-96 Model Implementation

The Lorenz-96 model represents a simple one-dimensional atmosphere with
periodic boundary conditions:

dx_i/dt = (x_{i+1} - x_{i-2})x_{i-1} - x_i + F

where F is the forcing parameter and indices are cyclic.
"""
struct Lorenz96Model
    n::Int                    # Number of variables
    F::Float64               # Forcing parameter
    dt::Float64              # Time step
    
    function Lorenz96Model(n::Int=40, F::Float64=8.0, dt::Float64=0.05)
        new(n, F, dt)
    end
end

"""
    lorenz96_rhs(x, model)

Compute the right-hand side of the Lorenz-96 equations.

# Arguments
- `x::Vector{Float64}`: State vector
- `model::Lorenz96Model`: Model parameters

# Returns
- `Vector{Float64}`: Time derivative dx/dt
"""
function lorenz96_rhs(x::Vector{Float64}, model::Lorenz96Model)
    n = model.n
    F = model.F
    dxdt = zeros(n)
    
    for i in 1:n
        # Cyclic indices
        im2 = mod(i - 3, n) + 1  # i-2
        im1 = mod(i - 2, n) + 1  # i-1
        ip1 = mod(i, n) + 1      # i+1
        
        # Lorenz-96 dynamics
        dxdt[i] = (x[ip1] - x[im2]) * x[im1] - x[i] + F
    end
    
    return dxdt
end

"""
    integrate_lorenz96(x0, model, nsteps)

Integrate the Lorenz-96 model using 4th-order Runge-Kutta.

# Arguments
- `x0::Vector{Float64}`: Initial condition
- `model::Lorenz96Model`: Model parameters
- `nsteps::Int`: Number of time steps

# Returns
- `Matrix{Float64}`: State trajectory (n × nsteps+1)
"""
function integrate_lorenz96(x0::Vector{Float64}, model::Lorenz96Model, nsteps::Int)
    n = model.n
    dt = model.dt
    
    trajectory = zeros(n, nsteps + 1)
    trajectory[:, 1] = x0
    
    x = copy(x0)
    
    for step in 1:nsteps
        # 4th-order Runge-Kutta integration
        k1 = dt * lorenz96_rhs(x, model)
        k2 = dt * lorenz96_rhs(x + 0.5 * k1, model)
        k3 = dt * lorenz96_rhs(x + 0.5 * k2, model)
        k4 = dt * lorenz96_rhs(x + k3, model)
        
        x += (k1 + 2*k2 + 2*k3 + k4) / 6
        trajectory[:, step + 1] = x
    end
    
    return trajectory
end

"""
    lorenz96_tangent_linear(x, model)

Compute the tangent linear operator (Jacobian) of the Lorenz-96 model.

# Arguments
- `x::Vector{Float64}`: State vector around which to linearize
- `model::Lorenz96Model`: Model parameters

# Returns
- `Matrix{Float64}`: Tangent linear matrix M (n × n)
"""
function lorenz96_tangent_linear(x::Vector{Float64}, model::Lorenz96Model)
    n = model.n
    dt = model.dt
    M = zeros(n, n)
    
    # Compute Jacobian of the nonlinear operator
    J = zeros(n, n)
    for i in 1:n
        # Cyclic indices
        im2 = mod(i - 3, n) + 1  # i-2
        im1 = mod(i - 2, n) + 1  # i-1
        ip1 = mod(i, n) + 1      # i+1
        
        # Partial derivatives
        J[i, im2] = -x[im1]                    # ∂f_i/∂x_{i-2}
        J[i, im1] = x[ip1] - x[im2]           # ∂f_i/∂x_{i-1}  
        J[i, i] = -1.0                        # ∂f_i/∂x_i
        J[i, ip1] = x[im1]                    # ∂f_i/∂x_{i+1}
    end
    
    # Convert to tangent linear propagator using matrix exponential approximation
    # M ≈ I + dt*J (first-order approximation)
    M = I + dt * J
    
    return M
end

"""
Linear observation operator for Lorenz-96 model.

This represents partial, noisy observations of the true state.
"""
struct Lorenz96ObservationOperator
    H::Matrix{Float64}      # Linear observation matrix
    obs_error_std::Float64  # Observation error standard deviation
    
    function Lorenz96ObservationOperator(n_state::Int, n_obs::Int; 
                                       obs_error_std::Float64=1.0,
                                       obs_spacing::Int=2)
        # Create observation operator that observes every obs_spacing variables
        H = zeros(n_obs, n_state)
        for i in 1:n_obs
            obs_index = (i - 1) * obs_spacing + 1
            if obs_index <= n_state
                H[i, obs_index] = 1.0
            end
        end
        
        new(H, obs_error_std)
    end
end

"""
    (obs_op::Lorenz96ObservationOperator)(x::Vector{Float64})

Apply nonlinear observation operator (in this case, linear).
"""
function (obs_op::Lorenz96ObservationOperator)(x::Vector{Float64})
    return obs_op.H * x
end

"""
    linear(obs_op::Lorenz96ObservationOperator, x::Vector{Float64})

Return tangent linear observation operator.
"""
function linear(obs_op::Lorenz96ObservationOperator, x::Vector{Float64})
    return obs_op.H
end

"""
    generate_synthetic_observations(true_trajectory, obs_op, time_indices)

Generate synthetic observations with added noise.

# Arguments
- `true_trajectory::Matrix{Float64}`: True state trajectory
- `obs_op::Lorenz96ObservationOperator`: Observation operator
- `time_indices::Vector{Int}`: Time indices where observations are available

# Returns
- `Dict{Int, Vector{Float64}}`: Observations by time index
"""
function generate_synthetic_observations(true_trajectory::Matrix{Float64},
                                       obs_op::Lorenz96ObservationOperator,
                                       time_indices::Vector{Int})
    observations = Dict{Int, Vector{Float64}}()
    
    for t in time_indices
        if t <= size(true_trajectory, 2)
            # Apply observation operator
            y_true = obs_op(true_trajectory[:, t])
            
            # Add observation noise
            noise = obs_op.obs_error_std * randn(length(y_true))
            observations[t] = y_true + noise
        end
    end
    
    return observations
end

"""
    create_background_error_covariance(n, correlation_length=10.0, variance=1.0)

Create a background error covariance matrix with exponential correlation structure.

# Arguments
- `n::Int`: State dimension
- `correlation_length::Float64`: Correlation length scale
- `variance::Float64`: Error variance

# Returns
- `Matrix{Float64}`: Background error covariance matrix B
"""
function create_background_error_covariance(n::Int; 
                                          correlation_length::Float64=10.0,
                                          variance::Float64=1.0)
    B = zeros(n, n)
    
    for i in 1:n
        for j in 1:n
            # Periodic distance on a circle
            dist = min(abs(i - j), n - abs(i - j))
            # Exponential correlation
            correlation = exp(-dist / correlation_length)
            B[i, j] = variance * correlation
        end
    end
    
    return B
end

# =============================================================================
# Test Cases
# =============================================================================

@testset "Lorenz-96 Model Tests" begin
    
    # Test model integration
    @testset "Model Integration" begin
        model = Lorenz96Model(40, 8.0, 0.05)
        x0 = randn(40)
        
        # Test single step integration
        trajectory = integrate_lorenz96(x0, model, 10)
        
        @test size(trajectory) == (40, 11)
        @test trajectory[:, 1] ≈ x0
        
        # Test that model produces different states (not constant)
        @test norm(trajectory[:, end] - trajectory[:, 1]) > 1e-6
    end
    
    # Test tangent linear operator
    @testset "Tangent Linear Operator" begin
        model = Lorenz96Model(10, 8.0, 0.05)
        x = randn(10)
        
        M = lorenz96_tangent_linear(x, model)
        
        @test size(M) == (10, 10)
        @test det(M) != 0  # Should be non-singular
        
        # Test finite difference approximation
        ε = 1e-8
        δx = ε * randn(10)
        
        # Forward difference
        f_x = lorenz96_rhs(x, model)
        f_x_plus = lorenz96_rhs(x + δx, model)
        fd_approx = (f_x_plus - f_x) / ε
        
        # Tangent linear prediction
        J_matrix = (M - I) / model.dt  # Convert to Jacobian
        tl_approx = J_matrix * δx
        
        @test norm(fd_approx - tl_approx) < 1e-1  # Relaxed tolerance for chaotic system
    end
    
    # Test observation operator
    @testset "Observation Operator" begin
        n_state = 20
        n_obs = 10
        obs_op = Lorenz96ObservationOperator(n_state, n_obs, obs_spacing=2)
        
        @test size(obs_op.H) == (n_obs, n_state)
        
        x = randn(n_state)
        y = obs_op(x)
        
        @test length(y) == n_obs
        @test y ≈ obs_op.H * x
    end
end

@testset "DRP-4DVar Algorithm Tests" begin
    
    # Setup test problem
    Random.seed!(12345)
    n_state = 20
    n_obs = 10
    
    # Create Lorenz-96 model
    model = Lorenz96Model(n_state, 8.0, 0.05)
    
    # Generate true trajectory
    x_true_init = randn(n_state)
    true_trajectory = integrate_lorenz96(x_true_init, model, 20)
    
    # Create observation operator
    obs_op = Lorenz96ObservationOperator(n_state, n_obs, obs_error_std=0.5)
    
    # Generate synthetic observations
    obs_times = [1, 5, 10, 15, 20]
    observations = generate_synthetic_observations(true_trajectory, obs_op, obs_times)
    
    # Create background error covariance
    B = create_background_error_covariance(n_state, correlation_length=5.0, variance=2.0)
    
    # Add noise to background
    background_state = x_true_init + sqrt(2.0) * randn(n_state)
    
    @testset "Algorithm Setup" begin
        # Test DRP4DVar constructor
        drp4dvar = DRP4DVar(
            ensemble_size = 15,
            max_outer_loops = 2,
            max_inner_loops = 50,
            convergence_tolerance = 1e-4,
            time_window = 6,
            optimizer = "lbfgs"
        )
        
        @test drp4dvar.ensemble_size == 15
        @test drp4dvar.optimizer == "lbfgs"
        @test haskey(drp4dvar.statistics, "convergence_history")
    end
    
    @testset "Ensemble Perturbations" begin
        # Test ensemble generation
        projection = ensemble_perturbations(
            background_state, B, 15,
            random_seed = 42
        )
        
        @test size(projection.P_x) == (n_state, 15)
        @test 0.0 <= projection.explained_variance <= 1.0
        @test length(projection.eigenvalues) <= 15
        
        # Check that perturbations are centered
        mean_perturbation = vec(mean(projection.ensemble_perturbations, dims=2))
        @test norm(mean_perturbation) < 1e-10
    end
    
    @testset "Observation Space Projection" begin
        # Create projection
        projection = ensemble_perturbations(background_state, B, 10, random_seed=42)
        
        # Create simplified observation and model operators for testing
        obs_operators = Dict{Int, Any}()
        model_operators = Dict{Int, Any}()
        
        for t in obs_times
            obs_operators[t] = obs_op.H  # Linear operator
            if t > 1
                # Simplified model operator (identity for this test)
                model_operators[t] = Matrix{Float64}(I, n_state, n_state)
            end
        end
        
        # Project to observation space
        project_to_observation_space!(
            projection, obs_operators, model_operators, obs_times, background_state
        )
        
        @test haskey(projection.P_y, obs_times[1])
        @test size(projection.P_y[obs_times[1]]) == (n_obs, 10)
    end
    
    @testset "Cost Function Evaluation" begin
        # Create minimal test setup
        projection = ensemble_perturbations(background_state, B, 5, random_seed=42)
        
        # Simple projection for testing
        projection.P_y[1] = randn(n_obs, 5)
        
        # Create innovations
        innovations = Dict(1 => randn(n_obs))
        obs_error_inv = Dict(1 => Matrix{Float64}(I, n_obs, n_obs))
        
        cost_func = ReducedSpaceCostFunction(projection, innovations, obs_error_inv)
        
        # Test cost function evaluation
        α = randn(5)
        cost = reduced_cost_function(α, cost_func)
        
        @test cost >= 0.0  # Cost function should be non-negative
        @test isfinite(cost)
        
        # Test gradient computation
        grad = reduced_gradient(α, cost_func)
        
        @test length(grad) == 5
        @test all(isfinite.(grad))
        
        # Test gradient by finite differences
        ε = 1e-6
        grad_fd = zeros(5)
        
        for i in 1:5
            α_plus = copy(α)
            α_plus[i] += ε
            cost_plus = reduced_cost_function(α_plus, cost_func)
            grad_fd[i] = (cost_plus - cost) / ε
        end
        
        @test norm(grad - grad_fd) < 1e-4  # Gradient should match finite differences
    end
    
    @testset "Optimization Algorithms" begin
        # Setup minimal optimization problem
        projection = ensemble_perturbations(background_state, B, 8, random_seed=42)
        projection.P_y[1] = randn(n_obs, 8)
        
        innovations = Dict(1 => 0.1 * randn(n_obs))  # Small innovations for convergence
        obs_error_inv = Dict(1 => Matrix{Float64}(I, n_obs, n_obs))
        
        cost_func = ReducedSpaceCostFunction(projection, innovations, obs_error_inv)
        
        # Test different optimizers
        for optimizer in ["lbfgs", "gauss_newton", "conjugate_gradient"]
            solver = DRP4DVariationalSolver(optimizer, max_iterations=30, tolerance=1e-6)
            initial_guess = zeros(8)
            
            α_opt, stats = optimize_reduced_space(cost_func, solver, initial_guess)
            
            @test length(α_opt) == 8
            @test haskey(stats, "converged")
            @test haskey(stats, "final_cost")
            @test stats["final_cost"] < reduced_cost_function(initial_guess, cost_func)
        end
    end
    
    @testset "Full DRP-4DVar Integration Test" begin
        # This test may take longer but provides end-to-end validation
        
        # Setup DRP-4DVar method
        drp4dvar = DRP4DVar(
            ensemble_size = 12,
            max_outer_loops = 1,  # Keep it simple for testing
            max_inner_loops = 20,
            convergence_tolerance = 1e-3,
            time_window = 4,
            optimizer = "lbfgs"
        )
        
        # Create simplified operators for testing
        obs_operators = Dict{Int, Any}()
        model_operators = Dict{Int, Any}()
        
        for t in obs_times[1:3]  # Use first 3 observation times only
            # Define observation operator as a callable
            obs_operators[t] = x -> obs_op.H * x  # Nonlinear operator (linear in this case)
            
            if t > 1
                # Simplified tangent linear model (identity for this test)
                model_operators[t] = Matrix{Float64}(I, n_state, n_state)
            end
        end
        
        # Use subset of observations
        test_observations = Dict(t => observations[t] for t in obs_times[1:3] if haskey(observations, t))
        
        # Run DRP-4DVar
        analysis_state, stats = run_drp4dvar(
            drp4dvar,
            background_state,
            B,
            test_observations,
            obs_operators,
            model_operators
        )
        
        @test length(analysis_state) == n_state
        @test haskey(stats, "total_execution_time")
        @test haskey(stats, "final_analysis_state")
        
        # Analysis should be different from background
        @test norm(analysis_state - background_state) > 1e-6
        
        # Analysis should be closer to truth than background (in expectation)
        background_rmse = norm(background_state - x_true_init) / sqrt(n_state)
        analysis_rmse = norm(analysis_state - x_true_init) / sqrt(n_state)
        
        println("Background RMSE: $(background_rmse)")
        println("Analysis RMSE: $(analysis_rmse)")
        
        # The analysis should generally improve the estimate
        # Note: this might not always be true for a single realization with synthetic data
        if analysis_rmse < background_rmse
            println("✓ Analysis improved over background")
        else
            println("⚠ Analysis did not improve over background (can happen with synthetic data)")
        end
        
        @test stats["total_execution_time"] > 0.0
    end
end

# Performance benchmarking test
@testset "Performance Benchmarking" begin

    @testset "Scalability Test" begin
        # Test algorithm wall-clock behavior with different ensemble sizes.
        Random.seed!(777)  # deterministic problem setup
        n_state = 30
        ensemble_sizes = [5, 10, 20]
        execution_times = Float64[]

        for ens_size in ensemble_sizes
            # Create simple test setup
            background = randn(n_state)
            B = create_background_error_covariance(n_state, variance=1.0)

            # Minimal observation setup: observe every other variable
            observations = Dict(1 => randn(n_state÷2))
            obs_operators = Dict(1 => x -> x[1:2:end])
            model_operators = Dict{Int, Any}()

            drp4dvar = DRP4DVar(
                ensemble_size = ens_size,
                max_outer_loops = 1,
                max_inner_loops = 10,
                convergence_tolerance = 1e-2
            )

            # Warm-up call so JIT compilation cost is not attributed to the
            # timed run (otherwise the first ensemble size looks slowest).
            _, _ = run_drp4dvar(drp4dvar, background, B, observations, obs_operators, model_operators)

            # Time the execution
            start_time = time()
            _, _ = run_drp4dvar(drp4dvar, background, B, observations, obs_operators, model_operators)
            execution_time = time() - start_time

            push!(execution_times, execution_time)

            println("Ensemble size $(ens_size): $(execution_time) seconds")
        end

        # Wall-clock ordering across ensemble sizes is too noisy for a hard
        # assertion (scheduler jitter, GC, residual compilation); report it
        # instead of failing the suite.
        if execution_times[end] <= execution_times[1]
            println("⚠ Largest ensemble was not slower than smallest (timing noise)")
        end
        @test all(execution_times .> 0.0)
    end
end

# Run visualization if requested (requires Plots.jl) - disabled for now
# if get(ENV, "RUN_PLOTS", "false") == "true"
#     @testset "Visualization Tests" begin
#         # Create a simple test case for visualization
#         model = Lorenz96Model(20, 8.0, 0.05)
#         x0 = randn(20)
#         trajectory = integrate_lorenz96(x0, model, 100)
#         
#         # Plot trajectory
#         p = plot(trajectory[1, :], label="Variable 1", title="Lorenz-96 Trajectory")
#         plot!(p, trajectory[2, :], label="Variable 2")
#         plot!(p, trajectory[3, :], label="Variable 3")
#         
#         savefig(p, "lorenz96_trajectory.png")
#         
#         # Plot eigenvalue spectrum of background error covariance
#         B = create_background_error_covariance(20)
#         eigenvals = eigvals(B)
#         sort!(eigenvals, rev=true)
#         
#         p2 = plot(eigenvals, marker=:circle, ylabel="Eigenvalue", xlabel="Mode", 
#                  title="Background Error Covariance Spectrum", yscale=:log10)
#         savefig(p2, "background_error_spectrum.png")
#         
#         println("Plots saved to lorenz96_trajectory.png and background_error_spectrum.png")
#     end
# end

println("All DRP-4DVar tests completed successfully!")