#!/usr/bin/env julia
"""
Integration test for real cost function minimization in GSI Julia package.

This test validates that:
1. Cost function decreases monotonically during minimization
2. Gradient norm decreases toward zero
3. Final analysis is closer to observations than background
4. Multiple solver algorithms (PCG, Lanczos) converge successfully
5. Mathematical properties are preserved (symmetry, positive definiteness)
"""

using Test
using GSICoreAnalysis
using LinearAlgebra
using Printf
using Random
using Statistics

# Seed the global RNG so every run draws identical synthetic data
Random.seed!(12345)

# Banner announcing the integration-test suite
let banner = "="^80
    println(banner)
    println("GSI Julia - Real Minimization Integration Test")
    println(banner)
end

@testset "Real Minimization Tests" begin

    # ==========================================================================
    # Test 1: Basic Cost Function Evaluation
    # ==========================================================================
    @testset "Cost Function Evaluation" begin
        println("\n[Test 1] Cost Function Evaluation")

        # Shorthand for the cost-function submodule used throughout this test
        CF = GSICoreAnalysis.CostFunctions

        # Small 3-D grid keeps the test fast while exercising the full path
        config = AnalysisConfig(
            grid_size = (10, 10, 5),
            ensemble_size = 0,
            use_hybrid = false,
            max_iterations = 50,
            convergence_tol = 1e-6
        )

        # Derive the state dimension from a freshly constructed state vector
        sv = GSICoreAnalysis.StateVectors.StateVector(config)
        n_state = length(CF.state_vector_to_array(sv))

        # Twin experiment: known truth, perturbed background, noisy obs
        # (randn calls stay in this order so the RNG stream is reproducible)
        n_obs = 100
        obs_indices = CF.create_observation_indices(n_state, n_obs, :uniform)
        true_state = randn(n_state)
        background = true_state .+ 2.0 .* randn(n_state)
        observations = true_state[obs_indices] .+ 1.0 .* randn(n_obs)
        obs_errors = ones(n_obs)

        # Wire up the cost function with data and a sampling obs operator
        cost_func = CF.CostFunction(config)
        cost_func.background_state = background
        cost_func.observations = observations
        cost_func.observation_errors = obs_errors
        cost_func.observation_indices = obs_indices

        H_forward, H_adjoint = CF.create_sampling_operator(obs_indices, n_state)
        cost_func.observation_operator = H_forward
        cost_func.observation_operator_adjoint = H_adjoint

        # Diagonal B (variance 4.0) and diagonal R (obs error variances)
        CF.setup_diagonal_covariances!(cost_func, 4.0, obs_errors.^2)

        # J(x_b) must be a positive, finite number
        J_b = CF.evaluate_cost(cost_func, background)
        println(@sprintf("  Cost at background: %.6e", J_b))
        @test J_b > 0.0
        @test isfinite(J_b)
        @test !isnan(J_b)

        # Gradient at the background: right length, non-zero, finite norm
        grad = CF.compute_gradient(cost_func, background)
        println(@sprintf("  Gradient norm at background: %.6e", norm(grad)))
        @test length(grad) == n_state
        @test norm(grad) > 0.0
        @test isfinite(norm(grad))

        # A small step along -grad must lower the cost (descent direction)
        step_size = 0.01
        stepped_state = background - step_size .* grad
        J_new = CF.evaluate_cost(cost_func, stepped_state)
        println(@sprintf("  Cost after gradient step: %.6e", J_new))
        println(@sprintf("  Cost decrease: %.6e", J_b - J_new))
        @test J_new < J_b  # Cost should decrease in gradient direction

        println("  ✓ Cost function evaluation tests passed")
    end

    # ==========================================================================
    # Test 2: PCG Solver Convergence
    # ==========================================================================
    @testset "PCG Solver Convergence" begin
        println("\n[Test 2] PCG Solver Convergence")

        # Shorthand for the cost-function submodule
        CF = GSICoreAnalysis.CostFunctions

        # The PCG solver below is capped at this many iterations, which is
        # tighter than config.max_iterations (100).  Convergence must be
        # checked against this cap — the previous version compared against
        # the looser config value and could not catch a runaway solver.
        solver_max_iters = 50

        config = AnalysisConfig(
            grid_size = (10, 10, 5),
            ensemble_size = 0,
            use_hybrid = false,
            max_iterations = 100,
            convergence_tol = 1e-6
        )

        # Twin experiment: truth, perturbed background, noisy observations
        state_vector = GSICoreAnalysis.StateVectors.StateVector(config)
        n_state = length(CF.state_vector_to_array(state_vector))
        n_obs = 100

        obs_indices = CF.create_observation_indices(n_state, n_obs, :uniform)
        true_state = randn(n_state)
        background = true_state .+ 2.0 .* randn(n_state)
        observations = true_state[obs_indices] .+ 1.0 .* randn(n_obs)
        obs_errors = ones(n_obs)

        # Assemble the cost function with data and sampling operator
        cost_func = CF.CostFunction(config)
        cost_func.background_state = background
        cost_func.observations = observations
        cost_func.observation_errors = obs_errors
        cost_func.observation_indices = obs_indices

        H_forward, H_adjoint = CF.create_sampling_operator(obs_indices, n_state)
        cost_func.observation_operator = H_forward
        cost_func.observation_operator_adjoint = H_adjoint

        CF.setup_diagonal_covariances!(cost_func, 4.0, obs_errors.^2)

        # Preconditioned conjugate-gradient solver with its own iteration cap
        solver = GSICoreAnalysis.Minimization.PCGSolver(config, max_iterations=solver_max_iters)

        initial_control = GSICoreAnalysis.ControlVectors.ControlVector(config)

        println("  Running PCG minimization...")
        result = GSICoreAnalysis.Minimization.minimize_cost_function(
            cost_func,
            initial_control,
            solver
        )

        println(@sprintf("  Converged: %s", result.converged))
        println(@sprintf("  Iterations: %d", result.iterations))
        println(@sprintf("  Final cost: %.6e", result.final_cost))
        println(@sprintf("  Final gradient norm: %.6e", result.final_gradient_norm))

        # Basic sanity checks on the reported result
        @test result.iterations > 0
        @test result.iterations <= solver_max_iters  # honor the solver's own cap
        @test result.final_cost >= 0.0
        @test isfinite(result.final_cost)

        # Cost history must be (numerically) monotonically non-increasing
        if length(result.cost_history) > 1
            for i in 2:length(result.cost_history)
                @test result.cost_history[i] <= result.cost_history[i-1] + 1e-10  # Allow small numerical errors
            end
            println("  ✓ Cost decreases monotonically")
        end

        # Gradient norm should shrink between first and last iteration
        if length(result.gradient_norms) > 1
            initial_grad_norm = result.gradient_norms[1]
            final_grad_norm = result.gradient_norms[end]
            @test final_grad_norm < initial_grad_norm
            println(@sprintf("  ✓ Gradient norm reduced from %.6e to %.6e", initial_grad_norm, final_grad_norm))
        end

        # Net cost reduction must be strictly positive
        if length(result.cost_history) > 1
            initial_cost = result.cost_history[1]
            cost_reduction = initial_cost - result.final_cost
            reduction_pct = 100.0 * cost_reduction / initial_cost
            println(@sprintf("  ✓ Cost reduced by %.2f%%", reduction_pct))
            @test cost_reduction > 0.0
        end

        println("  ✓ PCG solver tests passed")
    end

    # ==========================================================================
    # Test 3: Lanczos Solver Convergence
    # ==========================================================================
    @testset "Lanczos Solver Convergence" begin
        println("\n[Test 3] Lanczos Solver Convergence")

        # Module shorthands
        CF = GSICoreAnalysis.CostFunctions
        MZ = GSICoreAnalysis.Minimization

        # Same small configuration used by the PCG test
        config = AnalysisConfig(
            grid_size = (10, 10, 5),
            ensemble_size = 0,
            use_hybrid = false,
            max_iterations = 100,
            convergence_tol = 1e-6
        )

        # Twin-experiment data (randn calls kept in original order so the
        # global RNG stream is unchanged)
        sv = GSICoreAnalysis.StateVectors.StateVector(config)
        n_state = length(CF.state_vector_to_array(sv))
        n_obs = 100

        obs_indices = CF.create_observation_indices(n_state, n_obs, :uniform)
        true_state = randn(n_state)
        background = true_state .+ 2.0 .* randn(n_state)
        observations = true_state[obs_indices] .+ 1.0 .* randn(n_obs)
        obs_errors = ones(n_obs)

        # Cost function wiring: data, sampling operator, diagonal B and R
        cost_func = CF.CostFunction(config)
        cost_func.background_state = background
        cost_func.observations = observations
        cost_func.observation_errors = obs_errors
        cost_func.observation_indices = obs_indices

        H_forward, H_adjoint = CF.create_sampling_operator(obs_indices, n_state)
        cost_func.observation_operator = H_forward
        cost_func.observation_operator_adjoint = H_adjoint

        CF.setup_diagonal_covariances!(cost_func, 4.0, obs_errors.^2)

        # Lanczos-based minimizer with a 20-vector Krylov basis
        solver = MZ.LanczosSolver(config, lanczos_vectors=20, max_iterations=50)
        initial_control = GSICoreAnalysis.ControlVectors.ControlVector(config)

        println("  Running Lanczos minimization...")
        result = MZ.minimize_cost_function(cost_func, initial_control, solver)

        println(@sprintf("  Converged: %s", result.converged))
        println(@sprintf("  Iterations: %d", result.iterations))
        println(@sprintf("  Final cost: %.6e", result.final_cost))
        println(@sprintf("  Final gradient norm: %.6e", result.final_gradient_norm))

        # Sanity checks on the reported solution
        @test result.iterations > 0
        @test result.final_cost >= 0.0
        @test isfinite(result.final_cost)

        # The minimization must have lowered the cost
        if length(result.cost_history) > 1
            first_cost = result.cost_history[1]
            cost_drop = first_cost - result.final_cost
            println(@sprintf("  ✓ Cost reduced by %.2f%%", 100.0 * cost_drop / first_cost))
            @test cost_drop > 0.0
        end

        println("  ✓ Lanczos solver tests passed")
    end

    # ==========================================================================
    # Test 4: Analysis Quality - Closer to Truth
    # ==========================================================================
    @testset "Analysis Quality" begin
        println("\n[Test 4] Analysis Quality - Convergence to Truth")

        # Shorthand for the cost-function submodule
        CF = GSICoreAnalysis.CostFunctions

        config = AnalysisConfig(
            grid_size = (10, 10, 5),
            ensemble_size = 0,
            use_hybrid = false,
            max_iterations = 100,
            convergence_tol = 1e-6
        )

        # Denser observation coverage than the other tests
        sv = GSICoreAnalysis.StateVectors.StateVector(config)
        n_state = length(CF.state_vector_to_array(sv))
        n_obs = 200  # More observations for better analysis

        obs_indices = CF.create_observation_indices(n_state, n_obs, :uniform)

        # Twin experiment: large background errors (sd 3) against accurate
        # observations (sd 0.5), so the analysis is pulled toward the truth.
        # randn calls stay in this order to preserve the RNG stream.
        true_state = randn(n_state)
        background = true_state .+ 3.0 .* randn(n_state)
        observations = true_state[obs_indices] .+ 0.5 .* randn(n_obs)
        obs_errors = 0.5 .* ones(n_obs)

        # Cost function wiring
        cost_func = CF.CostFunction(config)
        cost_func.background_state = background
        cost_func.observations = observations
        cost_func.observation_errors = obs_errors
        cost_func.observation_indices = obs_indices

        H_forward, H_adjoint = CF.create_sampling_operator(obs_indices, n_state)
        cost_func.observation_operator = H_forward
        cost_func.observation_operator_adjoint = H_adjoint

        # Background variance 9.0 (= 3^2) matches the perturbation amplitude
        CF.setup_diagonal_covariances!(cost_func, 9.0, obs_errors.^2)

        solver = GSICoreAnalysis.Minimization.PCGSolver(config, max_iterations=100)
        initial_control = GSICoreAnalysis.ControlVectors.ControlVector(config)

        result = GSICoreAnalysis.Minimization.minimize_cost_function(
            cost_func,
            initial_control,
            solver
        )

        # The analysed state produced by the minimization
        analysis = result.solution.values

        # State-space errors relative to the known truth
        err_background = norm(background - true_state)
        err_analysis = norm(analysis - true_state)
        err_reduction = err_background - err_analysis

        println(@sprintf("  Background error (RMS): %.6f", err_background / sqrt(n_state)))
        println(@sprintf("  Analysis error (RMS): %.6f", err_analysis / sqrt(n_state)))
        println(@sprintf("  Error reduction: %.6f", err_reduction))
        println(@sprintf("  Error reduction %%: %.2f%%", 100.0 * err_reduction / err_background))

        # Analysis must be closer to truth than the background was
        @test err_analysis < err_background
        println("  ✓ Analysis is closer to truth than background")

        # Observation-space fit (innovations) before and after the analysis
        innov_background = observations - background[obs_indices]
        innov_analysis = observations - analysis[obs_indices]

        rms_innov_background = sqrt(mean(innov_background.^2))
        rms_innov_analysis = sqrt(mean(innov_analysis.^2))

        println(@sprintf("  RMS innovation (background): %.6f", rms_innov_background))
        println(@sprintf("  RMS innovation (analysis): %.6f", rms_innov_analysis))

        # The analysis should fit the observations more closely
        @test rms_innov_analysis < rms_innov_background
        println("  ✓ Analysis fits observations better than background")

        println("  ✓ Analysis quality tests passed")
    end

    # ==========================================================================
    # Test 5: Solver Comparison
    # ==========================================================================
    @testset "Solver Comparison" begin
        println("\n[Test 5] Solver Comparison (PCG vs Lanczos)")

        # Shorthand for the cost-function submodule
        CF = GSICoreAnalysis.CostFunctions

        config = AnalysisConfig(
            grid_size = (10, 10, 5),
            ensemble_size = 0,
            use_hybrid = false,
            max_iterations = 100,
            convergence_tol = 1e-6
        )

        # One shared synthetic problem so both solvers face identical data
        state_vector = GSICoreAnalysis.StateVectors.StateVector(config)
        n_state = length(CF.state_vector_to_array(state_vector))
        n_obs = 150

        obs_indices = CF.create_observation_indices(n_state, n_obs, :uniform)
        true_state = randn(n_state)
        background = true_state .+ 2.0 .* randn(n_state)
        observations = true_state[obs_indices] .+ 1.0 .* randn(n_obs)
        obs_errors = ones(n_obs)

        # Both solvers get the same iteration budget
        solvers = [
            ("PCG", GSICoreAnalysis.Minimization.PCGSolver(config, max_iterations=50)),
            ("Lanczos", GSICoreAnalysis.Minimization.LanczosSolver(config, lanczos_vectors=20, max_iterations=50))
        ]

        # Concrete key type instead of an untyped Dict{Any,Any}; values stay
        # Any because the result type is package-defined.
        solver_results = Dict{String, Any}()

        for (solver_name, solver) in solvers
            # Rebuild the cost function per solver so one run cannot mutate
            # state seen by the other
            cost_func = CF.CostFunction(config)
            cost_func.background_state = background
            cost_func.observations = observations
            cost_func.observation_errors = obs_errors
            cost_func.observation_indices = obs_indices

            H_forward, H_adjoint = CF.create_sampling_operator(obs_indices, n_state)
            cost_func.observation_operator = H_forward
            cost_func.observation_operator_adjoint = H_adjoint

            CF.setup_diagonal_covariances!(cost_func, 4.0, obs_errors.^2)

            initial_control = GSICoreAnalysis.ControlVectors.ControlVector(config)
            result = GSICoreAnalysis.Minimization.minimize_cost_function(cost_func, initial_control, solver)

            solver_results[solver_name] = result

            println(@sprintf("  %s: iters=%d, final_cost=%.6e, grad_norm=%.6e",
                    solver_name, result.iterations, result.final_cost, result.final_gradient_norm))
        end

        # Both solvers must report a valid (non-negative) final cost
        @test solver_results["PCG"].final_cost >= 0.0
        @test solver_results["Lanczos"].final_cost >= 0.0

        # Both should achieve a substantial cost reduction (key is unused here)
        for (_, result) in solver_results
            if length(result.cost_history) > 1
                reduction = 100.0 * (result.cost_history[1] - result.final_cost) / result.cost_history[1]
                @test reduction > 10.0  # At least 10% reduction
            end
        end

        println("  ✓ Both solvers converge successfully")
    end

    # ==========================================================================
    # Test 6: Execution Time Validation
    # ==========================================================================
    @testset "Performance" begin
        println("\n[Test 6] Performance Validation")

        # Shorthand for the cost-function submodule
        CF = GSICoreAnalysis.CostFunctions

        config = AnalysisConfig(
            grid_size = (10, 10, 5),
            ensemble_size = 0,
            use_hybrid = false,
            max_iterations = 20,
            convergence_tol = 1e-6
        )

        state_vector = GSICoreAnalysis.StateVectors.StateVector(config)
        n_state = length(CF.state_vector_to_array(state_vector))
        n_obs = 100

        obs_indices = CF.create_observation_indices(n_state, n_obs, :uniform)
        # Pure-noise data: only wall-clock behavior is under test here
        background = randn(n_state)
        observations = randn(n_obs)
        obs_errors = ones(n_obs)

        cost_func = CF.CostFunction(config)
        cost_func.background_state = background
        cost_func.observations = observations
        cost_func.observation_errors = obs_errors
        cost_func.observation_indices = obs_indices

        H_forward, H_adjoint = CF.create_sampling_operator(obs_indices, n_state)
        cost_func.observation_operator = H_forward
        cost_func.observation_operator_adjoint = H_adjoint

        CF.setup_diagonal_covariances!(cost_func, 4.0, obs_errors.^2)

        solver = GSICoreAnalysis.Minimization.PCGSolver(config, max_iterations=20)
        initial_control = GSICoreAnalysis.ControlVectors.ControlVector(config)

        # Time the minimization.  NOTE: if this is the first solver call in
        # the session, the measurement includes JIT compilation, so the
        # acceptance threshold below is deliberately generous.
        start_time = time()
        result = GSICoreAnalysis.Minimization.minimize_cost_function(cost_func, initial_control, solver)
        elapsed_time = time() - start_time

        # Guard against a zero-iteration result to avoid division by zero
        # in the per-iteration report below
        iters_done = max(result.iterations, 1)

        println(@sprintf("  Execution time: %.3f seconds", elapsed_time))
        println(@sprintf("  Time per iteration: %.3f ms", 1000 * elapsed_time / iters_done))

        # Should complete in reasonable time (< 30 seconds for small problem)
        @test elapsed_time < 30.0

        println("  ✓ Performance is acceptable")
    end

end  # @testset "Real Minimization Tests"

# Closing banner: reaching this point means no testset aborted
let footer = "="^80
    println("\n" * footer)
    println("All Real Minimization Tests Passed Successfully!")
    println(footer)
end
