"""
    test_minimization.jl

Test suite for the Minimization module of GSICoreAnalysis.jl.
This module tests the iterative optimization algorithms including
PCG, BiCG, convergence criteria, and line search methods.
"""

using Test
using GSICoreAnalysis
using GSICoreAnalysis.Minimization
using GSICoreAnalysis.ControlVectors
using GSICoreAnalysis.CostFunctions
using LinearAlgebra
using Random

# Set reproducible seed
Random.seed!(789)

@testset "Minimization Module Tests" begin
    
    @testset "Solver Construction" begin
        # Shared Float64 configuration for the construction tests.
        cfg = AnalysisConfig{Float64}(
            grid_size = (4, 4, 2),
            max_iterations = 50,
            convergence_tol = 1e-5
        )

        # PCG solver built entirely from config defaults.
        pcg_defaults = PCGSolver(cfg)
        @test pcg_defaults isa PCGSolver{Float64}
        @test pcg_defaults.config === cfg
        @test pcg_defaults.max_iterations == 50
        @test pcg_defaults.tolerance ≈ 1e-5
        @test pcg_defaults.line_search_iterations == 10  # default value
        @test pcg_defaults.reorthogonalize == true
        @test pcg_defaults.adaptive_tolerance == true

        # PCG solver with every keyword explicitly overridden.
        pcg_tuned = PCGSolver(cfg,
                              max_iterations = 100,
                              tolerance = 1e-6,
                              line_search_iterations = 5,
                              reorthogonalize = false,
                              adaptive_tolerance = false)

        @test pcg_tuned.max_iterations == 100
        @test pcg_tuned.tolerance ≈ 1e-6
        @test pcg_tuned.line_search_iterations == 5
        @test pcg_tuned.reorthogonalize == false
        @test pcg_tuned.adaptive_tolerance == false

        # BiCG solver built from the same configuration.
        bicg_defaults = BiCGSolver(cfg)
        @test bicg_defaults isa BiCGSolver{Float64}
        @test bicg_defaults.config === cfg
        @test bicg_defaults.max_iterations == 50
        @test bicg_defaults.tolerance ≈ 1e-5
        @test bicg_defaults.stabilized == true  # default value

        # The solver's element type should follow the config's precision.
        cfg_f32 = AnalysisConfig{Float32}(grid_size = (4, 4, 2))
        pcg_f32 = PCGSolver(cfg_f32)
        @test pcg_f32 isa PCGSolver{Float32}
        @test pcg_f32.tolerance isa Float32
    end
    
    @testset "Convergence Check" begin
        # Gradient-norm criterion: converged only once the norm drops below tolerance.
        @test convergence_check(1e-6, 1e-3, 1e-5, 10, false)
        @test !convergence_check(1e-3, 1e-3, 1e-5, 10, false)
        # A sufficiently small cost reduction also counts as converged.
        @test convergence_check(1e-3, 1e-8, 1e-5, 15, false)

        # Adaptive mode relaxes the tolerance, but not during the first iterations.
        @test convergence_check(2e-5, 1e-3, 1e-5, 10, true)
        @test !convergence_check(2e-5, 1e-3, 1e-5, 3, true)

        # Edge cases: exact zero converges immediately; Inf never converges.
        @test convergence_check(0.0, 0.0, 1e-5, 1, false)
        @test !convergence_check(Inf, 1e-3, 1e-5, 10, false)
    end
    
    @testset "Line Search" begin
        # Small 3x3x1 grid -> 9-element state, so the hand-written vectors line up.
        config = AnalysisConfig{Float64}(grid_size = (3, 3, 1))

        # Simple quadratic objective: J(x) = 0.5 * x'*A*x - b'*x
        n_state = prod(config.grid_size)
        A = Diagonal(ones(n_state))
        b = ones(n_state)

        # Mock cost function implementing the AbstractCostFunction interface.
        # Julia forbids `struct` definitions inside a `@testset` body ("struct"
        # must appear at top level), so the type is defined — and the instance
        # constructed — in a single `@eval`, which evaluates at module top level.
        mock_cf = @eval begin
            struct MockCostFunction{T} <: AbstractCostFunction{T}
                A::Matrix{T}
                b::Vector{T}
                current_cost::T
            end
            MockCostFunction($(Matrix(A)), $b, 0.0)
        end

        # Reference implementation of the quadratic cost.
        # NOTE(review): this helper is never called by the assertions below —
        # line_search presumably evaluates the cost itself; confirm or remove.
        evaluate_cost_mock(cf, x::Vector{T}) where {T} =
            0.5 * dot(x, cf.A * x) - dot(cf.b, x)

        current_state = ControlVector(config)
        search_direction = ControlVector(config)
        current_gradient = ControlVector(config)

        # A uniformly negative direction is a descent direction for this objective.
        current_state.values .= [2.0, 1.5, 1.0, 0.5, 0.0, -0.5, -1.0, -1.5, -2.0]
        search_direction.values .= [-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0]
        current_gradient.values .= [1.0, 0.5, 0.0, -0.5, -1.0, -1.5, -2.0, -2.5, -3.0]

        # Line search along a descent direction: expect a finite step in [0, 1].
        step_size = line_search(mock_cf, current_state, search_direction, current_gradient, 5)
        @test step_size >= 0.0
        @test step_size <= 1.0
        @test isfinite(step_size)

        # Flipping the sign turns it into an ascent direction: expect a zero step.
        search_direction.values .*= -1
        step_size_bad = line_search(mock_cf, current_state, search_direction, current_gradient, 5)
        @test step_size_bad == 0.0
    end

    @testset "MinimizationResult" begin
        # Build a result by hand and verify the fields are stored untouched.
        config = AnalysisConfig{Float64}(grid_size = (2, 2, 1))
        cv = ControlVector(config)

        cost_history = [10.0, 5.0, 2.0, 1.0]
        gradient_norms = [1.0, 0.5, 0.2, 0.1]

        result = MinimizationResult(
            cv,              # solution
            cost_history,    # cost_history
            gradient_norms,  # gradient_norms
            true,            # converged
            4,               # iterations
            1.0,             # final_cost
            0.1              # final_gradient_norm
        )

        @test result.solution === cv
        @test result.cost_history == cost_history
        @test result.gradient_norms == gradient_norms
        @test result.converged == true
        @test result.iterations == 4
        @test result.final_cost == 1.0
        @test result.final_gradient_norm == 0.1
    end

    @testset "PCG Solver Integration" begin
        # End-to-end PCG run on a small, well-posed problem.
        config = AnalysisConfig{Float64}(
            grid_size = (3, 2, 2),
            max_iterations = 10,
            convergence_tol = 1e-3
        )

        cost_function = CostFunction(config)
        n_state = prod(config.grid_size)

        # Identity background covariance and zero background state.
        B = Diagonal(ones(n_state))
        initialize_background_covariance!(cost_function, B)
        set_background!(cost_function, zeros(n_state))

        # Observe the first three state components directly.
        n_obs = 3
        H(x) = x[1:n_obs]
        observations = [1.0, 0.5, -0.5]
        R = Diagonal(ones(n_obs))

        set_observations!(cost_function, observations, H)
        cost_function.observation_covariance = R

        initial_guess = ControlVector(config)
        fill!(initial_guess.values, 0.0)

        solver = PCGSolver(config, max_iterations = 5, tolerance = 1e-2)

        result = pcg_solve(solver, cost_function, initial_guess)

        @test result isa MinimizationResult
        @test result.iterations <= 5
        @test length(result.cost_history) == result.iterations + 1  # includes initial cost
        @test length(result.gradient_norms) >= result.iterations
        @test result.final_cost >= 0.0  # variational cost is non-negative

        # The cost should not increase over the run.
        if length(result.cost_history) > 1
            @test result.cost_history[end] <= result.cost_history[1]
        end

        # Solution should be allocated.
        @test result.solution.is_allocated == true
    end

    @testset "BiCG Solver Integration" begin
        # Same style of problem as the PCG test, solved with BiCG.
        config = AnalysisConfig{Float64}(
            grid_size = (3, 2, 2),
            max_iterations = 10,
            convergence_tol = 1e-3
        )

        cost_function = CostFunction(config)
        n_state = prod(config.grid_size)

        B = Diagonal(ones(n_state))
        initialize_background_covariance!(cost_function, B)
        set_background!(cost_function, zeros(n_state))

        H(x) = x[1:2]
        observations = [0.5, -0.2]
        R = Diagonal(ones(2))

        set_observations!(cost_function, observations, H)
        cost_function.observation_covariance = R

        initial_guess = ControlVector(config)
        fill!(initial_guess.values, 0.0)

        solver = BiCGSolver(config, max_iterations = 5, tolerance = 1e-2)

        result = bicg_solve(solver, cost_function, initial_guess)

        @test result isa MinimizationResult
        @test result.iterations <= 5
        @test result.final_cost >= 0.0
        @test result.solution.is_allocated == true

        # BiCG might not converge as well as PCG for this simple problem,
        # but it should at least run without errors.
        @test length(result.cost_history) > 0
        @test length(result.gradient_norms) > 0
    end

    @testset "High-Level Minimize Interface" begin
        config = AnalysisConfig{Float64}(
            grid_size = (3, 3, 1),
            max_iterations = 8,
            convergence_tol = 1e-3
        )

        cost_function = CostFunction(config)
        n_state = prod(config.grid_size)

        B = Diagonal(ones(n_state) * 2.0)
        initialize_background_covariance!(cost_function, B)
        set_background!(cost_function, zeros(n_state))

        H(x) = [x[1], x[5], x[9]]  # observe corners of the 3x3 grid
        observations = [1.0, 0.0, -1.0]
        R = Diagonal([0.5, 1.0, 0.5])

        set_observations!(cost_function, observations, H)
        cost_function.observation_covariance = R

        initial_guess = ControlVector(config)
        fill!(initial_guess.values, 0.1)  # small initial guess

        # Default solver chosen implicitly (expected to be PCG).
        result_default = minimize_cost_function(cost_function, initial_guess)
        @test result_default isa MinimizationResult

        # Explicit PCG solver.
        pcg_solver = PCGSolver(config, max_iterations = 6)
        result_pcg = minimize_cost_function(cost_function, initial_guess, pcg_solver)
        @test result_pcg isa MinimizationResult
        @test result_pcg.iterations <= 6

        # Explicit BiCG solver.
        bicg_solver = BiCGSolver(config, max_iterations = 6)
        result_bicg = minimize_cost_function(cost_function, initial_guess, bicg_solver)
        @test result_bicg isa MinimizationResult
        @test result_bicg.iterations <= 6
    end

    @testset "Solver Comparison" begin
        # Compare PCG and BiCG on the same well-conditioned problem.
        config = AnalysisConfig{Float64}(
            grid_size = (4, 3, 2),
            max_iterations = 15,
            convergence_tol = 1e-4
        )

        cost_function = CostFunction(config)
        n_state = prod(config.grid_size)

        B = Diagonal(ones(n_state) * 1.5)  # well-conditioned background covariance
        initialize_background_covariance!(cost_function, B)
        set_background!(cost_function, zeros(n_state))

        # Observe a third of the state.
        n_obs = n_state ÷ 3
        H(x) = x[1:n_obs]
        observations = randn(n_obs) * 0.5  # moderate-sized observations
        R = Diagonal(ones(n_obs))

        set_observations!(cost_function, observations, H)
        cost_function.observation_covariance = R

        initial_guess = ControlVector(config)
        randn!(initial_guess.values)
        initial_guess.values .*= 0.1  # small initial perturbation

        # Run PCG.
        pcg_solver = PCGSolver(config, max_iterations = 15, tolerance = 1e-4)
        pcg_result = minimize_cost_function(cost_function, initial_guess, pcg_solver)

        # Run BiCG from a fresh copy of the same starting point.
        initial_guess2 = ControlVector(config)
        assign!(initial_guess2, initial_guess)

        bicg_solver = BiCGSolver(config, max_iterations = 15, tolerance = 1e-4)
        bicg_result = minimize_cost_function(cost_function, initial_guess2, bicg_solver)

        # Both should produce valid (non-negative) final costs.
        @test pcg_result.final_cost >= 0.0
        @test bicg_result.final_cost >= 0.0

        # Iteration counts must respect the configured caps.
        @test pcg_result.iterations <= pcg_solver.max_iterations
        @test bicg_result.iterations <= bicg_solver.max_iterations

        # Both solvers should improve on the initial cost.
        initial_cost = evaluate_cost(cost_function, initial_guess.values)
        @test pcg_result.final_cost <= initial_cost
        @test bicg_result.final_cost <= initial_cost
    end

    @testset "Convergence Properties" begin
        # Convergence behavior under tight vs. loose tolerances.
        config = AnalysisConfig{Float64}(
            grid_size = (3, 3, 2),
            max_iterations = 20
        )

        cost_function = CostFunction(config)
        n_state = prod(config.grid_size)

        B = Diagonal(ones(n_state))
        initialize_background_covariance!(cost_function, B)
        set_background!(cost_function, zeros(n_state))

        H(x) = x[1:4]
        observations = [1.0, -0.5, 0.5, 0.0]
        R = Diagonal(ones(4) * 0.5)

        set_observations!(cost_function, observations, H)
        cost_function.observation_covariance = R

        initial_guess = ControlVector(config)
        fill!(initial_guess.values, 0.2)

        # Tight tolerance run.
        tight_solver = PCGSolver(config, max_iterations = 20, tolerance = 1e-6)
        tight_result = minimize_cost_function(cost_function, initial_guess, tight_solver)

        # Loose tolerance run.
        loose_solver = PCGSolver(config, max_iterations = 20, tolerance = 1e-2)
        loose_result = minimize_cost_function(cost_function, initial_guess, loose_solver)

        # A tighter tolerance should need at least as many iterations (when both converge).
        if tight_result.converged && loose_result.converged
            @test tight_result.iterations >= loose_result.iterations
            @test tight_result.final_gradient_norm <= loose_result.final_gradient_norm
        end

        # Both runs should improve on the initial cost.
        initial_cost = evaluate_cost(cost_function, initial_guess.values)
        @test tight_result.final_cost <= initial_cost
        @test loose_result.final_cost <= initial_cost
    end

    @testset "Error Handling" begin
        config = AnalysisConfig{Float64}(grid_size = (2, 2, 1))

        # Cost function with the background covariance deliberately left uninitialized.
        bad_cost_function = CostFunction(config)

        initial_guess = ControlVector(config)
        solver = PCGSolver(config, max_iterations = 5)

        # Should degrade gracefully (may not converge, but must not crash).
        @test_nowarn minimize_cost_function(bad_cost_function, initial_guess, solver)

        # An unknown solver type must be rejected with an error.
        # `struct` cannot be declared inside a `@testset` body, so the dummy
        # type is defined — and its instance constructed — via `@eval`.
        dummy_solver = @eval begin
            struct DummySolver{T} <: AbstractSolver{T}
                config::AbstractAnalysisConfig
            end
            DummySolver{Float64}($config)
        end
        @test_throws ErrorException minimize_cost_function(bad_cost_function, initial_guess, dummy_solver)
    end

    @testset "Memory and Performance" begin
        # Sanity checks: a modest problem should solve quickly without huge allocations.
        config = AnalysisConfig{Float64}(grid_size = (6, 6, 3))

        cost_function = CostFunction(config)
        n_state = prod(config.grid_size)

        B = Diagonal(ones(n_state))
        initialize_background_covariance!(cost_function, B)
        set_background!(cost_function, zeros(n_state))

        H(x) = x[1:10]
        observations = randn(10) * 0.2
        R = Diagonal(ones(10))

        set_observations!(cost_function, observations, H)
        cost_function.observation_covariance = R

        initial_guess = ControlVector(config)
        randn!(initial_guess.values)
        initial_guess.values .*= 0.1

        solver = PCGSolver(config, max_iterations = 5, tolerance = 1e-3)

        # Wall-clock sanity bound (loose; the first call includes compilation).
        elapsed_time = @elapsed result = minimize_cost_function(cost_function, initial_guess, solver)

        @test elapsed_time < 5.0
        @test result isa MinimizationResult

        # Allocation sanity bound (problem-dependent, intentionally generous).
        memory_used = @allocated minimize_cost_function(cost_function, initial_guess, solver)
        @test memory_used < 100_000_000  # less than ~100 MB for this problem size
    end
end

println("Minimization module tests completed successfully.")