using Test
using FLEXINVERT
using FLEXINVERT.ConjugateGradient
using FLEXINVERT.QuasiNewton
using FLEXINVERT.Inversion
using FLEXINVERT.StateVector
using FLEXINVERT.PriorCovariance
using FLEXINVERT.CoreTypes
using FLEXINVERT.Settings
using FLEXINVERT.ObservationsCore
using FLEXINVERT.ForwardModel
using Dates
using LinearAlgebra

@testset "Optimization Tests" begin

    @testset "Conjugate Gradient" begin
        # Convex quadratic f(x) = 0.5 x'Ax + b'x + c with SPD A has the
        # unique analytic minimizer x* = -A \ b, so CG should recover it
        # almost exactly in very few iterations.
        hess = [2.0 0.5; 0.5 1.0]  # symmetric positive definite
        lin = [1.0, -0.5]
        offset = 0.0

        f = x -> 0.5 * dot(x, hess * x) + dot(lin, x) + offset
        g = x -> hess * x + lin

        start = zeros(2)
        result = congrad!(f, g, start; maxiter=20, tol=1e-8, verbose=false)

        @test result.success
        # A 2-D quadratic should need only a handful of CG iterations
        @test result.iterations <= 10

        # Iterate should match the analytic minimizer
        xstar = -hess \ lin
        @test norm(result.x - xstar) < 1e-6

        # Cost history must be monotonically non-increasing (tiny slack
        # allowed for floating-point noise)
        @test all(diff(result.cost_history) .<= 1e-10)
    end

    @testset "L-BFGS Quasi-Newton" begin
        # Rosenbrock banana function, global minimum at (1, 1) — a classic
        # stress test for quasi-Newton methods.
        f = x -> 100 * (x[2] - x[1]^2)^2 + (1 - x[1])^2
        g = function (x)
            t = x[2] - x[1]^2
            return [-400 * x[1] * t - 2 * (1 - x[1]), 200 * t]
        end

        start = [-1.0, 2.0]
        result = lbfgs_optimize(f, g, start;
                               maxiter=100, tol=1e-6, verbose=false)

        @test result.success
        # The curved valley makes tight convergence slow; only require the
        # final iterate to land near the minimizer.
        @test norm(result.x - [1.0, 1.0]) < 0.1

        # The optimizer must at least have reduced the cost overall
        @test result.cost_history[end] < result.cost_history[1]
    end

    @testset "M1QN3 Interface" begin
        # Separable quadratic sum(x_i^2) with its minimum at the origin.
        f = x -> sum(abs2, x)
        g = x -> 2 .* x

        result = m1qn3!(f, g, [1.0, -1.0, 0.5]; maxiter=50, tol=1e-8, verbose=false)

        @test result.success
        @test norm(result.x) < 1e-6  # iterate should collapse onto zero
    end

    @testset "Inversion Cost Function" begin
        # Minimal inversion setup: small CO2 domain covering two days.
        config = Config(
            datei = 20200301, datef = 20200302,
            iwest = 110.0, ieast = 112.0,
            isouth = 30.0, inorth = 32.0,
            dx = 0.5, dy = 0.5,
            species = "co2",
            method = "lbfgs"
        )

        domain = Domain(config)

        # Two synthetic observations at a single station "AAA"
        obs = Observations(
            [1.0, 2.0], ["AAA", "AAA"], [420.1, 420.2], [0.5, 0.5], [0.05, 0.05],
            [DateTime(2020, 3, 1), DateTime(2020, 3, 2)],
            Dict{String, Vector{Int}}("AAA" => [1, 2]),
            Dict{Float64, Vector{Int}}(1.0 => [1], 2.0 => [2]),
            [], [], [], [], [], [], [], []
        )

        # Prior covariance and control-vector state for this domain
        covariance = build_prior_covariance(config, domain)
        state = initialize_state(config, domain, covariance)

        # Prior fluxes with only a background concentration populated
        prior_fluxes = PriorFluxes(nothing, nothing, nothing, [420.0], nothing, nothing, nothing)

        # Cost at the prior (phi = 0) must be finite and non-negative
        phi_test = zeros(state.n_control)
        cost = cost_function(phi_test, state, prior_fluxes, obs, covariance, config, domain)

        @test cost >= 0.0
        @test isfinite(cost)

        # Analytic gradient: correct length, every entry finite
        grad = gradient_function(phi_test, state, prior_fluxes, obs, covariance, config, domain)

        @test length(grad) == state.n_control
        @test all(isfinite.(grad))

        # Central finite-difference gradient for comparison.
        # NOTE: step is named `h`, not `eps`, to avoid shadowing Base.eps.
        h = 1e-6
        grad_fd = zeros(state.n_control)
        for i in eachindex(grad_fd)
            phi_plus = copy(phi_test)
            phi_plus[i] += h
            phi_minus = copy(phi_test)
            phi_minus[i] -= h

            cost_plus = cost_function(phi_plus, state, prior_fluxes, obs, covariance, config, domain)
            cost_minus = cost_function(phi_minus, state, prior_fluxes, obs, covariance, config, domain)

            grad_fd[i] = (cost_plus - cost_minus) / (2 * h)
        end

        # Loose tolerance: with the simplified forward model, the analytic
        # gradient only approximates the finite-difference one.
        @test norm(grad - grad_fd) / norm(grad_fd) < 0.5
    end

    @testset "Full Inversion Pipeline" begin
        # End-to-end inversion on a tiny single-observation problem.
        config = Config(
            datei = 20200301, datef = 20200302,
            iwest = 110.0, ieast = 112.0,
            isouth = 30.0, inorth = 32.0,
            dx = 0.5, dy = 0.5,
            species = "co2",
            method = "lbfgs"
        )

        domain = Domain(config)
        files = Files(path_output = tempdir())

        # A single observation at station "AAA"
        obs = Observations(
            [1.0], ["AAA"], [420.1], [0.5], [0.05],
            [DateTime(2020, 3, 1)],
            Dict{String, Vector{Int}}("AAA" => [1]),
            Dict{Float64, Vector{Int}}(1.0 => [1]),
            [], [], [], [], [], [], [], []
        )

        # Covariance, state vector, and prior fluxes for the run
        covariance = build_prior_covariance(config, domain)
        state = initialize_state(config, domain, covariance)
        prior_fluxes = PriorFluxes(nothing, nothing, nothing, [420.0], nothing, nothing, nothing)

        result = invert!(state, prior_fluxes, obs, covariance, config, files, domain)

        # Basic sanity checks on the returned result object
        @test result isa InversionResult
        @test result.method == "lbfgs"
        @test result.final_cost >= 0.0
        @test !isempty(result.cost_history)

        # Optimization logs should land in the output directory
        save_optimization_logs(result, files)
        @test isfile(joinpath(files.path_output, "cost.txt"))
    end

    @testset "Line Search Functions" begin
        # Backtracking line search along the steepest-descent direction
        # of a simple convex quadratic.
        f = x -> sum(abs2, x)
        g = x -> 2 .* x

        x = [1.0, -1.0]
        direction = -g(x)  # steepest descent

        alpha = line_search(f, g, x, direction)

        # Step size must be a positive fraction of the full step
        @test alpha > 0
        @test alpha <= 1.0

        # Sufficient-decrease (Armijo) condition with c1 = 1e-4
        f0 = f(x)
        c1 = 1e-4
        slope = dot(g(x), direction)
        @test f(x + alpha * direction) <= f0 + c1 * alpha * slope
    end

    @testset "Preconditioned Conjugate Gradient" begin
        # Diagonal quadratic: an exact diagonal preconditioner should make
        # CG converge almost immediately.
        D = Diagonal([4.0, 1.0, 2.0])
        rhs = [1.0, -0.5, 2.0]

        f = x -> 0.5 * dot(x, D * x) + dot(rhs, x)
        g = x -> D * x + rhs

        # Apply the inverse of the diagonal as the preconditioner
        precond = grad -> grad ./ diag(D)

        result = preconditioned_congrad!(f, g, precond, zeros(3);
                                       maxiter=20, tol=1e-8, verbose=false)

        @test result.success
        @test result.iterations <= 10  # near-perfect preconditioning

        # Iterate should match the analytic minimizer -D \ rhs
        xstar = -D \ rhs
        @test norm(result.x - xstar) < 1e-6
    end

    @testset "BFGS Full Hessian" begin
        # Shifted quadratic with its minimum at (1, 2)
        target = [1.0, 2.0]
        f = x -> sum(abs2, x .- target)
        g = x -> 2 .* (x .- target)

        result = bfgs_optimize(f, g, zeros(2); maxiter=20, tol=1e-8, verbose=false)

        @test result.success
        @test norm(result.x - target) < 1e-6
    end
end
