"""
Scientific Validation Tests for GSICoreAnalysis.jl

Tests the scientific correctness of atmospheric data assimilation algorithms.
"""

using Test
using Random
using Statistics
using LinearAlgebra

@testset "Scientific Validation Tests" begin
    
    @testset "Bias Correction Algorithm Validation" begin
        @info "Testing bias correction algorithm accuracy..."
        
        # Create synthetic radiance observations with a known constant bias so
        # the estimator's accuracy can be scored against ground truth.
        n_obs = 100
        true_bias = 2.5  # Known bias in brightness temperature (K)
        observation_error = 0.8  # 1-sigma random observation error (K)
        
        Random.seed!(2024)  # deterministic synthetic data
        
        # Generate synthetic "true" brightness temperatures around 250 K
        true_bt = 250.0 .+ 20 * randn(n_obs)
        
        # Observations = truth + constant bias + random noise
        observed_bt = true_bt .+ true_bias .+ observation_error * randn(n_obs)
        
        # Mock VarBC-style bias correction: relax the bias estimate toward the
        # mean innovation with a conservative gain of 0.1 per iteration.
        #
        # BUGFIX: the previous default of max_iter=10 could never converge —
        # with gain 0.1 the residual bias shrinks by a factor of 0.9 each
        # iteration, so ~32 iterations are required before |update| < 0.01.
        # After only 10 iterations the estimate retained ~35% of the bias
        # (error ≈ 0.87 K), failing the 0.5 K accuracy assertions below.
        function estimate_bias_varbc_style(observations, true_values; max_iter=100)
            bias_estimate = 0.0
            
            for iter in 1:max_iter
                # Compute innovations (O - B) with the current bias removed
                innovations = observations .- true_values .- bias_estimate
                
                # Simple bias update (would be more complex in real VarBC)
                bias_update = mean(innovations) * 0.1  # Conservative update
                bias_estimate += bias_update
                
                # Converged once the update falls below 0.01 K
                if abs(bias_update) < 0.01
                    break
                end
            end
            
            return bias_estimate
        end
        
        estimated_bias = estimate_bias_varbc_style(observed_bt, true_bt)
        bias_error = abs(estimated_bias - true_bias)
        
        @test bias_error < 0.5  # Should estimate within 0.5K
        @test abs(estimated_bias - true_bias) / true_bias < 0.2  # Within 20% relative error
        
        # Apply bias correction
        corrected_bt = observed_bt .- estimated_bias
        
        # Validate correction effectiveness: corrected RMSE should collapse
        # toward the pure observation-error level once the bias is removed.
        original_rmse = sqrt(mean((observed_bt .- true_bt).^2))
        corrected_rmse = sqrt(mean((corrected_bt .- true_bt).^2))
        
        @test corrected_rmse < original_rmse  # Should improve RMSE
        @test corrected_rmse < observation_error * 1.5  # Should be close to observation error
        
        @info "Bias estimation: true=$(round(true_bias, digits=2))K, estimated=$(round(estimated_bias, digits=2))K, error=$(round(bias_error, digits=2))K"
        @info "RMSE: original=$(round(original_rmse, digits=2))K, corrected=$(round(corrected_rmse, digits=2))K"
    end
    
    @testset "Spatial Processing Effectiveness Validation" begin
        @info "Testing spatial processing effectiveness..."
        
        # Create spatially correlated observation field on a 20x20 grid
        nx, ny = 20, 20
        x_coords = range(-10, 10, length=nx)
        y_coords = range(-10, 10, length=ny)
        
        # Generate correlated field using simple Gaussian correlation
        # NOTE(review): correlation_length is declared but never used below;
        # the field's smoothness comes from the sin/cos structure instead.
        correlation_length = 3.0
        true_field = zeros(nx, ny)
        
        Random.seed!(12345)
        
        for i in 1:nx, j in 1:ny
            # Simple random field with spatial correlation
            # (smooth deterministic pattern plus white noise)
            true_field[i, j] = sin(x_coords[i] * 0.5) * cos(y_coords[j] * 0.3) + 0.5 * randn()
        end
        
        # Create dense observations from this field (named tuples with
        # position, noisy value, error, and a random quality score in [0.8, 1.0])
        dense_obs = []
        for i in 1:2:nx, j in 1:2:ny  # Every other grid point
            obs = (
                x = x_coords[i],
                y = y_coords[j],
                value = true_field[i, j] + 0.2 * randn(),  # Add observation error
                error = 0.2,
                quality = 0.8 + 0.2 * rand()
            )
            push!(dense_obs, obs)
        end
        
        original_count = length(dense_obs)
        @test original_count > 0
        
        # Mock spatial thinning: greedy single pass over the candidates,
        # enforcing a minimum separation and preferring higher-quality
        # observations when two candidates conflict.
        function thin_observations_spatially(obs_list, min_distance=2.0)
            thinned = []
            
            for obs in obs_list
                keep = true
                for kept_obs in thinned
                    # Euclidean separation between candidate and an already-kept obs
                    distance = sqrt((obs.x - kept_obs.x)^2 + (obs.y - kept_obs.y)^2)
                    if distance < min_distance
                        # Keep higher quality observation
                        if obs.quality <= kept_obs.quality
                            keep = false
                            break
                        else
                            # Replace the kept observation with this higher quality one.
                            # NOTE(review): filter! mutates `thinned` while the
                            # `for kept_obs in thinned` loop iterates it — safe here
                            # only because of the immediate `break`. Removal is by
                            # value equality (`!=` on named tuples), which would drop
                            # duplicates too; unlikely with continuous random values.
                            filter!(o -> o != kept_obs, thinned)
                            # NOTE(review): after a replacement the remaining kept
                            # observations are not re-checked, so the surviving set
                            # can still contain pairs closer than min_distance.
                            break
                        end
                    end
                end
                
                if keep
                    push!(thinned, obs)
                end
            end
            
            return thinned
        end
        
        thinned_obs = thin_observations_spatially(dense_obs, 3.0)
        thinned_count = length(thinned_obs)
        
        @test thinned_count < original_count
        @test thinned_count > 0
        
        # Validate that thinning preserved spatial coverage
        # (compare bounding-box extents before and after thinning)
        original_x_range = maximum(obs.x for obs in dense_obs) - minimum(obs.x for obs in dense_obs)
        original_y_range = maximum(obs.y for obs in dense_obs) - minimum(obs.y for obs in dense_obs)
        
        thinned_x_range = maximum(obs.x for obs in thinned_obs) - minimum(obs.x for obs in thinned_obs)
        thinned_y_range = maximum(obs.y for obs in thinned_obs) - minimum(obs.y for obs in thinned_obs)
        
        # Should preserve most of the spatial coverage
        @test thinned_x_range > 0.7 * original_x_range
        @test thinned_y_range > 0.7 * original_y_range
        
        # Validate that average quality improved or stayed the same
        # (quality-preferring replacement should not degrade the mean)
        original_avg_quality = mean(obs.quality for obs in dense_obs)
        thinned_avg_quality = mean(obs.quality for obs in thinned_obs)
        
        @test thinned_avg_quality ≥ original_avg_quality * 0.95  # Should maintain or improve quality
        
        @info "Spatial thinning: $(original_count) → $(thinned_count) observations"
        @info "Quality: $(round(original_avg_quality, digits=3)) → $(round(thinned_avg_quality, digits=3))"
        @info "Coverage: X $(round(thinned_x_range/original_x_range*100, digits=1))%, Y $(round(thinned_y_range/original_y_range*100, digits=1))%"
    end
    
    @testset "Innovation Statistics Validation" begin
        @info "Testing innovation statistics..."
        
        # Synthetic observation/background pairs built around a common truth,
        # so the innovation (O - B) statistics can be checked against theory.
        n_obs = 200
        background_error = 1.5   # background RMS error
        observation_error = 0.8  # observation RMS error
        
        Random.seed!(54321)  # reproducible draws
        
        # Truth, then background and observations perturbed independently.
        # The three randn(n_obs) draws must stay in this order for reproducibility.
        true_state = 10 * randn(n_obs) .+ 285.0
        background = background_error * randn(n_obs) .+ true_state
        observations = observation_error * randn(n_obs) .+ true_state
        
        # Observation-minus-background departures
        innovations = observations - background
        
        innovation_mean = mean(innovations)
        innovation_std = std(innovations)
        
        # For uncorrelated errors, innovation variance is the sum of the
        # observation and background error variances:
        # σ²_innovation = σ²_observation + σ²_background
        theoretical_innovation_std = sqrt(observation_error^2 + background_error^2)
        
        # Departures should be unbiased and match the theoretical spread
        @test abs(innovation_mean) < 0.3
        @test abs(innovation_std - theoretical_innovation_std) < 0.3
        
        # Dividing by the sample std must yield near-zero mean and unit spread
        scaled = innovations ./ innovation_std
        @test abs(mean(scaled)) < 0.2
        @test abs(std(scaled) - 1.0) < 0.2
        
        @info "Innovation stats: mean=$(round(innovation_mean, digits=3)), std=$(round(innovation_std, digits=3))"
        @info "Theoretical std=$(round(theoretical_innovation_std, digits=3)), ratio=$(round(innovation_std/theoretical_innovation_std, digits=3))"
    end
    
    @testset "Quality Control Effectiveness Validation" begin
        @info "Testing quality control effectiveness..."
        
        # Build a labelled sample: realistic observations plus a known number
        # of gross outliers, so QC skill can be scored like a classifier.
        n_good = 80
        n_outliers = 20
        
        Random.seed!(98765)  # deterministic sample
        
        # Well-behaved observations (labelled is_good = true).
        # Field order matters: value's randn() is drawn before background's.
        good_obs = [(
            value = 285.0 + 2.0 * randn(),  # Normal variability
            error = 0.5,
            background = 285.0 + 1.5 * randn(),
            is_good = true
        ) for _ in 1:n_good]
        
        # Gross outliers with ±15 departures (labelled is_good = false)
        outlier_obs = [(
            value = 285.0 + (rand() > 0.5 ? 15.0 : -15.0) + randn(),  # Large errors
            error = 0.5,
            background = 285.0 + 1.5 * randn(),
            is_good = false
        ) for _ in 1:n_outliers]
        
        all_obs = vcat(good_obs, outlier_obs)
        shuffle!(all_obs)  # Randomize order
        
        # Mock gross-error check: flag any observation whose departure exceeds
        # threshold_factor standard deviations of the expected innovation spread.
        function apply_gross_check(obs_list, threshold_factor=3.0)
            return map(obs_list) do obs
                innovation = obs.value - obs.background
                expected_std = sqrt(obs.error^2 + 2.0^2)  # Assume background error = 2.0
                flagged = abs(innovation) > threshold_factor * expected_std
                (
                    observation = obs,
                    innovation = innovation,
                    gross_check_failed = flagged,
                    qc_passed = !flagged
                )
            end
        end
        
        qc_results = apply_gross_check(all_obs, 3.0)
        
        # Tally pass/fail decisions
        passed_count = count(r -> r.qc_passed, qc_results)
        failed_count = length(qc_results) - passed_count
        
        # Score decisions against the known labels (confusion-matrix cells)
        true_positives = count(r -> !r.qc_passed && !r.observation.is_good, qc_results)
        false_positives = count(r -> !r.qc_passed && r.observation.is_good, qc_results)
        true_negatives = count(r -> r.qc_passed && r.observation.is_good, qc_results)
        false_negatives = count(r -> r.qc_passed && !r.observation.is_good, qc_results)
        
        # Standard detection metrics
        sensitivity = true_positives / (true_positives + false_negatives)  # Recall
        specificity = true_negatives / (true_negatives + false_positives)
        precision = true_positives / (true_positives + false_positives)
        
        @test sensitivity > 0.6  # Should catch most outliers
        @test specificity > 0.8  # Should not reject too many good observations
        @test precision > 0.5   # Should have reasonable precision
        
        @info "QC Results: $(passed_count) passed, $(failed_count) failed"
        @info "QC Metrics: Sensitivity=$(round(sensitivity, digits=3)), Specificity=$(round(specificity, digits=3)), Precision=$(round(precision, digits=3))"
        @info "Confusion Matrix: TP=$true_positives, FP=$false_positives, TN=$true_negatives, FN=$false_negatives"
    end
    
    @testset "Algorithm Convergence Validation" begin
        @info "Testing algorithm convergence properties..."
        
        # Shared strictly convex quadratic model J(x) = 0.5 x'Ax + b'x + c.
        # Hoisted here (previously duplicated inside the cost and gradient
        # functions) so the two definitions cannot drift out of sync.
        quad_A = [2.0 1.0; 1.0 3.0]  # Positive definite matrix
        quad_b = [1.0, -2.0]
        quad_c = 5.0
        
        # Quadratic cost function standing in for a variational cost J(x)
        mock_cost_function(x::Vector{Float64}) = 0.5 * dot(x, quad_A * x) + dot(quad_b, x) + quad_c
        
        # Analytic gradient ∇J(x) = A*x + b of the cost above
        mock_gradient(x::Vector{Float64}) = quad_A * x + quad_b
        
        # Fixed-step gradient descent. Returns (x, cost history, gradient-norm
        # history, converged flag); convergence means ‖∇J‖ < tol.
        function gradient_descent(x0::Vector{Float64}; max_iter=100, tol=1e-6, step_size=0.1)
            x = copy(x0)
            costs = Float64[]
            gradients = Float64[]
            
            for iter in 1:max_iter
                cost = mock_cost_function(x)
                grad = mock_gradient(x)
                grad_norm = norm(grad)
                
                push!(costs, cost)
                push!(gradients, grad_norm)
                
                # Converged once the gradient norm drops below tol
                if grad_norm < tol
                    @info "Converged after $iter iterations"
                    return x, costs, gradients, true
                end
                
                # Steepest-descent update
                x = x - step_size * grad
            end
            
            @warn "Did not converge after $max_iter iterations"
            return x, costs, gradients, false
        end
        
        # Test convergence from different starting points.
        # BUGFIX: the call previously capped max_iter at 50, but with step 0.1
        # the slow eigenmode of A (λ ≈ 1.382) contracts by only ≈ 0.862 per
        # iteration, so driving ‖∇J‖ below 1e-4 from these starts needs about
        # 70-85 iterations — every `@test converged` failed. 100 iterations
        # (the helper's own default) converges comfortably.
        starting_points = [[1.0, 1.0], [-2.0, 3.0], [5.0, -1.0]]
        
        for (i, x0) in enumerate(starting_points)
            x_opt, costs, grad_norms, converged = gradient_descent(x0, max_iter=100, tol=1e-4)
            
            @test converged
            @test length(costs) > 1
            @test costs[end] < costs[1]  # Should decrease cost
            @test grad_norms[end] < grad_norms[1]  # Should decrease gradient norm
            
            # Fraction of steps where the cost does not increase. Renamed from
            # the misleading `non_decreasing_count`, and the lambda index is `k`
            # so it no longer shadows the outer loop variable `i`.
            non_increasing_count = count(k -> costs[k+1] <= costs[k], 1:length(costs)-1)
            @test non_increasing_count / (length(costs)-1) > 0.8  # Should be mostly decreasing
        end
        
        @info "Convergence test passed for $(length(starting_points)) starting points"
    end
end