#!/usr/bin/env julia

"""
    test_quality_control.jl

Comprehensive test suite for observation quality control algorithms in GSI.
Tests all QC methods including background checks, buddy checks, variational QC,
and observation error modeling.

Test Coverage:
- Background departure quality control
- Buddy check algorithms for spatial consistency
- Variational quality control with cost function analysis
- Observation error assignment and adaptive error inflation
- Cross-validation between different QC methods
- Performance testing for large observation datasets
"""

using Test
using GSICoreAnalysis
using GSICoreAnalysis.ObservationProcessing.QualityControl
using LinearAlgebra
using Statistics
using Random
using Dates

"""
Test background departure quality control.

Exercises `background_departure_check` on good and gross-error backgrounds,
observation-type-specific thresholds, and `adaptive_error_inflation`.
"""
@testset "Background Departure QC" begin
    println("  Testing background departure QC...")
    
    @testset "Simple Background Check" begin
        # Create test observation (surface temperature, 1.5 K observation error)
        obs = ConventionalObservation{Float64}(
            obs_type = "temperature",
            value = 288.5,
            error = 1.5,
            latitude = 40.0,
            longitude = -100.0,
            time = DateTime(2024, 1, 1, 12, 0, 0)
        )
        
        # Test with good background value: 0.7 K departure, well inside 3 sigma
        background_value = 287.8
        departure = obs.value - background_value
        normalized_departure = departure / obs.error
        
        @test abs(normalized_departure) < 3.0  # Should pass 3-sigma test
        
        qc_result = background_departure_check(obs, background_value)
        @test qc_result.passed == true
        @test qc_result.departure ≈ departure
        @test qc_result.normalized_departure ≈ normalized_departure
        
        # Test with bad background value (gross error)
        bad_background = 275.0  # 13.5 K difference -> 9 sigma normalized departure
        bad_departure = obs.value - bad_background
        bad_normalized = bad_departure / obs.error
        
        @test abs(bad_normalized) > 5.0  # Should exceed gross error threshold
        
        bad_qc_result = background_departure_check(obs, bad_background)
        @test bad_qc_result.passed == false
        @test bad_qc_result.flag == QC_GROSS_ERROR
    end
    
    @testset "Observation Type Specific Thresholds" begin
        # Different observation types should have different QC thresholds
        
        # Surface pressure - typically has tight QC
        ps_obs = ConventionalObservation{Float64}(
            obs_type = "surface_pressure",
            value = 1013.25,
            error = 1.0,
            latitude = 40.0,
            longitude = -100.0,
            time = DateTime(2024, 1, 1, 12, 0, 0)
        )
        
        # Small departure (1.25 hPa) should pass
        ps_background = 1012.0
        ps_qc = background_departure_check(ps_obs, ps_background)
        @test ps_qc.passed == true
        
        # Large departure should fail
        ps_bad_background = 1005.0  # 8.25 hPa difference
        ps_bad_qc = background_departure_check(ps_obs, ps_bad_background)
        @test ps_bad_qc.passed == false
        
        # Upper-air temperature - more lenient thresholds
        temp_obs = ConventionalObservation{Float64}(
            obs_type = "temperature",
            value = 245.0,
            error = 2.0,
            latitude = 40.0,
            longitude = -100.0,
            pressure = 300.0,  # Upper level
            time = DateTime(2024, 1, 1, 12, 0, 0)
        )
        
        temp_background = 248.0  # 3 K difference (1.5 sigma at this error)
        temp_qc = background_departure_check(temp_obs, temp_background)
        @test temp_qc.passed == true  # Should pass at upper levels
        
        # Wind observations - different error characteristics
        wind_obs = ConventionalObservation{Float64}(
            obs_type = "wind_speed",
            value = 15.0,
            error = 3.0,
            latitude = 40.0,
            longitude = -100.0,
            pressure = 850.0,
            time = DateTime(2024, 1, 1, 12, 0, 0)
        )
        
        wind_background = 18.5
        wind_qc = background_departure_check(wind_obs, wind_background)
        @test wind_qc.passed == true  # Within error bounds
    end
    
    @testset "Adaptive Error Inflation" begin
        # Test adaptive observation error inflation based on departures.
        # NOTE(review): the RNG is not seeded, so exact values differ per run;
        # consider seeding (e.g. Random.seed!) for deterministic tests.
        obs_list = ConventionalObservation{Float64}[]
        
        # Create ensemble of observations with varying departures
        for i in 1:20
            obs = ConventionalObservation{Float64}(
                obs_type = "temperature",
                value = 288.0 + randn() * 2.0,  # Random temperature
                error = 1.5,
                latitude = 40.0 + randn() * 0.1,
                longitude = -100.0 + randn() * 0.1,
                time = DateTime(2024, 1, 1, 12, 0, 0)
            )
            push!(obs_list, obs)
        end
        
        # Background field: constant 287.5 K at every observation location
        background_field = fill(287.5, length(obs_list))
        
        # Compute adaptive error inflation
        inflated_errors = adaptive_error_inflation(obs_list, background_field)
        
        @test length(inflated_errors) == length(obs_list)
        
        # All inflated errors should be >= original errors
        for (i, obs) in enumerate(obs_list)
            @test inflated_errors[i] >= obs.error
            @test inflated_errors[i] <= 3.0 * obs.error  # Reasonable upper bound
        end
        
        # Observations with larger departures should have larger inflation
        for (i, obs) in enumerate(obs_list)
            departure = abs(obs.value - background_field[i])
            if departure > 3.0
                @test inflated_errors[i] > obs.error * 1.2  # Some inflation
            end
        end
    end
end

"""
Test buddy check quality control.

Covers the basic spatial-consistency check, outlier detection against
consistent neighbors, graceful handling of sparse/empty buddy sets, and
multi-level (sounding profile) buddy checks.
"""
@testset "Buddy Check QC" begin
    println("  Testing buddy check QC...")
    
    @testset "Basic Buddy Check Algorithm" begin
        # Create central observation
        central_obs = ConventionalObservation{Float64}(
            obs_type = "temperature",
            value = 288.0,
            error = 1.5,
            latitude = 40.0,
            longitude = -100.0,
            time = DateTime(2024, 1, 1, 12, 0, 0)
        )
        
        # Create surrounding "buddy" observations
        # NOTE(review): unseeded randn(); values vary per run.
        buddy_observations = ConventionalObservation{Float64}[]
        
        # Good buddies - consistent values arranged around the central point
        for i in 1:6
            angle = 2π * i / 6  # Hexagonal pattern
            lat_offset = 0.1 * cos(angle)  # ~11 km spacing
            lon_offset = 0.1 * sin(angle)
            
            buddy_value = 288.0 + randn() * 0.5  # Small variations
            
            buddy = ConventionalObservation{Float64}(
                obs_type = "temperature",
                value = buddy_value,
                error = 1.5,
                latitude = 40.0 + lat_offset,
                longitude = -100.0 + lon_offset,
                time = DateTime(2024, 1, 1, 12, 0, 0)
            )
            
            push!(buddy_observations, buddy)
        end
        
        # Perform buddy check with a 50 km search radius (radius in meters)
        buddy_result = buddy_check(central_obs, buddy_observations, 50.0e3)
        
        @test buddy_result.passed == true
        @test buddy_result.n_buddies >= 5  # Should find most buddies
        @test abs(buddy_result.buddy_mean - 288.0) < 2.0
        @test buddy_result.buddy_std < 2.0  # Consistent buddies
        @test abs(buddy_result.departure) < 2.0 * buddy_result.buddy_std
    end
    
    @testset "Buddy Check with Outlier Detection" begin
        # Central observation that differs from surroundings
        outlier_obs = ConventionalObservation{Float64}(
            obs_type = "temperature",
            value = 295.0,  # 7K warmer than buddies
            error = 1.5,
            latitude = 40.0,
            longitude = -100.0,
            time = DateTime(2024, 1, 1, 12, 0, 0)
        )
        
        # Create consistent buddy observations
        buddy_observations = ConventionalObservation{Float64}[]
        
        for i in 1:8
            angle = 2π * i / 8
            lat_offset = 0.08 * cos(angle)
            lon_offset = 0.08 * sin(angle)
            
            buddy_value = 288.0 + randn() * 0.3  # Consistent around 288K
            
            buddy = ConventionalObservation{Float64}(
                obs_type = "temperature",
                value = buddy_value,
                error = 1.5,
                latitude = 40.0 + lat_offset,
                longitude = -100.0 + lon_offset,
                time = DateTime(2024, 1, 1, 12, 0, 0)
            )
            
            push!(buddy_observations, buddy)
        end
        
        # Perform buddy check - should detect outlier
        outlier_result = buddy_check(outlier_obs, buddy_observations, 40.0e3)
        
        @test outlier_result.passed == false
        @test outlier_result.flag == QC_BUDDY_FAILURE
        @test abs(outlier_result.departure) > 3.0 * outlier_result.buddy_std
        @test outlier_result.buddy_mean < 290.0  # Buddies are cooler
    end
    
    @testset "Sparse Observation Buddy Check" begin
        # Test buddy check when there are few nearby observations
        central_obs = ConventionalObservation{Float64}(
            obs_type = "surface_pressure",
            value = 1013.25,
            error = 2.0,
            latitude = 65.0,  # Arctic location
            longitude = -150.0,
            time = DateTime(2024, 1, 1, 12, 0, 0)
        )
        
        # Only one nearby observation
        sparse_buddies = [ConventionalObservation{Float64}(
            obs_type = "surface_pressure",
            value = 1015.0,
            error = 2.0,
            latitude = 64.8,
            longitude = -149.5,
            time = DateTime(2024, 1, 1, 12, 0, 0)
        )]
        
        # Should handle sparse case gracefully
        sparse_result = buddy_check(central_obs, sparse_buddies, 100.0e3)
        
        @test sparse_result.n_buddies == 1
        @test sparse_result.passed == true  # Single buddy, reasonable value
        @test sparse_result.confidence < 0.8  # Low confidence due to sparse data
        
        # Test with no buddies at all: the check must not error out
        no_buddy_result = buddy_check(central_obs, ConventionalObservation{Float64}[], 100.0e3)
        
        @test no_buddy_result.n_buddies == 0
        @test no_buddy_result.passed == true  # Accept when no buddies available
        @test no_buddy_result.confidence < 0.5
    end
    
    @testset "Multi-Level Buddy Check" begin
        # Test buddy check for upper-air observations at multiple levels
        pressure_levels = [1000.0, 850.0, 700.0, 500.0, 300.0]
        temperatures = [288.0, 283.0, 278.0, 265.0, 245.0]
        
        # Central sounding: one observation per pressure level
        central_profile = ConventionalObservation{Float64}[]
        for (p, t) in zip(pressure_levels, temperatures)
            obs = ConventionalObservation{Float64}(
                obs_type = "temperature",
                value = t,
                error = 1.0,
                latitude = 40.0,
                longitude = -100.0,
                pressure = p,
                time = DateTime(2024, 1, 1, 12, 0, 0)
            )
            push!(central_profile, obs)
        end
        
        # Nearby sounding profiles (one vector of observations per station)
        buddy_profiles = Vector{ConventionalObservation{Float64}}[]
        
        for i in 1:3  # Three nearby stations
            lat_offset = (i-2) * 0.5
            lon_offset = (i-2) * 0.5
            
            profile = ConventionalObservation{Float64}[]
            for (p, t) in zip(pressure_levels, temperatures)
                # Add some realistic profile variations
                t_variation = t + randn() * 0.8
                
                obs = ConventionalObservation{Float64}(
                    obs_type = "temperature",
                    value = t_variation,
                    error = 1.0,
                    latitude = 40.0 + lat_offset,
                    longitude = -100.0 + lon_offset,
                    pressure = p,
                    time = DateTime(2024, 1, 1, 12, 0, 0)
                )
                push!(profile, obs)
            end
            push!(buddy_profiles, profile)
        end
        
        # Perform multi-level buddy check (one result per pressure level)
        profile_results = multilevel_buddy_check(central_profile, buddy_profiles, 200.0e3)
        
        @test length(profile_results) == length(pressure_levels)
        
        # Most levels should pass QC
        passed_count = sum(result.passed for result in profile_results)
        @test passed_count >= 4
        
        # Lower levels should have more buddies (surface networks denser)
        surface_result = profile_results[1]  # 1000 hPa
        upper_result = profile_results[end]   # 300 hPa
        
        @test surface_result.n_buddies >= upper_result.n_buddies
    end
end

"""
Test variational quality control.

Covers the Jo cost-function response to error-inflation weights, VarQC
downweighting of gross outliers, and convergence across observation/
background error combinations.
"""
@testset "Variational Quality Control" begin
    println("  Testing variational QC...")
    
    @testset "VarQC Cost Function Analysis" begin
        # Create test observation
        obs = ConventionalObservation{Float64}(
            obs_type = "temperature",
            value = 290.0,
            error = 1.5,
            latitude = 40.0,
            longitude = -100.0,
            time = DateTime(2024, 1, 1, 12, 0, 0)
        )
        
        # Background value and error
        background_value = 288.5
        background_error = 2.0
        
        # Test different observation weights in cost function.
        # Here `w` inflates the observation-error variance R = w * error^2.
        weight_factors = [0.1, 0.5, 1.0, 2.0, 5.0]
        cost_contributions = Float64[]
        
        for w in weight_factors
            # Compute Jo contribution: 0.5 * (obs - Hx)^2 / (w * error^2)
            innovation = obs.value - background_value
            R_inv = 1.0 / (w * obs.error^2)
            Jo = 0.5 * innovation^2 * R_inv
            push!(cost_contributions, Jo)
        end
        
        # Jo falls as the inflation factor w grows (larger effective R means
        # less trust in the observation), so ascending weights give costs in
        # descending order; reversing them must yield a sorted sequence.
        @test issorted(reverse(cost_contributions))
        
        # Test VarQC weight adjustment
        varqc_result = variational_qc(obs, background_value, background_error)
        
        @test 0.0 <= varqc_result.final_weight <= 1.0
        @test varqc_result.passed == true  # Reasonable departure
        @test varqc_result.cost_reduction > 0.0
    end
    
    @testset "VarQC with Outlier Detection" begin
        # Create outlier observation
        outlier_obs = ConventionalObservation{Float64}(
            obs_type = "temperature",
            value = 310.0,  # Very warm
            error = 1.0,
            latitude = 40.0,
            longitude = -100.0,
            time = DateTime(2024, 1, 1, 12, 0, 0)
        )
        
        background_value = 285.0  # 25K difference
        background_error = 1.5
        
        # VarQC should heavily downweight this observation
        outlier_result = variational_qc(outlier_obs, background_value, background_error)
        
        @test outlier_result.final_weight < 0.1  # Heavily downweighted
        @test outlier_result.passed == false     # Should be flagged
        @test outlier_result.flag == QC_VARQC_OUTLIER
        
        # Test that cost function is reduced by downweighting
        original_cost = 0.5 * (outlier_obs.value - background_value)^2 / outlier_obs.error^2
        weighted_cost = outlier_result.final_weight * original_cost
        
        @test weighted_cost < 0.1 * original_cost
    end
    
    @testset "VarQC Convergence Properties" begin
        # Test that VarQC converges for various observation scenarios
        test_cases = [
            (obs_val=288.0, bg_val=287.5, obs_err=1.0, bg_err=1.5),  # Good obs
            (obs_val=295.0, bg_val=285.0, obs_err=2.0, bg_err=1.0),  # Large departure
            (obs_val=283.0, bg_val=284.0, obs_err=0.5, bg_err=3.0),  # Precise obs
            (obs_val=290.0, bg_val=290.2, obs_err=5.0, bg_err=0.8),  # Imprecise obs
        ]
        
        for (obs_val, bg_val, obs_err, bg_err) in test_cases
            obs = ConventionalObservation{Float64}(
                obs_type = "temperature",
                value = obs_val,
                error = obs_err,
                latitude = 40.0,
                longitude = -100.0,
                time = DateTime(2024, 1, 1, 12, 0, 0)
            )
            
            result = variational_qc(obs, bg_val, bg_err)
            
            # All cases should converge
            @test result.converged == true
            @test result.iterations <= 10  # Reasonable convergence
            @test 0.0 <= result.final_weight <= 1.0
            
            # Final analysis should be between observation and background
            if result.passed && result.final_weight > 0.1
                analysis = result.analysis_value
                @test min(obs_val, bg_val) <= analysis <= max(obs_val, bg_val)
            end
        end
    end
end

"""
Test observation error modeling.

Covers per-observation-type error assignment, density/consistency-based
error inflation, and spatially correlated error handling.
"""
@testset "Observation Error Modeling" begin
    println("  Testing observation error modeling...")
    
    @testset "Error Assignment by Observation Type" begin
        # Different observation types should have different error characteristics
        
        # Surface pressure - typically very accurate
        ps_error = assign_observation_error("surface_pressure", 1013.25, 
                                           latitude=40.0, elevation=100.0)
        @test 0.5 <= ps_error <= 2.5
        
        # Upper-air temperature - varies with level
        temp_error_surface = assign_observation_error("temperature", 288.0, 
                                                     pressure=1000.0)
        temp_error_upper = assign_observation_error("temperature", 220.0, 
                                                   pressure=200.0)
        @test temp_error_upper >= temp_error_surface  # Upper levels less accurate
        
        # Wind observations - typically larger errors
        wind_error = assign_observation_error("wind_speed", 15.0, 
                                             pressure=850.0)
        @test wind_error >= 1.0  # Wind errors typically > 1 m/s
        
        # Satellite radiances - depends on channel
        radiance_error_window = assign_observation_error("amsu_a", 245.0, 
                                                        channel=1)
        radiance_error_water = assign_observation_error("amsu_a", 220.0, 
                                                       channel=18)
        @test radiance_error_water > radiance_error_window  # Water vapor channels noisier
    end
    
    @testset "Adaptive Error Inflation" begin
        # Test error inflation based on observation density and consistency
        # NOTE(review): unseeded RNG — values vary per run.
        
        # Dense, consistent observations - low inflation
        dense_obs = ConventionalObservation{Float64}[]
        for i in 1:20
            obs = ConventionalObservation{Float64}(
                obs_type = "temperature",
                value = 288.0 + randn() * 0.5,  # Very consistent
                error = 1.0,
                latitude = 40.0 + (i-10) * 0.02,
                longitude = -100.0 + (i-10) * 0.02,
                time = DateTime(2024, 1, 1, 12, 0, 0)
            )
            push!(dense_obs, obs)
        end
        
        dense_inflation = compute_error_inflation(dense_obs, 50.0e3)
        @test 0.8 <= dense_inflation <= 1.2  # Minimal inflation
        
        # Sparse, inconsistent observations - higher inflation
        sparse_obs = ConventionalObservation{Float64}[]
        for i in 1:5
            obs = ConventionalObservation{Float64}(
                obs_type = "temperature", 
                value = 288.0 + randn() * 3.0,  # More variable
                error = 1.0,
                latitude = 40.0 + (i-3) * 1.0,  # Wider spacing
                longitude = -100.0 + (i-3) * 1.0,
                time = DateTime(2024, 1, 1, 12, 0, 0)
            )
            push!(sparse_obs, obs)
        end
        
        sparse_inflation = compute_error_inflation(sparse_obs, 50.0e3)
        @test sparse_inflation >= dense_inflation
        @test sparse_inflation <= 3.0  # Reasonable upper bound
    end
    
    @testset "Correlated Error Modeling" begin
        # Test handling of spatially/temporally correlated observation errors
        
        # Create clustered observations (correlated errors expected)
        cluster_center = (lat=45.0, lon=-110.0)
        clustered_obs = ConventionalObservation{Float64}[]
        
        for i in 1:15
            # Tight spatial clustering
            lat_offset = randn() * 0.05  # ~5 km spread
            lon_offset = randn() * 0.05
            
            # Correlated measurement bias
            bias = 0.8  # All observations have same bias
            obs_value = 287.0 + bias + randn() * 0.3
            
            obs = ConventionalObservation{Float64}(
                obs_type = "temperature",
                value = obs_value,
                error = 1.0,
                latitude = cluster_center.lat + lat_offset,
                longitude = cluster_center.lon + lon_offset,
                time = DateTime(2024, 1, 1, 12, 0, 0)
            )
            push!(clustered_obs, obs)
        end
        
        # Compute error correlation matrix (20 km correlation length scale)
        error_correlation = compute_error_correlations(clustered_obs, 20.0e3)
        
        @test size(error_correlation) == (length(clustered_obs), length(clustered_obs))
        
        # Diagonal should be 1.0
        for i in 1:length(clustered_obs)
            @test error_correlation[i, i] ≈ 1.0
        end
        
        # Nearby observations should have high correlation.
        # The `.< 0.99` mask excludes the (unit) diagonal entries.
        avg_correlation = mean(error_correlation[error_correlation .< 0.99])
        @test avg_correlation > 0.3  # Significant correlation expected
        
        # Test that correlated errors are properly inflated
        inflated_errors = apply_correlation_inflation(clustered_obs, error_correlation)
        
        for (i, obs) in enumerate(clustered_obs)
            @test inflated_errors[i] >= obs.error
        end
        
        # Average inflation should be significant for correlated errors
        avg_inflation = mean(inflated_errors ./ [obs.error for obs in clustered_obs])
        @test avg_inflation > 1.2
    end
end

"""
Test cross-validation between QC methods.

Checks that background, buddy, and variational QC agree on obvious
outliers, and measures classification skill against known truth labels.
"""
@testset "QC Method Cross-Validation" begin
    println("  Testing QC method cross-validation...")
    
    @testset "Consistency Between QC Methods" begin
        # Create test dataset with known issues
        mixed_obs = ConventionalObservation{Float64}[]
        background_values = Float64[]
        
        # Good observations
        for i in 1:10
            obs = ConventionalObservation{Float64}(
                obs_type = "temperature",
                value = 288.0 + randn() * 0.8,
                error = 1.2,
                latitude = 40.0 + randn() * 0.1,
                longitude = -100.0 + randn() * 0.1,
                time = DateTime(2024, 1, 1, 12, 0, 0)
            )
            push!(mixed_obs, obs)
            push!(background_values, obs.value + randn() * 0.5)  # Good background
        end
        
        # Add some outliers
        for i in 1:3
            outlier = ConventionalObservation{Float64}(
                obs_type = "temperature",
                value = 298.0 + randn() * 1.0,  # Warm outliers
                error = 1.2,
                latitude = 40.0 + randn() * 0.1,
                longitude = -100.0 + randn() * 0.1,
                time = DateTime(2024, 1, 1, 12, 0, 0)
            )
            push!(mixed_obs, outlier)
            push!(background_values, 287.0 + randn() * 0.5)  # Background disagrees
        end
        
        # Apply different QC methods
        bg_results = [background_departure_check(obs, bg) 
                     for (obs, bg) in zip(mixed_obs, background_values)]
        
        buddy_results = [buddy_check(obs, mixed_obs, 50.0e3) 
                        for obs in mixed_obs]
        
        varqc_results = [variational_qc(obs, bg, 1.5) 
                        for (obs, bg) in zip(mixed_obs, background_values)]
        
        # Count rejections by each method.
        # NOTE(review): `.!x` on a scalar Bool is a scalar broadcast and works,
        # but plain `!result.passed` would be the idiomatic form here.
        bg_rejections = sum(.!result.passed for result in bg_results)
        buddy_rejections = sum(.!result.passed for result in buddy_results)
        varqc_rejections = sum(.!result.passed for result in varqc_results)
        
        @test bg_rejections >= 2  # Should catch outliers
        @test buddy_rejections >= 1  # Buddy check should find some issues
        @test varqc_rejections >= 2  # VarQC should downweight outliers
        
        # Methods should show some consistency for obvious outliers.
        # NOTE(review): `eachindex(mixed_obs)` is preferred over `1:length(...)`.
        consensus_rejections = 0
        for i in 1:length(mixed_obs)
            bg_fail = !bg_results[i].passed
            buddy_fail = !buddy_results[i].passed
            varqc_fail = !varqc_results[i].passed
            
            if bg_fail && (buddy_fail || varqc_fail)
                consensus_rejections += 1
            end
        end
        
        @test consensus_rejections >= 1  # At least some consensus
    end
    
    @testset "QC Performance Metrics" begin
        # Create controlled test case with known "truth"
        n_good = 50
        n_bad = 10
        
        # Generate good observations
        good_obs = ConventionalObservation{Float64}[]
        good_backgrounds = Float64[]
        
        for i in 1:n_good
            true_temp = 288.0 + randn() * 1.0
            obs_error = 1.0
            obs_noise = randn() * obs_error
            bg_error = 1.5
            bg_noise = randn() * bg_error
            
            obs = ConventionalObservation{Float64}(
                obs_type = "temperature",
                value = true_temp + obs_noise,
                error = obs_error,
                latitude = 40.0 + randn() * 2.0,
                longitude = -100.0 + randn() * 2.0,
                time = DateTime(2024, 1, 1, 12, 0, 0)
            )
            push!(good_obs, obs)
            push!(good_backgrounds, true_temp + bg_noise)
        end
        
        # Generate bad observations (gross errors)
        bad_obs = ConventionalObservation{Float64}[]
        bad_backgrounds = Float64[]
        
        for i in 1:n_bad
            true_temp = 288.0
            gross_error = 15.0 * (rand() > 0.5 ? 1.0 : -1.0)  # ±15K error
            
            obs = ConventionalObservation{Float64}(
                obs_type = "temperature",
                value = true_temp + gross_error,
                error = 1.0,
                latitude = 40.0 + randn() * 2.0,
                longitude = -100.0 + randn() * 2.0,
                time = DateTime(2024, 1, 1, 12, 0, 0)
            )
            push!(bad_obs, obs)
            push!(bad_backgrounds, true_temp + randn() * 1.5)
        end
        
        # Combine datasets
        all_obs = vcat(good_obs, bad_obs)
        all_backgrounds = vcat(good_backgrounds, bad_backgrounds)
        truth_labels = vcat(fill(true, n_good), fill(false, n_bad))  # true = good
        
        # Apply background check QC
        bg_qc_results = [background_departure_check(obs, bg) 
                        for (obs, bg) in zip(all_obs, all_backgrounds)]
        bg_predictions = [result.passed for result in bg_qc_results]
        
        # Compute performance metrics
        bg_metrics = compute_qc_metrics(truth_labels, bg_predictions)
        
        @test bg_metrics.true_positive_rate > 0.8  # Should catch most good obs
        @test bg_metrics.true_negative_rate > 0.6  # Should catch most bad obs
        @test bg_metrics.false_positive_rate < 0.3  # Low false alarm rate
        @test bg_metrics.accuracy > 0.75           # Overall accuracy
        
        # VarQC should perform at least as well
        varqc_results = [variational_qc(obs, bg, 1.5) 
                        for (obs, bg) in zip(all_obs, all_backgrounds)]
        varqc_predictions = [result.passed for result in varqc_results]
        
        varqc_metrics = compute_qc_metrics(truth_labels, varqc_predictions)
        
        @test varqc_metrics.accuracy >= bg_metrics.accuracy - 0.1  # Comparable or better
    end
end

"""
Test performance with large datasets.

Times background-departure QC on a 5000-observation synthetic global
dataset and buddy checks on a subsample (buddy checks are O(n²)), and
verifies that batched QC processing does not grow live memory unreasonably.
"""
@testset "QC Performance Testing" begin
    println("  Testing QC performance with large datasets...")
    
    @testset "Large Dataset Processing" begin
        # Create large observation dataset
        n_obs = 5000
        large_obs_set = ConventionalObservation{Float64}[]
        background_field = Float64[]
        
        for i in 1:n_obs
            # Realistic global distribution
            lat = -90.0 + 180.0 * rand()
            lon = -180.0 + 360.0 * rand()
            
            # Temperature varies with latitude
            temp_base = 288.0 - 30.0 * abs(lat/90.0) + randn() * 3.0
            bg_value = temp_base + randn() * 2.0
            
            obs = ConventionalObservation{Float64}(
                obs_type = "temperature",
                value = temp_base + randn() * 1.5,
                error = 1.0 + 0.5 * rand(),
                latitude = lat,
                longitude = lon,
                time = DateTime(2024, 1, 1, 12, 0, 0)
            )
            
            push!(large_obs_set, obs)
            push!(background_field, bg_value)
        end
        
        # Time background departure QC
        println("    Timing background departure QC for $(n_obs) observations...")
        bg_time = @elapsed begin
            bg_results = [background_departure_check(obs, bg) 
                         for (obs, bg) in zip(large_obs_set, background_field)]
        end
        
        @test length(bg_results) == n_obs
        @test bg_time < 5.0  # Should complete within 5 seconds
        # BUGFIX: `$(bg_time:.2f)` was Python format-spec syntax; in Julia it
        # parsed as a range with juxtaposed `.2 * f` and threw UndefVarError
        # at run time. Round explicitly instead.
        println("    Background QC time: $(round(bg_time, digits=2)) seconds")
        
        # Time buddy check (more expensive)
        sample_size = min(500, n_obs)  # Sample for buddy check due to O(n²) complexity
        sample_obs = large_obs_set[1:sample_size]
        
        println("    Timing buddy check QC for $(sample_size) observations...")
        buddy_time = @elapsed begin
            buddy_results = [buddy_check(obs, sample_obs, 100.0e3) 
                           for obs in sample_obs[1:min(50, sample_size)]]
        end
        
        @test length(buddy_results) <= 50
        @test buddy_time < 10.0  # Should be reasonably fast
        # BUGFIX: same Python-style format spec replaced with round(...).
        println("    Buddy check time: $(round(buddy_time, digits=2)) seconds for 50 observations")
        
        # Check that QC results are reasonable
        passed_count = sum(result.passed for result in bg_results)
        rejection_rate = 1.0 - passed_count / n_obs
        
        @test 0.02 <= rejection_rate <= 0.15  # Typical rejection rates
    end
    
    @testset "Memory Usage Testing" begin
        # Test that QC operations don't create excessive memory usage
        
        initial_memory = Base.gc_live_bytes()
        
        # Process observations in batches
        batch_size = 1000
        total_processed = 0
        
        for batch in 1:5
            batch_obs = ConventionalObservation{Float64}[]
            
            # Create batch
            for i in 1:batch_size
                obs = ConventionalObservation{Float64}(
                    obs_type = "temperature",
                    value = 288.0 + randn() * 5.0,
                    error = 1.0 + rand() * 1.0,
                    latitude = -60.0 + 120.0 * rand(),
                    longitude = -180.0 + 360.0 * rand(),
                    time = DateTime(2024, 1, 1, 12, 0, 0)
                )
                push!(batch_obs, obs)
            end
            
            # Process batch against a randomly perturbed background value
            batch_results = [background_departure_check(obs, 287.0 + randn() * 2.0) 
                           for obs in batch_obs]
            
            total_processed += length(batch_results)
            
            # Force garbage collection so gc_live_bytes reflects live data only
            GC.gc()
        end
        
        final_memory = Base.gc_live_bytes()
        memory_growth = final_memory - initial_memory
        
        @test total_processed == 5 * batch_size
        
        # Memory growth should be reasonable (less than 100 MB)
        @test memory_growth < 100 * 1024 * 1024
    end
end

"""Helper functions for QC testing"""

"""
    QCResult{T<:Real}

Outcome of a single-observation QC check: pass/fail status, a QC `flag`
code, the observation-minus-background `departure`, the departure
normalized by the observation error, and a `confidence` score
(defaults to 1.0).
"""
struct QCResult{T<:Real}
    passed::Bool
    flag::Int
    departure::T
    normalized_departure::T
    confidence::T

    function QCResult{T}(passed, flag, departure, normalized_departure,
                         confidence=1.0) where T
        return new{T}(passed, flag, departure, normalized_departure, confidence)
    end
end

"""
    BuddyResult{T<:Real}

Outcome of a spatial buddy check: pass/fail status, QC `flag` code, the
number of neighboring observations used (`n_buddies`), their mean and
standard deviation, the central observation's `departure` from the buddy
mean, and a `confidence` score.
"""
struct BuddyResult{T<:Real}
    passed::Bool
    flag::Int
    n_buddies::Int
    buddy_mean::T
    buddy_std::T
    departure::T
    confidence::T
    # The previous inner constructor merely forwarded every argument to
    # `new`, duplicating the default constructor while suppressing the
    # convenient untyped `BuddyResult(...)` outer constructor. Removed;
    # `BuddyResult{T}(args...)` continues to work unchanged.
end

"""
    VarQCResult{T<:Real}

Outcome of variational QC: pass/fail status, QC `flag` code, the final
observation weight in [0, 1], the resulting `analysis_value`, the
`cost_reduction` achieved by reweighting, and solver diagnostics
(`converged`, `iterations`).
"""
struct VarQCResult{T<:Real}
    passed::Bool
    flag::Int
    final_weight::T
    analysis_value::T
    cost_reduction::T
    converged::Bool
    iterations::Int
    # The previous inner constructor merely forwarded every argument to
    # `new`, duplicating the default constructor while suppressing the
    # untyped `VarQCResult(...)` outer constructor. Removed;
    # `VarQCResult{T}(args...)` continues to work unchanged.
end

"""
    QCMetrics{T<:Real}

Binary-classification skill scores for a QC method evaluated against
known truth labels (true = good observation).

NOTE(review): field semantics are inferred from the names; confirm
against `compute_qc_metrics`, which is defined elsewhere.
"""
struct QCMetrics{T<:Real}
    true_positive_rate::T   # presumably: fraction of good obs accepted
    true_negative_rate::T   # presumably: fraction of bad obs rejected
    false_positive_rate::T  # presumably: fraction of bad obs wrongly accepted
    false_negative_rate::T  # presumably: fraction of good obs wrongly rejected
    accuracy::T             # overall fraction classified correctly
    precision::T
    recall::T
    f1_score::T
end

# QC flag constants, returned in the `flag` field of the result structs above.
const QC_GOOD = 0           # observation passed all checks
const QC_SUSPECT = 1        # marginal; flagged for further scrutiny
const QC_GROSS_ERROR = 2    # failed background-departure gross-error check
const QC_BUDDY_FAILURE = 3  # inconsistent with neighboring observations
const QC_VARQC_OUTLIER = 4  # heavily downweighted by variational QC

println("  ✓ All quality control tests completed successfully")