"""
    test_spatial_processing.jl

Comprehensive test suite for the SpatialProcessing module.
Tests all major functionality including spatial thinning, super-observation creation,
and spatial indexing algorithms.
"""

using Test
using Random
using Dates
using LinearAlgebra
using Statistics

# Include the module (adjust path as needed)
include("SpatialProcessing.jl")
using .SpatialProcessing

# Mock observation types for testing
# Minimal 3-D location carrier: horizontal position in degrees plus a vertical
# pressure coordinate (units not shown here — presumably hPa; confirm against
# SpatialProcessing's distance/level conventions).
struct MockObservationLocation{T}
    longitude::T
    latitude::T
    pressure::T
end

# Per-observation quality scores. The generators below fill every field with
# values in [0, 1); only `overall_quality` is asserted against thresholds in
# the thinning tests.
struct MockQualityMetrics{T}
    overall_quality::T
    background_check::T
    spatial_consistency::T
    temporal_consistency::T
    instrument_quality::T
end

# Identifying metadata; station IDs are synthesized as "STATION_i"/"CLUSTER_i".
struct MockObservationMetadata
    station_id::String
    instrument_type::String
end

# QC pass/fail flags; the test generators always construct these as all-false.
struct MockQualityFlags
    gross_error::Bool
    buddy_check::Bool
    background_check::Bool
end

# Concrete stand-in for a real observation. Subtypes AbstractObservation{T},
# which is expected to be exported by the included SpatialProcessing module —
# the field layout here must satisfy whatever accessor contract that module
# assumes (location/time/value/error/quality_metrics are all exercised below).
struct MockObservation{T} <: AbstractObservation{T}
    observation_type::String
    location::MockObservationLocation{T}
    time::DateTime
    value::T
    error::T
    quality_metrics::MockQualityMetrics{T}
    metadata::MockObservationMetadata
    qc_flags::MockQualityFlags
end

# Test data generation functions
# Build `n` synthetic observations with globally-spread random positions,
# times within a 6-hour window, and uniformly random quality metrics.
# The global RNG is re-seeded on every call, so the output is reproducible.
function generate_test_observations(n::Int = 100; T::Type = Float64)
    Random.seed!(1234)
    reference_time = DateTime(2023, 1, 1, 12, 0, 0)

    observations = MockObservation{T}[]
    for idx in 1:n
        # Uniform position over the full globe, plus a near-surface
        # pressure level in [1000, 1100).
        longitude = -180.0 + 360.0 * rand(T)
        latitude = -90.0 + 180.0 * rand(T)
        plevel = 1000.0 + rand(T) * 100.0
        loc = MockObservationLocation{T}(longitude, latitude, plevel)

        # Observation time scattered up to six hours past the reference.
        when = reference_time + Dates.Minute(rand(0:360))

        # Gaussian value about 15 with spread 10; error uniform in [0.5, 1.0).
        obs_value = 15.0 + 10.0 * randn(T)
        obs_error = 0.5 + 0.5 * rand(T)

        # Argument evaluation is left-to-right, so the RNG draw order matches
        # the obvious sequential version: overall first, then the four checks.
        metrics = MockQualityMetrics{T}(rand(T), rand(T), rand(T), rand(T), rand(T))

        meta = MockObservationMetadata("STATION_$idx", "TEST_INSTRUMENT")
        flags = MockQualityFlags(false, false, false)

        push!(observations, MockObservation{T}(
            "test_observation", loc, when, obs_value, obs_error,
            metrics, meta, flags,
        ))
    end

    return observations
end

"""
    generate_clustered_observations(n::Int = 50; T::Type = Float64)

Generate observations clustered around specific locations for super-obs testing.

Creates `n ÷ 3` observations around each of three fixed cluster centers, with
small Gaussian positional scatter and high (`≥ 0.8`) overall quality scores.
The global RNG is re-seeded on every call, so the output is reproducible.
"""
function generate_clustered_observations(n::Int = 50; T::Type = Float64)
    # NOTE: the original placed this description as a string *inside* the
    # body, where it is a discarded expression, not a docstring; it now lives
    # above the signature where Julia actually attaches documentation.
    observations = MockObservation{T}[]
    
    Random.seed!(5678)
    base_time = DateTime(2023, 1, 1, 12, 0, 0)
    
    # Define cluster centers
    centers = [(-105.0, 40.0), (-95.0, 35.0), (-85.0, 42.0)]
    
    for center in centers
        center_lon, center_lat = center
        
        # Generate observations around each center
        for i in 1:(n÷3)
            # Small random offset from center
            lon = center_lon + 0.5 * randn(T)
            lat = center_lat + 0.5 * randn(T)
            pressure = 1000.0 + rand(T) * 50.0
            
            location = MockObservationLocation{T}(lon, lat, pressure)
            time_offset = Dates.Minute(rand(0:60))
            obs_time = base_time + time_offset
            
            value = 20.0 + 5.0 * randn(T)
            error = 0.3 + 0.2 * rand(T)
            
            quality_metrics = MockQualityMetrics{T}(
                0.8 + 0.2 * rand(T),  # High quality
                rand(T), rand(T), rand(T), rand(T)
            )
            
            metadata = MockObservationMetadata("CLUSTER_$(length(observations)+1)", "CLUSTER_INSTRUMENT")
            qc_flags = MockQualityFlags(false, false, false)
            
            obs = MockObservation{T}(
                "clustered_observation", location, obs_time, value, error,
                quality_metrics, metadata, qc_flags
            )
            
            push!(observations, obs)
        end
    end
    
    return observations
end

# Test suite
# Top-level suite. Every inner @testset calls into the included
# SpatialProcessing module; the assertions below document the expected public
# API surface (build_spatial_index, spatial_thinning, create_super_observations,
# etc.) as much as they verify behavior.
@testset "SpatialProcessing Tests" begin
    
    # Sanity check on the test-data generator itself, not the module.
    @testset "Basic Functionality" begin
        # Test data generation
        obs = generate_test_observations(20)
        @test length(obs) == 20
        @test all(o -> isa(o, MockObservation{Float64}), obs)
    end
    
    @testset "Spatial Indexing" begin
        obs = generate_test_observations(50)
        
        # Test KD-Tree indexing
        index = build_spatial_index(obs, KD_TREE)
        @test index.method == KD_TREE
        @test index.tree !== nothing
        @test length(index.observations) == 50
        
        # Test spatial search
        # NOTE(review): radius units are not stated here — presumably km,
        # matching haversine_distance below; confirm against the module.
        center = (0.0, 0.0)
        radius = 100.0
        nearby_indices = spatial_search(index, center, radius)
        @test isa(nearby_indices, Vector{Int})
        @test all(i -> 1 <= i <= length(obs), nearby_indices)
        
        # Test range query
        # Bounds tuple order assumed (lon_min, lon_max, lat_min, lat_max).
        bounds = (-10.0, 10.0, -10.0, 10.0)
        region_indices = range_query(index, bounds)
        @test isa(region_indices, Vector{Int})
        
        # Test grid-hash indexing
        grid_index = build_spatial_index(obs, GRID_HASH, (5.0, 5.0))
        @test grid_index.method == GRID_HASH
        @test !isempty(grid_index.grid_hash)
    end
    
    @testset "Spatial Thinning" begin
        obs = generate_test_observations(100)
        
        # Test spatial grid thinning
        config = ThinningConfig{Float64}(
            method = SPATIAL_GRID,
            grid_spacing = (50.0, 50.0),
            quality_threshold = 0.3,
            max_observations = 50
        )
        
        # spatial_thinning must return both the surviving observations and
        # their indices into the input vector, in matching order.
        thinned_obs, indices = spatial_thinning(obs, config)
        @test length(thinned_obs) <= length(obs)
        @test length(thinned_obs) <= config.max_observations
        @test all(i -> 1 <= i <= length(obs), indices)
        @test length(thinned_obs) == length(indices)
        
        # Test quality priority thinning
        quality_config = ThinningConfig{Float64}(
            method = QUALITY_PRIORITY,
            grid_spacing = (100.0, 100.0),
            quality_threshold = 0.5,
            max_observations = 30
        )
        
        # Every survivor must clear the configured quality threshold.
        quality_thinned, quality_indices = spatial_thinning(obs, quality_config)
        @test length(quality_thinned) <= 30
        @test all(o -> o.quality_metrics.overall_quality >= 0.5, quality_thinned)
        
        # Test random statistical thinning
        random_config = ThinningConfig{Float64}(
            method = RANDOM_STATISTICAL,
            max_observations = 25,
            preserve_distribution = true,
            random_seed = 9999
        )
        
        random_thinned, _ = spatial_thinning(obs, random_config)
        @test length(random_thinned) <= 25
    end
    
    @testset "Temporal Thinning" begin
        obs = generate_test_observations(60)
        
        config = ThinningConfig{Float64}(
            temporal_interval = 2.0,  # 2 hours
            quality_threshold = 0.0,
            max_observations = 20
        )
        
        temporal_thinned, indices = temporal_thinning(obs, config)
        @test length(temporal_thinned) <= length(obs)
        @test length(temporal_thinned) <= config.max_observations
    end
    
    @testset "Super-Observation Creation" begin
        # Clustered data guarantees at least one group dense enough to merge.
        clustered_obs = generate_clustered_observations(60)
        
        # Test simple average super-observations
        config = SuperObservationConfig{Float64}(
            method = SIMPLE_AVERAGE,
            radius = 100.0,  # Large radius to capture clusters
            min_observations = 3,
            max_observations = 15
        )
        
        # constituent_map is expected to map each super-obs to the indices of
        # the raw observations folded into it.
        super_obs, constituent_map = create_super_observations(clustered_obs, config)
        @test length(super_obs) >= 1
        @test length(constituent_map) == length(super_obs)
        
        # Verify each super-observation has minimum constituents
        for (idx, constituents) in constituent_map
            @test length(constituents) >= config.min_observations
            @test length(constituents) <= config.max_observations
        end
        
        # Test quality-weighted super-observations
        quality_config = SuperObservationConfig{Float64}(
            method = QUALITY_WEIGHTED,
            radius = 50.0,
            min_observations = 2,
            quality_weighting = true,
            error_weighting = false
        )
        
        quality_super_obs, _ = create_super_observations(clustered_obs, quality_config)
        @test length(quality_super_obs) >= 1
        
        # Test error-weighted super-observations
        error_config = SuperObservationConfig{Float64}(
            method = ERROR_WEIGHTED,
            radius = 75.0,
            min_observations = 3,
            error_weighting = true,
            error_inflation = 1.2
        )
        
        error_super_obs, _ = create_super_observations(clustered_obs, error_config)
        @test length(error_super_obs) >= 1
    end
    
    @testset "Observation Distribution Optimization" begin
        obs = generate_test_observations(200)
        
        # Weights presumably trade off quality vs. spatial coverage and sum
        # to 1.0 here — whether normalization is required is not visible.
        target_count = 50
        quality_weight = 0.7
        spatial_weight = 0.3
        
        optimized_obs, stats = optimize_observation_distribution(
            obs, target_count, quality_weight, spatial_weight
        )
        
        # Unlike thinning, optimization must hit target_count exactly.
        @test length(optimized_obs) == target_count
        @test stats["initial_count"] == length(obs)
        @test stats["final_count"] == target_count
        @test stats["reduction_ratio"] == target_count / length(obs)
        @test haskey(stats, "average_quality")
        @test haskey(stats, "spatial_coverage")
    end
    
    @testset "Processing Pipeline" begin
        obs = generate_test_observations(100)
        
        # Create simple pipeline
        # Single-stage pipeline: stages and configs are parallel vectors.
        stages = [spatial_thinning]
        configs = [ThinningConfig{Float64}(
            method = SPATIAL_GRID,
            grid_spacing = (100.0, 100.0),
            max_observations = 30
        )]
        
        pipeline = ProcessingPipeline{Float64}(stages, configs, validation=true, parallel=false)
        
        processed_obs, stats = process_observations(obs, pipeline)
        
        @test length(processed_obs) <= 30
        @test stats["initial_count"] == length(obs)
        @test stats["final_count"] == length(processed_obs)
        @test length(stats["stage_results"]) == 1
        @test stats["stage_results"][1]["success"] == true
    end
    
    @testset "Validation Functions" begin
        # The "processed" set here is independently generated, not actually
        # derived from the original — the validator is only checked for its
        # report structure, not for detecting a real processing lineage.
        original_obs = generate_test_observations(100)
        processed_obs = generate_test_observations(50)
        
        validation_results = validate_spatial_processing(processed_obs, original_obs)
        
        @test haskey(validation_results, "valid")
        @test haskey(validation_results, "warnings")
        @test haskey(validation_results, "errors")
        @test haskey(validation_results, "reduction_ratio")
        
        # Test empty processed observations
        empty_validation = validate_spatial_processing(MockObservation{Float64}[], original_obs)
        @test empty_validation["valid"] == false
        @test "Processed observation set is empty" in empty_validation["errors"]
    end
    
    @testset "Utility Functions" begin
        # Test haversine distance
        coord1 = (0.0, 0.0)
        coord2 = (1.0, 1.0)
        dist = haversine_distance(coord1, coord2)
        @test dist > 0.0
        @test isa(dist, Float64)
        
        # Test distance between same points
        same_dist = haversine_distance(coord1, coord1)
        @test same_dist ≈ 0.0 atol=1e-10
        
        # Test observation density calculation
        # Density is binned per grid cell, keyed by integer cell coordinates.
        obs = generate_test_observations(20)
        density = calculate_observation_density(obs, (10.0, 10.0))
        @test isa(density, Dict{Tuple{Int,Int},Int})
        
        # Test spatial coverage calculation
        coverage = calculate_spatial_coverage(obs)
        @test haskey(coverage, "longitude_range")
        @test haskey(coverage, "latitude_range")
        @test haskey(coverage, "total_area")
        @test haskey(coverage, "average_spacing_km")
        @test haskey(coverage, "observation_density")
    end
    
    @testset "Edge Cases" begin
        # Test empty observation list
        empty_obs = MockObservation{Float64}[]
        
        # Empty spatial thinning
        # Default-constructed configs must be usable as-is (see
        # "Configuration Validation" below for the expected defaults).
        config = ThinningConfig{Float64}()
        thinned_empty, indices_empty = spatial_thinning(empty_obs, config)
        @test isempty(thinned_empty)
        @test isempty(indices_empty)
        
        # Empty super-observation creation
        super_config = SuperObservationConfig{Float64}()
        super_empty, map_empty = create_super_observations(empty_obs, super_config)
        @test isempty(super_empty)
        @test isempty(map_empty)
        
        # Single observation
        # build_spatial_index must also work without an explicit method arg.
        single_obs = generate_test_observations(1)
        single_index = build_spatial_index(single_obs)
        @test length(single_index.observations) == 1
        
        # Search with single observation
        search_results = spatial_search(single_index, (0.0, 0.0), 1000.0)
        @test length(search_results) <= 1
    end
    
    @testset "Configuration Validation" begin
        # Test default configurations
        default_thinning = ThinningConfig{Float64}()
        @test default_thinning.method == SPATIAL_GRID
        @test default_thinning.grid_spacing == (50.0, 50.0)
        
        default_super = SuperObservationConfig{Float64}()
        @test default_super.method == QUALITY_WEIGHTED
        @test default_super.radius == 25.0
        @test default_super.min_observations == 2
        
        # Test custom configurations
        custom_thinning = ThinningConfig{Float64}(
            method = ADAPTIVE_DENSITY,
            grid_spacing = (200.0, 200.0),
            quality_threshold = 0.8,
            max_observations = 1000,
            adaptive_params = Dict("density_threshold" => 10.0)
        )
        @test custom_thinning.method == ADAPTIVE_DENSITY
        @test custom_thinning.adaptive_params["density_threshold"] == 10.0
    end
end

println("All SpatialProcessing tests completed successfully!")