#!/usr/bin/env julia

"""
    test_data_formats_comprehensive.jl

Comprehensive test suite for data format processing components in GSI.
Tests BUFR/PrepBUFR decoding, observation data parsing, and format validation.

Test Coverage:
- BUFR file parsing and decoding
- PrepBUFR observation extraction  
- Data format validation and error handling
- Memory management and performance
- Integration with observation type system
- Edge cases and error conditions
"""

using Test
using GSICoreAnalysis
using Dates
using Statistics

# Import test utilities
include("test_data/mock_data_generator.jl")
using .MockDataGenerator

"""Test basic data format functionality"""
@testset "Data Format Basic Operations" begin
    println("  Testing data format basic operations...")
    
    @testset "Mock BUFR-like Data Processing" begin
        # Create mock BUFR-like data structure
        mock_bufr_data = Dict{String, Any}(
            "header" => Dict(
                "message_type" => "ADPUPA",  # Radiosonde
                "subtype" => 120,
                "station_id" => "72305",
                "observation_time" => DateTime(2024, 1, 1, 12, 0, 0),
                "latitude" => 40.77,
                "longitude" => -73.88,
                "elevation" => 57.0
            ),
            "observations" => [
                Dict("pressure" => 100000.0, "temperature" => 287.2, "humidity" => 0.012, "wind_u" => 5.2, "wind_v" => -2.1),
                Dict("pressure" => 85000.0, "temperature" => 279.8, "humidity" => 0.008, "wind_u" => 8.5, "wind_v" => -1.8),
                Dict("pressure" => 70000.0, "temperature" => 268.1, "humidity" => 0.004, "wind_u" => 12.3, "wind_v" => -0.5)
            ]
        )
        
        # Test data parsing
        @test haskey(mock_bufr_data, "header")
        @test haskey(mock_bufr_data, "observations")
        @test mock_bufr_data["header"]["message_type"] == "ADPUPA"
        @test length(mock_bufr_data["observations"]) == 3
        
        # Test observation extraction
        for obs in mock_bufr_data["observations"]
            @test haskey(obs, "pressure")
            @test haskey(obs, "temperature")
            @test obs["pressure"] > 0.0
            @test obs["temperature"] > 0.0
        end
    end
    
    @testset "Mock PrepBUFR-like Data Processing" begin
        # Create mock PrepBUFR-like data structure
        mock_prepbufr_data = Dict{String, Any}(
            "header" => Dict(
                "message_type" => "ADPSFC",  # Surface observation
                "station_id" => "KNYC",
                "observation_time" => DateTime(2024, 1, 1, 15, 0, 0),
                "latitude" => 40.78,
                "longitude" => -73.97,
                "elevation" => 10.0
            ),
            "observations" => Dict(
                "surface_pressure" => (101325.0, 2.0, 0),  # (value, error, qc_flag)
                "temperature" => (288.15, 1.5, 0),
                "dewpoint" => (280.0, 2.0, 0),
                "wind_u" => (3.2, 1.0, 0),
                "wind_v" => (-1.8, 1.0, 0)
            ),
            "quality_marks" => Dict{String, Int}(
                "gross_error" => 0,
                "data_level" => 1,
                "usage_flag" => 1
            )
        )
        
        # Test PrepBUFR structure
        @test haskey(mock_prepbufr_data, "header")
        @test haskey(mock_prepbufr_data, "observations")
        @test haskey(mock_prepbufr_data, "quality_marks")
        
        # Test observation data with error estimates
        for (var, data) in mock_prepbufr_data["observations"]
            value, error, qc = data
            @test value isa Real
            @test error > 0.0
            @test qc >= 0
        end
        
        # Test quality marks
        @test mock_prepbufr_data["quality_marks"]["gross_error"] == 0
        @test mock_prepbufr_data["quality_marks"]["usage_flag"] == 1
    end
    
    @testset "Mock Data Generator Integration" begin
        # Test integration with mock data generator
        config = MockConfig{Float64}(
            n_surface = 100,
            n_radiosonde = 20,
            n_aircraft = 50,
            random_seed = 123
        )
        
        mock_observations = generate_mock_observations(config)
        
        # Test that all observation types were generated
        @test haskey(mock_observations, "surface")
        @test haskey(mock_observations, "radiosonde")
        @test haskey(mock_observations, "aircraft")
        @test haskey(mock_observations, "satellite_ir")
        @test haskey(mock_observations, "gps_ro")
        
        # Test surface observations
        @test length(mock_observations["surface"]) == config.n_surface
        for obs in mock_observations["surface"][1:5]  # Check first 5
            @test obs["obs_type"] == "surface"
            @test haskey(obs, "variables")
            @test haskey(obs, "errors")
            @test haskey(obs, "location")
            @test haskey(obs, "time")
        end
        
        # Test radiosonde observations
        @test length(mock_observations["radiosonde"]) == config.n_radiosonde
        for obs in mock_observations["radiosonde"][1:3]  # Check first 3
            @test obs["obs_type"] == "radiosonde"
            @test haskey(obs, "profile")
            @test length(obs["profile"]) > 0
        end
    end
end

"""Test data validation and error checking"""
@testset "Data Validation" begin
    println("  Testing data validation...")
    
    @testset "Physical Bounds Checking" begin
        # Test temperature bounds
        @test is_valid_temperature(288.15)  # Valid temperature
        @test is_valid_temperature(173.15)  # Very cold but valid
        @test is_valid_temperature(333.15)  # Hot but valid
        @test !is_valid_temperature(100.0)  # Too cold
        @test !is_valid_temperature(400.0)  # Too hot
        
        # Test pressure bounds  
        @test is_valid_pressure(101325.0)  # Sea level pressure
        @test is_valid_pressure(30000.0)   # High altitude
        @test !is_valid_pressure(150000.0) # Too high
        @test !is_valid_pressure(-1000.0)  # Negative pressure
        
        # Test coordinate bounds
        @test is_valid_latitude(45.0)
        @test is_valid_latitude(-89.9)
        @test is_valid_latitude(90.0)
        @test !is_valid_latitude(95.0)
        @test !is_valid_latitude(-100.0)
        
        @test is_valid_longitude(180.0)
        @test is_valid_longitude(-179.9)
        @test is_valid_longitude(0.0)
        @test !is_valid_longitude(190.0)
        @test !is_valid_longitude(-185.0)
        
        # Test humidity bounds
        @test is_valid_specific_humidity(0.020)  # 20 g/kg
        @test is_valid_specific_humidity(0.001)  # Dry
        @test !is_valid_specific_humidity(-0.001) # Negative
        @test !is_valid_specific_humidity(0.100)  # Too high (100 g/kg)
    end
    
    @testset "Data Consistency Checking" begin
        # Create test observation with consistent data
        consistent_obs = Dict(
            "temperature" => 288.15,  # 15°C
            "dewpoint" => 278.15,     # 5°C (consistent with temp)
            "surface_pressure" => 101325.0,
            "elevation" => 100.0
        )
        
        @test is_consistent_temperature_dewpoint(
            consistent_obs["temperature"], 
            consistent_obs["dewpoint"]
        )
        
        # Test inconsistent temperature and dewpoint
        @test !is_consistent_temperature_dewpoint(278.15, 288.15)  # Dewpoint > temp
        
        # Test pressure-elevation consistency
        expected_pressure = 101325.0 * exp(-0.0001225 * consistent_obs["elevation"])
        @test abs(consistent_obs["surface_pressure"] - expected_pressure) / expected_pressure < 0.1
    end
    
    @testset "Quality Control Flag Validation" begin
        # Test valid QC flags
        valid_flags = [0, 1, 2, 3, 4, 5]  # Typical QC flag range
        for flag in valid_flags
            @test is_valid_qc_flag(flag)
        end
        
        # Test invalid QC flags
        invalid_flags = [-1, 10, 999]
        for flag in invalid_flags
            @test !is_valid_qc_flag(flag)
        end
        
        # Test QC flag interpretation
        @test interpret_qc_flag(0) == "good"
        @test interpret_qc_flag(1) == "good"
        @test interpret_qc_flag(2) == "neutral"
        @test interpret_qc_flag(3) == "probably_bad"
        @test interpret_qc_flag(4) == "bad"
        @test interpret_qc_flag(15) == "rejected"
    end
end

"""Test observation type conversion"""
@testset "Observation Type Conversion" begin
    println("  Testing observation type conversion...")
    
    @testset "BUFR to Internal Format Conversion" begin
        # Mock BUFR radiosonde data
        bufr_radiosonde = Dict(
            "header" => Dict(
                "message_type" => "ADPUPA",
                "station_id" => "72403",
                "latitude" => 39.95,
                "longitude" => -75.17,
                "elevation" => 9.0,
                "observation_time" => DateTime(2024, 1, 1, 0, 0, 0)
            ),
            "levels" => [
                Dict("pressure" => 100000.0, "temperature" => 286.2, "humidity" => 0.014, "wind_u" => 2.1, "wind_v" => -0.8),
                Dict("pressure" => 92500.0, "temperature" => 282.8, "humidity" => 0.011, "wind_u" => 3.2, "wind_v" => -1.2),
                Dict("pressure" => 85000.0, "temperature" => 278.1, "humidity" => 0.008, "wind_u" => 4.8, "wind_v" => -1.8)
            ]
        )
        
        # Convert to internal radiosonde observation format
        internal_obs = convert_bufr_to_radiosonde(bufr_radiosonde)
        
        @test internal_obs["obs_type"] == "radiosonde"
        @test internal_obs["location"]["station_id"] == "72403"
        @test internal_obs["location"]["latitude"] == 39.95
        @test internal_obs["location"]["longitude"] == -75.17
        @test length(internal_obs["profile"]) == 3
        
        # Check profile data
        for (i, level) in enumerate(internal_obs["profile"])
            original_level = bufr_radiosonde["levels"][i]
            @test level["pressure"] == original_level["pressure"]
            @test level["temperature"] == original_level["temperature"]
            @test level["humidity"] == original_level["humidity"]
            @test level["wind_u"] == original_level["wind_u"]
            @test level["wind_v"] == original_level["wind_v"]
        end
    end
    
    @testset "PrepBUFR to Internal Format Conversion" begin
        # Mock PrepBUFR surface data
        prepbufr_surface = Dict(
            "header" => Dict(
                "message_type" => "ADPSFC",
                "station_id" => "KJFK",
                "latitude" => 40.64,
                "longitude" => -73.78,
                "elevation" => 4.0,
                "observation_time" => DateTime(2024, 1, 1, 18, 0, 0)
            ),
            "data" => Dict(
                "surface_pressure" => (101280.0, 150.0, 1),
                "temperature" => (289.2, 1.2, 1),
                "dewpoint" => (283.8, 1.8, 1),
                "wind_u" => (6.2, 1.5, 1),
                "wind_v" => (2.8, 1.5, 1),
                "visibility" => (10000.0, 1000.0, 1)
            )
        )
        
        # Convert to internal surface observation format
        internal_obs = convert_prepbufr_to_surface(prepbufr_surface)
        
        @test internal_obs["obs_type"] == "surface"
        @test internal_obs["location"]["station_id"] == "KJFK"
        @test internal_obs["location"]["elevation"] == 4.0
        
        # Check converted variables
        @test internal_obs["variables"]["temperature"] == 289.2
        @test internal_obs["variables"]["surface_pressure"] == 101280.0
        @test internal_obs["errors"]["temperature"] == 1.2
        @test internal_obs["errors"]["surface_pressure"] == 150.0
        
        # Check quality flags
        @test all(qc == 1 for qc in values(internal_obs["qc_flags"]))
    end
    
    @testset "Satellite Data Conversion" begin
        # Mock satellite radiance data
        satellite_data = Dict(
            "header" => Dict(
                "satellite_id" => "NOAA-20",
                "sensor_id" => "ATMS",
                "scan_line" => 1234,
                "scan_position" => 15,
                "observation_time" => DateTime(2024, 1, 1, 6, 0, 0),
                "latitude" => 45.5,
                "longitude" => -122.3,
                "satellite_zenith_angle" => 28.5,
                "satellite_azimuth_angle" => 185.2
            ),
            "channels" => Dict(
                1 => (245.8, 0.8, 0),  # (brightness_temp, error, qc)
                2 => (238.2, 0.9, 0),
                3 => (225.1, 1.2, 0),
                4 => (215.6, 1.5, 1)
            )
        )
        
        # Convert to internal satellite observation format
        internal_obs = convert_satellite_to_internal(satellite_data)
        
        @test internal_obs["obs_type"] == "satellite_radiance"
        @test internal_obs["metadata"]["satellite_id"] == "NOAA-20"
        @test internal_obs["metadata"]["sensor_id"] == "ATMS"
        @test internal_obs["location"]["latitude"] == 45.5
        @test internal_obs["location"]["satellite_zenith_angle"] == 28.5
        
        # Check channel data conversion
        @test length(internal_obs["channels"]) == 4
        for (channel, data) in internal_obs["channels"]
            @test haskey(data, "brightness_temperature")
            @test haskey(data, "error")
            @test haskey(data, "qc_flag")
            @test data["brightness_temperature"] > 200.0
            @test data["error"] > 0.0
        end
    end
end

"""Test format-specific processing algorithms"""
@testset "Format-Specific Processing" begin
    println("  Testing format-specific processing...")
    
    @testset "BUFR Message Decoding" begin
        # Test BUFR message structure parsing
        bufr_message = Dict(
            "edition" => 4,
            "master_table" => 0,
            "local_table" => 0,
            "category" => 2,  # Upper-air data
            "subcategory" => 1,  # Radiosondes
            "descriptors" => [1001, 1002, 4001, 4002, 5001, 6001, 7001, 12001, 13003],
            "data_section" => [
                [20001, 72305, 20240101, 120000, 40.77, -73.88, 57.0, 287.2, 5.2],  # Header
                [missing, missing, missing, missing, missing, missing, 100000.0, 287.2, 5.2],  # Level 1
                [missing, missing, missing, missing, missing, missing, 85000.0, 279.8, 8.5]   # Level 2
            ]
        )
        
        # Parse BUFR descriptors and data
        parsed_data = parse_bufr_message(bufr_message)
        
        @test parsed_data["message_info"]["category"] == 2
        @test parsed_data["message_info"]["subcategory"] == 1
        @test haskey(parsed_data, "observations")
        @test length(parsed_data["observations"]) >= 2  # At least 2 pressure levels
    end
    
    @testset "PrepBUFR Quality Mark Processing" begin
        # Test PrepBUFR quality mark interpretation
        quality_marks = Dict(
            "program_code" => 1,      # Data source program
            "data_level_category" => 1, # Mass data
            "data_use_flag" => 1,     # Use data
            "analysis_use_flag" => 1,  # Use in analysis
            "quality_mark" => 2,      # Good data
            "reason_code" => 0        # No rejection
        )
        
        # Process quality marks
        qc_result = process_prepbufr_quality_marks(quality_marks)
        
        @test qc_result["use_observation"] == true
        @test qc_result["data_quality"] == "good"
        @test qc_result["rejection_reason"] == "none"
        
        # Test rejected observation
        rejected_marks = copy(quality_marks)
        rejected_marks["quality_mark"] = 15  # Rejected
        rejected_marks["reason_code"] = 7    # Failed background check
        
        qc_result_rejected = process_prepbufr_quality_marks(rejected_marks)
        
        @test qc_result_rejected["use_observation"] == false
        @test qc_result_rejected["data_quality"] == "rejected"
        @test qc_result_rejected["rejection_reason"] == "background_check"
    end
    
    @testset "Multi-Level Data Processing" begin
        # Test processing of multi-level observations (radiosondes, aircraft)
        multi_level_data = Dict(
            "header" => Dict(
                "station_id" => "72403",
                "latitude" => 39.95,
                "longitude" => -75.17,
                "observation_time" => DateTime(2024, 1, 1, 0, 0, 0)
            ),
            "levels" => [
                Dict("pressure" => 100000.0, "height" => 100, "temperature" => 286.2, "humidity" => 78.5, "wind_u" => 2.1, "wind_v" => -0.8),
                Dict("pressure" => 92500.0, "height" => 750, "temperature" => 282.8, "humidity" => 72.1, "wind_u" => 3.2, "wind_v" => -1.2),
                Dict("pressure" => 85000.0, "height" => 1450, "temperature" => 278.1, "humidity" => 65.8, "wind_u" => 4.8, "wind_v" => -1.8),
                Dict("pressure" => 70000.0, "height" => 3050, "temperature" => 268.5, "humidity" => 45.2, "wind_u" => 8.2, "wind_v" => -2.5)
            ]
        )
        
        # Process multi-level data with quality control
        processed_profile = process_multi_level_observation(multi_level_data)
        
        @test processed_profile["obs_type"] == "multi_level"
        @test length(processed_profile["profile"]) == 4
        
        # Check that profile is ordered by pressure (descending)
        pressures = [level["pressure"] for level in processed_profile["profile"]]
        @test issorted(pressures, rev=true)  # Should be sorted descending
        
        # Check quality control was applied
        for level in processed_profile["profile"]
            @test haskey(level, "qc_flags")
            @test is_valid_temperature(level["temperature"])
            @test is_valid_pressure(level["pressure"])
        end
    end
end

"""Test error handling and edge cases"""
@testset "Error Handling" begin
    println("  Testing error handling...")
    
    @testset "Malformed Data Handling" begin
        # Test missing header information
        malformed_data = Dict("observations" => [])  # Missing header
        
        @test_throws KeyError process_bufr_message(malformed_data)
        
        # Test invalid observation data
        invalid_obs_data = Dict(
            "header" => Dict(
                "message_type" => "ADPSFC",
                "station_id" => "TEST"
            ),
            "observations" => Dict(
                "temperature" => "invalid_temperature",  # String instead of number
                "pressure" => -999.0  # Invalid negative pressure
            )
        )
        
        @test_throws ArgumentError validate_observation_data(invalid_obs_data)
    end
    
    @testset "Incomplete Data Handling" begin
        # Test partially complete radiosonde profile
        incomplete_radiosonde = Dict(
            "header" => Dict(
                "message_type" => "ADPUPA",
                "station_id" => "12345"
                # Missing latitude, longitude, time
            ),
            "levels" => [
                Dict("pressure" => 85000.0, "temperature" => 275.0)
                # Missing wind and humidity data
            ]
        )
        
        # Should handle incomplete data gracefully
        processed_obs = process_incomplete_radiosonde(incomplete_radiosonde)
        @test processed_obs["location"]["station_id"] == "12345"
        @test length(processed_obs["profile"]) == 1
        @test haskey(processed_obs["profile"][1], "pressure")
        @test haskey(processed_obs["profile"][1], "temperature")
    end
    
    @testset "Data Type Conversion Errors" begin
        # Test conversion of various data types
        @test convert_to_float("123.45") ≈ 123.45
        @test convert_to_float(123) == 123.0
        @test isnan(convert_to_float("invalid"))
        @test isnan(convert_to_float(nothing))
        
        # Test time conversion
        valid_time_string = "2024-01-01T12:00:00Z"
        @test convert_to_datetime(valid_time_string) == DateTime(2024, 1, 1, 12, 0, 0)
        
        invalid_time_string = "not_a_time"
        @test convert_to_datetime(invalid_time_string) === nothing
        
        # Test array conversion
        string_array = ["1.5", "2.8", "invalid", "4.2"]
        float_array = convert_string_array_to_float(string_array)
        @test float_array[1] ≈ 1.5
        @test float_array[2] ≈ 2.8
        @test isnan(float_array[3])
        @test float_array[4] ≈ 4.2
    end
    
    @testset "Large Data Corruption Handling" begin
        # Test handling of corrupted data in large datasets
        config = MockConfig{Float64}(n_surface = 1000, random_seed = 456)
        observations = generate_mock_observations(config)
        
        # Corrupt some observations
        corrupted_count = 0
        for obs in observations["surface"]
            if rand() < 0.1  # Corrupt 10% of data
                # Randomly corrupt various fields
                if rand() < 0.5
                    obs["variables"]["temperature"] = "corrupted"
                else
                    obs["location"]["latitude"] = 999.0  # Invalid latitude
                end
                corrupted_count += 1
            end
        end
        
        # Process with error handling
        valid_observations = []
        error_count = 0
        
        for obs in observations["surface"]
            try
                if validate_observation_structure(obs)
                    push!(valid_observations, obs)
                end
            catch e
                error_count += 1
            end
        end
        
        @test error_count > 0  # Should detect corrupted data
        @test length(valid_observations) == (1000 - error_count)
        println("    Detected and handled $error_count corrupted observations")
    end
end

"""Test memory management and performance"""
@testset "Memory and Performance" begin
    println("  Testing memory management and performance...")
    
    @testset "Large Dataset Processing" begin
        # Create large mock dataset
        large_dataset = Dict{String, Vector{Dict}}()
        n_observations = 10000
        
        # Generate many surface observations
        large_dataset["surface"] = []
        for i in 1:n_observations
            obs = Dict(
                "header" => Dict(
                    "station_id" => "ST_$(lpad(i, 5, '0'))",
                    "latitude" => -90.0 + 180.0 * rand(),
                    "longitude" => -180.0 + 360.0 * rand(),
                    "observation_time" => DateTime(2024, 1, 1) + Hour(rand(0:23))
                ),
                "data" => Dict(
                    "temperature" => 250.0 + 50.0 * rand(),
                    "pressure" => 80000.0 + 30000.0 * rand(),
                    "humidity" => 0.001 + 0.019 * rand()
                )
            )
            push!(large_dataset["surface"], obs)
        end
        
        @test length(large_dataset["surface"]) == n_observations
        
        # Time the processing
        start_time = time()
        processed_count = 0
        for obs in large_dataset["surface"]
            if validate_basic_observation(obs)
                processed_count += 1
            end
        end
        processing_time = time() - start_time
        
        @test processed_count > 0
        @test processing_time < 10.0  # Should process 10k observations in < 10 seconds
        println("    Processed $processed_count observations in $(processing_time:.2f) seconds")
    end
    
    @testset "Memory Usage Monitoring" begin
        # Monitor memory usage during processing
        initial_memory = Base.gc_bytes()
        
        # Process moderate dataset
        dataset_size = 1000
        observations = []
        for i in 1:dataset_size
            obs = create_mock_observation("surface")
            processed_obs = process_observation(obs)
            push!(observations, processed_obs)
        end
        
        Base.GC.gc()  # Force garbage collection
        final_memory = Base.gc_bytes()
        memory_increase = final_memory - initial_memory
        
        @test length(observations) == dataset_size
        println("    Memory usage increased by $(memory_increase ÷ 1024) KB for $dataset_size observations")
        
        # Clean up
        observations = nothing
        Base.GC.gc()
    end
    
    @testset "Parallel Processing Performance" begin
        # Test parallel processing capabilities
        n_obs = 5000
        config = MockConfig{Float64}(n_surface = n_obs, random_seed = 789)
        observations = generate_mock_observations(config)
        
        # Sequential processing
        start_time = time()
        sequential_results = []
        for obs in observations["surface"]
            result = process_observation_with_validation(obs)
            push!(sequential_results, result)
        end
        sequential_time = time() - start_time
        
        # Simulated parallel processing (using pmap would require Distributed setup)
        start_time = time()
        parallel_results = map(process_observation_with_validation, observations["surface"])
        parallel_time = time() - start_time
        
        @test length(sequential_results) == length(parallel_results) == n_obs
        @test parallel_time <= sequential_time * 1.2  # Should be comparable or better
        
        println("    Sequential: $(sequential_time:.2f)s, Map: $(parallel_time:.2f)s")
    end
end

# Helper functions for testing (mock implementations)

"""
    is_valid_temperature(temp::Real) -> Bool

Return `true` when `temp` (Kelvin) lies in the plausible atmospheric
range [150, 350] K used throughout these tests.
"""
function is_valid_temperature(temp::Real)
    return temp >= 150.0 && temp <= 350.0
end

"""
    is_valid_pressure(pressure::Real) -> Bool

Return `true` when `pressure` (Pa) lies within [1000, 110000] Pa —
roughly 1 hPa up to just above sea-level pressure.
"""
function is_valid_pressure(pressure::Real)
    return pressure >= 1000.0 && pressure <= 110000.0
end

"""
    is_valid_latitude(lat::Real) -> Bool

Return `true` when `lat` (degrees) is within [-90, 90], inclusive.
"""
function is_valid_latitude(lat::Real)
    return abs(lat) <= 90.0
end

"""
    is_valid_longitude(lon::Real) -> Bool

Return `true` when `lon` (degrees) is within [-180, 180], inclusive.
"""
function is_valid_longitude(lon::Real)
    return abs(lon) <= 180.0
end

"""
    is_valid_specific_humidity(q::Real) -> Bool

Return `true` when specific humidity `q` (kg/kg) is non-negative and
at most 0.050 (50 g/kg).
"""
function is_valid_specific_humidity(q::Real)
    return q >= 0.0 && q <= 0.050
end

"""
    is_valid_qc_flag(flag::Integer) -> Bool

Return `true` when `flag` falls in the 4-bit quality-mark range 0–15.

Accepts any `Integer` (decoders may yield `Int32`/`Int16` values)
rather than over-constraining to the platform `Int`.
"""
function is_valid_qc_flag(flag::Integer)
    return 0 <= flag <= 15
end

"""
    interpret_qc_flag(flag::Integer) -> String

Map a PrepBUFR-style quality mark to a descriptive category:
`<= 1` → `"good"`, `2` → `"neutral"`, `3` → `"probably_bad"`,
`4–7` → `"bad"`, anything higher → `"rejected"`.

Accepts any `Integer` (decoders may yield narrower int types) instead
of over-constraining to the platform `Int`.
"""
function interpret_qc_flag(flag::Integer)
    if flag <= 1
        return "good"
    elseif flag == 2
        return "neutral"
    elseif flag == 3
        return "probably_bad"
    elseif flag <= 7
        return "bad"
    else
        return "rejected"
    end
end

"""
    is_consistent_temperature_dewpoint(temp::Real, dewpoint::Real) -> Bool

Return `true` when the dewpoint does not exceed the temperature and
the dewpoint depression (temp − dewpoint) is at most 50 K.
"""
function is_consistent_temperature_dewpoint(temp::Real, dewpoint::Real)
    spread = temp - dewpoint
    return spread >= 0.0 && spread <= 50.0
end

"""
    convert_bufr_to_radiosonde(bufr_data::Dict) -> Dict

Repackage a decoded BUFR upper-air message into the internal radiosonde
observation layout (location block, launch time, level profile, metadata).
The `"levels"` vector is passed through by reference, not copied.
"""
function convert_bufr_to_radiosonde(bufr_data::Dict)
    hdr = bufr_data["header"]
    
    location = Dict(
        "station_id" => hdr["station_id"],
        "latitude" => hdr["latitude"],
        "longitude" => hdr["longitude"],
        "elevation" => hdr["elevation"]
    )
    
    return Dict(
        "obs_type" => "radiosonde",
        "location" => location,
        "time" => hdr["observation_time"],
        "profile" => bufr_data["levels"],
        "metadata" => Dict("message_type" => hdr["message_type"])
    )
end

"""
    convert_prepbufr_to_surface(prepbufr_data::Dict) -> Dict

Convert a decoded PrepBUFR surface record into the internal surface
observation layout. Each `"data"` entry is a `(value, error, qc)` tuple
that is fanned out into the `"variables"`, `"errors"`, and `"qc_flags"`
dictionaries of the result.
"""
function convert_prepbufr_to_surface(prepbufr_data::Dict)
    hdr = prepbufr_data["header"]
    
    variables = Dict()
    errors = Dict()
    qc_flags = Dict()
    for (name, triple) in prepbufr_data["data"]
        # Destructure (value, error, qc) straight into the three tables.
        variables[name], errors[name], qc_flags[name] = triple
    end
    
    return Dict(
        "obs_type" => "surface",
        "location" => Dict(
            "station_id" => hdr["station_id"],
            "latitude" => hdr["latitude"],
            "longitude" => hdr["longitude"],
            "elevation" => hdr["elevation"]
        ),
        "time" => hdr["observation_time"],
        "variables" => variables,
        "errors" => errors,
        "qc_flags" => qc_flags,
        "metadata" => Dict("message_type" => hdr["message_type"])
    )
end

"""
    convert_satellite_to_internal(satellite_data::Dict) -> Dict

Convert a decoded satellite radiance record into the internal layout.
Each channel's `(brightness_temp, error, qc)` tuple becomes a per-channel
dictionary with named fields; viewing geometry goes into `"location"` and
instrument/scan identifiers into `"metadata"`.
"""
function convert_satellite_to_internal(satellite_data::Dict)
    hdr = satellite_data["header"]
    
    # Expand each (bt, error, qc) tuple into a named-field dict.
    converted = Dict()
    for (ch_num, triple) in satellite_data["channels"]
        bt, err, qc = triple
        converted[ch_num] = Dict(
            "brightness_temperature" => bt,
            "error" => err,
            "qc_flag" => qc
        )
    end
    
    return Dict(
        "obs_type" => "satellite_radiance",
        "location" => Dict(
            "latitude" => hdr["latitude"],
            "longitude" => hdr["longitude"],
            "satellite_zenith_angle" => hdr["satellite_zenith_angle"],
            "satellite_azimuth_angle" => hdr["satellite_azimuth_angle"]
        ),
        "time" => hdr["observation_time"],
        "channels" => converted,
        "metadata" => Dict(
            "satellite_id" => hdr["satellite_id"],
            "sensor_id" => hdr["sensor_id"],
            "scan_line" => hdr["scan_line"],
            "scan_position" => hdr["scan_position"]
        )
    )
end

"""
    parse_bufr_message(bufr_message::Dict) -> Dict

Mock BUFR decoder used by the tests: copies edition/category/subcategory
into `"message_info"` and emits two fixed pressure levels regardless of
the descriptor or data sections.
"""
function parse_bufr_message(bufr_message::Dict)
    info = Dict(
        "edition" => bufr_message["edition"],
        "category" => bufr_message["category"],
        "subcategory" => bufr_message["subcategory"]
    )
    
    # Stub: the real decoder would walk "descriptors"/"data_section".
    decoded_levels = [
        Dict("pressure" => 100000.0, "temperature" => 287.2),
        Dict("pressure" => 85000.0, "temperature" => 279.8)
    ]
    
    return Dict("message_info" => info, "observations" => decoded_levels)
end

"""
    process_prepbufr_quality_marks(quality_marks::Dict) -> Dict

Interpret a PrepBUFR quality-mark record. Marks `<= 3` are usable;
`<= 2` is `"good"`, `3–7` `"questionable"`, higher values `"rejected"`.
Reason code 0 maps to `"none"`, 7 to `"background_check"`, anything
else to `"other"`.
"""
function process_prepbufr_quality_marks(quality_marks::Dict)
    mark = quality_marks["quality_mark"]
    code = quality_marks["reason_code"]
    
    quality = mark <= 2 ? "good" : (mark <= 7 ? "questionable" : "rejected")
    
    reason = "other"
    if code == 0
        reason = "none"
    elseif code == 7
        reason = "background_check"
    end
    
    return Dict(
        "use_observation" => mark <= 3,
        "data_quality" => quality,
        "rejection_reason" => reason
    )
end

"""
    process_multi_level_observation(data::Dict) -> Dict

Sort a multi-level observation's levels by pressure (descending) and
attach a `"qc_flags"` dict (all zeros, i.e. good) covering every `Real`
field of each level.

BUG FIX: the previous version mutated the caller's level dicts with
`level["qc_flags"] = Dict{String, Int}()`. When the input levels are
narrowly typed (e.g. `Dict{String, Real}`, which is what
`Dict("pressure" => 1.0, "height" => 1, ...)` infers), that `setindex!`
throws a `MethodError` (no `convert(Real, ::Dict)`). Fresh
`Dict{String, Any}` copies are built instead, which also leaves the
caller's data untouched.
"""
function process_multi_level_observation(data::Dict)
    # Sort by pressure, highest (lowest altitude) first.
    sorted_levels = sort(data["levels"]; by = lvl -> lvl["pressure"], rev = true)
    
    processed = Vector{Dict{String, Any}}()
    for lvl in sorted_levels
        new_level = Dict{String, Any}(lvl)
        flags = Dict{String, Int}()
        for (var, value) in lvl
            if var != "qc_flags" && value isa Real
                flags[var] = 0  # Good quality
            end
        end
        new_level["qc_flags"] = flags
        push!(processed, new_level)
    end
    
    return Dict(
        "obs_type" => "multi_level",
        "location" => data["header"],
        "profile" => processed
    )
end

"""
    process_bufr_message(data::Dict) -> Bool

Validate that a decoded BUFR message contains the mandatory `"header"`
section; returns `true` when present.

# Throws
- `KeyError`: when `"header"` is absent. The error carries the missing
  key itself (mirroring `Dict` lookup semantics) so handlers can
  inspect `err.key`, rather than a prose message.
"""
function process_bufr_message(data::Dict)
    haskey(data, "header") || throw(KeyError("header"))
    return true
end

"""
    validate_observation_data(data::Dict) -> Bool

Check observation values for basic sanity: temperature entries must be
numeric and pressure entries non-negative. Returns `true` on success.

# Throws
- `KeyError`: when `"header"` or `"observations"` is missing.
- `ArgumentError`: for a non-numeric temperature or negative pressure.
"""
function validate_observation_data(data::Dict)
    # Look up the header eagerly so a missing header still raises KeyError.
    header = data["header"]
    
    for (name, value) in data["observations"]
        if name == "temperature" && !(value isa Real)
            throw(ArgumentError("Temperature must be numeric, got $(typeof(value))"))
        end
        if name == "pressure" && value < 0
            throw(ArgumentError("Pressure cannot be negative, got $value"))
        end
    end
    return true
end

"""
    process_incomplete_radiosonde(data::Dict) -> Dict

Salvage a partially-populated radiosonde record: each level keeps only
its finite numeric fields (`Real` and not NaN); levels left with no
fields at all are dropped. Only the station id is retained from the
header, so missing coordinates/time do not raise.
"""
function process_incomplete_radiosonde(data::Dict)
    kept_levels = []
    for raw_level in data["levels"]
        filtered = Dict{Any, Any}(
            k => v for (k, v) in raw_level if v isa Real && !isnan(v)
        )
        isempty(filtered) || push!(kept_levels, filtered)
    end
    
    return Dict(
        "location" => Dict("station_id" => data["header"]["station_id"]),
        "profile" => kept_levels
    )
end

"""
    convert_to_float(value) -> Float64

Coerce `value` to `Float64`. `Real` inputs are converted directly,
strings are parsed, and anything unparseable or of another type
(including `nothing`) yields `NaN`.

Uses `tryparse` instead of the previous `try parse catch` — exceptions
should not be used for control flow.
"""
function convert_to_float(value)
    if value isa Real
        return Float64(value)
    elseif value isa AbstractString
        parsed = tryparse(Float64, value)
        return parsed === nothing ? NaN : parsed
    else
        return NaN
    end
end

"""
    convert_to_datetime(time_str) -> Union{DateTime, Nothing}

Parse the leading `yyyy-mm-ddTHH:MM:SS` portion of an ISO-8601-style
string (any trailing zone designator such as `Z` is ignored). Returns
`nothing` for non-string input, strings shorter than 19 characters, or
unparseable text.

Replaces the previous version that relied on a `BoundsError` from
`time_str[1:19]` and a `try/catch` for control flow: the length is
checked explicitly and `tryparse` is used for the parse itself.
"""
function convert_to_datetime(time_str)
    time_str isa AbstractString || return nothing
    ncodeunits(time_str) >= 19 || return nothing
    # first(s, 19) takes 19 characters safely even near multibyte boundaries.
    head = first(time_str, 19)
    return tryparse(DateTime, head, dateformat"yyyy-mm-ddTHH:MM:SS")
end

"""
    convert_string_array_to_float(str_array) -> Vector{Float64}

Element-wise `convert_to_float` over a vector of strings; unparseable
entries become `NaN`.

Generalized from `Vector{String}` to `AbstractVector{<:AbstractString}`
so `SubString` vectors (e.g. from `split`) and views are accepted —
backward compatible with all existing callers.
"""
function convert_string_array_to_float(str_array::AbstractVector{<:AbstractString})
    return [convert_to_float(s) for s in str_array]
end

"""
    validate_observation_structure(obs::Dict) -> Bool

Structural sanity check for an internal observation dict. Requires the
`"variables"`, `"location"`, and `"time"` sections; when present, the
temperature must be a valid `Real` (per `is_valid_temperature`) and the
latitude a valid `Real` (per `is_valid_latitude`). Returns `false`
instead of throwing on bad data.
"""
function validate_observation_structure(obs::Dict)
    # All three top-level sections must exist.
    for section in ("variables", "location", "time")
        haskey(obs, section) || return false
    end
    
    vars = obs["variables"]
    if haskey(vars, "temperature")
        t = vars["temperature"]
        (t isa Real && is_valid_temperature(t)) || return false
    end
    
    loc = obs["location"]
    if haskey(loc, "latitude")
        lat = loc["latitude"]
        (lat isa Real && is_valid_latitude(lat)) || return false
    end
    
    return true
end

"""
    validate_basic_observation(obs::Dict) -> Bool

Return `true` when `obs` has both a `"header"` and a `"data"` section.
"""
validate_basic_observation(obs::Dict) = haskey(obs, "header") && haskey(obs, "data")

"""
    create_mock_observation(obs_type::String) -> Dict

Build a fixed surface-style mock observation for the performance tests.

NOTE: `obs_type` is currently unused — every call returns the same
record regardless of the requested type.
"""
function create_mock_observation(obs_type::String)
    header = Dict(
        "station_id" => "TEST",
        "latitude" => 40.0,
        "longitude" => -74.0,
        "observation_time" => DateTime(2024, 1, 1, 12, 0, 0)
    )
    readings = Dict(
        "temperature" => 288.15,
        "pressure" => 101325.0
    )
    return Dict("header" => header, "data" => readings)
end

"""
    process_observation(obs::Dict) -> Dict

Pass-through processing stage used by the performance tests: returns
`obs` unchanged (the same object, no copy).
"""
process_observation(obs::Dict) = obs

"""
    process_observation_with_validation(obs::Dict) -> Union{Dict, Nothing}

Run `process_observation` only when `obs` passes
`validate_observation_structure`; otherwise return `nothing`.
"""
function process_observation_with_validation(obs::Dict)
    return validate_observation_structure(obs) ? process_observation(obs) : nothing
end

# Final status marker: only reached if every testset above ran to completion.
println("  ✓ All data format tests completed successfully")