"""
    Performance Module

Top-level module for comprehensive performance optimization and monitoring capabilities
in GSICoreAnalysis.jl. This module provides:

1. **PerformanceOptimization**: Core optimization utilities including algorithmic improvements,
   spatial indexing, vectorized operations, and efficient data structures.

2. **ParallelProcessing**: Multi-threading and distributed processing capabilities with
   load balancing, fault tolerance, and scalability optimizations.

3. **MemoryManagement**: Advanced memory management including memory pooling, efficient 
   data transformation, garbage collection optimization, and memory-mapped I/O.

4. **Profiling**: Comprehensive performance profiling and monitoring tools with
   bottleneck detection, benchmarking, and performance regression testing.

# Quick Start

```julia
using GSICoreAnalysis.Performance

# Enable performance optimizations
config = PerformanceConfig(
    enable_simd = true,
    enable_threading = true,
    batch_size = 1000,
    spatial_index_type = :kdtree
)
enable_performance_optimizations(config)

# Profile operations
profiler = PerformanceProfiler("data_analysis")
enable_profiling(profiler)

@profile profiler "observation_processing" begin
    # Your data processing code here
    results = process_observations(observations)
end

# Generate performance report
report = generate_performance_report(profiler)
save_performance_report(report, "performance_analysis.html")

# Parallel processing
parallel_config = ParallelConfig(
    max_threads = nthreads(),
    enable_distributed = false,
    load_balancing = :dynamic
)

parallel_results = parallel_observation_processing(
    observations, process_func, parallel_config
)

# Memory management
memory_monitor = MemoryMonitor(gc_threshold = 0.8)
start_memory_monitoring(memory_monitor)

obs_pool = MemoryPool{ObservationType}(initial_size = 1000)
optimized_obs = allocate_from_pool(obs_pool)
```

# Module Architecture

## Performance Optimization Layer
- Spatial indexing (KD-trees, spatial hashing)
- Vectorized operations with SIMD support
- Efficient data structures and transformations
- I/O optimization and buffering

## Parallel Processing Layer  
- Thread-safe multi-threading with load balancing
- Distributed processing with fault tolerance
- GPU acceleration support (CUDA)
- Work-stealing and dynamic scheduling

## Memory Management Layer
- Object pooling and reuse
- Memory-mapped file I/O for large datasets
- Garbage collection optimization
- Memory usage monitoring and alerts

## Profiling and Monitoring Layer
- Real-time performance profiling
- Bottleneck detection and analysis
- Benchmark suite and regression testing
- Performance reporting and visualization

# Production Usage

For production atmospheric data assimilation workloads:

1. **Configure for Scale**: Set appropriate batch sizes, thread counts, and memory limits
2. **Enable Monitoring**: Use profiling to identify bottlenecks in operational runs
3. **Optimize Memory**: Use memory pools for frequent allocations
4. **Leverage Parallelism**: Distribute processing across available cores and nodes
5. **Monitor Performance**: Set up automated performance regression detection

The Performance module is designed to handle operational workloads processing
100k+ observations efficiently while maintaining numerical accuracy and robustness.
"""
module Performance

# Load performance submodules
include("PerformanceOptimization.jl")
include("ParallelProcessing.jl")
include("MemoryManagement.jl")
include("Profiling.jl")

# Standard-library and external dependencies referenced directly in this module
using Dates
using Distributed: nprocs
using LinearAlgebra: I
using Statistics: mean, median
using BenchmarkTools: BenchmarkTools, @benchmark
using JSON3

# Re-export all performance modules
using .PerformanceOptimization
using .ParallelProcessing
using .MemoryManagement
using .Profiling

# Export all public interfaces from submodules

# From PerformanceOptimization
export PerformanceConfig, OptimizedDataStructure
export SpatialIndex, KDTree, SpatialHash
export VectorizedOperations, SIMDOperations
export optimize_data_structure, create_spatial_index, query_spatial_index
export vectorized_bias_correction, vectorized_quality_control
export efficient_matrix_operations, optimized_loop_operations
export performance_aware_batch_processing
export enable_performance_optimizations, disable_performance_optimizations

# From ParallelProcessing
export ParallelConfig, DistributedConfig, GPUConfig
export WorkerPool, LoadBalancer, DistributedTask
export parallel_observation_processing, parallel_bias_correction
export parallel_quality_control, parallel_spatial_processing
export distributed_analysis, distributed_ensemble_processing
export setup_distributed_workers, cleanup_distributed_workers
export gpu_accelerated_operations, gpu_matrix_operations
export fault_tolerant_processing, parallel_io_operations
export load_balanced_processing, dynamic_load_balancing

# From MemoryManagement
export MemoryPool, MemoryMonitor, IOBufferManager
export MemoryMappedReader, MemoryMappedWriter
export EfficientDataTransform, ReferenceCounter
export allocate_from_pool, return_to_pool, reset_pool
export start_memory_monitoring, stop_memory_monitoring
export create_memory_mapped_reader, create_memory_mapped_writer
export efficient_copy, efficient_transform, in_place_transform
export track_memory_usage, get_memory_stats, trigger_gc_if_needed
export optimize_memory_layout, minimize_allocations
export buffer_io_operations, manage_temporary_data

# From Profiling
export PerformanceProfiler, BenchmarkSuite, PerformanceReport
export ProfileSession, TimingResult, MemoryProfile, BottleneckAnalysis
export enable_profiling, disable_profiling, @profile
export benchmark_operation, comprehensive_benchmark
export generate_performance_report, save_performance_report
export identify_bottlenecks, detect_performance_regressions
export performance_dashboard, monitor_real_time_performance
export optimize_performance_configuration, suggest_optimizations
export compare_performance, validate_performance_improvements

"""
    configure_performance(;
        enable_optimizations = true,
        enable_profiling = false,
        enable_monitoring = false,
        parallel_config = ParallelConfig(),
        performance_config = PerformanceConfig()
    )

Configure performance settings for GSICoreAnalysis.jl with sensible defaults.

# Keywords
- `enable_optimizations::Bool`: apply `performance_config` via
  `enable_performance_optimizations`.
- `enable_profiling::Bool`: create and enable a `PerformanceProfiler`.
- `enable_monitoring::Bool`: create a `MemoryMonitor` and start monitoring.
- `parallel_config`: stored unchanged in the result for later use by callers.
- `performance_config`: optimization settings applied when
  `enable_optimizations` is true.

# Returns
- `Dict{String, Any}` with an entry per enabled component
  (`"optimizations"`, `"profiler"`, `"monitor"`) plus `"parallel_config"`.
"""
function configure_performance(;
    enable_optimizations = true,
    enable_profiling = false,
    enable_monitoring = false,
    parallel_config = ParallelConfig(),
    performance_config = PerformanceConfig()
)
    results = Dict{String, Any}()

    # Enable performance optimizations
    if enable_optimizations
        enabled_config = enable_performance_optimizations(performance_config)
        results["optimizations"] = enabled_config
        @info "Performance optimizations enabled"
    end

    # Enable profiling if requested
    if enable_profiling
        profiler = PerformanceProfiler("gsi_analysis")
        # BUG FIX: must be module-qualified — the Bool keyword argument
        # `enable_profiling` shadows the Profiling function of the same name
        # in this scope, so the unqualified call raised a MethodError.
        enabled_profiler = Profiling.enable_profiling(profiler)
        results["profiler"] = enabled_profiler
        @info "Performance profiling enabled"
    end

    # Enable memory monitoring if requested
    if enable_monitoring
        monitor = MemoryMonitor()
        start_memory_monitoring(monitor)
        results["monitor"] = monitor
        @info "Memory monitoring enabled"
    end

    # Store parallel configuration for downstream consumers
    results["parallel_config"] = parallel_config

    return results
end

"""
    optimize_for_production(data_scale::Symbol = :large)

Configure performance optimizations for production workloads.

# Arguments
- `data_scale`: Scale of data processing - :small, :medium, :large, or :massive

# Returns
- Configuration dictionary with optimized settings
"""
function optimize_for_production(data_scale::Symbol = :large)
    # Per-scale tuning knobs; larger scales use bigger batches, higher memory
    # ceilings, and more aggressive optimization levels.
    scale_presets = Dict(
        :small => Dict(
            :batch_size => 500,
            :memory_limit_gb => 2.0,
            :spatial_index_type => :spatial_hash,
            :optimization_level => 1,
            :enable_distributed => false,
            :max_threads => min(4, Threads.nthreads())
        ),
        :medium => Dict(
            :batch_size => 2000,
            :memory_limit_gb => 8.0,
            :spatial_index_type => :kdtree,
            :optimization_level => 2,
            :enable_distributed => false,
            :max_threads => Threads.nthreads()
        ),
        :large => Dict(
            :batch_size => 5000,
            :memory_limit_gb => 16.0,
            :spatial_index_type => :kdtree,
            :optimization_level => 2,
            :enable_distributed => nprocs() > 1,
            :max_threads => Threads.nthreads()
        ),
        :massive => Dict(
            :batch_size => 10000,
            :memory_limit_gb => 32.0,
            :spatial_index_type => :kdtree,
            :optimization_level => 3,
            :enable_distributed => true,
            :max_threads => Threads.nthreads()
        )
    )

    haskey(scale_presets, data_scale) ||
        throw(ArgumentError("Invalid data_scale: $data_scale. Must be :small, :medium, :large, or :massive"))
    preset = scale_presets[data_scale]

    # SIMD/threading/vectorization are always on for production; only the
    # scale-sensitive parameters vary between presets.
    performance_config = PerformanceConfig(
        enable_simd = true,
        enable_threading = true,
        enable_vectorization = true,
        batch_size = preset[:batch_size],
        memory_limit_gb = preset[:memory_limit_gb],
        spatial_index_type = preset[:spatial_index_type],
        optimization_level = preset[:optimization_level],
        cache_size_mb = 1024.0,
        gc_threshold = 0.85
    )

    parallel_config = ParallelConfig(
        max_threads = preset[:max_threads],
        enable_distributed = preset[:enable_distributed],
        enable_gpu = false,  # Conservative default
        chunk_size = preset[:batch_size] ÷ 2,
        load_balancing = :dynamic,
        fault_tolerance = true
    )

    # Apply the assembled configuration for production use
    results = configure_performance(
        enable_optimizations = true,
        enable_profiling = false,  # Disable by default in production
        enable_monitoring = true,
        parallel_config = parallel_config,
        performance_config = performance_config
    )

    @info "Production performance configuration applied" data_scale=data_scale

    return results
end

"""
    benchmark_system_performance(;
        quick_test = false,
        save_results = true,
        output_dir = "performance_analysis"
    )

Run system performance benchmarks to establish baseline performance.

# Keywords
- `quick_test::Bool`: benchmark fewer, smaller sizes for a fast smoke run.
- `save_results::Bool`: write a JSON summary of the results to `output_dir`.
- `output_dir::String`: directory for saved reports (created if missing).

# Returns
- `Dict{String, Any}` with a `"system_info"` entry and one `"size_\$n"` entry
  of `BenchmarkTools.Trial` results per benchmarked problem size.
"""
function benchmark_system_performance(;
    quick_test = false,
    save_results = true,
    output_dir = "performance_analysis"
)
    @info "Running system performance benchmarks..." quick_test=quick_test

    # Smaller sweep for smoke tests; full sweep up to 50k elements otherwise.
    test_sizes = quick_test ? [1000, 5000] : [1000, 10000, 50000]

    results = Dict{String, Any}()

    # Record the host environment so saved baselines are comparable later.
    results["system_info"] = Dict(
        "julia_version" => string(VERSION),
        "cpu_threads" => Sys.CPU_THREADS,
        "available_threads" => Threads.nthreads(),
        "total_memory_gb" => round(Sys.total_memory() / 1024^3, digits=2),
        "timestamp" => now()
    )

    # Basic operation benchmarks
    for n in test_sizes
        @info "Benchmarking with $n elements..."

        size_results = Dict{String, Any}()

        # Vector operations
        data = randn(n)
        size_results["vector_sum"] = @benchmark sum($data)
        size_results["vector_sort"] = @benchmark sort($data)

        # Matrix operations (kept small so dense linear algebra stays fast)
        if n <= 10000
            matrix_size = min(100, isqrt(n))
            A = randn(matrix_size, matrix_size)
            B = randn(matrix_size, matrix_size)

            size_results["matrix_mult"] = @benchmark $A * $B
            # `+ I` shifts the spectrum so the random matrix is safely invertible
            size_results["matrix_inv"] = @benchmark inv($A + I)
        end

        # Simulated observation processing (NamedTuples mimic real obs records)
        observations = [(
            value = 285.0 + 10*randn(),
            quality = rand(),
            location = (lat = 40 + 10*randn(), lon = -75 + 10*randn())
        ) for _ in 1:n]

        size_results["obs_filter"] = @benchmark filter(obs -> obs.quality > 0.7, $observations)

        results["size_$n"] = size_results
    end

    # Save results if requested
    if save_results
        mkpath(output_dir)
        timestamp = Dates.format(now(), "yyyy-mm-dd_HH-MM-SS")

        # Reduce each BenchmarkTools.Trial to plain numbers so the report is
        # JSON-serializable.
        serialized_results = Dict{String, Any}()
        for (key, value) in results
            if key == "system_info"
                serialized_results[key] = value
            else
                serialized_results[key] = Dict{String, Any}()
                for (subkey, trial) in value
                    if trial isa BenchmarkTools.Trial
                        serialized_results[key][subkey] = Dict(
                            "median_time_ns" => median(trial.times),
                            "mean_time_ns" => mean(trial.times),
                            "min_time_ns" => minimum(trial.times),
                            "max_time_ns" => maximum(trial.times),
                            # Trial.memory is a single Int (bytes per
                            # evaluation), not a vector, so no reduction applies.
                            "median_memory_bytes" => trial.memory,
                            "allocs" => trial.allocs
                        )
                    else
                        # BUG FIX: previously assigned the whole `value` dict
                        # here instead of the individual entry.
                        serialized_results[key][subkey] = trial
                    end
                end
            end
        end

        results_file = joinpath(output_dir, "system_benchmark_$timestamp.json")
        open(results_file, "w") do io
            JSON3.pretty(io, serialized_results)
        end

        @info "Benchmark results saved" file=results_file
    end

    @info "System performance benchmarking completed"
    return results
end

"""
    validate_performance_improvements(baseline_file::String, current_results::Dict)

Validate that performance improvements are significant compared to baseline.

# Arguments
- `baseline_file`: path to a JSON baseline produced by
  `benchmark_system_performance`.
- `current_results`: benchmark results to compare against the baseline.

# Returns
- `nothing` when the baseline file does not exist; otherwise the improvement
  list produced by `Profiling.validate_performance_improvements`.
"""
function validate_performance_improvements(baseline_file::String, current_results::Dict)
    if !isfile(baseline_file)
        @warn "Baseline file not found, cannot validate improvements" file=baseline_file
        return nothing
    end

    # BUG FIX: JSON3.read treats a String argument as JSON *text*, so passing
    # the path directly tried to parse the filename itself. Read the file
    # contents first, then parse.
    baseline_results = JSON3.read(read(baseline_file, String), Dict)

    improvements = Profiling.validate_performance_improvements(baseline_results, current_results)

    if !isempty(improvements)
        @info "Performance improvements detected:"
        for imp in improvements[1:min(5, end)]  # Show top 5
            @info "  $(imp.operation): $(round((imp.speedup - 1) * 100, digits=1))% faster"
        end
    else
        @info "No significant performance improvements detected"
    end

    return improvements
end

"""
    cleanup_performance()

Clean up performance-related resources and reset to defaults.
"""
function cleanup_performance()
    @info "Cleaning up performance resources..."

    # Turn off the global optimization switches first.
    disable_performance_optimizations()

    # Shut down the global memory monitor, if one was started.
    active_monitor = MemoryManagement.GLOBAL_MEMORY_MONITOR[]
    isnothing(active_monitor) || stop_memory_monitoring(active_monitor)

    # Tear down the global profiler, if profiling was enabled.
    active_profiler = Profiling.GLOBAL_PROFILER[]
    isnothing(active_profiler) || disable_profiling(active_profiler)

    # Release any distributed worker processes.
    nprocs() > 1 && cleanup_distributed_workers()

    # Reclaim memory freed by the teardown above.
    GC.gc()

    @info "Performance cleanup completed"
end

end # module Performance