"""
    Parallel Processing Module

Comprehensive parallel processing capabilities for GSICoreAnalysis.jl including:
- Multi-threading with load balancing
- Distributed processing for large-scale operations
- GPU acceleration support (CUDA)
- Fault-tolerant processing with automatic recovery
- Parallel I/O and communication patterns

# Usage

```julia
using GSICoreAnalysis.Performance.ParallelProcessing

# Configure parallel processing
parallel_config = ParallelConfig(
    max_threads = nthreads(),
    enable_distributed = false,
    enable_gpu = false,
    chunk_size = 1000,
    load_balancing = :dynamic
)

# Process observations in parallel
results = parallel_observation_processing(observations, process_func, parallel_config)

# Distributed processing for very large datasets
distributed_results = distributed_analysis(data_chunks, analysis_func, parallel_config)

# GPU-accelerated operations
if parallel_config.enable_gpu
    gpu_results = gpu_accelerated_operations(matrix_data, parallel_config)
end
```
"""
module ParallelProcessing

using Distributed
using Base.Threads
using LinearAlgebra
using SharedArrays
using Printf
using Dates
using Random
using Statistics

# Conditional GPU support.
# `const` is not permitted inside a `try` block (it would declare a local
# variable), so the previous form was a syntax error that prevented the module
# from loading. Instead, assign the value of the whole try/catch expression to
# the constant. The broad `catch` is deliberate: CUDA being absent, failing to
# precompile, or not functional simply disables GPU support.
const HAS_CUDA = try
    using CUDA
    CUDA.functional()
catch
    false
end

# Export main types and functions
export ParallelConfig, DistributedConfig, GPUConfig
export WorkerPool, LoadBalancer, DistributedTask
export parallel_observation_processing, parallel_bias_correction
export parallel_quality_control, parallel_spatial_processing
export distributed_analysis, distributed_ensemble_processing
export setup_distributed_workers, cleanup_distributed_workers
export gpu_accelerated_operations, gpu_matrix_operations
export fault_tolerant_processing, parallel_io_operations
export load_balanced_processing, dynamic_load_balancing

"""
    ParallelConfig

Configuration for parallel processing operations.
"""
struct ParallelConfig
    max_threads::Int
    enable_distributed::Bool
    enable_gpu::Bool
    chunk_size::Int
    load_balancing::Symbol  # :static, :dynamic, :work_stealing
    fault_tolerance::Bool
    memory_limit_per_worker::Float64  # GB
    communication_timeout::Int  # seconds
    enable_parallel_io::Bool
    gpu_batch_size::Int
    
    function ParallelConfig(;
        max_threads = nthreads(),
        enable_distributed = false,
        enable_gpu = HAS_CUDA,
        chunk_size = 1000,
        load_balancing = :dynamic,
        fault_tolerance = true,
        memory_limit_per_worker = 4.0,
        communication_timeout = 30,
        enable_parallel_io = true,
        gpu_batch_size = 10000
    )
        new(max_threads, enable_distributed, enable_gpu, chunk_size,
            load_balancing, fault_tolerance, memory_limit_per_worker,
            communication_timeout, enable_parallel_io, gpu_batch_size)
    end
end

"""
    DistributedConfig

Configuration for distributed processing.
"""
struct DistributedConfig
    worker_hosts::Vector{String}
    processes_per_host::Int
    shared_filesystem::Bool
    data_distribution::Symbol  # :replicated, :partitioned, :shared
    checkpoint_frequency::Int
    
    function DistributedConfig(;
        worker_hosts = String[],
        processes_per_host = 1,
        shared_filesystem = true,
        data_distribution = :partitioned,
        checkpoint_frequency = 100
    )
        new(worker_hosts, processes_per_host, shared_filesystem,
            data_distribution, checkpoint_frequency)
    end
end

"""
    GPUConfig

Configuration for GPU acceleration.
"""
struct GPUConfig
    device_id::Int
    memory_fraction::Float64
    enable_mixed_precision::Bool
    streams::Int
    
    function GPUConfig(;
        device_id = 0,
        memory_fraction = 0.8,
        enable_mixed_precision = false,
        streams = 4
    )
        new(device_id, memory_fraction, enable_mixed_precision, streams)
    end
end

"""
    WorkerPool

Manages a pool of worker threads or processes.
"""
mutable struct WorkerPool
    workers::Vector{Int}
    task_queue::Channel{Any}
    result_queue::Channel{Any}
    load_balancer::LoadBalancer
    active_tasks::Dict{Int, Any}
    
    function WorkerPool(workers::Vector{Int}, queue_size::Int = 1000)
        task_queue = Channel{Any}(queue_size)
        result_queue = Channel{Any}(queue_size)
        load_balancer = LoadBalancer(workers)
        active_tasks = Dict{Int, Any}()
        
        new(workers, task_queue, result_queue, load_balancer, active_tasks)
    end
end

"""
    LoadBalancer

Implements load balancing strategies for parallel execution.
"""
mutable struct LoadBalancer
    workers::Vector{Int}
    load_counts::Vector{Int}
    strategy::Symbol
    
    function LoadBalancer(workers::Vector{Int}, strategy::Symbol = :dynamic)
        load_counts = zeros(Int, length(workers))
        new(workers, load_counts, strategy)
    end
end

"""
    DistributedTask

Represents a task for distributed execution.
"""
struct DistributedTask{T}
    id::Int
    data::T
    function_name::Symbol
    parameters::Dict{Symbol, Any}
    dependencies::Vector{Int}
    
    function DistributedTask{T}(id::Int, data::T, func_name::Symbol, 
                               params::Dict{Symbol, Any} = Dict{Symbol, Any}(),
                               deps::Vector{Int} = Int[]) where T
        new{T}(id, data, func_name, params, deps)
    end
end

"""
    parallel_observation_processing(observations::Vector{T}, process_func::Function,
                                   config::ParallelConfig) where T

Process observations in parallel using multi-threading or distributed computing.
"""
function parallel_observation_processing(observations::Vector{T}, process_func::Function,
                                        config::ParallelConfig) where T
    n = length(observations)
    @info "Starting parallel observation processing" n_obs=n max_threads=config.max_threads
    
    if config.enable_distributed && nprocs() > 1
        return distributed_observation_processing(observations, process_func, config)
    elseif config.max_threads > 1
        return threaded_observation_processing(observations, process_func, config)
    else
        return sequential_observation_processing(observations, process_func)
    end
end

"""
    threaded_observation_processing(observations::Vector{T}, process_func::Function,
                                   config::ParallelConfig) where T

Multi-threaded observation processing with load balancing.
"""
function threaded_observation_processing(observations::Vector{T}, process_func::Function,
                                        config::ParallelConfig) where T
    n = length(observations)
    results = Vector{Any}(undef, n)
    
    if config.load_balancing == :dynamic
        # Dynamic load balancing with work-stealing
        chunk_size = max(1, config.chunk_size ÷ config.max_threads)
        
        @threads for tid in 1:config.max_threads
            thread_start = (tid - 1) * chunk_size + 1
            thread_end = min(tid * chunk_size, n)
            
            if thread_start <= n
                for i in thread_start:thread_end
                    try
                        results[i] = process_func(observations[i])
                    catch e
                        if config.fault_tolerance
                            @warn "Processing failed for observation $i" exception=e
                            results[i] = nothing
                        else
                            rethrow(e)
                        end
                    end
                end
            end
        end
    else
        # Static load balancing
        @threads for i in 1:n
            try
                results[i] = process_func(observations[i])
            catch e
                if config.fault_tolerance
                    @warn "Processing failed for observation $i" exception=e
                    results[i] = nothing
                else
                    rethrow(e)
                end
            end
        end
    end
    
    @info "Parallel processing completed" successful=count(!isnothing, results) failed=count(isnothing, results)
    
    return results
end

"""
    distributed_observation_processing(observations::Vector{T}, process_func::Function,
                                      config::ParallelConfig) where T

Distributed observation processing across multiple processes.
"""
function distributed_observation_processing(observations::Vector{T}, process_func::Function,
                                           config::ParallelConfig) where T
    n = length(observations)
    worker_pids = workers()
    
    @info "Starting distributed processing" n_obs=n n_workers=length(worker_pids)
    
    # Distribute data across workers
    chunk_size = max(1, n ÷ length(worker_pids))
    futures = Future[]
    
    for (i, worker) in enumerate(worker_pids)
        start_idx = (i - 1) * chunk_size + 1
        end_idx = i == length(worker_pids) ? n : i * chunk_size
        
        if start_idx <= n
            chunk = observations[start_idx:end_idx]
            future = @spawnat worker process_observation_chunk(chunk, process_func, config)
            push!(futures, future)
        end
    end
    
    # Collect results
    all_results = Any[]
    for future in futures
        try
            chunk_results = fetch(future)
            append!(all_results, chunk_results)
        catch e
            if config.fault_tolerance
                @warn "Worker failed during processing" exception=e
                # Add placeholder results for failed chunk
                append!(all_results, fill(nothing, chunk_size))
            else
                rethrow(e)
            end
        end
    end
    
    return all_results[1:n]  # Trim to exact size
end

"""
    process_observation_chunk(chunk::Vector{T}, process_func::Function,
                             config::ParallelConfig) where T

Process a chunk of observations on a worker process.
"""
function process_observation_chunk(chunk::Vector{T}, process_func::Function,
                                  config::ParallelConfig) where T
    results = Vector{Any}(undef, length(chunk))
    
    for (i, obs) in enumerate(chunk)
        try
            results[i] = process_func(obs)
        catch e
            if config.fault_tolerance
                @warn "Processing failed for chunk observation $i" exception=e
                results[i] = nothing
            else
                rethrow(e)
            end
        end
    end
    
    return results
end

"""
    sequential_observation_processing(observations::Vector{T}, process_func::Function) where T

Sequential processing fallback.
"""
function sequential_observation_processing(observations::Vector{T}, process_func::Function) where T
    return [process_func(obs) for obs in observations]
end

"""
    parallel_bias_correction(observations::Vector{T}, bias_coeffs::Vector{Float64},
                            config::ParallelConfig) where T

Parallel bias correction with thread-safe coefficient updates.
"""
function parallel_bias_correction(observations::Vector{T}, bias_coeffs::Vector{Float64},
                                 config::ParallelConfig) where T
    @info "Applying parallel bias correction" n_obs=length(observations)
    
    # Thread-safe shared coefficient access
    shared_coeffs = SharedArray{Float64}(bias_coeffs)
    
    correction_func = obs -> begin
        # Extract observation characteristics for bias lookup
        coeff_idx = determine_bias_index(obs, length(shared_coeffs))
        correction = shared_coeffs[coeff_idx]
        
        # Apply correction
        return apply_bias_correction(obs, correction)
    end
    
    return parallel_observation_processing(observations, correction_func, config)
end

"""
    determine_bias_index(obs::T, n_coeffs::Int) where T

Determine appropriate bias coefficient index for observation.
"""
function determine_bias_index(obs::T, n_coeffs::Int) where T
    # Simple hash-based index assignment
    # In practice, this would use observation type, location, instrument, etc.
    if hasfield(T, :id)
        return mod(hash(getfield(obs, :id)), n_coeffs) + 1
    elseif hasfield(T, :location)
        loc = getfield(obs, :location)
        return mod(hash((loc.lat, loc.lon)), n_coeffs) + 1
    else
        return 1
    end
end

"""
    apply_bias_correction(obs::T, correction::Float64) where T

Apply bias correction to a single observation.
"""
function apply_bias_correction(obs::T, correction::Float64) where T
    if hasfield(T, :value)
        corrected_value = getfield(obs, :value) + correction
        
        # Return corrected observation
        if obs isa NamedTuple
            return merge(obs, (value=corrected_value,))
        else
            # For other types, implement appropriate update
            return obs
        end
    end
    
    return obs
end

"""
    parallel_quality_control(observations::Vector{T}, qc_func::Function,
                            config::ParallelConfig) where T

Parallel quality control with aggregated statistics.
"""
function parallel_quality_control(observations::Vector{T}, qc_func::Function,
                                 config::ParallelConfig) where T
    @info "Applying parallel quality control" n_obs=length(observations)
    
    # Parallel QC processing
    qc_results = parallel_observation_processing(observations, qc_func, config)
    
    # Aggregate QC statistics
    passed = count(result -> result isa NamedTuple && get(result, :qc_passed, false), qc_results)
    failed = length(qc_results) - passed
    
    @info "Quality control completed" passed=passed failed=failed
    
    return qc_results
end

"""
    parallel_spatial_processing(observations::Vector{T}, spatial_func::Function,
                               config::ParallelConfig) where T

Parallel spatial processing with domain decomposition.
"""
function parallel_spatial_processing(observations::Vector{T}, spatial_func::Function,
                                    config::ParallelConfig) where T
    @info "Starting parallel spatial processing" n_obs=length(observations)
    
    if !hasfield(T, :location)
        throw(ArgumentError("Observations must have location field for spatial processing"))
    end
    
    # Domain decomposition by latitude/longitude
    spatial_domains = create_spatial_domains(observations, config.max_threads)
    
    # Process each domain in parallel
    all_results = Vector{Any}()
    
    @threads for domain in spatial_domains
        domain_results = spatial_func(domain.observations)
        append!(all_results, domain_results)
    end
    
    return all_results
end

"""
    create_spatial_domains(observations::Vector{T}, n_domains::Int) where T

Create spatial domains for parallel processing.
"""
function create_spatial_domains(observations::Vector{T}, n_domains::Int) where T
    # Extract locations
    locations = [getfield(obs, :location) for obs in observations]
    lats = [loc.lat for loc in locations]
    lons = [loc.lon for loc in locations]
    
    # Create latitude-based domains
    lat_min, lat_max = extrema(lats)
    lat_step = (lat_max - lat_min) / n_domains
    
    domains = []
    for i in 1:n_domains
        domain_lat_min = lat_min + (i - 1) * lat_step
        domain_lat_max = lat_min + i * lat_step
        
        # Find observations in this domain
        domain_indices = findall(lat -> domain_lat_min <= lat <= domain_lat_max, lats)
        domain_obs = observations[domain_indices]
        
        push!(domains, (id = i, observations = domain_obs, 
                       bounds = (lat_min = domain_lat_min, lat_max = domain_lat_max)))
    end
    
    return domains
end

"""
    distributed_analysis(data_chunks::Vector{T}, analysis_func::Function,
                        config::ParallelConfig) where T

Distributed analysis for large-scale data processing.
"""
function distributed_analysis(data_chunks::Vector{T}, analysis_func::Function,
                             config::ParallelConfig) where T
    @info "Starting distributed analysis" n_chunks=length(data_chunks)
    
    if !config.enable_distributed || nprocs() == 1
        @warn "Distributed processing not available, falling back to local processing"
        return [analysis_func(chunk) for chunk in data_chunks]
    end
    
    # Create distributed tasks
    tasks = [DistributedTask{T}(i, chunk, :analysis_func, 
                               Dict(:config => config)) 
             for (i, chunk) in enumerate(data_chunks)]
    
    # Execute tasks on workers
    futures = Future[]
    worker_pids = workers()
    
    for (i, task) in enumerate(tasks)
        worker = worker_pids[mod(i - 1, length(worker_pids)) + 1]
        future = @spawnat worker execute_analysis_task(task.data, analysis_func)
        push!(futures, future)
    end
    
    # Collect results
    results = Vector{Any}(undef, length(futures))
    for (i, future) in enumerate(futures)
        try
            results[i] = fetch(future)
        catch e
            if config.fault_tolerance
                @warn "Distributed task $i failed" exception=e
                results[i] = nothing
            else
                rethrow(e)
            end
        end
    end
    
    return results
end

"""
    execute_analysis_task(data::T, analysis_func::Function) where T

Execute analysis task on worker process.
"""
function execute_analysis_task(data::T, analysis_func::Function) where T
    return analysis_func(data)
end

"""
    setup_distributed_workers(config::DistributedConfig)

Set up distributed worker processes.
"""
function setup_distributed_workers(config::DistributedConfig)
    if isempty(config.worker_hosts)
        # Local workers only
        n_workers = max(1, Sys.CPU_THREADS - 1)
        addprocs(n_workers)
        @info "Added local workers" n_workers=n_workers
    else
        # Remote workers
        for host in config.worker_hosts
            addprocs([(host, config.processes_per_host)])
        end
        @info "Added remote workers" hosts=config.worker_hosts processes_per_host=config.processes_per_host
    end
    
    # Load modules on all workers
    @everywhere using GSICoreAnalysis
    
    return workers()
end

"""
    cleanup_distributed_workers()

Clean up distributed worker processes.
"""
function cleanup_distributed_workers()
    if nprocs() > 1
        rmprocs(workers())
        @info "Removed distributed workers"
    end
end

# GPU acceleration functions (conditional compilation). The exported name
# `gpu_matrix_operations` is now defined in BOTH branches; previously it was
# exported (see the export list) but never defined anywhere, so accessing it
# after `using` the module raised UndefVarError.
if HAS_CUDA
    """
        gpu_accelerated_operations(data::Matrix{Float64}, config::ParallelConfig)

    GPU-accelerated matrix operations using CUDA: copies `data` to the device,
    computes `data * data'` there, and returns the product as a CPU `Matrix`.
    """
    function gpu_accelerated_operations(data::Matrix{Float64}, config::ParallelConfig)
        @info "Starting GPU-accelerated operations" size=size(data)

        # Host -> device transfer.
        gpu_data = CuArray(data)

        result = gpu_matrix_computation(gpu_data)

        # Device -> host transfer.
        cpu_result = Array(result)

        @info "GPU operations completed"
        return cpu_result
    end

    """
        gpu_matrix_computation(gpu_data::CuMatrix{Float64})

    Compute the Gram matrix `A * A'` on the GPU.
    """
    function gpu_matrix_computation(gpu_data::CuMatrix{Float64})
        transposed = gpu_data'
        result = gpu_data * transposed
        return result
    end

    """
        gpu_matrix_operations(data::Matrix{Float64})

    Exported convenience entry point: compute `data * data'` on the GPU and
    return the result on the CPU.
    """
    function gpu_matrix_operations(data::Matrix{Float64})
        return Array(gpu_matrix_computation(CuArray(data)))
    end
else
    # CPU fallbacks when CUDA is not available or not functional.
    function gpu_accelerated_operations(data::Matrix{Float64}, config::ParallelConfig)
        @warn "CUDA not available, falling back to CPU operations"
        return cpu_matrix_operations(data)
    end

    # Same computation as the GPU path: the Gram matrix `A * A'`.
    function cpu_matrix_operations(data::Matrix{Float64})
        transposed = data'
        result = data * transposed
        return result
    end

    # CPU implementation of the exported name so callers can use it uniformly.
    function gpu_matrix_operations(data::Matrix{Float64})
        return cpu_matrix_operations(data)
    end
end

"""
    fault_tolerant_processing(data::Vector{T}, process_func::Function,
                             config::ParallelConfig) where T

Process data with fault tolerance and automatic recovery.
"""
function fault_tolerant_processing(data::Vector{T}, process_func::Function,
                                  config::ParallelConfig) where T
    max_retries = 3
    results = Vector{Any}(undef, length(data))
    failed_indices = Set{Int}()
    
    @info "Starting fault-tolerant processing" n_items=length(data)
    
    for attempt in 1:max_retries
        current_failed = Set{Int}()
        
        # Process items (first attempt processes all, subsequent attempts only failed ones)
        items_to_process = attempt == 1 ? 1:length(data) : collect(failed_indices)
        
        @threads for i in items_to_process
            if !haskey(results, i) || results[i] === nothing
                try
                    results[i] = process_func(data[i])
                catch e
                    @warn "Processing failed for item $i on attempt $attempt" exception=e
                    push!(current_failed, i)
                    results[i] = nothing
                end
            end
        end
        
        failed_indices = current_failed
        
        if isempty(failed_indices)
            @info "All processing completed successfully on attempt $attempt"
            break
        else
            @info "Attempt $attempt completed" successful=count(!isnothing, results) failed=length(failed_indices)
        end
    end
    
    if !isempty(failed_indices)
        @warn "Some items failed after all retry attempts" n_failed=length(failed_indices)
    end
    
    return results
end

"""
    parallel_io_operations(file_paths::Vector{String}, io_func::Function,
                          config::ParallelConfig)

Perform parallel I/O operations with load balancing.
"""
function parallel_io_operations(file_paths::Vector{String}, io_func::Function,
                               config::ParallelConfig)
    @info "Starting parallel I/O operations" n_files=length(file_paths)
    
    if !config.enable_parallel_io
        return [io_func(path) for path in file_paths]
    end
    
    # Limit I/O parallelism to avoid filesystem overload
    io_threads = min(config.max_threads, 8)  # Reasonable I/O thread limit
    
    results = Vector{Any}(undef, length(file_paths))
    
    @threads for tid in 1:io_threads
        thread_start = ((tid - 1) * length(file_paths)) ÷ io_threads + 1
        thread_end = (tid * length(file_paths)) ÷ io_threads
        
        for i in thread_start:thread_end
            try
                results[i] = io_func(file_paths[i])
            catch e
                if config.fault_tolerance
                    @warn "I/O operation failed for file $(file_paths[i])" exception=e
                    results[i] = nothing
                else
                    rethrow(e)
                end
            end
        end
    end
    
    @info "Parallel I/O completed" successful=count(!isnothing, results)
    
    return results
end

"""
    load_balanced_processing(data::Vector{T}, process_func::Function,
                            load_balancer::LoadBalancer) where T

Process data with dynamic load balancing.
"""
function load_balanced_processing(data::Vector{T}, process_func::Function,
                                 load_balancer::LoadBalancer) where T
    results = Vector{Any}(undef, length(data))
    
    if load_balancer.strategy == :work_stealing
        return work_stealing_processing(data, process_func, load_balancer)
    else
        return dynamic_load_balancing(data, process_func, load_balancer)
    end
end

"""
    dynamic_load_balancing(data::Vector{T}, process_func::Function,
                          load_balancer::LoadBalancer) where T

Dynamic load balancing based on worker load.
"""
function dynamic_load_balancing(data::Vector{T}, process_func::Function,
                               load_balancer::LoadBalancer) where T
    results = Vector{Any}(undef, length(data))
    completed = Threads.Atomic{Int}(0)
    
    @threads for tid in 1:nthreads()
        while true
            # Get next item index atomically
            idx = Threads.atomic_add!(completed, 1) + 1
            
            if idx > length(data)
                break
            end
            
            # Process item
            results[idx] = process_func(data[idx])
            
            # Update load balancer
            worker_idx = findfirst(==(tid), load_balancer.workers)
            if worker_idx !== nothing
                load_balancer.load_counts[worker_idx] += 1
            end
        end
    end
    
    return results
end

"""
    work_stealing_processing(data::Vector{T}, process_func::Function,
                           load_balancer::LoadBalancer) where T

Work-stealing load balancing implementation.
"""
function work_stealing_processing(data::Vector{T}, process_func::Function,
                                 load_balancer::LoadBalancer) where T
    n_workers = length(load_balancer.workers)
    chunk_size = max(1, length(data) ÷ (n_workers * 4))  # Smaller chunks for work stealing
    
    # Create work queues for each worker
    work_queues = [Channel{Int}(chunk_size * 2) for _ in 1:n_workers]
    
    # Initialize work queues
    for (i, queue) in enumerate(work_queues)
        start_idx = (i - 1) * chunk_size + 1
        end_idx = min(i * chunk_size, length(data))
        
        for idx in start_idx:end_idx
            put!(queue, idx)
        end
    end
    
    results = Vector{Any}(undef, length(data))
    
    @threads for tid in 1:n_workers
        worker_queue = work_queues[tid]
        
        while true
            # Try to get work from own queue
            if isready(worker_queue)
                idx = take!(worker_queue)
                results[idx] = process_func(data[idx])
            else
                # Try to steal work from other queues
                stolen = false
                for other_queue in work_queues
                    if other_queue !== worker_queue && isready(other_queue)
                        try
                            idx = take!(other_queue)
                            results[idx] = process_func(data[idx])
                            stolen = true
                            break
                        catch
                            # Queue became empty, continue
                        end
                    end
                end
                
                if !stolen
                    # No work available, exit
                    break
                end
            end
        end
    end
    
    return results
end

end # module ParallelProcessing