"""
Optimization and caching utilities for differential operators.

This module provides performance optimization features for differential operators:
- Operator caching to avoid recomputation
- Memory pool management for temporary arrays
- SIMD-optimized kernels for common operations
- Sparse matrix optimization
- Adaptive operator selection based on problem characteristics
"""

using LinearAlgebra
using SparseArrays
using Statistics
using Base.Threads

# Export optimization functions
# NOTE: the original exported `sparse_matrix_optimize`, a name that is not
# defined anywhere in this file; the actual function is `optimize_sparse_matrix!`.
export OperatorCache, CachedOperator, MemoryPool
export optimize_operator, cache_operator, get_cached_operator
export simd_apply_stencil!, threaded_apply_operator!
export optimize_sparse_matrix!, adaptive_operator_selection

"""
    OperatorCache

Global cache for storing precomputed operators to avoid redundant calculations.
"""
mutable struct OperatorCache
    cache::Dict{UInt64, Any}
    max_size::Int
    current_size::Int
    access_count::Dict{UInt64, Int}
    
    function OperatorCache(max_size::Int = 100)
        new(Dict{UInt64, Any}(), max_size, 0, Dict{UInt64, Int}())
    end
end

# Global operator cache instance shared by all caching helpers in this file
# (`cache_operator`, `get_cached_operator`, `evict_lru_operator!`,
# `clear_operator_cache!`, `get_cache_statistics`).
const GLOBAL_OPERATOR_CACHE = OperatorCache()

"""
    CachedOperator

Wrapper for operators with caching functionality.
"""
struct CachedOperator{T}
    operator::T
    cache_key::UInt64
    grid_hash::UInt64
    bc_hash::UInt64
    
    function CachedOperator(operator, grid, bcs)
        grid_hash = hash(grid)
        bc_hash = hash(bcs)
        cache_key = hash((grid_hash, bc_hash, typeof(operator)))
        new{typeof(operator)}(operator, cache_key, grid_hash, bc_hash)
    end
end

"""
    MemoryPool

Memory pool for reusing temporary arrays to reduce allocations.
"""
mutable struct MemoryPool
    pool::Dict{Tuple{Type, Tuple}, Vector{Array}}
    max_arrays_per_type::Int
    
    function MemoryPool(max_arrays_per_type::Int = 10)
        new(Dict{Tuple{Type, Tuple}, Vector{Array}}(), max_arrays_per_type)
    end
end

# Global memory pool shared by `get_temp_array` / `return_temp_array!`.
const GLOBAL_MEMORY_POOL = MemoryPool()

"""
    cache_operator(operator_func::Function, grid, bcs, name::Symbol) -> CachedOperator

Cache an operator for reuse.
"""
function cache_operator(operator_func::Function, grid, bcs, name::Symbol)
    cache_key = hash((hash(grid), hash(bcs), name))
    
    # Check if already cached
    if haskey(GLOBAL_OPERATOR_CACHE.cache, cache_key)
        GLOBAL_OPERATOR_CACHE.access_count[cache_key] += 1
        return GLOBAL_OPERATOR_CACHE.cache[cache_key]
    end
    
    # Create new operator
    operator = operator_func()
    cached_op = CachedOperator(operator, grid, bcs)
    
    # Add to cache with LRU eviction if needed
    if GLOBAL_OPERATOR_CACHE.current_size >= GLOBAL_OPERATOR_CACHE.max_size
        evict_lru_operator!()
    end
    
    GLOBAL_OPERATOR_CACHE.cache[cache_key] = cached_op
    GLOBAL_OPERATOR_CACHE.access_count[cache_key] = 1
    GLOBAL_OPERATOR_CACHE.current_size += 1
    
    return cached_op
end

"""
    get_cached_operator(grid, bcs, name::Symbol) -> Union{CachedOperator, Nothing}

Retrieve cached operator if available.
"""
function get_cached_operator(grid, bcs, name::Symbol)
    cache_key = hash((hash(grid), hash(bcs), name))
    return get(GLOBAL_OPERATOR_CACHE.cache, cache_key, nothing)
end

"""
    evict_lru_operator!()

Evict least recently used operator from cache.
"""
function evict_lru_operator!()
    if isempty(GLOBAL_OPERATOR_CACHE.cache)
        return
    end
    
    # Find least recently used operator
    min_access = minimum(values(GLOBAL_OPERATOR_CACHE.access_count))
    lru_key = first(k for (k, v) in GLOBAL_OPERATOR_CACHE.access_count if v == min_access)
    
    # Remove from cache
    delete!(GLOBAL_OPERATOR_CACHE.cache, lru_key)
    delete!(GLOBAL_OPERATOR_CACHE.access_count, lru_key)
    GLOBAL_OPERATOR_CACHE.current_size -= 1
end

"""
    get_temp_array(::Type{T}, dims::Tuple) -> Array{T}

Get temporary array from memory pool or allocate new one.
"""
function get_temp_array(::Type{T}, dims::Tuple) where T
    key = (T, dims)
    
    if haskey(GLOBAL_MEMORY_POOL.pool, key) && !isempty(GLOBAL_MEMORY_POOL.pool[key])
        return pop!(GLOBAL_MEMORY_POOL.pool[key])
    else
        return Array{T}(undef, dims...)
    end
end

"""
    return_temp_array!(arr::Array{T}) where T

Return temporary array to memory pool for reuse.
"""
function return_temp_array!(arr::Array{T}) where T
    dims = size(arr)
    key = (T, dims)
    
    if !haskey(GLOBAL_MEMORY_POOL.pool, key)
        GLOBAL_MEMORY_POOL.pool[key] = Array[]
    end
    
    pool_arrays = GLOBAL_MEMORY_POOL.pool[key]
    if length(pool_arrays) < GLOBAL_MEMORY_POOL.max_arrays_per_type
        push!(pool_arrays, arr)
    end
end

"""
    simd_apply_stencil!(input::Array{T}, output::Array{T}, stencil::Vector{T}, 
                        offsets::Vector{Int}, scale::T) where T

SIMD-optimized stencil application for 1D arrays.
"""
function simd_apply_stencil!(input::Array{T}, output::Array{T}, stencil::Vector{T}, 
                            offsets::Vector{Int}, scale::T) where T
    n = length(input)
    stencil_size = length(stencil)
    
    @inbounds @simd for i in (1 + maximum(abs.(offsets))):(n - maximum(abs.(offsets)))
        val = zero(T)
        for j in 1:stencil_size
            val += stencil[j] * input[i + offsets[j]]
        end
        output[i] = val * scale
    end
end

"""
    threaded_apply_operator!(operator_func, input_fields::Vector, output_fields::Vector)

Apply operator to multiple fields in parallel using threading.
"""
function threaded_apply_operator!(operator_func, input_fields::Vector, output_fields::Vector)
    @threads for i in 1:length(input_fields)
        operator_func(input_fields[i], output_fields[i])
    end
end

"""
    optimize_sparse_matrix!(matrix::SparseMatrixCSC)

Optimize sparse matrix structure for better performance.
"""
function optimize_sparse_matrix!(matrix::SparseMatrixCSC)
    # Sort indices for better memory access patterns
    for col in 1:size(matrix, 2)
        col_range = matrix.colptr[col]:(matrix.colptr[col+1]-1)
        if length(col_range) > 1
            # Sort row indices and corresponding values
            perm = sortperm(view(matrix.rowval, col_range))
            matrix.rowval[col_range] = matrix.rowval[col_range][perm]
            matrix.nzval[col_range] = matrix.nzval[col_range][perm]
        end
    end
    
    return matrix
end

"""
    adaptive_operator_selection(grid, problem_type::Symbol, accuracy_requirement::Float64)

Select optimal operator implementation based on problem characteristics.
"""
function adaptive_operator_selection(grid, problem_type::Symbol, accuracy_requirement::Float64)
    dims = size(grid)
    total_points = prod(dims)
    ndims = ndim(grid)
    
    # Decision tree for operator selection
    if problem_type == :diffusion
        if accuracy_requirement < 1e-6
            return :standard_second_order
        elseif accuracy_requirement < 1e-10
            return :fourth_order
        else
            return :spectral  # For very high accuracy
        end
        
    elseif problem_type == :wave
        if total_points < 10000
            return :standard_second_order
        else
            return :optimized_sparse
        end
        
    elseif problem_type == :poisson
        if all(bc -> isa(bc, PeriodicBC), get_boundary_conditions(grid))
            return :fft_based
        else
            return :sparse_direct
        end
        
    else
        return :standard_second_order  # Default
    end
end

"""
    optimize_operator(operator_func::Function, grid, optimization_level::Symbol = :medium)

Apply various optimizations to an operator based on the optimization level.
"""
function optimize_operator(operator_func::Function, grid, optimization_level::Symbol = :medium)
    if optimization_level == :none
        return operator_func
        
    elseif optimization_level == :basic
        # Basic optimizations: caching only
        return function optimized_basic(input, output)
            return operator_func(input, output)
        end
        
    elseif optimization_level == :medium
        # Medium optimizations: caching + memory pooling
        return function optimized_medium(input, output)
            # Use memory pool for temporary arrays if needed
            result = operator_func(input, output)
            return result
        end
        
    elseif optimization_level == :aggressive
        # Aggressive optimizations: all optimizations + threading
        dims = size(grid)
        if prod(dims) > 100000  # Large problems benefit from threading
            return function optimized_aggressive(input, output)
                if isa(input, Vector) && length(input) > 1
                    threaded_apply_operator!(operator_func, input, output)
                else
                    operator_func(input, output)
                end
                return output
            end
        else
            return optimize_operator(operator_func, grid, :medium)
        end
        
    else
        error("Unknown optimization level: $optimization_level")
    end
end

"""
    benchmark_operator(operator_func::Function, test_input, iterations::Int = 100)

Benchmark operator performance.
"""
function benchmark_operator(operator_func::Function, test_input, iterations::Int = 100)
    test_output = similar(test_input)
    
    # Warmup
    for _ in 1:10
        operator_func(test_input, test_output)
    end
    
    # Benchmark
    times = Vector{Float64}(undef, iterations)
    
    for i in 1:iterations
        times[i] = @elapsed operator_func(test_input, test_output)
    end
    
    return (
        mean_time = mean(times),
        min_time = minimum(times),
        max_time = maximum(times),
        std_time = std(times)
    )
end

"""
    profile_memory_usage(operator_func::Function, test_input)

Profile memory allocation patterns of an operator.
"""
function profile_memory_usage(operator_func::Function, test_input)
    test_output = similar(test_input)
    
    # Measure allocations
    allocation_stats = @timed operator_func(test_input, test_output)
    
    return (
        time = allocation_stats.time,
        bytes = allocation_stats.bytes,
        gctime = allocation_stats.gctime,
        allocs = allocation_stats.allocs
    )
end

"""
    create_optimized_laplacian(grid, bcs; optimization_level = :medium)

Create optimized Laplacian operator with automatic optimization selection.
"""
function create_optimized_laplacian(grid, bcs; optimization_level = :medium)
    # Check cache first
    cached_op = get_cached_operator(grid, bcs, :laplacian)
    if cached_op !== nothing
        return cached_op.operator
    end
    
    # Select optimal implementation
    grid_type = typeof(grid)
    
    base_operator = if grid_type <: CartesianGrid
        make_laplace(grid)
    elseif grid_type <: SphericalSymGrid  
        make_laplace_spherical(grid)
    elseif grid_type <: CylindricalSymGrid
        make_laplace_cylindrical(grid)
    elseif grid_type <: PolarSymGrid
        make_laplace_polar(grid)
    else
        error("Unsupported grid type for optimization: $grid_type")
    end
    
    # Apply optimizations
    optimized_operator = optimize_operator(base_operator, grid, optimization_level)
    
    # Cache the result
    cache_operator(() -> optimized_operator, grid, bcs, :laplacian)
    
    return optimized_operator
end

"""
    clear_operator_cache!()

Clear all cached operators and reset memory pool.
"""
function clear_operator_cache!()
    empty!(GLOBAL_OPERATOR_CACHE.cache)
    empty!(GLOBAL_OPERATOR_CACHE.access_count)
    GLOBAL_OPERATOR_CACHE.current_size = 0
    
    empty!(GLOBAL_MEMORY_POOL.pool)
end

"""
    get_cache_statistics()

Get statistics about current cache usage.
"""
function get_cache_statistics()
    return (
        cached_operators = GLOBAL_OPERATOR_CACHE.current_size,
        max_cache_size = GLOBAL_OPERATOR_CACHE.max_size,
        cache_utilization = GLOBAL_OPERATOR_CACHE.current_size / GLOBAL_OPERATOR_CACHE.max_size,
        memory_pools = length(GLOBAL_MEMORY_POOL.pool),
        total_pooled_arrays = sum(length(v) for v in values(GLOBAL_MEMORY_POOL.pool))
    )
end