"""
Performance Monitoring and Memory Management

Provides comprehensive performance monitoring, memory usage tracking, and
benchmarking utilities for the linear algebra components of the lid-driven
cavity solver.

# Key Features

- Memory usage monitoring during matrix assembly and solving
- Comprehensive benchmarking of different solver configurations
- Performance profiling with detailed timing breakdowns
- Memory optimization suggestions
- Comparative analysis tools
- Integration with system resource monitoring
"""

"""
    MemoryTracker

Tracks memory usage throughout solver execution.

# Fields
- `initial_memory::Int`: Memory usage at start (bytes)
- `peak_memory::Int`: Peak memory usage during execution (bytes)
- `current_memory::Int`: Current memory usage (bytes)
- `memory_samples::Vector{Tuple{Float64, Int}}`: Time-stamped memory samples
- `gc_count::Int`: Number of GC pauses observed (proxy for collection events)
- `start_time::Float64`: Monitoring start time (`time()` epoch seconds)
"""
mutable struct MemoryTracker
    initial_memory::Int
    peak_memory::Int
    current_memory::Int
    memory_samples::Vector{Tuple{Float64, Int}}
    gc_count::Int
    start_time::Float64

    function MemoryTracker()
        # `gc_num` lives in `Base`, not the `GC` module, and `Base.GC_Num`
        # has no `collect` field; `pause` counts stop-the-world GC pauses,
        # the closest available measure of "number of collections".
        gc_stats = Base.gc_num()
        initial_mem = get_memory_usage()

        new(initial_mem, initial_mem, initial_mem,
            [(0.0, initial_mem)], gc_stats.pause, time())
    end
end

"""
    get_memory_usage() -> Int

Return the system's current memory usage in bytes, computed as total
physical memory minus currently free memory.

Note: this measures whole-system usage, not just this process's footprint.
"""
function get_memory_usage()
    # Sys.total_memory()/Sys.free_memory() return UInt64; convert the
    # difference to Int so it matches the Int-typed MemoryTracker fields
    # and the documented return type.
    return Int(Sys.total_memory() - Sys.free_memory())
end

"""
    update_memory_tracker!(tracker::MemoryTracker)

Record a new time-stamped memory sample on `tracker`, updating the current
and peak usage and the GC pause count. Returns `tracker`.
"""
function update_memory_tracker!(tracker::MemoryTracker)
    current_mem = get_memory_usage()
    # Sample timestamps are relative to monitoring start
    current_time = time() - tracker.start_time

    tracker.current_memory = current_mem
    tracker.peak_memory = max(tracker.peak_memory, current_mem)

    push!(tracker.memory_samples, (current_time, current_mem))

    # Update GC count. `gc_num` lives in `Base` (not the `GC` module) and
    # `Base.GC_Num` has no `collect` field — the original `.collect` access
    # would throw. `pause` counts GC pauses, the closest available proxy.
    gc_stats = Base.gc_num()
    tracker.gc_count = gc_stats.pause

    return tracker
end

"""
    get_memory_summary(tracker::MemoryTracker) -> Dict

Summarize the tracker's state as a `Dict`: memory figures in megabytes
(rounded to one decimal place), plus GC pause count and sample count.
"""
function get_memory_summary(tracker::MemoryTracker)
    # Convert a byte count to rounded megabytes
    mb(bytes) = round(bytes / (1024^2), digits=1)

    return Dict(
        "initial_memory_mb" => mb(tracker.initial_memory),
        "peak_memory_mb" => mb(tracker.peak_memory),
        "current_memory_mb" => mb(tracker.current_memory),
        "memory_increase_mb" => mb(tracker.peak_memory - tracker.initial_memory),
        "gc_collections" => tracker.gc_count,
        "sample_count" => length(tracker.memory_samples),
    )
end

"""
    SolverBenchmarkResult

Benchmark result for a single solver run (one repetition of one
configuration at one grid spacing).

# Fields
- `solver_type::String`: Name of the solver configuration used
- `problem_size::Int`: Problem size (number of interior nodes)
- `matrix_properties::Dict`: Matrix characteristics as returned by
  `compute_matrix_properties` (or `"failed" => true` for failed runs)
- `solve_time::Float64`: Solution time in seconds (`Inf` for failed runs)
- `assembly_time::Float64`: Matrix assembly time in seconds (`Inf` for failed runs)
- `memory_usage::Dict`: Memory usage statistics, a "monitoring disabled"
  marker, or an error description for failed runs
- `iterations::Int`: Number of iterations (0 for failed runs)
- `residual_norm::Float64`: Final residual norm (`Inf` for failed runs)
- `converged::Bool`: Whether the solution converged
- `additional_metrics::Dict`: Extra metrics (repetition index, `h` value,
  FLOP estimate, optional solver details)
"""
struct SolverBenchmarkResult
    solver_type::String
    problem_size::Int
    matrix_properties::Dict{String, Any}
    solve_time::Float64
    assembly_time::Float64
    memory_usage::Dict{String, Any}
    iterations::Int
    residual_norm::Float64
    converged::Bool
    additional_metrics::Dict{String, Any}
end

"""
    comprehensive_solver_benchmark(domain::LShapeDomain{D}, h_values::Vector{Float64};
                                  solver_configs=nothing,
                                  repetitions=3,
                                  warmup=true,
                                  memory_monitoring=true) where D

Run comprehensive benchmarks across different problem sizes and solver configurations.

# Arguments
- `domain::LShapeDomain{D}`: Base domain geometry
- `h_values::Vector{Float64}`: Grid spacings to test
- `solver_configs`: Iterable of `name => options` pairs (default: auto-generated)
- `repetitions::Int`: Number of repetitions per configuration
- `warmup::Bool`: Whether to run a warmup solve before timing the first repetition
- `memory_monitoring::Bool`: Enable detailed memory monitoring

# Returns
- `Vector{SolverBenchmarkResult}`: One result per (h, configuration, repetition).
  Failed runs are recorded with `converged = false` and `Inf` timings rather
  than aborting the benchmark.
"""
function comprehensive_solver_benchmark(domain::LShapeDomain{D}, h_values::Vector{Float64};
                                       solver_configs=nothing,
                                       repetitions=3,
                                       warmup=true,
                                       memory_monitoring=true) where D

    if solver_configs === nothing
        solver_configs = generate_default_solver_configs()
    end

    println("Comprehensive Solver Benchmark ($(D)D)")
    println("="^60)
    println("Grid spacings: $(h_values)")
    println("Solver configurations: $(length(solver_configs))")
    println("Repetitions per config: $repetitions")
    println()

    results = SolverBenchmarkResult[]

    for h in h_values
        # Determine problem size for this grid spacing
        test_domain = scale_domain(domain, h)
        problem_size = sum(test_domain.interior_mask)

        println("Testing grid spacing h = $h (problem size: $problem_size)")
        println("-"^50)

        for (config_name, config_options) in solver_configs
            println("  Configuration: $config_name")

            # Concretely typed collection (was `[]`, i.e. Vector{Any})
            config_results = SolverBenchmarkResult[]

            for rep in 1:repetitions
                try
                    # Create memory tracker if enabled
                    memory_tracker = memory_monitoring ? MemoryTracker() : nothing

                    # Time matrix assembly
                    assembly_start = time()
                    if memory_tracker !== nothing
                        update_memory_tracker!(memory_tracker)
                    end

                    A, id_map, interior_indices = assemble_poisson_matrix(test_domain, h)
                    assembly_time = time() - assembly_start

                    if memory_tracker !== nothing
                        update_memory_tracker!(memory_tracker)
                    end

                    # Compute matrix properties
                    matrix_props = compute_matrix_properties(A)

                    # Create solver
                    solver = create_solver_from_config(A, id_map, interior_indices, config_options)

                    # Generate test RHS
                    rhs_field, solution_field = generate_test_problem(test_domain, h)

                    if memory_tracker !== nothing
                        update_memory_tracker!(memory_tracker)
                    end

                    # Warmup run if requested (copies keep rhs/solution pristine)
                    if warmup && rep == 1
                        _ = solve_poisson!(solver, copy(rhs_field), copy(solution_field))
                        GC.gc()  # Clean up after warmup
                    end

                    # Benchmark run
                    solve_start = time()
                    result = solve_poisson!(solver, rhs_field, copy(solution_field))
                    solve_time = time() - solve_start

                    if memory_tracker !== nothing
                        update_memory_tracker!(memory_tracker)
                        memory_summary = get_memory_summary(memory_tracker)
                    else
                        memory_summary = Dict("monitoring_disabled" => true)
                    end

                    # Additional metrics
                    additional = Dict{String, Any}()
                    additional["repetition"] = rep
                    additional["h_value"] = h
                    additional["flops_estimate"] = estimate_flops(A, result.iterations)

                    # `result` is accessed via dot syntax above, so it is a
                    # struct/NamedTuple; `haskey` would throw MethodError on a
                    # plain struct, `hasproperty` handles both.
                    if hasproperty(result, :gcr_result)
                        additional["gcr_details"] = result.gcr_result
                    end

                    # Create benchmark result
                    bench_result = SolverBenchmarkResult(
                        config_name,
                        problem_size,
                        Dict(string(k) => v for (k, v) in pairs(matrix_props)),
                        solve_time,
                        assembly_time,
                        memory_summary,
                        result.iterations,
                        result.residual_norm,
                        result.converged,
                        additional
                    )

                    push!(config_results, bench_result)

                    if rep == 1  # Print first result details
                        status = result.converged ? "✓" : "✗"
                        println("    $status $(result.iterations) iters, $(round(solve_time, digits=3))s, $(round(result.residual_norm, sigdigits=2)) residual")
                    end

                catch e
                    # Keep the backtrace so failures are debuggable
                    @error "Benchmark failed for $config_name (rep $rep)" exception=(e, catch_backtrace())

                    # Record a failed result instead of aborting the sweep
                    failed_result = SolverBenchmarkResult(
                        config_name, problem_size, Dict("failed" => true),
                        Inf, Inf, Dict("error" => string(e)),
                        0, Inf, false, Dict("repetition" => rep, "error" => e)
                    )
                    push!(config_results, failed_result)
                end
            end

            append!(results, config_results)
        end

        println()
        GC.gc()  # Clean up between problem sizes
    end

    return results
end

"""
    generate_default_solver_configs() -> Vector{Pair{String, Dict{Symbol, Any}}}

Build the default list of solver configurations for benchmarking.

Always includes the Julia GCR variants, the fallback CG solver, and the
adaptive solver; PETSc-backed variants are appended only when `HAS_PETSC`
is true.
"""
function generate_default_solver_configs()
    # Core configurations available in every build
    configs = Pair{String, Dict{Symbol, Any}}[
        "GCR+Diagonal" => Dict(:solver => :julia, :preconditioner => :diagonal),
        "GCR+ILU" => Dict(:solver => :julia, :preconditioner => :ilu),
        "Fallback+CG" => Dict(:solver => :fallback),
        "Adaptive" => Dict(:solver => :adaptive),
    ]

    # PETSc configurations only make sense when the backend is present
    if HAS_PETSC
        push!(configs, "PETSc+GAMG" => Dict(:solver => :petsc, :preconditioner => :gamg))
        push!(configs, "PETSc+ILU" => Dict(:solver => :petsc, :preconditioner => :ilu))
    end

    return configs
end

"""
    create_solver_from_config(matrix, id_map, interior_indices, config) -> PoissonSolver

Instantiate a Poisson solver from a configuration dictionary.

Recognized `config[:solver]` values: `:adaptive`, `:petsc` (currently routed
to GCR until the PETSc backend is wired in), `:fallback`, and anything else
(default `:julia`), which yields a GCR solver.
"""
function create_solver_from_config(matrix, id_map, interior_indices, config)
    # Hoist common option lookups; `get` on the config has no side effects.
    backend = get(config, :solver, :julia)
    tol = get(config, :tol, 1e-8)
    maxiter = get(config, :maxiter, 1000)
    precond = get(config, :preconditioner, :diagonal)

    if backend == :adaptive
        return AdaptivePoissonSolver(matrix, id_map, interior_indices,
                                     backend = get(config, :backend, :julia),
                                     tol = tol,
                                     maxiter = maxiter)
    elseif backend == :petsc && HAS_PETSC
        # Placeholder: fall back to a GCR solver until PETSc support lands.
        return GCRPoissonSolver(matrix, id_map, interior_indices,
                                preconditioner = precond)
    elseif backend == :fallback
        return FallbackPoissonSolver(matrix, id_map, interior_indices)
    else
        return GCRPoissonSolver(matrix, id_map, interior_indices,
                                preconditioner = precond,
                                tol = tol,
                                maxiter = maxiter)
    end
end

"""
    generate_test_problem(domain::LShapeDomain{D}, h::Float64) where D

Generate a test right-hand side and a zero initial guess for benchmarking.

Uses the manufactured solution `u = ∏_d sin(π x_d)` on a grid whose origin
is assumed to be at `(-1, …, -1)` with spacing `h` — TODO confirm against the
domain construction. Only interior nodes receive a nonzero RHS.
"""
function generate_test_problem(domain::LShapeDomain{D}, h::Float64) where D
    grid_dims = size(domain.fluid_mask)
    rhs_field = zeros(grid_dims)
    solution_field = zeros(grid_dims)

    if D == 2
        for cell in CartesianIndices(rhs_field)
            domain.interior_mask[cell] || continue
            x = -1.0 + (cell[1] - 1) * h
            y = -1.0 + (cell[2] - 1) * h
            # Manufactured solution u = sin(πx)sin(πy) ⇒ f = 2π² sin(πx)sin(πy)
            rhs_field[cell] = 2π^2 * sin(π*x) * sin(π*y)
        end
    else  # 3D analogue of the same manufactured solution
        for cell in CartesianIndices(rhs_field)
            domain.interior_mask[cell] || continue
            x = -1.0 + (cell[1] - 1) * h
            y = -1.0 + (cell[2] - 1) * h
            z = -1.0 + (cell[3] - 1) * h
            rhs_field[cell] = 3π^2 * sin(π*x) * sin(π*y) * sin(π*z)
        end
    end

    return rhs_field, solution_field
end

"""
    scale_domain(domain::LShapeDomain{D}, h::Float64) -> LShapeDomain{D} where D

Create a domain sized for grid spacing `h`.

Currently a pass-through placeholder used for benchmarking: the original
domain is returned unchanged. A full implementation would regenerate the
domain masks at resolution `h`.
"""
scale_domain(domain::LShapeDomain{D}, h::Float64) where {D} = domain

"""
    estimate_flops(matrix::SparseMatrixCSC, iterations::Int) -> Float64

Roughly estimate the floating-point operations of an iterative solve:
one sparse matrix-vector product (2·nnz flops) plus vector updates
(≈6·n flops, algorithm-dependent) per iteration.
"""
function estimate_flops(matrix::SparseMatrixCSC, iterations::Int)
    spmv_flops = 2 * nnz(matrix)        # one mat-vec per iteration
    vector_flops = 6 * size(matrix, 1)  # axpy/dot work per iteration (rough)

    return Float64((spmv_flops + vector_flops) * iterations)
end

"""
    analyze_benchmark_results(results::Vector{SolverBenchmarkResult}) -> Dict

Analyze benchmark results and provide performance insights.

Returns a `Dict` with:
- `"solver_types"`, `"total_tests"`, `"successful_tests"`
- `"performance_statistics"`: per-solver timing/iteration statistics
  (only `"success_rate"` and `"total_tests"` when nothing converged)
- `"scaling_analysis"`: per-solver power-law fit `time ∝ size^α` via least
  squares in log-log space; present only for solvers with ≥2 converged runs
  spanning ≥2 distinct problem sizes
"""
function analyze_benchmark_results(results::Vector{SolverBenchmarkResult})
    analysis = Dict{String, Any}()

    # Group results by solver type (get! avoids the manual haskey dance)
    solver_groups = Dict{String, Vector{SolverBenchmarkResult}}()
    for result in results
        push!(get!(solver_groups, result.solver_type, SolverBenchmarkResult[]), result)
    end

    analysis["solver_types"] = collect(keys(solver_groups))
    analysis["total_tests"] = length(results)
    analysis["successful_tests"] = count(r -> r.converged, results)

    # Performance statistics by solver
    perf_stats = Dict{String, Dict{String, Float64}}()
    for (solver_name, solver_results) in solver_groups
        converged_results = filter(r -> r.converged, solver_results)

        if isempty(converged_results)
            perf_stats[solver_name] = Dict(
                "success_rate" => 0.0,
                "total_tests" => length(solver_results)
            )
            continue
        end

        solve_times = [r.solve_time for r in converged_results]
        iterations = [r.iterations for r in converged_results]

        perf_stats[solver_name] = Dict(
            "mean_solve_time" => mean(solve_times),
            "median_solve_time" => median(solve_times),
            "min_solve_time" => minimum(solve_times),
            "max_solve_time" => maximum(solve_times),
            "mean_iterations" => mean(iterations),
            "success_rate" => length(converged_results) / length(solver_results),
            "total_tests" => length(solver_results)
        )
    end

    analysis["performance_statistics"] = perf_stats

    # Scaling analysis. Reuses the per-solver groups instead of re-filtering
    # the full result list; an unused `problem_sizes` computation from the
    # original has been removed.
    scaling_analysis = Dict{String, Any}()
    for (solver_name, solver_results) in solver_groups
        converged = filter(r -> r.converged, solver_results)
        length(converged) > 1 || continue

        sizes = [r.problem_size for r in converged]
        length(unique(sizes)) > 1 || continue

        times = [r.solve_time for r in converged]

        # Fit time ∝ size^α: ordinary least-squares slope in log-log space.
        log_sizes = log.(sizes)
        log_times = log.(times)
        n_pts = length(log_sizes)
        α_estimate = (n_pts * sum(log_sizes .* log_times) - sum(log_sizes) * sum(log_times)) /
                     (n_pts * sum(log_sizes .^ 2) - sum(log_sizes)^2)

        scaling_analysis[solver_name] = Dict(
            "scaling_exponent" => α_estimate,
            "data_points" => n_pts
        )
    end

    analysis["scaling_analysis"] = scaling_analysis

    return analysis
end

"""
    print_benchmark_summary(results::Vector{SolverBenchmarkResult})

Print a formatted summary of benchmark results to `stdout`.

Handles an empty result vector gracefully (previously the success rate was
computed as `0/0` and printed `NaN%`).
"""
function print_benchmark_summary(results::Vector{SolverBenchmarkResult})
    analysis = analyze_benchmark_results(results)

    total = analysis["total_tests"]
    successful = analysis["successful_tests"]
    # Guard against 0/0 -> NaN when no tests were run
    success_pct = total == 0 ? 0.0 : 100 * successful / total

    println("Benchmark Results Summary")
    println("="^60)
    println("Total tests: $total")
    println("Successful: $successful")
    println("Success rate: $(round(success_pct, digits=1))%")
    println()

    println("Performance by Solver Type:")
    println("-"^40)

    perf_stats = analysis["performance_statistics"]
    for (solver_name, stats) in perf_stats
        println("$solver_name:")
        if haskey(stats, "mean_solve_time")
            println("  Success rate: $(round(100 * stats["success_rate"], digits=1))%")
            println("  Mean solve time: $(round(stats["mean_solve_time"], digits=4))s")
            println("  Mean iterations: $(round(stats["mean_iterations"], digits=1))")
            println("  Time range: $(round(stats["min_solve_time"], digits=4)) - $(round(stats["max_solve_time"], digits=4))s")
        else
            # Stats dict only carries success_rate/total_tests when nothing converged
            println("  Success rate: 0% ($(stats["total_tests"]) tests failed)")
        end
        println()
    end

    if !isempty(analysis["scaling_analysis"])
        println("Scaling Analysis:")
        println("-"^40)
        for (solver_name, scaling) in analysis["scaling_analysis"]
            α = scaling["scaling_exponent"]
            println("$solver_name: time ∝ size^$(round(α, digits=2)) ($(scaling["data_points"]) points)")
        end
        println()
    end
end

"""
    export_benchmark_results(results::Vector{SolverBenchmarkResult}, filename::String)

Export benchmark results to `filename` as a plain-text summary.

Structured formats (JSON, CSV) are a possible future extension; for now the
file contains a Markdown-style header, an overall summary, and one section
per individual test.
"""
function export_benchmark_results(results::Vector{SolverBenchmarkResult}, filename::String)
    analysis = analyze_benchmark_results(results)

    open(filename, "w") do io
        println(io, "# Solver Benchmark Results")
        println(io, "# Generated: $(Dates.now())")
        println(io)

        println(io, "## Summary")
        println(io, "Total tests: $(analysis["total_tests"])")
        println(io, "Successful tests: $(analysis["successful_tests"])")
        println(io)

        println(io, "## Detailed Results")
        for (i, r) in enumerate(results)
            println(io, "Test $i:")
            # Emit each field as an indented "label: value" line
            for (label, value) in (
                "Solver" => r.solver_type,
                "Problem size" => r.problem_size,
                "Converged" => r.converged,
                "Solve time" => r.solve_time,
                "Iterations" => r.iterations,
                "Residual" => r.residual_norm,
            )
                println(io, "  $label: $value")
            end
            println(io)
        end
    end

    @info "Benchmark results exported to $filename"
end