# Performance Optimization and Scaling Analysis for NSEMSolver.jl
# Tools for analyzing parallel performance, identifying bottlenecks, and optimizing scaling

using Dates
using Printf
using Statistics

"""
    ParallelPerformanceMetrics(; kwargs...)

Comprehensive performance metrics for parallel execution.

Every field has a zero/empty default; any field may be overridden by passing
it as a keyword argument. Unknown keywords are silently ignored (preserving
the original permissive behavior).
"""
struct ParallelPerformanceMetrics
    # Timing data
    total_time::Float64
    compute_time::Float64
    communication_time::Float64
    io_time::Float64
    load_balance_time::Float64
    
    # Scaling metrics
    strong_scaling_efficiency::Float64
    weak_scaling_efficiency::Float64
    parallel_efficiency::Float64
    
    # Load balancing
    load_imbalance::Float64
    work_distribution::Vector{Float64}
    
    # Communication analysis
    communication_volume::Float64      # Total bytes communicated
    message_count::Int                # Number of messages
    avg_message_size::Float64         # Average message size
    communication_overhead::Float64    # Communication time / total time
    
    # Memory usage
    peak_memory_per_process::Float64   # Peak memory usage per process (MB)
    memory_efficiency::Float64         # Fraction of useful memory
    
    # Computational metrics
    flops_per_second::Float64         # Floating point operations per second
    bandwidth_utilization::Float64    # Memory bandwidth utilization
    cache_efficiency::Float64         # Cache hit rate (estimated)
    
    # Solver-specific metrics
    linear_solver_time::Float64
    linear_solver_iterations::Int
    nonlinear_iterations::Int
    
    # Process and system information
    num_processes::Int
    processes_per_node::Int
    num_threads_per_process::Int
    processor_name::String
    
    # Problem size information
    local_dofs::Int
    global_dofs::Int
    elements_per_process::Int
    
    function ParallelPerformanceMetrics(;kwargs...)
        # Initialize with defaults and override with provided values.
        defaults = Dict{Symbol, Any}(
            :total_time => 0.0, :compute_time => 0.0, :communication_time => 0.0,
            :io_time => 0.0, :load_balance_time => 0.0, :strong_scaling_efficiency => 0.0,
            :weak_scaling_efficiency => 0.0, :parallel_efficiency => 0.0,
            :load_imbalance => 0.0, :work_distribution => Float64[],
            :communication_volume => 0.0, :message_count => 0, :avg_message_size => 0.0,
            :communication_overhead => 0.0, :peak_memory_per_process => 0.0,
            :memory_efficiency => 0.0, :flops_per_second => 0.0,
            :bandwidth_utilization => 0.0, :cache_efficiency => 0.0,
            :linear_solver_time => 0.0, :linear_solver_iterations => 0,
            :nonlinear_iterations => 0, :num_processes => 1, :processes_per_node => 1,
            :num_threads_per_process => 1, :processor_name => "unknown",
            :local_dofs => 0, :global_dofs => 0, :elements_per_process => 0
        )
        
        # Merge defaults with provided kwargs; unknown keys are ignored.
        for (key, value) in kwargs
            if haskey(defaults, key)
                defaults[key] = value
            end
        end
        
        # BUG FIX: `new(values(defaults)...)` splatted the Dict's values in
        # undefined hash-iteration order, not field-declaration order, so
        # values landed in the wrong fields (or threw a conversion error).
        # Splat explicitly in the declared field order instead.
        return new((defaults[name] for name in fieldnames(ParallelPerformanceMetrics))...)
    end
end

"""
    ScalingStudy

Manages scaling studies and performance analysis across multiple processor counts.

Results are keyed by processor count; `processor_counts`/`problem_sizes`
record the order in which results were added (note: a processor count added
to both the strong and weak studies appears twice in `processor_counts`).
"""
mutable struct ScalingStudy
    study_name::String
    problem_description::String
    
    # Study parameters
    processor_counts::Vector{Int}   # processor counts in insertion order (may contain duplicates)
    problem_sizes::Vector{Int}      # weak-scaling problem sizes, parallel to weak results
    
    # Results storage
    strong_scaling_data::Dict{Int, ParallelPerformanceMetrics}  # keyed by processor count
    weak_scaling_data::Dict{Int, ParallelPerformanceMetrics}    # keyed by processor count
    
    # Baseline reference
    serial_time::Float64                                        # 1-process wall time; 0.0 means "not recorded"
    baseline_metrics::Union{ParallelPerformanceMetrics, Nothing}
    
    # Analysis results
    strong_scaling_fit::Union{Function, Nothing}  # fitted scaling model, if computed
    weak_scaling_fit::Union{Function, Nothing}
    optimal_processor_count::Int                  # 0 until predicted
    
    # Construct an empty study with the given name and optional description.
    function ScalingStudy(name::String, description::String="")
        new(name, description, Int[], Int[], 
            Dict{Int, ParallelPerformanceMetrics}(),
            Dict{Int, ParallelPerformanceMetrics}(),
            0.0, nothing, nothing, nothing, 0)
    end
end

"""
    collect_performance_metrics(pdomain::ParallelDomain, timer::ParallelTimer,
                               comm_profiler::CommunicationProfiler,
                               solver_stats::Dict) -> ParallelPerformanceMetrics

Collect comprehensive performance metrics from a parallel run.

Gathers timing and communication data from the supplied profilers, derives
load-balance, memory, and message statistics, and packs everything into a
`ParallelPerformanceMetrics`. Scaling efficiencies are left at zero here;
they are filled in later by the scaling analysis.
"""
function collect_performance_metrics(pdomain::ParallelDomain, timer::ParallelTimer,
                                   comm_profiler::CommunicationProfiler,
                                   solver_stats::Dict=Dict())
    
    ctx = pdomain.mpi_ctx
    ldom = pdomain.local_domain
    
    # Raw timing and communication summaries from the profilers.
    timings = get_timing_summary(timer)
    comm = get_communication_statistics(comm_profiler, ctx)
    
    # Small helper: pull the global mean for a timing category, defaulting to 0.
    fetch_mean(key) = get(timings, key, Dict("global_mean" => 0.0))["global_mean"]
    
    t_total = fetch_mean("total_time")
    t_compute = fetch_mean("compute")
    t_io = fetch_mean("io")
    t_comm = get(comm, "avg_comm_time", 0.0)
    
    # Fraction of wall time spent communicating.
    overhead = t_total > 0 ? t_comm / t_total : 0.0
    
    # Load-balance analysis across ranks.
    work = pdomain.global_work_distribution
    imbalance = compute_load_imbalance(work)
    
    # DOF counts (simplified estimate from the local tensor-product grid).
    n_local = ldom.n_block^ldom.dim * ldom.n^ldom.dim
    n_global = all_reduce_scalar(n_local, +, ctx)
    
    # Rough memory footprint: 8 bytes per double, 4 variables (u, v, w, p).
    mem_mb = (n_local * (8 * 4)) / (1024^2)
    
    # Message statistics for this rank.
    bytes_moved = get(comm, "local_bytes_sent", 0) + get(comm, "local_bytes_received", 0)
    n_msgs = get(comm, "local_messages", 0)
    mean_msg = n_msgs > 0 ? bytes_moved / n_msgs : 0.0
    
    # Solver statistics (all zero when the caller supplied no stats dict).
    t_linsolve = get(solver_stats, "linear_solve_time", 0.0)
    n_lin_iters = get(solver_stats, "linear_iterations", 0)
    n_nonlin_iters = get(solver_stats, "nonlinear_iterations", 0)
    
    host = get_processor_name()
    
    return ParallelPerformanceMetrics(
        total_time=t_total,
        compute_time=t_compute,
        communication_time=t_comm,
        io_time=t_io,
        load_balance_time=0.0,  # Would be computed if dynamic load balancing is used
        strong_scaling_efficiency=0.0,  # Computed later in scaling analysis
        weak_scaling_efficiency=0.0,
        parallel_efficiency=0.0,
        load_imbalance=imbalance,
        work_distribution=work,
        communication_volume=bytes_moved,
        message_count=n_msgs,
        avg_message_size=mean_msg,
        communication_overhead=overhead,
        peak_memory_per_process=mem_mb,
        memory_efficiency=0.8,  # Estimated
        flops_per_second=0.0,   # Would require FLOP counting
        bandwidth_utilization=0.0,
        cache_efficiency=0.0,
        linear_solver_time=t_linsolve,
        linear_solver_iterations=n_lin_iters,
        nonlinear_iterations=n_nonlin_iters,
        num_processes=ctx.size,
        processes_per_node=1,   # Would detect actual value
        num_threads_per_process=Threads.nthreads(),
        processor_name=host,
        local_dofs=n_local,
        global_dofs=n_global,
        elements_per_process=ldom.n_block^ldom.dim
    )
end

"""
    add_strong_scaling_result!(study::ScalingStudy, num_procs::Int, 
                              metrics::ParallelPerformanceMetrics)

Add strong scaling result to study.

If `study.serial_time` has been recorded, the stored metrics are replaced
with a copy whose `strong_scaling_efficiency` and `parallel_efficiency`
reflect the measured speedup relative to that serial baseline.
"""
function add_strong_scaling_result!(study::ScalingStudy, num_procs::Int,
                                  metrics::ParallelPerformanceMetrics)
    study.strong_scaling_data[num_procs] = metrics
    push!(study.processor_counts, num_procs)
    
    # Update scaling efficiency if we have a serial baseline.
    if study.serial_time > 0 && metrics.total_time > 0
        speedup = study.serial_time / metrics.total_time  # measured speedup
        efficiency = speedup / num_procs                  # relative to ideal linear speedup
        
        # The metrics struct is immutable, so rebuild it with the efficiency
        # fields overridden. Copying all ~30 fields by hand (as before) was
        # error-prone; splat the existing field values and merge the overrides.
        names = fieldnames(ParallelPerformanceMetrics)
        base = NamedTuple{names}(ntuple(i -> getfield(metrics, i),
                                        fieldcount(ParallelPerformanceMetrics)))
        overrides = (strong_scaling_efficiency = efficiency,
                     parallel_efficiency = efficiency)
        study.strong_scaling_data[num_procs] =
            ParallelPerformanceMetrics(; merge(base, overrides)...)
    end
    return nothing
end

"""
    add_weak_scaling_result!(study::ScalingStudy, num_procs::Int, problem_size::Int,
                            metrics::ParallelPerformanceMetrics)

Add weak scaling result to study.

If a single-process weak-scaling result exists, the stored metrics are
replaced with a copy whose `weak_scaling_efficiency` and
`parallel_efficiency` are the ratio of the 1-process time to this run's
time (so the 1-process entry itself gets efficiency 1.0).
"""
function add_weak_scaling_result!(study::ScalingStudy, num_procs::Int, problem_size::Int,
                                 metrics::ParallelPerformanceMetrics)
    study.weak_scaling_data[num_procs] = metrics
    push!(study.processor_counts, num_procs)
    push!(study.problem_sizes, problem_size)
    
    # Weak scaling efficiency is measured relative to the single-processor case.
    if haskey(study.weak_scaling_data, 1) && metrics.total_time > 0
        baseline_time = study.weak_scaling_data[1].total_time
        efficiency = baseline_time / metrics.total_time
        
        # Rebuild the immutable metrics with the efficiency fields overridden
        # (replaces the previous error-prone hand-copy of every field).
        names = fieldnames(ParallelPerformanceMetrics)
        base = NamedTuple{names}(ntuple(i -> getfield(metrics, i),
                                        fieldcount(ParallelPerformanceMetrics)))
        overrides = (weak_scaling_efficiency = efficiency,
                     parallel_efficiency = efficiency)
        study.weak_scaling_data[num_procs] =
            ParallelPerformanceMetrics(; merge(base, overrides)...)
    end
    return nothing
end

"""
    analyze_scaling_bottlenecks(study::ScalingStudy) -> Dict

Analyze scaling bottlenecks and identify performance limitations.

Inspects the strong-scaling results for growing communication cost, load
imbalance, memory pressure, and efficiency drop-off, and returns a Dict of
findings plus a `"recommendations"` list. Returns an empty Dict when no
strong-scaling data is available; trend detection requires at least three
processor counts.
"""
function analyze_scaling_bottlenecks(study::ScalingStudy)
    report = Dict{String, Any}()
    
    isempty(study.strong_scaling_data) && return report
    
    data = study.strong_scaling_data
    procs = sort(collect(keys(data)))
    comm_times = [data[p].communication_time for p in procs]
    effs = [data[p].strong_scaling_efficiency for p in procs]
    
    # Trend analysis needs at least three data points.
    if length(procs) >= 3
        # How fast does communication time grow with processor count?
        comm_slope = (comm_times[end] - comm_times[1]) / (procs[end] - procs[1])
        if comm_slope > 0.01  # Communication time growing significantly
            report["primary_bottleneck"] = "communication"
            report["communication_growth_rate"] = comm_slope
        end
        
        # Load imbalance averaged over all runs.
        mean_imbalance = mean(data[p].load_imbalance for p in procs)
        if mean_imbalance > 0.2  # 20% load imbalance
            report["load_imbalance"] = mean_imbalance
            # Severe imbalance (>40%) outranks a communication bottleneck.
            if !haskey(report, "primary_bottleneck") || mean_imbalance > 0.4
                report["primary_bottleneck"] = "load_imbalance"
            end
        end
        
        # Memory pressure: any run above ~1 GB per process.
        if any(p -> data[p].peak_memory_per_process > 1000, procs)
            report["memory_pressure"] = true
        end
        
        # Efficiency drop-off analysis.
        worst_eff = minimum(effs)
        if worst_eff < 0.5  # Less than 50% efficiency somewhere
            report["poor_efficiency"] = worst_eff
            # Largest processor count that still achieved >= 80% efficiency.
            still_good = [p for (i, p) in enumerate(procs) if effs[i] >= 0.8]
            if !isempty(still_good)
                report["efficiency_cutoff"] = maximum(still_good)
            end
        end
    end
    
    # Turn findings into actionable recommendations.
    advice = String[]
    
    if get(report, "primary_bottleneck", "") == "communication"
        push!(advice, "Consider reducing communication frequency or improving overlap")
        push!(advice, "Evaluate domain decomposition strategy for better locality")
    end
    
    if get(report, "load_imbalance", 0.0) > 0.2
        push!(advice, "Implement dynamic load balancing")
        push!(advice, "Consider alternative domain decomposition strategies")
    end
    
    if get(report, "memory_pressure", false)
        push!(advice, "Consider out-of-core algorithms or problem size reduction")
    end
    
    if get(report, "poor_efficiency", 1.0) < 0.5
        push!(advice, "Increase problem size for better efficiency (weak scaling)")
        push!(advice, "Consider algorithmic improvements to reduce communication")
    end
    
    report["recommendations"] = advice
    
    return report
end

"""
    predict_optimal_processor_count(study::ScalingStudy) -> Int

Predict optimal processor count based on scaling data.

Returns the largest tested processor count that keeps strong-scaling
efficiency at or above 80%. With fewer than three data points, returns the
largest tested count; with no data at all, returns 1.
"""
function predict_optimal_processor_count(study::ScalingStudy)
    # BUG FIX: the original fell through to `maximum(keys(...))` on an empty
    # Dict, which throws; an empty study now yields the trivial answer 1.
    isempty(study.strong_scaling_data) && return 1
    
    if length(study.strong_scaling_data) < 3
        return maximum(keys(study.strong_scaling_data))
    end
    
    proc_counts = sort(collect(keys(study.strong_scaling_data)))
    efficiencies = [study.strong_scaling_data[p].strong_scaling_efficiency for p in proc_counts]
    
    # Find the processor count just before efficiency drops below 80%.
    good_efficiency_threshold = 0.8
    
    for (i, eff) in enumerate(efficiencies)
        if eff < good_efficiency_threshold
            return i > 1 ? proc_counts[i-1] : proc_counts[1]
        end
    end
    
    # All tested counts were efficient; recommend the largest tested count.
    return proc_counts[end]
end

"""
    generate_performance_report(study::ScalingStudy, output_file::String="")

Generate comprehensive performance analysis report.

Writes the report to `stdout` when `output_file` is empty, otherwise to the
named file. Includes strong/weak scaling tables, bottleneck analysis, and a
recommended processor count.
"""
function generate_performance_report(study::ScalingStudy, output_file::String="")
    
    # All output is written to `io` (stdout or the report file). The previous
    # `print_or_write` helper that special-cased stdout was redundant, since
    # `do_report` is already called with `stdout` in that case.
    function do_report(io)
        println(io, "="^80)
        println(io, "PARALLEL PERFORMANCE ANALYSIS REPORT")
        println(io, "="^80)
        println(io, "Study: $(study.study_name)")
        println(io, "Description: $(study.problem_description)")
        # BUG FIX: `now()` requires the Dates stdlib, which the file never
        # imported (see the `using Dates` added at the top of the file).
        println(io, "Generated: $(now())")
        println(io, "")
        
        # --- Strong scaling table ---
        if !isempty(study.strong_scaling_data)
            println(io, "STRONG SCALING ANALYSIS")
            println(io, "-"^40)
            
            proc_counts = sort(collect(keys(study.strong_scaling_data)))
            
            @printf(io, "%-12s %-12s %-12s %-12s %-12s %-12s\n",
                   "Processors", "Time (s)", "Speedup", "Efficiency", "Comm Time", "Load Imbal")
            println(io, "-"^80)
            
            # Fall back to the smallest tested run when no serial baseline exists.
            baseline_time = study.serial_time > 0 ? study.serial_time :
                            study.strong_scaling_data[proc_counts[1]].total_time
            
            for p in proc_counts
                metrics = study.strong_scaling_data[p]
                speedup = baseline_time / metrics.total_time
                efficiency = speedup / p
                
                @printf(io, "%-12d %-12.4f %-12.2f %-12.2f%% %-12.4f %-12.2f%%\n",
                       p, metrics.total_time, speedup, efficiency*100,
                       metrics.communication_time, metrics.load_imbalance*100)
            end
            
            println(io, "")
        end
        
        # --- Weak scaling table ---
        if !isempty(study.weak_scaling_data)
            println(io, "WEAK SCALING ANALYSIS")
            println(io, "-"^40)
            
            proc_counts = sort(collect(keys(study.weak_scaling_data)))
            
            @printf(io, "%-12s %-12s %-12s %-12s %-12s\n",
                   "Processors", "Problem Size", "Time (s)", "Efficiency", "Comm Overhead")
            println(io, "-"^60)
            
            for p in proc_counts
                metrics = study.weak_scaling_data[p]
                # Problem sizes are stored positionally; guard against a
                # mismatch between recorded sizes and processor counts.
                idx = findfirst(==(p), proc_counts)
                prob_size = (idx !== nothing && idx <= length(study.problem_sizes)) ?
                            study.problem_sizes[idx] : 0
                
                @printf(io, "%-12d %-12d %-12.4f %-12.2f%% %-12.2f%%\n",
                       p, prob_size, metrics.total_time, 
                       metrics.weak_scaling_efficiency*100,
                       metrics.communication_overhead*100)
            end
            
            println(io, "")
        end
        
        # --- Bottleneck analysis ---
        bottlenecks = analyze_scaling_bottlenecks(study)
        if !isempty(bottlenecks)
            println(io, "BOTTLENECK ANALYSIS")
            println(io, "-"^40)
            
            if haskey(bottlenecks, "primary_bottleneck")
                println(io, "Primary Bottleneck: $(bottlenecks["primary_bottleneck"])")
            end
            
            if haskey(bottlenecks, "load_imbalance")
                @printf(io, "Average Load Imbalance: %.2f%%\n", bottlenecks["load_imbalance"]*100)
            end
            
            if haskey(bottlenecks, "efficiency_cutoff")
                println(io, "Efficiency drops below 80% after $(bottlenecks["efficiency_cutoff"]) processors")
            end
            
            if haskey(bottlenecks, "recommendations")
                println(io, "\nRecommendations:")
                for rec in bottlenecks["recommendations"]
                    println(io, "  • $rec")
                end
            end
            
            println(io, "")
        end
        
        # --- Recommended processor count ---
        optimal_procs = predict_optimal_processor_count(study)
        println(io, "PERFORMANCE RECOMMENDATIONS")
        println(io, "-"^40)
        println(io, "Recommended processor count: $optimal_procs")
        
        if haskey(study.strong_scaling_data, optimal_procs)
            metrics = study.strong_scaling_data[optimal_procs]
            @printf(io, "Expected efficiency at recommended count: %.2f%%\n", 
                   metrics.strong_scaling_efficiency*100)
        end
        
        println(io, "")
        println(io, "="^80)
    end
    
    if output_file == ""
        do_report(stdout)
    else
        open(output_file, "w") do io
            do_report(io)
        end
        @info "Performance report written to $output_file"
    end
    return nothing
end

"""
    benchmark_parallel_scaling(solver_function::Function, problem_configs::Vector,
                              processor_counts::Vector{Int}; kwargs...) -> ScalingStudy

Run comprehensive parallel scaling benchmark.

# Keywords
- `study_name`: label for the resulting `ScalingStudy`.
- `study_description`: free-form description stored on the study.
- `num_runs`: repetitions per configuration; timings are averaged.

NOTE(review): the solver invocation is still a placeholder (`sleep`);
`solver_function` and `problem_configs` are accepted for the eventual real
implementation. Component times use fixed mock fractions (80/15/5%).
"""
function benchmark_parallel_scaling(solver_function::Function, problem_configs::Vector,
                                   processor_counts::Vector{Int}; 
                                   study_name::String="Scaling Study",
                                   study_description::String="",
                                   num_runs::Int=3)
    
    study = ScalingStudy(study_name, study_description)
    
    # --- Strong scaling: fixed problem size, varying processor count ---
    if length(problem_configs) >= 1
        @info "Running strong scaling study with $(length(processor_counts)) processor counts"
        
        for num_procs in processor_counts
            @info "  Testing with $num_procs processors..."
            
            # Run multiple times and average the wall time.
            run_times = Float64[]
            for run in 1:num_runs
                start_time = wtime()
                sleep(0.1)  # Placeholder for actual solver call
                push!(run_times, wtime() - start_time)
            end
            
            # BUG FIX: previously `mean(run_times)` was computed but then the
            # metrics of the *first* run were recorded; record metrics built
            # from the averaged timing instead, as the comment intended.
            avg_time = mean(run_times)
            avg_metrics = ParallelPerformanceMetrics(
                total_time=avg_time,
                compute_time=avg_time * 0.8,
                communication_time=avg_time * 0.15,
                io_time=avg_time * 0.05,
                num_processes=num_procs,
                global_dofs=100000,  # Mock problem size
                local_dofs=100000 ÷ num_procs
            )
            
            add_strong_scaling_result!(study, num_procs, avg_metrics)
        end
    end
    
    # --- Weak scaling: problem size grows with processor count ---
    if length(problem_configs) > 1
        @info "Running weak scaling study with $(length(problem_configs)) problem sizes"
        
        for (i, config) in enumerate(problem_configs)
            if i <= length(processor_counts)
                num_procs = processor_counts[i]
                @info "  Testing problem size $i with $num_procs processors..."
                
                start_time = wtime()
                sleep(0.1)  # Placeholder
                elapsed_time = wtime() - start_time
                
                mock_metrics = ParallelPerformanceMetrics(
                    total_time=elapsed_time,
                    compute_time=elapsed_time * 0.8,
                    communication_time=elapsed_time * 0.15,
                    io_time=elapsed_time * 0.05,
                    num_processes=num_procs,
                    global_dofs=50000 * i,  # Scaling problem size
                    local_dofs=50000
                )
                
                add_weak_scaling_result!(study, num_procs, 50000 * i, mock_metrics)
            end
        end
    end
    
    # Analyze the collected results.
    study.optimal_processor_count = predict_optimal_processor_count(study)
    
    return study
end

# Export public interface
export ParallelPerformanceMetrics, ScalingStudy
export collect_performance_metrics, add_strong_scaling_result!, add_weak_scaling_result!
export analyze_scaling_bottlenecks, predict_optimal_processor_count
export generate_performance_report, benchmark_parallel_scaling