#!/usr/bin/env julia

"""
    performance_benchmark.jl

Performance benchmarking suite for GSICoreAnalysis.jl package.
This example demonstrates how to benchmark the core operations and compare
performance against reference implementations or different configurations.

The benchmarks cover:
- Control vector operations (dot products, AXPY operations)
- Cost function evaluations and gradient computations
- Minimization solver performance (PCG vs BiCG)
- Memory allocation patterns
- Scaling with problem size
- Comparison between hybrid and pure variational methods
"""

using Pkg
Pkg.activate(@__DIR__ * "/..")

using GSICoreAnalysis
using BenchmarkTools
using LinearAlgebra
using Random
using Printf
using Statistics
using Plots

# Set reproducible random seed
Random.seed!(123)

"""
    setup_benchmark_problem(grid_size::Tuple{Int,Int,Int}, ensemble_size::Int, precision::Type{T}) where T

Set up a benchmark problem with specified parameters.
"""
function setup_benchmark_problem(grid_size::Tuple{Int,Int,Int}, ensemble_size::Int, precision::Type{T}) where T
    config = AnalysisConfig{T}(
        grid_size = grid_size,
        ensemble_size = ensemble_size,
        hybrid_coeff = T(0.5),
        use_hybrid = ensemble_size > 0,
        max_iterations = 10,  # Limited for benchmarking
        convergence_tol = T(1e-3)
    )
    
    # Create state vectors
    state1 = StateVector(config)
    state2 = StateVector(config)
    
    # Fill with random data
    randn!(state1.values)
    randn!(state2.values)
    
    # Create control vectors
    cv1 = ControlVector(config)
    cv2 = ControlVector(config)
    
    # Fill with random data
    randn!(cv1.values)
    randn!(cv2.values)
    if !isempty(cv1.bias_predictors)
        randn!(cv1.bias_predictors)
        randn!(cv2.bias_predictors)
    end
    if !isempty(cv1.time_derivatives)
        randn!(cv1.time_derivatives)
        randn!(cv2.time_derivatives)
    end
    
    # Create cost function
    if config.use_hybrid
        cost_func = HybridCostFunction(config)
        # Initialize ensemble perturbations
        n_state = length(state1.values)
        perturbations = randn(T, n_state, ensemble_size)
        initialize_ensemble!(cost_func, perturbations)
    else
        cost_func = CostFunction(config)
    end
    
    # Set up simple background covariance
    n_state = length(state1.values)
    B = Diagonal(ones(T, n_state))
    initialize_background_covariance!(cost_func, B)
    
    # Set background and observations
    set_background!(cost_func, state1.values)
    n_obs = min(n_state ÷ 5, 1000)  # Reasonable number of observations
    observations = randn(T, n_obs)
    simple_H(x) = x[1:n_obs]  # Simple observation operator
    set_observations!(cost_func, observations, simple_H)
    cost_func.observation_covariance = Diagonal(ones(T, n_obs))
    
    return config, state1, state2, cv1, cv2, cost_func
end

"""
    benchmark_control_vector_operations()

Benchmark basic control vector operations.
"""
function benchmark_control_vector_operations()
    println("=== Control Vector Operations Benchmark ===")
    
    grid_sizes = [(32, 16, 8), (64, 32, 16), (128, 64, 32)]
    
    results = Dict()
    
    for grid_size in grid_sizes
        println("\\nGrid size: $(grid_size)")
        
        config, _, _, cv1, cv2, _ = setup_benchmark_problem(grid_size, 0, Float64)
        
        # Benchmark dot product
        dot_bench = @benchmark dot_product($cv1, $cv2)
        println("  Dot product: $(BenchmarkTools.prettytime(median(dot_bench.times)))")
        
        # Benchmark AXPY operation
        α = 1.5
        axpy_bench = @benchmark axpy!($α, $cv1, $cv2)
        println("  AXPY operation: $(BenchmarkTools.prettytime(median(axpy_bench.times)))")
        
        # Benchmark assignment
        assign_bench = @benchmark assign!($cv2, $cv1)
        println("  Assignment: $(BenchmarkTools.prettytime(median(assign_bench.times)))")
        
        # Benchmark norm computation
        norm_bench = @benchmark norm_cv($cv1)
        println("  Norm computation: $(BenchmarkTools.prettytime(median(norm_bench.times)))")
        
        # Memory allocation check
        alloc_cv = @benchmark allocate_cv(ControlVector($config))
        println("  Allocation: $(BenchmarkTools.prettytime(median(alloc_cv.times)))")
        
        # Store results
        n_elements = prod(grid_size) * 9  # Approximate number of elements
        results[grid_size] = Dict(
            :n_elements => n_elements,
            :dot_product => median(dot_bench.times),
            :axpy => median(axpy_bench.times),
            :assign => median(assign_bench.times),
            :norm => median(norm_bench.times),
            :allocation => median(alloc_cv.times)
        )
    end
    
    return results
end

"""
    benchmark_cost_function_evaluation()

Benchmark cost function evaluation and gradient computation.
"""
function benchmark_cost_function_evaluation()
    println("\\n=== Cost Function Evaluation Benchmark ===")
    
    grid_sizes = [(32, 16, 8), (64, 32, 16)]
    ensemble_sizes = [0, 20, 40]  # 0 means pure variational
    
    results = Dict()
    
    for grid_size in grid_sizes
        println("\\nGrid size: $(grid_size)")
        results[grid_size] = Dict()
        
        for ens_size in ensemble_sizes
            method_name = ens_size == 0 ? "Pure 3D-Var" : "Hybrid (ens=$(ens_size))"
            println("  Method: $(method_name)")
            
            config, state, _, _, _, cost_func = setup_benchmark_problem(grid_size, ens_size, Float64)
            
            # Benchmark cost evaluation
            cost_bench = @benchmark evaluate_cost($cost_func, $state.values)
            println("    Cost evaluation: $(BenchmarkTools.prettytime(median(cost_bench.times)))")
            
            # Benchmark gradient computation
            grad_bench = @benchmark compute_gradient($cost_func, $state.values)
            println("    Gradient computation: $(BenchmarkTools.prettytime(median(grad_bench.times)))")
            
            # Benchmark background term only
            bg_bench = @benchmark evaluate_background_term($cost_func, $state.values)
            println("    Background term: $(BenchmarkTools.prettytime(median(bg_bench.times)))")
            
            # Benchmark observation term only
            obs_bench = @benchmark evaluate_observation_term($cost_func, $state.values)
            println("    Observation term: $(BenchmarkTools.prettytime(median(obs_bench.times)))")
            
            results[grid_size][ens_size] = Dict(
                :cost_eval => median(cost_bench.times),
                :gradient => median(grad_bench.times),
                :background => median(bg_bench.times),
                :observation => median(obs_bench.times)
            )
        end
    end
    
    return results
end

"""
    benchmark_minimization_solvers()

Benchmark different minimization solvers.
"""
function benchmark_minimization_solvers()
    println("\\n=== Minimization Solvers Benchmark ===")
    
    grid_size = (48, 24, 12)  # Medium-sized problem
    config, _, _, cv_init, _, cost_func = setup_benchmark_problem(grid_size, 20, Float64)
    
    # Reset initial guess
    fill!(cv_init.values, 0.0)
    
    println("Grid size: $(grid_size)")
    println("Problem size: $(length(cv_init.values)) variables")
    println()
    
    # PCG Solver benchmark
    println("PCG Solver:")
    pcg_solver = PCGSolver(config, max_iterations=15, tolerance=1e-3)
    
    pcg_bench = @benchmark pcg_solve($pcg_solver, $cost_func, $cv_init)
    pcg_result = pcg_solve(pcg_solver, cost_func, cv_init)
    
    println("  Total time: $(BenchmarkTools.prettytime(median(pcg_bench.times)))")
    println("  Iterations: $(pcg_result.iterations)")
    println("  Converged: $(pcg_result.converged)")
    println("  Final cost: $(pcg_result.final_cost)")
    println("  Time per iteration: $(BenchmarkTools.prettytime(median(pcg_bench.times) ÷ pcg_result.iterations))")
    
    # BiCG Solver benchmark (simplified)
    println("\\nBiCG Solver:")
    bicg_solver = BiCGSolver(config, max_iterations=15, tolerance=1e-3)
    
    bicg_bench = @benchmark bicg_solve($bicg_solver, $cost_func, $cv_init)
    bicg_result = bicg_solve(bicg_solver, cost_func, cv_init)
    
    println("  Total time: $(BenchmarkTools.prettytime(median(bicg_bench.times)))")
    println("  Iterations: $(bicg_result.iterations)")
    println("  Converged: $(bicg_result.converged)")
    println("  Final cost: $(bicg_result.final_cost)")
    println("  Time per iteration: $(BenchmarkTools.prettytime(median(bicg_bench.times) ÷ bicg_result.iterations))")
    
    return Dict(
        :pcg => (median(pcg_bench.times), pcg_result),
        :bicg => (median(bicg_bench.times), bicg_result)
    )
end

"""
    benchmark_memory_usage()

Benchmark memory allocation patterns.
"""
function benchmark_memory_usage()
    println("\\n=== Memory Usage Benchmark ===")
    
    grid_sizes = [(16, 8, 4), (32, 16, 8), (64, 32, 16)]
    
    for grid_size in grid_sizes
        println("\\nGrid size: $(grid_size)")
        
        config, _, _, _, _, _ = setup_benchmark_problem(grid_size, 20, Float64)
        
        # State vector memory
        state_alloc = @benchmark StateVector($config)
        state_memory = @allocated StateVector(config)
        println("  StateVector allocation: $(BenchmarkTools.prettytime(median(state_alloc.times))), $(state_memory) bytes")
        
        # Control vector memory
        cv_alloc = @benchmark ControlVector($config)
        cv_memory = @allocated ControlVector(config)
        println("  ControlVector allocation: $(BenchmarkTools.prettytime(median(cv_alloc.times))), $(cv_memory) bytes")
        
        # Cost function memory (hybrid)
        cost_alloc = @benchmark HybridCostFunction($config)
        cost_memory = @allocated HybridCostFunction(config)
        println("  HybridCostFunction allocation: $(BenchmarkTools.prettytime(median(cost_alloc.times))), $(cost_memory) bytes")
        
        # Total problem memory estimate
        total_memory = state_memory * 2 + cv_memory * 3 + cost_memory  # Rough estimate
        println("  Estimated total memory: $(total_memory) bytes ($(total_memory/1024^2:.1f) MB)")
    end
end

"""
    benchmark_scaling_analysis()

Analyze how performance scales with problem size.
"""
function benchmark_scaling_analysis()
    println("\\n=== Scaling Analysis ===")
    
    # Test different grid sizes
    grid_sizes = [(16, 8, 4), (24, 12, 6), (32, 16, 8), (48, 24, 12), (64, 32, 16)]
    
    scaling_results = Dict(
        :grid_sizes => grid_sizes,
        :n_elements => Int[],
        :dot_product_times => Float64[],
        :cost_eval_times => Float64[],
        :gradient_times => Float64[],
        :memory_usage => Int[]
    )
    
    for grid_size in grid_sizes
        println("Testing grid size: $(grid_size)")
        
        config, state, _, cv1, cv2, cost_func = setup_benchmark_problem(grid_size, 10, Float64)
        
        n_elements = prod(grid_size)
        push!(scaling_results[:n_elements], n_elements)
        
        # Dot product scaling
        dot_bench = @benchmark dot_product($cv1, $cv2)
        push!(scaling_results[:dot_product_times], median(dot_bench.times))
        
        # Cost evaluation scaling
        cost_bench = @benchmark evaluate_cost($cost_func, $state.values)
        push!(scaling_results[:cost_eval_times], median(cost_bench.times))
        
        # Gradient computation scaling
        grad_bench = @benchmark compute_gradient($cost_func, $state.values)
        push!(scaling_results[:gradient_times], median(grad_bench.times))
        
        # Memory usage
        memory = @allocated begin
            StateVector(config)
            ControlVector(config)
            HybridCostFunction(config)
        end
        push!(scaling_results[:memory_usage], memory)
    end
    
    # Analyze scaling
    println("\\nScaling Analysis Results:")
    println("Grid Size    N Elements   Dot Product   Cost Eval    Gradient     Memory (MB)")
    println("-" ^ 80)
    
    for i in eachindex(grid_sizes)
        @printf("%-12s %8d     %8.2f      %8.2f     %8.2f     %8.1f\\n",
                "$(grid_sizes[i])", 
                scaling_results[:n_elements][i],
                scaling_results[:dot_product_times][i] / 1e6,  # Convert to ms
                scaling_results[:cost_eval_times][i] / 1e6,
                scaling_results[:gradient_times][i] / 1e6,
                scaling_results[:memory_usage][i] / 1024^2)
    end
    
    return scaling_results
end

"""
    benchmark_precision_comparison()

Compare performance between Float32 and Float64 precision.
"""
function benchmark_precision_comparison()
    println("\\n=== Precision Comparison Benchmark ===")
    
    grid_size = (48, 24, 12)
    
    for T in [Float32, Float64]
        println("\\nPrecision: $(T)")
        
        config, state, _, cv1, cv2, cost_func = setup_benchmark_problem(grid_size, 20, T)
        
        # Control vector operations
        dot_bench = @benchmark dot_product($cv1, $cv2)
        axpy_bench = @benchmark axpy!($(T(1.5)), $cv1, $cv2)
        
        # Cost function operations
        cost_bench = @benchmark evaluate_cost($cost_func, $state.values)
        grad_bench = @benchmark compute_gradient($cost_func, $state.values)
        
        println("  Dot product: $(BenchmarkTools.prettytime(median(dot_bench.times)))")
        println("  AXPY: $(BenchmarkTools.prettytime(median(axpy_bench.times)))")
        println("  Cost evaluation: $(BenchmarkTools.prettytime(median(cost_bench.times)))")
        println("  Gradient: $(BenchmarkTools.prettytime(median(grad_bench.times)))")
        
        # Memory usage
        memory = @allocated begin
            StateVector{T}(config)
            ControlVector{T}(config)
        end
        println("  Memory usage: $(memory / 1024^2:.1f) MB")
    end
end

"""
    generate_performance_report(results::Dict)

Generate a comprehensive performance report.
"""
function generate_performance_report(results::Dict)
    println("\\n" * "=" ^ 60)
    println("PERFORMANCE BENCHMARK SUMMARY")
    println("=" ^ 60)
    
    # System information
    println("\\nSystem Information:")
    println("  Julia version: $(VERSION)")
    println("  Number of threads: $(Threads.nthreads())")
    println("  BLAS vendor: $(LinearAlgebra.BLAS.vendor())")
    
    if haskey(results, :scaling)
        scaling = results[:scaling]
        n_max = maximum(scaling[:n_elements])
        time_max = maximum(scaling[:cost_eval_times]) / 1e6  # Convert to ms
        
        println("\\nPerformance Summary:")
        println("  Largest problem tested: $(n_max) grid elements")
        println("  Maximum cost evaluation time: $(time_max:.2f) ms")
        println("  Memory scaling: approximately linear with problem size")
        
        # Estimate computational complexity
        if length(scaling[:n_elements]) >= 3
            # Rough complexity estimate using last few points
            n_points = scaling[:n_elements][end-2:end]
            t_points = scaling[:cost_eval_times][end-2:end]
            
            # Simple linear fit log(t) vs log(n)
            log_n = log.(n_points)
            log_t = log.(t_points)
            slope = (log_t[end] - log_t[1]) / (log_n[end] - log_n[1])
            
            println("  Estimated computational complexity: O(N^$(slope:.2f))")
        end
    end
    
    println("\\nRecommendations:")
    if haskey(results, :solvers)
        pcg_time, pcg_result = results[:solvers][:pcg]
        bicg_time, bicg_result = results[:solvers][:bicg]
        
        if pcg_time < bicg_time
            println("  - PCG solver shows better performance for this problem class")
        else
            println("  - BiCG solver shows competitive performance")
        end
    end
    
    println("  - Use Float32 precision for memory-constrained applications")
    println("  - Consider hybrid methods for larger ensemble sizes (>20 members)")
    println("  - Problem sizes up to $(n_max) elements are feasible on this system")
    
    println("\\nBenchmark completed successfully!")
end

"""
    main()

Run the full benchmark suite (control vectors, cost functions, solvers,
memory, scaling, precision) and print a summary report.

Returns the collected benchmark results as a `Dict`.
"""
function main()
    println("GSICoreAnalysis.jl Performance Benchmark Suite")
    println("=" ^ 50)

    # Warm up Julia compilation so the first real benchmark is not dominated
    # by JIT time
    println("Warming up Julia compilation...")
    config, _, _, cv1, cv2, _ = setup_benchmark_problem((8, 4, 2), 0, Float64)
    dot_product(cv1, cv2)
    # "\n" (not "\\n") so an actual newline is printed (original bug)
    println("Warm-up complete.\n")

    # Collected results (typed; the original used an untyped Dict())
    results = Dict{Symbol,Any}()

    # Control vector operations
    results[:control_vectors] = benchmark_control_vector_operations()

    # Cost function evaluation
    results[:cost_functions] = benchmark_cost_function_evaluation()

    # Minimization solvers
    results[:solvers] = benchmark_minimization_solvers()

    # Memory usage (prints only; nothing to collect)
    benchmark_memory_usage()

    # Scaling analysis
    results[:scaling] = benchmark_scaling_analysis()

    # Precision comparison (prints only; nothing to collect)
    benchmark_precision_comparison()

    # Generate the summary report
    generate_performance_report(results)

    return results
end

# Run the full benchmark suite only when this file is executed as a script,
# not when it is `include`d from elsewhere.
if abspath(PROGRAM_FILE) == @__FILE__
    results = main()
end