"""
    Performance Regression Tests

Automated tests to detect performance regressions in GSICoreAnalysis.jl.
These tests compare performance against established baselines and flag
any significant degradation in performance.

# Usage

```julia
# Run regression tests
julia benchmarks/performance_regression_tests.jl

# Set new baseline
julia benchmarks/performance_regression_tests.jl --set-baseline

# Custom thresholds
julia benchmarks/performance_regression_tests.jl --time-threshold 0.1 --memory-threshold 0.2
```
"""

using BenchmarkTools
using Test
using JSON3
using Statistics
using LinearAlgebra  # provides `I`, used by the matrix-inversion regression test
using Printf
using Dates
using ArgParse

# Add package to path
push!(LOAD_PATH, dirname(@__DIR__))
using GSICoreAnalysis
using GSICoreAnalysis.Performance.Profiling

"""
    RegressionTest

Represents a performance regression test.
"""
struct RegressionTest
    name::String
    test_function::Function
    setup_function::Union{Nothing, Function}
    teardown_function::Union{Nothing, Function}
    baseline_key::String
    
    function RegressionTest(name::String, test_function::Function;
        setup_function = nothing,
        teardown_function = nothing,
        baseline_key = nothing
    )
        key = baseline_key === nothing ? name : baseline_key
        new(name, test_function, setup_function, teardown_function, key)
    end
end

"""
    RegressionTestResult

Results from a regression test.
"""
struct RegressionTestResult
    test_name::String
    current_time::Float64
    baseline_time::Float64
    current_memory::Int64
    baseline_memory::Int64
    time_ratio::Float64
    memory_ratio::Float64
    passed::Bool
    message::String
end

"""
    RegressionTestSuite

Collection of regression tests with configuration.
"""
struct RegressionTestSuite
    tests::Vector{RegressionTest}
    time_threshold::Float64  # Maximum allowed slowdown (e.g., 0.1 = 10% slower)
    memory_threshold::Float64  # Maximum allowed memory increase
    baseline_file::String
    
    function RegressionTestSuite(;
        time_threshold = 0.15,  # 15% slowdown threshold
        memory_threshold = 0.20,  # 20% memory increase threshold
        baseline_file = "benchmark_baseline.json"
    )
        tests = RegressionTest[]
        new(tests, time_threshold, memory_threshold, baseline_file)
    end
end

"""
    create_regression_test_suite()

Create the standard regression test suite.
"""
function create_regression_test_suite()
    suite = RegressionTestSuite()
    
    # Basic data structure tests
    push!(suite.tests, RegressionTest(
        "vector_creation_1000",
        () -> [randn() for _ in 1:1000]
    ))
    
    push!(suite.tests, RegressionTest(
        "vector_filtering_10000", 
        () -> begin
            data = [randn() for _ in 1:10000]
            filter(x -> x > 0, data)
        end
    ))
    
    # Observation processing tests
    push!(suite.tests, RegressionTest(
        "observation_creation_5000",
        () -> begin
            observations = []
            for i in 1:5000
                obs = (
                    id = i,
                    value = 285.0 + 10*randn(),
                    error = 0.5 + 0.3*rand(),
                    location = (lat = 40.0 + 20*randn(), lon = -75.0 + 30*randn()),
                    quality = 0.3 + 0.7*rand()
                )
                push!(observations, obs)
            end
            observations
        end
    ))
    
    # Quality control test
    push!(suite.tests, RegressionTest(
        "quality_control_10000",
        () -> begin
            observations = [(quality = rand(), value = 200 + 100*rand()) for _ in 1:10000]
            filter(obs -> obs.quality > 0.7 && obs.value > 250, observations)
        end
    ))
    
    # Spatial processing test
    push!(suite.tests, RegressionTest(
        "spatial_filtering_5000",
        () -> begin
            observations = [(
                location = (lat = 40 + 10*randn(), lon = -75 + 10*randn()),
                value = 285 + 10*randn()
            ) for _ in 1:5000]
            
            target = (lat = 40.0, lon = -75.0)
            filter(obs -> begin
                lat_diff = obs.location.lat - target.lat
                lon_diff = obs.location.lon - target.lon
                sqrt(lat_diff^2 + lon_diff^2) < 5.0
            end, observations)
        end
    ))
    
    # Matrix operations tests
    push!(suite.tests, RegressionTest(
        "matrix_multiplication_100x100",
        () -> begin
            A = randn(100, 100)
            B = randn(100, 100)
            A * B
        end
    ))
    
    push!(suite.tests, RegressionTest(
        "matrix_inversion_50x50", 
        () -> begin
            A = randn(50, 50)
            inv(A + I)  # Add identity for numerical stability
        end
    ))
    
    # Parallel processing test (if threads available)
    if Threads.nthreads() > 1
        push!(suite.tests, RegressionTest(
            "parallel_sum_50000",
            () -> begin
                data = randn(50000)
                partial_sums = zeros(Threads.nthreads())
                
                Threads.@threads for tid in 1:Threads.nthreads()
                    start_idx = ((tid - 1) * length(data)) ÷ Threads.nthreads() + 1
                    end_idx = (tid * length(data)) ÷ Threads.nthreads()
                    partial_sums[tid] = sum(@view data[start_idx:end_idx])
                end
                
                sum(partial_sums)
            end
        ))
    end
    
    # Memory-intensive tests
    push!(suite.tests, RegressionTest(
        "memory_allocation_large",
        () -> begin
            arrays = [Vector{Float64}(undef, 1000) for _ in 1:100]
            for arr in arrays
                arr .= randn(1000)
            end
            sum(sum(arr) for arr in arrays)
        end
    ))
    
    return suite
end

"""
    run_single_regression_test(test::RegressionTest, baseline::Dict{String, Any}, 
                              thresholds::NamedTuple)

Run a single regression test.
"""
function run_single_regression_test(test::RegressionTest, baseline::Dict{String, Any}, 
                                   thresholds::NamedTuple)
    @info "Running regression test: $(test.name)"
    
    # Setup if needed
    if test.setup_function !== nothing
        test.setup_function()
    end
    
    try
        # Warm up
        test.test_function()
        
        # Run benchmark
        result = @benchmark $(test.test_function)()
        
        # Extract metrics
        current_time = median(result.times) / 1e9  # Convert to seconds
        current_memory = median(result.memory)
        
        # Get baseline metrics
        baseline_data = get(baseline, test.baseline_key, nothing)
        
        if baseline_data === nothing
            @warn "No baseline data found for test: $(test.name)"
            return RegressionTestResult(
                test.name, current_time, 0.0, current_memory, 0,
                Inf, Inf, false, "No baseline data available"
            )
        end
        
        baseline_time = baseline_data["median_time_ns"] / 1e9
        baseline_memory = baseline_data["median_memory_bytes"]
        
        # Calculate ratios
        time_ratio = current_time / baseline_time
        memory_ratio = current_memory / baseline_memory
        
        # Check thresholds
        time_passed = (time_ratio - 1.0) <= thresholds.time_threshold
        memory_passed = (memory_ratio - 1.0) <= thresholds.memory_threshold
        passed = time_passed && memory_passed
        
        message = if passed
            "PASSED"
        else
            issues = []
            if !time_passed
                push!(issues, @sprintf("Time: %.1f%% slower", (time_ratio - 1.0) * 100))
            end
            if !memory_passed  
                push!(issues, @sprintf("Memory: %.1f%% more", (memory_ratio - 1.0) * 100))
            end
            "FAILED - " * join(issues, ", ")
        end
        
        return RegressionTestResult(
            test.name, current_time, baseline_time,
            current_memory, baseline_memory,
            time_ratio, memory_ratio, passed, message
        )
        
    finally
        # Teardown if needed
        if test.teardown_function !== nothing
            test.teardown_function()
        end
    end
end

"""
    run_regression_tests(suite::RegressionTestSuite)

Run all regression tests in the suite.
"""
function run_regression_tests(suite::RegressionTestSuite)
    @info "Starting performance regression tests..." n_tests=length(suite.tests)
    
    # Load baseline data
    if !isfile(suite.baseline_file)
        error("Baseline file not found: $(suite.baseline_file). Run with --set-baseline first.")
    end
    
    baseline_data = JSON3.read(suite.baseline_file, Dict{String, Any})
    thresholds = (time_threshold = suite.time_threshold, memory_threshold = suite.memory_threshold)
    
    # Run all tests
    results = RegressionTestResult[]
    
    for test in suite.tests
        try
            result = run_single_regression_test(test, baseline_data, thresholds)
            push!(results, result)
        catch e
            @error "Regression test failed with exception" test=test.name exception=e
            push!(results, RegressionTestResult(
                test.name, 0.0, 0.0, 0, 0, Inf, Inf, false, "Exception: $e"
            ))
        end
    end
    
    # Generate report
    generate_regression_report(results, suite)
    
    return results
end

"""
    generate_regression_report(results::Vector{RegressionTestResult}, suite::RegressionTestSuite)

Generate regression test report.
"""
function generate_regression_report(results::Vector{RegressionTestResult}, suite::RegressionTestSuite)
    passed_tests = count(r -> r.passed, results)
    total_tests = length(results)
    
    println("\nPerformance Regression Test Report")
    println("==================================")
    println("Baseline file: $(suite.baseline_file)")
    println("Time threshold: $(suite.time_threshold * 100)%")
    println("Memory threshold: $(suite.memory_threshold * 100)%")
    println("Tests passed: $passed_tests/$total_tests")
    println()
    
    # Summary statistics
    if !isempty(results)
        valid_results = filter(r -> r.baseline_time > 0, results)
        if !isempty(valid_results)
            time_ratios = [r.time_ratio for r in valid_results]
            memory_ratios = [r.memory_ratio for r in valid_results]
            
            println("Performance Summary:")
            println("-------------------")
            println(@sprintf("Mean time ratio: %.3f", mean(time_ratios)))
            println(@sprintf("Median time ratio: %.3f", median(time_ratios)))
            println(@sprintf("Max time ratio: %.3f", maximum(time_ratios)))
            println(@sprintf("Mean memory ratio: %.3f", mean(memory_ratios)))
            println(@sprintf("Max memory ratio: %.3f", maximum(memory_ratios)))
            println()
        end
    end
    
    # Detailed results
    println("Detailed Results:")
    println("-----------------")
    
    for result in results
        status_color = result.passed ? "✓" : "✗"
        
        if result.baseline_time > 0
            time_change = (result.time_ratio - 1.0) * 100
            memory_change = (result.memory_ratio - 1.0) * 100
            
            println(@sprintf("%s %-30s | Time: %7.2f ms → %7.2f ms (%+5.1f%%) | Memory: %8.1f KB → %8.1f KB (%+5.1f%%) | %s",
                status_color, result.test_name,
                result.baseline_time * 1000, result.current_time * 1000, time_change,
                result.baseline_memory / 1024, result.current_memory / 1024, memory_change,
                result.message
            ))
        else
            println(@sprintf("%s %-30s | %s", status_color, result.test_name, result.message))
        end
    end
    
    # Failed tests summary
    failed_tests = filter(r -> !r.passed, results)
    if !isempty(failed_tests)
        println()
        println("Failed Tests Summary:")
        println("--------------------")
        for result in failed_tests
            println("  $(result.test_name): $(result.message)")
        end
    end
    
    println()
    
    if passed_tests == total_tests
        @info "All regression tests passed!" 
    else
        @warn "Some regression tests failed" failed_count=(total_tests - passed_tests)
    end
end

"""
    set_baseline(suite::RegressionTestSuite)

Run all tests and set as new baseline.
"""
function set_baseline(suite::RegressionTestSuite)
    @info "Setting new performance baseline..." n_tests=length(suite.tests)
    
    baseline_data = Dict{String, Any}()
    
    for test in suite.tests
        @info "Running baseline test: $(test.name)"
        
        # Setup if needed
        if test.setup_function !== nothing
            test.setup_function()
        end
        
        try
            # Warm up
            test.test_function()
            
            # Run benchmark
            result = @benchmark $(test.test_function)()
            
            # Store baseline data
            baseline_data[test.baseline_key] = Dict(
                "median_time_ns" => median(result.times),
                "mean_time_ns" => mean(result.times),
                "min_time_ns" => minimum(result.times),
                "max_time_ns" => maximum(result.times),
                "median_memory_bytes" => median(result.memory),
                "allocs" => result.allocs,
                "samples" => length(result.times),
                "timestamp" => string(now())
            )
            
        finally
            # Teardown if needed
            if test.teardown_function !== nothing
                test.teardown_function()
            end
        end
    end
    
    # Add metadata
    baseline_data["_metadata"] = Dict(
        "julia_version" => string(VERSION),
        "cpu_threads" => Sys.CPU_THREADS,
        "available_threads" => Threads.nthreads(),
        "total_memory_gb" => round(Sys.total_memory() / 1024^3, digits=2),
        "creation_timestamp" => string(now()),
        "test_count" => length(suite.tests)
    )
    
    # Save baseline
    open(suite.baseline_file, "w") do io
        JSON3.pretty(io, baseline_data)
    end
    
    @info "Baseline saved" file=suite.baseline_file n_tests=length(suite.tests)
end

"""
    parse_command_line_regression()

Parse command line arguments for regression tests.
"""
function parse_command_line_regression()
    s = ArgParseSettings()
    
    @add_arg_table! s begin
        "--set-baseline"
            help = "Set new baseline instead of running tests"
            action = :store_true
        "--baseline-file"
            help = "Baseline file path"
            arg_type = String
            default = "benchmark_baseline.json"
        "--time-threshold"
            help = "Time regression threshold (fraction)"
            arg_type = Float64
            default = 0.15
        "--memory-threshold"
            help = "Memory regression threshold (fraction)"
            arg_type = Float64
            default = 0.20
    end
    
    return parse_args(s)
end

"""
    main_regression()

Main function for regression tests.
"""
function main_regression()
    args = parse_command_line_regression()
    
    # Create test suite
    suite = RegressionTestSuite(
        time_threshold = args["time-threshold"],
        memory_threshold = args["memory-threshold"],
        baseline_file = args["baseline-file"]
    )
    
    # Populate with standard tests
    test_suite = create_regression_test_suite()
    suite.tests = test_suite.tests
    
    if args["set-baseline"]
        set_baseline(suite)
    else
        results = run_regression_tests(suite)
        
        # Exit with error code if tests failed
        failed_count = count(r -> !r.passed, results)
        if failed_count > 0
            exit(1)
        end
    end
end

# Run main function only when this file is executed as a script,
# not when it is include()d from elsewhere.
if abspath(PROGRAM_FILE) == @__FILE__
    main_regression()
end