#!/bin/bash
# ==============================================================================
# GSICoreAnalysis.jl - Test & Benchmark Runner Script
# ==============================================================================
# This script runs comprehensive tests and benchmarks for the GSI package

# Abort on the first unhandled command failure. NOTE: commands inside
# 'if'/'!' conditions (used heavily below) are exempt from -e by design.
set -e

# ANSI color codes; rendered via 'echo -e' throughout the script
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m'

# Configuration: all paths are resolved relative to this script's own location
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
LOG_DIR="$SCRIPT_DIR/logs/tests"
BENCHMARK_DIR="$SCRIPT_DIR/benchmarks"
TEST_DIR="$SCRIPT_DIR/test"

# Test logs accumulate here; created up front so later redirections cannot fail
mkdir -p "$LOG_DIR"

configure_eccodes() {
    # Prepend the local ecCodes definition overrides (when present) to the
    # search paths honored by the ecCodes library, while preserving any
    # paths the caller already configured via ECCODES_HOME,
    # ECCODES_DEFINITION_PATH, or ECCODES_SAMPLES_PATH.
    local overrides="$SCRIPT_DIR/deps/eccodes_overrides"
    [ -d "$overrides" ] || return 0

    # Definitions search order: overlay first, then the ECCODES_HOME tree,
    # then whatever ECCODES_DEFINITION_PATH was already set to.
    local def_path="$overrides"
    if [ -n "${ECCODES_HOME:-}" ] && [ -d "${ECCODES_HOME}/share/eccodes/definitions" ]; then
        def_path="$def_path:${ECCODES_HOME}/share/eccodes/definitions"
    fi
    if [ -n "${ECCODES_DEFINITION_PATH:-}" ]; then
        def_path="$def_path:${ECCODES_DEFINITION_PATH}"
    fi
    export ECCODES_DEFINITION_PATH="$def_path"

    # Samples search order: ECCODES_HOME tree first, then any pre-existing
    # path. Only export when at least one candidate was found.
    local sample_path=""
    if [ -n "${ECCODES_HOME:-}" ] && [ -d "${ECCODES_HOME}/share/eccodes/samples" ]; then
        sample_path="${ECCODES_HOME}/share/eccodes/samples"
    fi
    if [ -n "${ECCODES_SAMPLES_PATH:-}" ]; then
        sample_path="${sample_path:+$sample_path:}${ECCODES_SAMPLES_PATH}"
    fi
    if [ -n "$sample_path" ]; then
        export ECCODES_SAMPLES_PATH="$sample_path"
    fi
}

configure_eccodes

check_log_for_anomalies() {
    # Scan a scenario's run log for signs of a bad run: a missing
    # assimilation summary line, NESDIS sub-centre warnings, or explicit
    # ERROR entries. Prints one message per anomaly found.
    # Returns 0 when the log looks clean, 1 otherwise.
    local log_path="$1"
    local case_name="$2"
    local status=0

    if ! grep -q "Done. Assimilated" "$log_path"; then
        echo -e "${RED}❌ [$case_name] assimilation summary missing in ${log_path}${NC}"
        status=1
    fi

    if grep -qi "Cannot find sub-centre" "$log_path"; then
        echo -e "${RED}❌ [$case_name] NESDIS sub-centre warning detected in ${log_path}${NC}"
        status=1
    fi

    if grep -qi "^ERROR" "$log_path"; then
        echo -e "${RED}❌ [$case_name] error entries found in ${log_path}${NC}"
        status=1
    fi

    return $status
}

create_manifest() {
    # Condense a scenario's analysis_metrics.json into a small manifest
    # JSON used for baseline comparisons.
    #   $1 - scenario name (used in messages only)
    #   $2 - output directory expected to contain analysis_metrics.json
    #   $3 - destination path for the manifest
    # Returns 0 on success; 1 if the metrics file is missing, no python
    # interpreter is available, or extraction fails.
    local scenario="$1"
    local output_dir="$2"
    local manifest_path="$3"
    local analysis_json="${output_dir}/analysis_metrics.json"

    if [[ ! -f "$analysis_json" ]]; then
        echo -e "${RED}❌ [$scenario] analysis_metrics.json not found at ${analysis_json}${NC}"
        return 1
    fi

    # Prefer 'python' (original behavior) but fall back to 'python3' on
    # systems that no longer ship an unversioned interpreter.
    local python_bin
    python_bin="$(command -v python || command -v python3 || true)"
    if [[ -z "$python_bin" ]]; then
        echo -e "${RED}❌ [$scenario] no python interpreter found on PATH${NC}"
        return 1
    fi

    if ! "$python_bin" - "$analysis_json" "$manifest_path" <<'PY'
import json, sys

analysis_path, manifest_path = sys.argv[1:3]
with open(analysis_path, "r", encoding="utf-8") as fh:
    data = json.load(fh)

manifest = {
    "analysis_mode": data.get("analysis_mode"),
    "scenario": data.get("scenario"),
    "wind": data.get("wind_observations", {}).get("counts", {}),
    "radiance": {
        key: value.get("sampled_observations")
        for key, value in data.get("radiance_samples", {}).items()
    },
    "analysis": {}
}

analysis_block = data.get("analysis", {})
for key in (
    "normalized_residual_rms_background",
    "normalized_residual_rms_analysis",
    "cost_reduction",
):
    if key in analysis_block:
        manifest["analysis"][key] = analysis_block[key]

with open(manifest_path, "w", encoding="utf-8") as out:
    json.dump(manifest, out, indent=2, sort_keys=True)
    out.write("\n")
PY
    then
        echo -e "${RED}❌ [$scenario] failed to create manifest ${manifest_path}${NC}"
        return 1
    fi

    echo "   • Manifest (${scenario}): ${manifest_path}"
    return 0
}

compare_manifest() {
    # Compare a freshly created manifest against the stored baseline for
    # one scenario, within per-metric tolerances.
    #   $1 - scenario name (keys into the baseline JSON)
    #   $2 - path to the manifest produced by create_manifest
    #   $3 - path to the baseline JSON file
    # Returns 0 when the manifest matches (or the baseline/scenario is
    # absent, which is treated as "nothing to compare"), 1 on mismatch.
    local scenario="$1"
    local manifest_path="$2"
    local baseline_path="$3"

    if [[ ! -f "$baseline_path" ]]; then
        echo -e "${YELLOW}⚠️ Baseline file missing (${baseline_path}); skipping comparison for ${scenario}${NC}"
        return 0
    fi

    # Prefer 'python' (original behavior) but fall back to 'python3' on
    # systems that no longer ship an unversioned interpreter.
    local python_bin
    python_bin="$(command -v python || command -v python3 || true)"
    if [[ -z "$python_bin" ]]; then
        echo -e "${RED}❌ [$scenario] no python interpreter found on PATH${NC}"
        return 1
    fi

    if "$python_bin" - "$scenario" "$manifest_path" "$baseline_path" <<'PY'
import json, sys

scenario, manifest_path, baseline_path = sys.argv[1:4]
manifest = json.load(open(manifest_path, "r", encoding="utf-8"))
baseline = json.load(open(baseline_path, "r", encoding="utf-8"))

expected = baseline.get(scenario)
if expected is None:
    print(f"⚠️ Baseline does not contain scenario '{scenario}'; skipping comparison.")
    sys.exit(0)

errors = []

expected_mode = expected.get("analysis_mode")
if expected_mode and manifest.get("analysis_mode") != expected_mode:
    errors.append(
        f"analysis_mode expected {expected_mode} got {manifest.get('analysis_mode')}"
    )

def compare_section(section_name):
    spec = expected.get(section_name, {})
    actual = manifest.get(section_name, {})
    for key, entry in spec.items():
        if isinstance(entry, dict) and "value" in entry:
            value = actual.get(key)
            if value is None:
                errors.append(f"{section_name}.{key} missing")
                continue
            expected_value = entry["value"]
            tolerance = entry.get("tolerance", 0)
            try:
                value_f = float(value)
                expected_f = float(expected_value)
            except (TypeError, ValueError):
                if value != expected_value:
                    errors.append(f"{section_name}.{key} expected {expected_value} got {value}")
                continue
            if abs(value_f - expected_f) > tolerance:
                errors.append(
                    f"{section_name}.{key} expected {expected_f}±{tolerance} got {value_f}"
                )
        else:
            value = actual.get(key)
            if value != entry:
                errors.append(f"{section_name}.{key} expected {entry} got {value}")

compare_section("wind")
compare_section("radiance")
compare_section("analysis")

if errors:
    print("❌ Manifest comparison failed for", scenario)
    for line in errors:
        print("   -", line)
    sys.exit(1)
else:
    print(f"   • {scenario} manifest matches baseline tolerances")
PY
    then
        return 0
    else
        return 1
    fi
}

run_smoke_tests() {
    # Run both regional smoke cases (tutorial + MWRI) end to end:
    # 1) execute each Julia case, 2) scan its log for anomalies,
    # 3) extract a manifest, 4) compare the manifest to the baseline.
    # Returns 0 only when every stage succeeds.
    # Reads globals: SCRIPT_DIR, LOG_DIR, TIMEOUT, plus the MWRI_* /
    # TUTORIAL_* environment overrides referenced below.
    local repo_root
    repo_root="$(cd "$SCRIPT_DIR/../.." && pwd)"
    local timestamp
    timestamp="$(date +%Y%m%d_%H%M%S)"

    local tutorial_output="results/tutorial_regional_case_smoke_${timestamp}"
    local mwri_output="results/mwri_regional_case_smoke_${timestamp}"
    local tutorial_output_path="$repo_root/$tutorial_output"
    local mwri_output_path="$repo_root/$mwri_output"

    local tutorial_log="$LOG_DIR/${timestamp}_smoke_tutorial.log"
    local mwri_log="$LOG_DIR/${timestamp}_smoke_mwri.log"
    local manifest_dir="$SCRIPT_DIR/results/smoke_manifests"
    local baseline_file="$SCRIPT_DIR/results/baselines/regional_smoke_baseline.json"
    mkdir -p "$manifest_dir"

    # MWRI runs need a PrepBUFR table; bail out early with guidance if absent.
    local mwri_table="${MWRI_PREPBUFR_TABLE:-/home/docker/GSI/comGSIv3.7_EnKFv1.3-mwri/fix/prepobs_prep.bufrtable}"
    local mwri_thinning="${MWRI_OBS_THINNING_KM:-50}"
    if [[ ! -f "$mwri_table" ]]; then
        echo -e "${YELLOW}⚠️ MWRI PrepBUFR table not found at $mwri_table${NC}"
        echo -e "${YELLOW}   Set MWRI_PREPBUFR_TABLE to a valid prepobs_prep.bufrtable path before running smoke tests.${NC}"
        return 1
    fi

    local mwri_sample="${MWRI_SMOKE_RADIANCE_SAMPLE:-50}"
    local success=true
    local tutorial_prepbufr="${TUTORIAL_PREPBUFR_PATH:-}"
    local mwri_prepbufr="${MWRI_PREPBUFR_PATH:-}"

    local tutorial_args=("--scenario=tutorial" "--analysis-mode=diag" "--radiance-sample=0" "--output=$tutorial_output")
    if [[ -n "$tutorial_prepbufr" ]]; then
        tutorial_args+=("--prepbufr=$tutorial_prepbufr")
    fi

    local mwri_args=("--scenario=mwri" "--analysis-mode=nam" "--radiance-sample=$mwri_sample" "--bufr-table=$mwri_table" "--output=$mwri_output" "--obs-thinning-distance=$mwri_thinning")
    if [[ -n "$mwri_prepbufr" ]]; then
        mwri_args+=("--prepbufr=$mwri_prepbufr")
    fi

    # BUG FIX: the Julia runs execute in subshells, so 'success=false'
    # assignments made inside them never reached the parent shell and run
    # failures were silently dropped. Test each subshell's exit status
    # from the parent instead.
    if ! (
        cd "$repo_root" || exit 1
        echo "Running tutorial smoke case → $tutorial_output" > "$tutorial_log"
        timeout "$TIMEOUT" env JULIA_NUM_THREADS=1 OPENBLAS_NUM_THREADS=1 \
            julia --project=. scripts/julia/run_mwri_regional_case.jl \
            "${tutorial_args[@]}" >> "$tutorial_log" 2>&1
    ); then
        success=false
    fi

    if ! (
        cd "$repo_root" || exit 1
        echo "Running MWRI smoke case → $mwri_output" > "$mwri_log"
        timeout "$TIMEOUT" env JULIA_NUM_THREADS=1 OPENBLAS_NUM_THREADS=1 \
            julia --project=. scripts/julia/run_mwri_regional_case.jl \
            "${mwri_args[@]}" >> "$mwri_log" 2>&1
    ); then
        success=false
    fi

    if ! check_log_for_anomalies "$tutorial_log" "tutorial"; then
        success=false
    fi
    if ! check_log_for_anomalies "$mwri_log" "mwri"; then
        success=false
    fi

    local tutorial_manifest="$manifest_dir/${timestamp}_tutorial.json"
    local mwri_manifest="$manifest_dir/${timestamp}_mwri.json"

    if ! create_manifest "tutorial" "$tutorial_output_path" "$tutorial_manifest"; then
        success=false
    fi
    if ! create_manifest "mwri" "$mwri_output_path" "$mwri_manifest"; then
        success=false
    fi

    if ! compare_manifest "tutorial" "$tutorial_manifest" "$baseline_file"; then
        success=false
    fi
    if ! compare_manifest "mwri" "$mwri_manifest" "$baseline_file"; then
        success=false
    fi

    if [[ "$success" == true ]]; then
        echo -e "${GREEN}✅ Regional smoke runs completed${NC}"
        echo "   • Tutorial log : $tutorial_log"
        echo "   • MWRI log     : $mwri_log"
        return 0
    else
        echo -e "${RED}❌ Regional smoke runs failed${NC}"
        echo "   • Tutorial log : $tutorial_log"
        echo "   • MWRI log     : $mwri_log"
        return 1
    fi
}

print_header() {
    # Render the suite banner followed by the resolved directory layout.
    local bar="${CYAN}================================================${NC}"
    printf '%b\n' \
        "$bar" \
        "${CYAN}  GSICoreAnalysis.jl - Test & Benchmark Suite${NC}" \
        "$bar"
    printf '%s\n' \
        "🧪 Comprehensive Testing and Performance Analysis" \
        "" \
        "📁 Script Directory: $SCRIPT_DIR" \
        "📝 Log Directory: $LOG_DIR" \
        "🏃 Benchmark Directory: $BENCHMARK_DIR" \
        "🔬 Test Directory: $TEST_DIR" \
        ""
}

print_usage() {
    # Print CLI usage, available test types, options, and examples.
    echo "Usage: $0 [OPTIONS] [TEST_TYPE]"
    echo ""
    echo "TEST TYPES:"
    echo "  all            - Run all tests and benchmarks"
    echo "  unit           - Unit tests only"
    echo "  integration    - Integration tests"
    echo "  performance    - Performance benchmarks"
    echo "  lorenz         - Lorenz-96 model tests"
    echo "  solvers        - Solver algorithm tests"
    echo "  advanced       - Advanced solver tests"
    echo "  basic          - Basic functionality tests"
    echo "  minimal        - Minimal test suite (fastest)"
    echo "  regression     - Regression testing"
    echo "  smoke          - Dual-case regional smoke runs (tutorial + MWRI)"
    echo ""
    echo "OPTIONS:"
    echo "  -h, --help     - Show this help message"
    echo "  -v, --verbose  - Verbose test output"
    echo "  -f, --fast     - Skip slow tests"
    echo "  -p, --parallel - Run tests in parallel"
    echo "  -c, --coverage - Generate coverage report"
    # Fixed typo: separator was fused to the flag ("--benchmark- Run")
    echo "  -b, --benchmark - Run performance benchmarks"
    echo "  -r, --report   - Generate detailed test report"
    echo "  -x, --xml      - Generate JUnit XML report"
    echo "  --fail-fast    - Stop on first failure"
    echo "  --timeout N    - Test timeout in seconds (default: 300)"
    echo ""
    echo "EXAMPLES:"
    echo "  $0                     # Run all tests"
    echo "  $0 unit                # Unit tests only"
    echo "  $0 -v -p performance   # Verbose parallel benchmarks"
    echo "  $0 --coverage all      # All tests with coverage"
    echo "  $0 -f minimal          # Fast minimal test suite"
    echo ""
}

run_julia_tests() {
    # Dispatch and run the requested Julia test suite.
    #   $1 - test type (unit|integration|performance|lorenz|solvers|
    #        advanced|basic|minimal|regression|all|smoke)
    # Honors the PARALLEL, COVERAGE, FAIL_FAST, and TIMEOUT globals; logs
    # go under LOG_DIR. Returns 0 when all selected tests pass.
    local test_type="$1"
    local timestamp
    timestamp=$(date +%Y%m%d_%H%M%S)
    local log_file="$LOG_DIR/${timestamp}_julia_${test_type}.log"

    # Smoke runs have their own driver with log/manifest/baseline stages.
    if [[ "$test_type" == "smoke" ]]; then
        run_smoke_tests
        return $?
    fi

    echo -e "${BLUE}🧪 Running Julia $test_type tests...${NC}"
    echo "📝 Log: $log_file"

    # Build the Julia invocation as an array so options survive quoting.
    # (The previous implementation string-built the command and ran it
    # through 'eval', which is fragile with embedded quotes; it also only
    # applied --threads to the Pkg.test path, not to individual files.)
    local -a julia_cmd=(julia --project=.)
    if [[ "$PARALLEL" == "true" ]]; then
        julia_cmd+=(--threads=auto)
    fi

    local test_command=""
    local test_files=()

    case "$test_type" in
        "unit")
            test_files=(
                "test/test_basic_functionality.jl"
                "test/test_statevectors.jl"
                "test/test_costfunctions.jl"
                "test/test_observation_processing.jl"
            )
            ;;
        "integration")
            test_files=(
                "test/test_integration.jl"
                "test/test_integration_validation.jl"
                "test/test_drp4dvar_realistic_integration.jl"
            )
            ;;
        "performance")
            test_files=(
                "test/test_performance.jl"
                "test/test_performance_comprehensive.jl"
                "benchmarks/benchmark_suite.jl"
            )
            ;;
        "lorenz")
            test_files=(
                "test/test_lorenz96.jl"
            )
            ;;
        "solvers")
            test_files=(
                "test/test_basic_solvers.jl"
                "test/test_working_solvers.jl"
                "test/test_minimization.jl"
            )
            ;;
        "advanced")
            test_files=(
                "test/test_advanced_solvers.jl"
                "benchmarks/benchmark_advanced_solvers.jl"
            )
            ;;
        "basic")
            test_files=(
                "test/test_basic_functionality.jl"
                "test/test_syntax.jl"
                "test/test_if_syntax.jl"
            )
            ;;
        "minimal")
            test_files=(
                "scripts/simple_test.jl"
                "test/test_basic_functionality.jl"
            )
            ;;
        "regression")
            test_files=(
                "benchmarks/performance_regression_tests.jl"
            )
            ;;
        "all")
            test_command="using Pkg; Pkg.test()"
            ;;
        *)
            echo -e "${RED}ERROR: Unknown test type: $test_type${NC}" >&2
            return 1
            ;;
    esac

    local start_time
    start_time=$(date +%s)
    local exit_code=0

    if [[ -n "$test_command" ]]; then
        # Run the standard Pkg.test() entry point.
        if [[ "$COVERAGE" == "true" ]]; then
            test_command="using Pkg; Pkg.test(coverage=true)"
        fi
        # Capture the status explicitly so 'set -e' cannot abort the
        # script before the summary below is printed.
        timeout "$TIMEOUT" "${julia_cmd[@]}" -e "$test_command" > "$log_file" 2>&1 || exit_code=$?
    else
        # Run individual test files one by one, each with its own log.
        local total_tests=${#test_files[@]}
        local passed_tests=0
        local failed_tests=0

        echo "📊 Running $total_tests test files..."

        for test_file in "${test_files[@]}"; do
            if [[ ! -f "$test_file" ]]; then
                echo -e "${YELLOW}⚠️ Test file not found: $test_file${NC}"
                continue
            fi

            echo -n "  • $(basename "$test_file"): "

            local file_log="$LOG_DIR/${timestamp}_$(basename "$test_file" .jl).log"

            if timeout "$TIMEOUT" "${julia_cmd[@]}" "$test_file" > "$file_log" 2>&1; then
                echo -e "${GREEN}PASSED${NC}"
                # Not ((x++)): that expression evaluates to 0 on the first
                # increment and returns status 1, tripping 'set -e'.
                passed_tests=$((passed_tests + 1))
            else
                echo -e "${RED}FAILED${NC}"
                failed_tests=$((failed_tests + 1))

                if [[ "$FAIL_FAST" == "true" ]]; then
                    echo -e "${RED}❌ Stopping due to --fail-fast${NC}"
                    return 1
                fi
            fi
        done

        if [[ $failed_tests -gt 0 ]]; then
            exit_code=1
        fi
    fi

    local end_time
    end_time=$(date +%s)
    local duration=$((end_time - start_time))

    echo ""
    if [[ $exit_code -eq 0 ]]; then
        echo -e "${GREEN}✅ Tests completed successfully${NC}"
        if [[ -n "${passed_tests:-}" ]]; then
            echo "📊 Results: $passed_tests passed, $failed_tests failed"
        fi
    else
        echo -e "${RED}❌ Tests failed${NC}"
        if [[ -n "${failed_tests:-}" ]]; then
            echo "📊 Results: $passed_tests passed, $failed_tests failed"
        fi
        echo "📝 Check logs in: $LOG_DIR"
    fi

    echo "⏱️ Duration: ${duration}s"

    return $exit_code
}

run_benchmarks() {
    # Run every available benchmark script, appending output to one shared
    # log. Returns non-zero when any benchmark that was found fails.
    # (Previously failures were printed but the function always returned
    # the status of the final 'echo', i.e. 0, so the caller's
    # 'if ! run_benchmarks' check could never fire.)
    echo -e "${BLUE}🏃 Running performance benchmarks...${NC}"

    local timestamp
    timestamp=$(date +%Y%m%d_%H%M%S)
    local bench_log="$LOG_DIR/${timestamp}_benchmarks.log"

    # List of benchmark scripts, relative to the current directory
    local benchmark_files=(
        "benchmarks/benchmark_suite.jl"
        "benchmarks/benchmark_advanced_solvers.jl"
        "demos/performance_demo.jl"
    )

    echo "🏁 Available benchmarks:"
    local bench_file
    for bench_file in "${benchmark_files[@]}"; do
        if [[ -f "$bench_file" ]]; then
            echo "  ✓ $(basename "$bench_file")"
        else
            echo -e "  ${YELLOW}⚠️ $(basename "$bench_file") (not found)${NC}"
        fi
    done
    echo ""

    local start_time
    start_time=$(date +%s)
    local failures=0

    for bench_file in "${benchmark_files[@]}"; do
        if [[ ! -f "$bench_file" ]]; then
            continue
        fi

        echo -e "${CYAN}Running $(basename "$bench_file")...${NC}"

        if julia --project=. "$bench_file" >> "$bench_log" 2>&1; then
            echo -e "${GREEN}  ✅ Completed${NC}"
        else
            echo -e "${RED}  ❌ Failed${NC}"
            failures=$((failures + 1))
        fi
    done

    local end_time
    end_time=$(date +%s)
    local duration=$((end_time - start_time))

    echo ""
    echo -e "${GREEN}🏁 Benchmarks completed${NC}"
    echo "⏱️ Duration: ${duration}s"
    echo "📝 Results: $bench_log"

    # Propagate failure so the caller can mark the overall run as failed
    [[ $failures -eq 0 ]]
}

generate_test_report() {
    # Write a markdown report into LOG_DIR summarizing recent log files,
    # system information, and the current test configuration.
    # Reads globals: LOG_DIR, PARALLEL, COVERAGE, TIMEOUT, FAIL_FAST,
    # and (optionally) JULIA_NUM_THREADS.
    local report_file="$LOG_DIR/test_report_$(date +%Y%m%d_%H%M%S).md"

    echo -e "${BLUE}📊 Generating test report...${NC}"

    cat > "$report_file" << EOF
# GSICoreAnalysis.jl - Test Report

**Generated:** $(date)
**Environment:** $(uname -a)
**Julia Version:** $(julia --version)

## Test Summary

EOF

    # Collect up to 10 logs modified in the last day. mapfile avoids the
    # word-splitting and glob-expansion bugs of the previous
    # 'array=( $(find …) )' form, which broke paths containing spaces.
    local recent_logs=()
    mapfile -t recent_logs < <(find "$LOG_DIR" -name "*.log" -mtime -1 | head -10)

    if [[ ${#recent_logs[@]} -gt 0 ]]; then
        echo "## Recent Test Runs" >> "$report_file"
        echo "" >> "$report_file"

        local log_file log_name log_size
        for log_file in "${recent_logs[@]}"; do
            log_name=$(basename "$log_file")
            log_size=$(wc -l < "$log_file" 2>/dev/null || echo "0")

            echo "- **$log_name**: $log_size lines" >> "$report_file"
        done

        echo "" >> "$report_file"
    fi

    # Add system information; default JULIA_NUM_THREADS so the report is
    # well-formed even when the variable was never exported.
    cat >> "$report_file" << EOF
## System Information

- **CPU Cores:** $(nproc)
- **Memory:** $(free -h | grep '^Mem:' | awk '{print $2}')
- **Disk Space:** $(df -h . | tail -1 | awk '{print $4}' | tr -d '\n') available
- **Julia Threads:** ${JULIA_NUM_THREADS:-unset}

## Test Configuration

- **Parallel Execution:** $PARALLEL
- **Coverage Enabled:** $COVERAGE
- **Timeout:** ${TIMEOUT}s
- **Fail Fast:** $FAIL_FAST

EOF

    echo -e "${GREEN}📄 Test report generated: $report_file${NC}"
}

# ==============================================================================
# MAIN SCRIPT
# ==============================================================================

# Default values (overridden by the command-line flags parsed below)
TEST_TYPE="all"
VERBOSE="false"
FAST="false"
PARALLEL="false"
COVERAGE="false"
BENCHMARK="false"
REPORT="false"
XML="false"
FAIL_FAST="false"
TIMEOUT="300"

# Parse arguments. Flags may appear in any order; if several bare words
# matching known test types are given, the last one wins.
while [[ $# -gt 0 ]]; do
    case $1 in
        -h|--help)
            print_usage
            exit 0
            ;;
        -v|--verbose)
            VERBOSE="true"
            shift
            ;;
        -f|--fast)
            # Fast mode also tightens the per-test timeout to 60s
            FAST="true"
            TIMEOUT="60"
            shift
            ;;
        -p|--parallel)
            # Exported so child Julia processes pick up the thread count
            PARALLEL="true"
            export JULIA_NUM_THREADS=auto
            shift
            ;;
        -c|--coverage)
            COVERAGE="true"
            shift
            ;;
        -b|--benchmark)
            BENCHMARK="true"
            shift
            ;;
        -r|--report)
            REPORT="true"
            shift
            ;;
        -x|--xml)
            XML="true"
            shift
            ;;
        --fail-fast)
            FAIL_FAST="true"
            shift
            ;;
        --timeout)
            # Takes a value: consume both the flag and its argument
            TIMEOUT="$2"
            shift 2
            ;;
        all|unit|integration|performance|lorenz|solvers|advanced|basic|minimal|regression|smoke)
            TEST_TYPE="$1"
            shift
            ;;
        *)
            echo -e "${RED}ERROR: Unknown option: $1${NC}" >&2
            print_usage
            exit 1
            ;;
    esac
done

# Main execution
print_header

echo "🔧 Test Configuration:"
echo "   • Test Type: $TEST_TYPE"
echo "   • Verbose Mode: $VERBOSE"
echo "   • Fast Mode: $FAST"
echo "   • Parallel Execution: $PARALLEL"
echo "   • Coverage Analysis: $COVERAGE"
echo "   • Run Benchmarks: $BENCHMARK"
echo "   • Generate Report: $REPORT"
echo "   • Timeout: ${TIMEOUT}s"
echo ""

# Check Julia installation before attempting any test run
if ! command -v julia &> /dev/null; then
    echo -e "${RED}ERROR: Julia not found in PATH${NC}" >&2
    exit 1
fi

echo -e "${GREEN}✓ Julia found: $(julia --version)${NC}"

# Run tests (the '! …' condition also suppresses 'set -e' inside the call)
overall_success=true

if ! run_julia_tests "$TEST_TYPE"; then
    overall_success=false
fi

# Run benchmarks if requested
if [[ "$BENCHMARK" == "true" ]]; then
    if ! run_benchmarks; then
        overall_success=false
    fi
fi

# Generate report if requested (report generation never fails the run)
if [[ "$REPORT" == "true" ]]; then
    generate_test_report
fi

# Final summary: exit status reflects the aggregate result
echo ""
echo -e "${CYAN}================================================${NC}"
if [[ "$overall_success" == "true" ]]; then
    echo -e "${GREEN}🎉 All tests completed successfully!${NC}"
    exit 0
else
    echo -e "${RED}❌ Some tests failed${NC}"
    echo "📝 Check logs in: $LOG_DIR"
    exit 1
fi
