#!/bin/bash

# Batch testing script for GCDBenchmark
# Runs all test scenarios with correct parameter combinations
# Each scenario runs 5 times (ROUND 1-5)
# Supports both iOS and Mac platforms

# Global variables
DEVICE=""        # Device name handed to `xctrace --device` (e.g. "iPad")
DEVICE_UUID=""   # Device UUID used by `devicectl` for app installation
TARGET_TYPE=""  # "ios" or "mac"
BUNDLE_ID="com.ufogxl.GCDBenchmark"  # iOS app bundle id launched by xctrace
CLI_BIN="$HOME/Library/Developer/Xcode/DerivedData/Build/Products/Debug/GCDBenchmarkCLI"  # Mac CLI binary built below
RESULT_FOLDER=""    # Timestamped folder collecting all .trace files
MAIN_CSV=""         # Aggregate CSV of all parsed results
ANALYSIS_FOLDER=""  # Folder holding per-scenario parsed output
trace_file=""       # Current scenario's .trace path (read by run_ios_test/run_mac_test)

# Test execution globals for refactoring
# These are the implicit "parameters" of run_single_test and friends.
ROUND_NAME=""   # Unique xctrace run name for the current round
FIRST_RUN=""    # "true" on a scenario's first round (creates trace); later rounds append
TIMEOUT=20      # Per-recording timeout in seconds
WINDOW="5s"     # xctrace --window value
LAUNCH_ARGS=""  # Argument string for the benchmark binary; intentionally word-split at use sites

# Function to run xctrace with timeout and retry mechanism.
# Globals:   trace_file (written on retry cleanup)
# Arguments: $1 - timeout in seconds, $2 - run name, $3.. - command to run
# Outputs:   progress/diagnostic messages on stdout
# Returns:   0 when the command completes within the timeout, 1 after
#            max_retries failed attempts
run_xctrace_with_timeout() {
    local timeout_duration="$1"  # timeout (seconds) as the first argument
    local round_name="$2"        # run name as the second argument
    shift 2  # remaining arguments form the xctrace command line
    local max_retries=3
    local retry_count=0

    while [ $retry_count -lt $max_retries ]; do
        echo "⏱️ Starting xctrace (attempt $((retry_count + 1))/$max_retries) with ${timeout_duration}s timeout"

        # Clean up any stuck xctrace process left over from a previous attempt
        pkill -f "xctrace.*--run-name.*$round_name" 2>/dev/null || true

        # Launch the command with a timeout guard
        if command -v timeout >/dev/null 2>&1; then
            # Use the system `timeout` command when available
            timeout "$timeout_duration" "${@}" &
            local xctrace_pid=$!

            # Wait for completion or timeout
            if wait $xctrace_pid; then
                echo "✅ xctrace completed successfully"
                return 0
            else
                local exit_code=$?
                # GNU timeout exits 124 when the command was killed for timing out
                if [ $exit_code -eq 124 ]; then
                    echo "⚠️ xctrace timed out after ${timeout_duration}s, killing process..."
                    pkill -f "xctrace.*--run-name.*$round_name" 2>/dev/null || true
                else
                    echo "❌ xctrace failed with exit code $exit_code"
                fi
            fi
        else
            # No `timeout` binary: run directly and enforce the limit by polling
            echo "⚠️ No timeout command available, running without timeout protection"
            local start_time
            start_time=$(date +%s)
            "${@}" &
            local xctrace_pid=$!

            # Manual timeout check
            while kill -0 $xctrace_pid 2>/dev/null; do
                current_time=$(date +%s)
                elapsed=$((current_time - start_time))
                timeout_seconds=${timeout_duration%s}  # strip a trailing 's' suffix if present
                if [ $elapsed -ge $timeout_seconds ]; then
                    echo "⚠️ xctrace timed out after ${timeout_duration}s, killing process..."
                    kill -TERM $xctrace_pid 2>/dev/null || true
                    sleep 2
                    kill -KILL $xctrace_pid 2>/dev/null || true
                    break
                fi
                sleep 2
            done

            if wait $xctrace_pid; then
                echo "✅ xctrace completed successfully"
                return 0
            else
                echo "❌ xctrace failed"
            fi
        fi

        retry_count=$((retry_count + 1))
        if [ $retry_count -lt $max_retries ]; then
            # Delete the trace file before retrying to avoid run-name conflicts.
            # BUG FIX: `--output` and its path are separate argv words, so we must
            # take the argument FOLLOWING `--output`. The old code matched the
            # bare `--output` word and stripped a "--output " prefix that never
            # matched, leaving trace_file set to the literal string "--output"
            # and never deleting the real trace.
            local prev_arg="" out_path=""
            for arg in "$@"; do
                if [ "$prev_arg" = "--output" ]; then
                    out_path="$arg"
                    break
                elif [[ "$arg" == --output=* ]]; then
                    # also support the --output=PATH spelling
                    out_path="${arg#--output=}"
                    break
                fi
                prev_arg="$arg"
            done
            if [ -n "$out_path" ]; then
                trace_file="$out_path"
                echo "🧹 Cleaning up trace file before retry: $trace_file"
                rm -rf "$trace_file" 2>/dev/null || true
            fi

            # Message now matches the actual sleep duration (was "5 seconds")
            echo "🔄 Retrying same scenario in 2 seconds..."
            sleep 2
        fi
    done

    echo "❌ Scenario failed after $max_retries attempts"
    return 1
}

# Print the device lines listed under the "== Devices ==" heading of
# `xctrace list devices`, stopping before the offline/simulator sections.
get_active_devices() {
    local capturing=false
    local line
    xcrun xctrace list devices 2>/dev/null | while IFS= read -r line; do
        case "$line" in
            "== Devices ==")
                capturing=true
                continue
                ;;
            "== Devices Offline =="|"== Simulators ==")
                break
                ;;
        esac

        # Emit only non-blank lines once the active section has started
        if [ "$capturing" = true ] && [[ -n "$line" && ! "$line" =~ ^[[:space:]]*$ ]]; then
            echo "$line"
        fi
    done
}

# Interactively prompt the user to pick one of the active devices.
# The menu and prompts go to stderr; the chosen device line goes to stdout.
# Exits the script when no device is available or the selection is invalid.
select_device() {
    echo "=== Available Active Devices ===" >&2
    local -a devices=()
    local device

    while IFS= read -r device; do
        devices+=("$device")
        echo "[${#devices[@]}] $device" >&2
    done < <(get_active_devices)

    local count=${#devices[@]}
    if [ "$count" -eq 0 ]; then
        echo "No active devices found!" >&2
        exit 1
    fi

    echo "" >&2
    local selection
    read -p "Please select a device (1-$count): " selection

    # Accept only an integer within [1, count]
    if ! [[ "$selection" =~ ^[0-9]+$ ]] || [ "$selection" -lt 1 ] || [ "$selection" -gt "$count" ]; then
        echo "Invalid selection!" >&2
        exit 1
    fi

    echo "${devices[$((selection - 1))]}"
}

# Parse a .trace file with trace_parser.py and append its metric rows to the
# aggregate CSV.
# Globals:   PROJECT_DIR (read) - directory containing trace_parser.py
# Arguments: $1 - trace file, $2 - aggregate CSV path, $3 - scenario name,
#            $4 - analysis root directory
# Outputs:   status messages on stdout; moves parser-generated files into
#            "$4/<scenario>_analysis" and appends data rows to $2
parse_and_append_results() {
    local trace_file="$1"
    local csv_file="$2"
    local scenario_name="$3"
    local analysis_dir="$4"

    echo "📊 Parsing trace: $trace_file"
    echo "📋 Scenario: $scenario_name"

    # Create scenario-specific analysis subdirectory
    local scenario_analysis_dir="$analysis_dir/${scenario_name}_analysis"
    mkdir -p "$scenario_analysis_dir"
    echo "📁 Scenario analysis dir: $scenario_analysis_dir"

    # Run parser (its stderr is intentionally suppressed; failure is reported below)
    if python3 "$PROJECT_DIR/trace_parser.py" "$trace_file" 2>/dev/null; then
        # Move generated files to the scenario analysis directory.
        # FIX: group the -name alternatives so -maxdepth/-print unambiguously
        # apply to both patterns, and use `IFS= read -r` so filenames with
        # backslashes or leading spaces are not mangled.
        find "$(dirname "$trace_file")" -maxdepth 1 \( -name "*.csv" -o -name "*.txt" \) | while IFS= read -r file; do
            if [ -f "$file" ]; then
                mv "$file" "$scenario_analysis_dir/"
            fi
        done

        # Check if the parser produced its metrics CSV
        # (declaration split from assignment so the find status isn't masked)
        local parsed_csv
        parsed_csv=$(find "$scenario_analysis_dir" -name "trace_metrics.csv" -type f | head -1)
        if [ -f "$parsed_csv" ]; then
            echo "✅ Parsing successful, appending results to $csv_file"

            # Create the aggregate CSV with a header row if it doesn't exist yet
            if [ ! -f "$csv_file" ]; then
                echo "benchmark_case,parameters,sched_delay,worker_count,wakeup_count,wakeup_per_execute,first_delay,max_delay" > "$csv_file"
            fi

            # Skip the header row and append only the data rows
            tail -n +2 "$parsed_csv" >> "$csv_file"

            echo "✅ Results appended successfully"
        else
            echo "❌ Parsing failed - no CSV generated"
        fi
    else
        echo "❌ Parsing failed for $trace_file"
    fi
    echo ""
}

# Record one round on the iOS device via xctrace.
# Reads globals: FIRST_RUN, TIMEOUT, ROUND_NAME, DEVICE, trace_file, WINDOW,
# BUNDLE_ID, LAUNCH_ARGS. First round creates the trace with the full
# instrument set; later rounds append to the same file.
run_ios_test() {
    local -a mode_args
    if [ "$FIRST_RUN" = true ]; then
        mode_args=(
            --instrument 'System Call Trace'
            --instrument 'Thread State Trace'
            --instrument 'os_signpost'
        )
    else
        mode_args=(--append-run)
    fi

    # LAUNCH_ARGS is deliberately unquoted so it word-splits into separate args
    run_xctrace_with_timeout "$TIMEOUT" "$ROUND_NAME" xcrun xctrace record \
        "${mode_args[@]}" \
        --device "$DEVICE" \
        --output "$trace_file" \
        --run-name "$ROUND_NAME" \
        --window "$WINDOW" \
        --no-prompt \
        --launch "$BUNDLE_ID" -- $LAUNCH_ARGS
}

# Record one round of the Mac CLI binary via xctrace.
# Reads globals: FIRST_RUN, TIMEOUT, ROUND_NAME, trace_file, WINDOW, CLI_BIN,
# LAUNCH_ARGS. First round creates the trace with the full instrument set;
# later rounds append to the same file.
run_mac_test() {
    local -a mode_args
    if [ "$FIRST_RUN" = true ]; then
        echo "Starting trace collection for CLI..."
        mode_args=(
            --instrument 'System Call Trace'
            --instrument 'Thread State Trace'
            --instrument 'os_signpost'
        )
    else
        mode_args=(--append-run)
    fi

    # LAUNCH_ARGS is deliberately unquoted so it word-splits into separate args
    run_xctrace_with_timeout "$TIMEOUT" "$ROUND_NAME" xcrun xctrace record \
        "${mode_args[@]}" \
        --output "$trace_file" \
        --run-name "$ROUND_NAME" \
        --window "$WINDOW" \
        --no-prompt \
        --launch -- "$CLI_BIN" $LAUNCH_ARGS
}

# Dispatch one benchmark round to the platform-specific runner.
# Reads the TARGET_TYPE global; returns non-zero when the scenario could not
# be completed even after the runner's internal retries.
run_single_test() {
    local runner=run_mac_test
    [ "$TARGET_TYPE" = "ios" ] && runner=run_ios_test

    if ! "$runner"; then
        echo "❌ Failed to complete scenario after retries, skipping remaining rounds"
        return 1
    fi
    return 0
}

# Helper function to run scenario with 5 rounds - NO LONGER USED
# We now run each round individually to avoid parameter conflicts

# Configuration
# Positional arguments: $1 = output directory (default ~/Desktop), $2 = device.
OUTPUT_DIR="${1:-~/Desktop}"
DEVICE_INPUT="${2}"

# Expand and convert to absolute path
# (the default "~" is quoted above, so eval-echo performs the tilde expansion;
#  the cd/pwd round-trip canonicalizes it when the directory exists)
OUTPUT_DIR=$(eval echo "$OUTPUT_DIR")
OUTPUT_DIR=$(cd "$OUTPUT_DIR" 2>/dev/null && pwd || echo "$OUTPUT_DIR")

# If device is not provided as argument, prompt user to select
if [ -z "$DEVICE_INPUT" ]; then
    DEVICE_FULL=$(select_device)
    # Extract simple device name (e.g., "iPad" from "iPad (18.5) (UUID)")
    DEVICE=$(echo "$DEVICE_FULL" | sed 's/ *(.*//')
    # Extract UUID for installation (last UUID in parentheses)
    DEVICE_UUID=$(echo "$DEVICE_FULL" | sed 's/.*(\([^)]*\))$/\1/')

    # Check if Mac to set target type
    if [[ "$DEVICE_FULL" == *"Mac"* ]]; then
        TARGET_TYPE="mac"
    else
        TARGET_TYPE="ios"
    fi
else
    # If input contains "mac" treat as Mac, otherwise as iOS device
    # NOTE(review): the first pattern is an unanchored regex that matches ANY
    # input containing "-i" (e.g. a flag-like argument) — presumably a shortcut
    # for selecting the Mac target; confirm the intended spelling.
    if [[ "$DEVICE_INPUT" =~ -i ]] || [[ "$DEVICE_INPUT" =~ "Mac" ]]; then
        TARGET_TYPE="mac"
        DEVICE="My Mac"
        DEVICE_FULL="Mac"
    else
        TARGET_TYPE="ios"
        DEVICE="$DEVICE_INPUT"
        DEVICE_FULL="$DEVICE_INPUT"
        DEVICE_UUID="$DEVICE_INPUT"
    fi
fi

# Summarize the selected target and destination before building.
printf '\n%s\n' "=== GCDBenchmark Batch Test ==="
printf '%s\n' "Target: $DEVICE_FULL"
printf '%s\n' "Type: $TARGET_TYPE (CLI + Trace)"
printf '%s\n' "Output Dir: $OUTPUT_DIR"
printf '\n'

# Project configuration (the script is expected to run from the project root)
PROJECT_DIR="."
PROJECT_FILE="$PROJECT_DIR/GCDBenchmark.xcodeproj"
SCHEME="GCDBenchmark"

if [ "$TARGET_TYPE" = "ios" ]; then
    echo "=== Building and Installing App ==="
    echo "Project: $PROJECT_FILE"
    echo "Scheme: $SCHEME"
    echo "Device: $DEVICE ($DEVICE_UUID)"
    echo ""

    # Build for device
    echo "Building app for device..."
    xcodebuild\
        -project "$PROJECT_FILE"\
        -scheme "$SCHEME"\
        -configuration Release\
        build-for-testing\
        -destination "generic/platform=iOS"\
        -derivedDataPath "$HOME/Library/Developer/Xcode/DerivedData" 2>&1 | tail -10

    # Find the built .app
    APP_BUNDLE=$(find "$HOME/Library/Developer/Xcode/DerivedData" -name "GCDBenchmark.app" -type d -path "*Release-iphoneos*" 2>/dev/null | tail -1)

    if [ -z "$APP_BUNDLE" ]; then
        echo "Error: Could not find built app"
        exit 1
    fi

    echo "Found app at: $APP_BUNDLE"
    echo ""
    echo "Installing app to device..."

    # Install app to device using devicectl
    xcrun devicectl device install app\
        --device "$DEVICE_UUID"\
        "$APP_BUNDLE" 2>&1 | grep -E "(installed|bundleID|error)" | head -5

    echo ""
    echo "=== Build and Installation Complete ==="
    echo ""

    # Bundle identifier
    BUNDLE_ID="com.ufogxl.GCDBenchmark"
else
    echo "=== Building CLI on Mac ==="
    echo "Building GCDBenchmarkCLI..."
    xcodebuild\
        -project "$PROJECT_FILE"\
        -scheme "GCDBenchmarkCLI"\
        -configuration Debug\
        build\
        -derivedDataPath "$HOME/Library/Developer/Xcode/DerivedData" 2>&1 | tail -10
    echo ""

    # Copy playback files to DerivedData directory (TCC doesn't restrict this location)
    echo "Copying playback files to DerivedData..."
    PLAYBACK_SRC_DIR="$PROJECT_DIR/PlaybackCases"
    PLAYBACK_DEST_DIR="$HOME/Library/Developer/Xcode/DerivedData/Build/Products/PlaybackCases"
    if [ -d "$PLAYBACK_SRC_DIR" ]; then
        mkdir -p "$PLAYBACK_DEST_DIR"
        cp "$PLAYBACK_SRC_DIR"/*.playback "$PLAYBACK_DEST_DIR/" 2>/dev/null || true
        echo "Playback files copied to: $PLAYBACK_DEST_DIR"
        ls -la "$PLAYBACK_DEST_DIR"
    fi
    echo ""

    # For Mac mode, we don't use bundle ID
    BUNDLE_ID="CLI-Binary"
fi

echo "Bundle ID/Target: $BUNDLE_ID"
echo ""

# Create the timestamped result folder that collects all .trace files
RESULT_FOLDER="$OUTPUT_DIR/GCDBenchmark_result_$(date +%Y%m%d_%H%M%S)"
mkdir -p "$RESULT_FOLDER"
echo "Created result folder: $RESULT_FOLDER"

# Create analysis folder for all parsed results
ANALYSIS_FOLDER="$RESULT_FOLDER/analysis"
mkdir -p "$ANALYSIS_FOLDER"
echo "Created analysis folder: $ANALYSIS_FOLDER"

# Create main CSV file (header row only for now) in the analysis folder
MAIN_CSV="$ANALYSIS_FOLDER/all_results.csv"
echo "Creating main CSV: $MAIN_CSV"
echo "benchmark_case,parameters,sched_delay,worker_count,wakeup_count,wakeup_per_execute,first_delay,max_delay" > "$MAIN_CSV"

echo ""

# Prevent system sleep during long-running test; the PID is killed by the
# cleanup trap installed below when the script exits.
echo "🔌 Preventing system sleep during testing..."
caffeinate -d -i -m -u -s &
CAFFEINATE_PID=$!
echo "   Caffeinate process started (PID: $CAFFEINATE_PID)"
echo ""
echo "=== Starting Batch Test Recording ==="
echo ""

# Stop the caffeinate keep-awake helper (if one was started) so normal sleep
# behavior is restored when the script exits for any reason.
cleanup() {
    [ -n "$CAFFEINATE_PID" ] || return 0
    printf '\n%s\n' "🔌 Restoring system sleep settings..."
    kill "$CAFFEINATE_PID" 2>/dev/null
    printf '%s\n' "   Caffeinate process stopped"
}
# Run cleanup on normal exit and on interrupt/terminate signals
trap cleanup EXIT INT TERM

# ============================================================================
# fork_join: TYPE0/1, CNT=512, COST=10us/100us/1000us, QOS=2/UNSPECIFIED
# ============================================================================
echo "=== Fork-Join Scenarios ==="
for type in 0 1; do
    for cost in 10 100 1000; do
        for qos_val in 2 -1; do
            run_name="fork_join_TYPE_${type}_CNT_512_COST_${cost}_QOS_${qos_val}"

            # All 5 rounds of this scenario record into one shared .trace file
            trace_file="$RESULT_FOLDER/${run_name}.trace"
            echo "📁 Trace file: $trace_file"

            first_run=true
            for round in {1..5}; do
                # Hand per-round parameters to run_single_test via the globals
                ROUND_NAME="${run_name}_ROUND_${round}"
                FIRST_RUN="$first_run"
                TIMEOUT=20
                WINDOW="5s"
                LAUNCH_ARGS="--auto-test fork_join --task-type $type --task-count 512 --task-duration ${cost}us --qos-class $qos_val --parameters $ROUND_NAME"
                echo "📍 [${round}/5] Running: $ROUND_NAME"

                if ! run_single_test; then
                    # Abandon the remaining rounds of this scenario after retries fail
                    break
                fi

                if [ "$first_run" = true ]; then
                    first_run=false  # subsequent rounds append to the existing trace
                fi
                echo ""
            done

            echo "✅ Scenario completed: $run_name"

            # Per-scenario parsing is disabled; traces are batch-parsed at the end
            # parse_and_append_results "$trace_file" "$MAIN_CSV" "$run_name" "$ANALYSIS_FOLDER"

            echo "✅ Results processed and appended"
            echo ""
        done
    done
done
echo ""

# ============================================================================
# fibonacci: TYPE0/1, FIB=10, COST=10us/100us/1000us, QOS=2/UNSPECIFIED
# ============================================================================
echo "=== Fibonacci Scenarios ==="
for type in 0 1; do
    for cost in 10 100 1000; do
        for qos_val in 2 -1; do
            run_name="fibonacci_TYPE_${type}_FIB_10_COST_${cost}_QOS_${qos_val}"

            # All 5 rounds of this scenario record into one shared .trace file
            trace_file="$RESULT_FOLDER/${run_name}.trace"
            echo "📁 Trace file: $trace_file"

            first_run=true
            for round in {1..5}; do
                # Hand per-round parameters to run_single_test via the globals
                ROUND_NAME="${run_name}_ROUND_${round}"
                FIRST_RUN="$first_run"
                TIMEOUT=20
                WINDOW="5s"
                LAUNCH_ARGS="--auto-test fibonacci --task-type $type --depth 10 --task-duration ${cost}us --qos-class $qos_val --parameters $ROUND_NAME"
                echo "📍 [${round}/5] Running: $ROUND_NAME"

                if ! run_single_test; then
                    # Abandon the remaining rounds of this scenario after retries fail
                    break
                fi

                if [ "$first_run" = true ]; then
                    first_run=false  # subsequent rounds append to the existing trace
                fi
                echo ""
            done

            echo "✅ Scenario completed: $run_name"

            # Per-scenario parsing is disabled; traces are batch-parsed at the end
            # parse_and_append_results "$trace_file" "$MAIN_CSV" "$run_name" "$ANALYSIS_FOLDER"

            echo "✅ Results processed and appended"
            echo ""
        done
    done
done
echo ""

# ============================================================================
# airaw: TYPE0/1, BUFFER=1/4/8, SLICE=512, COST=200us, QOS=2/UNSPECIFIED
# ============================================================================
echo "=== Airaw Scenarios ==="
for type in 0 1; do
    for buffer in 1 4 8; do
        for qos_val in 2 -1; do
            run_name="airaw_TYPE_${type}_BUFFER_${buffer}_SLICE_512_COST_200_QOS_${qos_val}"

            # All 5 rounds of this scenario record into one shared .trace file
            trace_file="$RESULT_FOLDER/${run_name}.trace"
            echo "📁 Trace file: $trace_file"

            first_run=true
            for round in {1..5}; do
                # Hand per-round parameters to run_single_test via the globals
                ROUND_NAME="${run_name}_ROUND_${round}"
                FIRST_RUN="$first_run"
                TIMEOUT=20
                WINDOW="5s"
                LAUNCH_ARGS="--auto-test airaw --task-type $type --buffer-count $buffer --slice-count 512 --task-duration 200us --qos-class $qos_val --parameters $ROUND_NAME"
                echo "📍 [${round}/5] Running: $ROUND_NAME"

                if ! run_single_test; then
                    # Abandon the remaining rounds of this scenario after retries fail
                    break
                fi

                if [ "$first_run" = true ]; then
                    first_run=false  # subsequent rounds append to the existing trace
                fi
                echo ""
            done

            echo "✅ Scenario completed: $run_name"

            # Per-scenario parsing is disabled; traces are batch-parsed at the end
            # parse_and_append_results "$trace_file" "$MAIN_CSV" "$run_name" "$ANALYSIS_FOLDER"

            echo "✅ Results processed and appended"
            echo ""
        done
    done
done
echo ""

# ============================================================================
# serial_queue: TYPE0/1, INTERVAL=100us, CNT=512, COST=10us/200us, QOS=2/UNSPECIFIED
# ============================================================================
echo "=== Serial Queue Scenarios ==="
for type in 0 1; do
    for cost in 10 200; do
        for qos_val in 2 -1; do
            run_name="serial_queue_TYPE_${type}_INTERVAL_100_CNT_512_COST_${cost}_QOS_${qos_val}"

            # All 5 rounds of this scenario record into one shared .trace file
            trace_file="$RESULT_FOLDER/${run_name}.trace"
            echo "📁 Trace file: $trace_file"

            first_run=true
            for round in {1..5}; do
                # Hand per-round parameters to run_single_test via the globals
                ROUND_NAME="${run_name}_ROUND_${round}"
                FIRST_RUN="$first_run"
                TIMEOUT=20
                WINDOW="5s"
                LAUNCH_ARGS="--auto-test serial_queue --task-type $type --task-count 512 --submit-interval 100us --task-duration ${cost}us --qos-class $qos_val --parameters $ROUND_NAME"
                echo "📍 [${round}/5] Running: $ROUND_NAME"

                if ! run_single_test; then
                    # Abandon the remaining rounds of this scenario after retries fail
                    break
                fi

                if [ "$first_run" = true ]; then
                    first_run=false  # subsequent rounds append to the existing trace
                fi
                echo ""
            done

            echo "✅ Scenario completed: $run_name"

            # Per-scenario parsing is disabled; traces are batch-parsed at the end
            # parse_and_append_results "$trace_file" "$MAIN_CSV" "$run_name" "$ANALYSIS_FOLDER"

            echo "✅ Results processed and appended"
            echo ""
        done
    done
done
echo ""

# ============================================================================
# concurrent_queue: TYPE0/1, CONCURRENCY=2/4/8, INTERVAL=100us, CNT=512, COST=1000us, QOS=2/UNSPECIFIED
# ============================================================================
echo "=== Concurrent Queue Scenarios ==="
for type in 0 1; do
    for concurrency in 2 4 8; do
        for qos_val in 2 -1; do
            run_name="concurrent_queue_TYPE_${type}_CONCURRENCY_${concurrency}_INTERVAL_100_CNT_512_COST_1000_QOS_${qos_val}"

            # All 5 rounds of this scenario record into one shared .trace file
            trace_file="$RESULT_FOLDER/${run_name}.trace"
            echo "📁 Trace file: $trace_file"

            first_run=true
            for round in {1..5}; do
                # Hand per-round parameters to run_single_test via the globals
                ROUND_NAME="${run_name}_ROUND_${round}"
                FIRST_RUN="$first_run"
                TIMEOUT=20
                WINDOW="5s"
                LAUNCH_ARGS="--auto-test concurrent_queue --task-type $type --max-concurrency $concurrency --submit-interval 100us --task-count 512 --task-duration 1000us --qos-class $qos_val --parameters $ROUND_NAME"
                echo "📍 [${round}/5] Running: $ROUND_NAME"

                if ! run_single_test; then
                    # Abandon the remaining rounds of this scenario after retries fail
                    break
                fi

                if [ "$first_run" = true ]; then
                    first_run=false  # subsequent rounds append to the existing trace
                fi
                echo ""
            done

            echo "✅ Scenario completed: $run_name"

            # Per-scenario parsing is disabled; traces are batch-parsed at the end
            # parse_and_append_results "$trace_file" "$MAIN_CSV" "$run_name" "$ANALYSIS_FOLDER"

            echo "✅ Results processed and appended"
            echo ""
        done
    done
done
echo ""

# ============================================================================
# periodic: TYPE0/1, CNT=100, COST=100us, QOS=2/UNSPECIFIED
# Updated format: task_type:0/1, cnt:100, cost:100us, qos:2/-1
# ============================================================================
echo "=== Periodic Scenarios ==="
for type in 0 1; do
    for qos_val in 2 -1; do
        run_name="periodic_TYPE_${type}_INTERVAL_10_CNT_100_COST_100_QOS_${qos_val}"

        # All 5 rounds of this scenario record into one shared .trace file
        trace_file="$RESULT_FOLDER/${run_name}.trace"
        echo "📁 Trace file: $trace_file"

        first_run=true
        for round in {1..5}; do
            # Hand per-round parameters to run_single_test via the globals
            ROUND_NAME="${run_name}_ROUND_${round}"
            FIRST_RUN="$first_run"
            TIMEOUT=20
            WINDOW="5s"
            LAUNCH_ARGS="--auto-test periodic --task-type $type --task-count 100 --task-duration 100us --qos-class $qos_val --parameters $ROUND_NAME"
            echo "📍 [${round}/5] Running: $ROUND_NAME"

            if ! run_single_test; then
                # Abandon the remaining rounds of this scenario after retries fail
                break
            fi

            if [ "$first_run" = true ]; then
                first_run=false  # subsequent rounds append to the existing trace
            fi
            echo ""
        done

        echo "✅ Scenario completed: $run_name"

        # Per-scenario parsing is disabled; traces are batch-parsed at the end
        # parse_and_append_results "$trace_file" "$MAIN_CSV" "$run_name" "$ANALYSIS_FOLDER"

        echo "✅ Results processed and appended"
        echo ""
    done
done
echo ""

# ============================================================================
# playback: 6 files × 5 rounds each
# ============================================================================
echo "=== Playback Scenarios ==="
# NOTE(review): PROJECT_DIR is already "." from the earlier configuration;
# this reassignment is redundant but harmless.
PROJECT_DIR="."


PLAYBACK_FILES=(
    "camera_photo_preview.playback"
    "camera_video_record.playback"
    "douyin_swipe_app.playback"
    "douyin_swipe_sceneboard.playback"
    "taobao_swipe_app.playback"
    "taobao_swipe_rs.playback"
)

for playback_file in "${PLAYBACK_FILES[@]}"; do
    # Strip the .playback extension to build the run name
    file_name="${playback_file%.playback}"
    run_name="playback_${file_name}"

    # All 5 rounds of this scenario record into one shared .trace file
    trace_file="$RESULT_FOLDER/${run_name}.trace"
    echo "📁 Trace file: $trace_file"

    first_run=true
    for ((i=1; i<=5; i++)); do
        # Playback runs use a longer timeout and recording window than the
        # synthetic scenarios above
        ROUND_NAME="${run_name}_ROUND_$i"
        FIRST_RUN="$first_run"
        TIMEOUT=30
        WINDOW="20s"

        # For playback tests, pass the file path based on platform
        if [ "$TARGET_TYPE" = "ios" ]; then
            # iOS: only pass filename (bundle has the file)
            LAUNCH_ARGS="--auto-test playback --playback-file $playback_file --parameters $ROUND_NAME"
        else
            # Mac: pass full path to PlaybackCases directory in DerivedData (TCC unrestricted)
            PLAYBACK_PATH="$HOME/Library/Developer/Xcode/DerivedData/Build/Products/PlaybackCases/$playback_file"
            LAUNCH_ARGS="--auto-test playback --playback-file $PLAYBACK_PATH --parameters $ROUND_NAME"
        fi

        echo "  🔄 Round $i: $ROUND_NAME"

        if ! run_single_test; then
            # Abandon the remaining rounds of this scenario after retries fail
            break
        fi

        if [ "$first_run" = true ]; then
            first_run=false  # subsequent rounds append to the existing trace
        fi
        echo ""
    done

    echo "✅ Scenario completed: $run_name"

    # Per-scenario parsing is disabled; traces are batch-parsed at the end
    # parse_and_append_results "$trace_file" "$MAIN_CSV" "$run_name" "$ANALYSIS_FOLDER"

    echo "✅ Results processed and appended"
    echo ""
done

echo ""
echo "=== Test Matrix Complete ==="
echo ""

# Batch process all trace files using batch_parse_traces.py
echo "🚀 Starting batch processing of all trace files..."
python3 "$PROJECT_DIR/batch_parse_traces.py" "$RESULT_FOLDER"

echo ""
echo "✅ All tasks completed!"

# Show the most recent trace file as a sanity check.
# BUG FIX: the old fallback referenced $OUTPUT_FILE, which is never defined
# anywhere in this script; skip the schema summary cleanly instead.
ACTUAL_TRACE=$(ls -t "$RESULT_FOLDER"/*.trace 2>/dev/null | head -1)
if [ -n "$ACTUAL_TRACE" ]; then
    echo "Trace file: $ACTUAL_TRACE"
    echo ""
    echo "Checking trace contents..."

    # List the unique table schemas recorded in the trace's table of contents
    xcrun xctrace export --input "$ACTUAL_TRACE" --toc 2>&1 | grep 'schema=' | grep -o 'schema="[^"]*"' | sort -u
else
    echo "⚠️ No trace files found in $RESULT_FOLDER"
fi

echo ""
echo "Done!"
echo ""
echo "Usage:"
# BUG FIX: the usage text listed the arguments in the wrong order — the script
# reads the output directory as $1 and the device as $2 (see the
# OUTPUT_DIR="${1:-~/Desktop}" assignment near the top).
echo "  ./batch_test.sh [output_dir] [device]"
echo "  Example: ./batch_test.sh ~/Desktop 'iPad'"
echo ""