#!/bin/bash

# run_sync.sh — run every DataX job file of one job type in parallel.
#
# Usage: run_sync.sh JOB_TYPE [MAX_WORKERS]
#   JOB_TYPE    - subdirectory of DEFAULT_BASE_DIR holding <type>/job/*.json
#   MAX_WORKERS - number of concurrent DataX processes (default: 4)

readonly DEFAULT_DATAX_PATH="/opt/datax/bin/datax.py"
readonly DEFAULT_BASE_DIR="/opt/datax"
readonly DEFAULT_MAX_WORKERS=4

JOB_TYPE=$1
MAX_WORKERS=${2:-$DEFAULT_MAX_WORKERS}

JOB_DIR="${DEFAULT_BASE_DIR}/${JOB_TYPE}/job"
LOG_DIR="${DEFAULT_BASE_DIR}/${JOB_TYPE}/logs/$(date +%Y%m%d)"
FAILED_FILE="${LOG_DIR}/failed_tables.txt"

# Validate parameters
if [ -z "$JOB_TYPE" ]; then
    echo "Usage: $(basename "$0") [JOB_TYPE] [MAX_WORKERS]" >&2
    exit 1
fi

# MAX_WORKERS must be a positive integer; anything else would only surface
# later as a cryptic 'xargs -P' error.
case "$MAX_WORKERS" in
    ''|*[!0-9]*|0)
        echo "[ERROR] MAX_WORKERS must be a positive integer, got: $MAX_WORKERS" >&2
        exit 1
        ;;
esac

# Fail fast on a missing job directory: otherwise find matches nothing and
# the script would later report "All tables synced successfully" (exit 0)
# without having synced anything.
if [ ! -d "$JOB_DIR" ]; then
    echo "[ERROR] Job directory does not exist: $JOB_DIR" >&2
    exit 1
fi

mkdir -p "$LOG_DIR" || exit 1
# Remove both the failure list and its stale flock lock file from any
# previous run on the same day.
rm -f "$FAILED_FILE" "$FAILED_FILE.lock"

echo "[$(date +'%Y-%m-%d %H:%M:%S')] Starting sync job type: $JOB_TYPE"
echo "  Job directory: $JOB_DIR"
echo "  Log directory: $LOG_DIR"
echo "  Concurrency level: $MAX_WORKERS"
start_time=$(date +%s)

#######################################
# Sync one table with DataX and record failures.
# Globals:
#   DEFAULT_DATAX_PATH (read)  - path to datax.py
#   LOG_DIR            (read)  - per-run log directory
#   FAILED_FILE        (write) - newline-separated list of failed tables
# Arguments:
#   $1 - path to a DataX job JSON file; its basename minus .json is the table
# Outputs:
#   status lines to stdout, error line to stderr, DataX output to a log file
# Returns:
#   0 if DataX succeeded, 1 otherwise
#######################################
parallel_sync() {
    local job_file=$1
    # Split declaration from assignment so the substitution's exit status
    # is not masked by 'local'.
    local table log_file
    table=$(basename "$job_file" .json)
    log_file="$LOG_DIR/${table}.log"

    echo "[$(date +'%Y-%m-%d %H:%M:%S')] Processing table: $table (PID: $$)..."

    # Full DataX output goes to the per-table log; only status lines here.
    if ! python "$DEFAULT_DATAX_PATH" "$job_file" > "$log_file" 2>&1; then
        echo "[ERROR] Sync failed: $table" >&2
        # Serialize appends with flock: several workers may fail concurrently
        # and interleave their writes otherwise.
        ( flock -x 200; echo "$table" >> "$FAILED_FILE"; ) 200>"$FAILED_FILE.lock"
        return 1
    fi

    echo "[SUCCESS] Sync completed: $table"
    return 0
}

# Workers run in child bash processes, so the function and the globals it
# reads must be exported.
export -f parallel_sync
export DEFAULT_DATAX_PATH LOG_DIR FAILED_FILE

# Run with maxdepth=1 to avoid duplicate processing.
# Pass each filename as a positional argument ($1) rather than substituting
# {} into the command string: string substitution breaks — and permits shell
# injection — when a filename contains quotes, spaces or $().
find "$JOB_DIR" -maxdepth 1 -name '*.json' -print0 | \
    xargs -0 -P "$MAX_WORKERS" -I {} bash -c 'parallel_sync "$1"' _ {}

# Report total wall-clock time, then summarize failures (if any) and set the
# script's exit status accordingly.
end_time=$(date +%s)
duration=$(( end_time - start_time ))
hours=$(( duration / 3600 ))
minutes=$(( (duration % 3600) / 60 ))
seconds=$(( duration % 60 ))
printf "Total execution time: %02dh %02dm %02ds\n" "$hours" "$minutes" "$seconds"

# The failure file only exists if at least one worker appended to it.
if [ ! -f "$FAILED_FILE" ]; then
    echo "All tables synced successfully"
    exit 0
fi

# Deduplicate once and reuse for both the count and the listing.
failed_list=$(sort -u "$FAILED_FILE")
failed_count=$(printf '%s\n' "$failed_list" | wc -l)
echo "[WARNING] Number of failed tables: $failed_count"
echo "Failed tables list:"
printf '%s\n' "$failed_list"
exit 1