#!/bin/bash

# SPDX-FileCopyrightText: 2025 Qingcheng.AI
#
# SPDX-License-Identifier: Apache-2.0

# DP=2 Contiguous Grouping + Router Hybrid Mode Test Script
# Architecture: 1 Router + 2 Contiguous DP Groups, 2 GPUs per group
# Router: Provides HTTP service, load balances to DP groups
# DP Groups: Use torch.distributed.new_group + group rank 0~1

# Configuration parameters
# Fail fast: abort on command errors, unset variables, and pipeline failures.
set -euo pipefail

# Positional overrides: $1 = model config name, $2 = checkpoint directory.
MODEL_CONFIG=${1:-"Qwen3-32B"}
MODEL_CKPT_DIR=${2:-"/data/nfs/Qwen3-32B"}

echo "=== DP=2 Contiguous Grouping + Router Hybrid Mode Test Starting ==="
echo "Model config: $MODEL_CONFIG"
echo "Model path: $MODEL_CKPT_DIR"
echo "Architecture: 1 Router + 2 Contiguous DP Groups"

# SLURM resource sizing (never reassigned — mark readonly)
readonly NUM_GPUS=4           # 4 GPUs: 2 DP groups, 2 GPUs per group
readonly SLURM_PARTITION=debug
readonly CPUS_PER_GPU=24
readonly MEM_PER_GPU=242144   # MB per GPU

# Derived totals for the srun allocation request.
readonly NUM_CPUS=$((NUM_GPUS * CPUS_PER_GPU))
readonly NUM_MEMS=$((NUM_GPUS * MEM_PER_GPU))

# Export flags consumed by the chitu runtime in subprocesses:
# contiguous DP-group rank assignment and synchronous CUDA launches
# (CUDA_LAUNCH_BLOCKING=1 makes kernel failures surface at the call site).
export CHITU_USE_CONTIGUOUS_DP_GROUPS=1
export CUDA_LAUNCH_BLOCKING=1

# Summarize the resource request before submitting to SLURM.
printf '%s\n' \
    "=== Launch Parameters ===" \
    "GPU count: $NUM_GPUS" \
    "CPU count: $NUM_CPUS" \
    "Memory: ${NUM_MEMS}MB" \
    "Environment: CHITU_USE_CONTIGUOUS_DP_GROUPS=$CHITU_USE_CONTIGUOUS_DP_GROUPS"

# Submit the whole test as a single SLURM task. Everything inside the
# double-quoted `bash -c` payload runs on the allocated node; `\$` defers
# expansion to that inner shell, while ${VAR} expands here at submit time.
srun --partition=${SLURM_PARTITION} \
     --gres=gpu:${NUM_GPUS} \
     --cpus-per-task=${NUM_CPUS} \
     --mem=${NUM_MEMS}MB \
     --nodes=1 \
     --ntasks=1 \
     --job-name=dp_2_contiguous_router \
     --time=01:00:00 \
     bash -c "
        set -e

        # Launch one 2-GPU DP group (TP=2, PP=1) via torchrun.
        # Args: \$1 group id, \$2/\$3 first/second GPU index,
        #       \$4 torchrun rendezvous port, \$5 DP scheduler port.
        start_dp_group() {
            local group_id=\$1
            local gpu_start=\$2
            local gpu_end=\$3
            local master_port=\$4
            local scheduler_port=\$5

            # BUG FIX: group_id is now expanded by the inner shell;
            # previously it was expanded (to an empty string) by the
            # submitting shell, so the banner never showed the group id.
            echo \"=== Starting DP Group \$group_id (GPU \$gpu_start-\$gpu_end) ===\"
            CUDA_VISIBLE_DEVICES=\$gpu_start,\$gpu_end torchrun --nproc_per_node=2 \
                    --master_port=\$master_port \
                    -m chitu \
                    models=${MODEL_CONFIG} \
                    models.ckpt_dir=${MODEL_CKPT_DIR} \
                    infer.tp_size=2 \
                    infer.pp_size=1 \
                    infer.cache_type=paged \
                    infer.max_seq_len=2048 \
                    infer.max_reqs=128 \
                    request.max_new_tokens=1200 \
                    dp_config.enabled=True \
                    dp_config.dp_id=\$group_id \
                    dp_config.scheduler_base_host=0.0.0.0 \
                    dp_config.scheduler_base_port=\$scheduler_port \
                    infer.use_cuda_graph=false
        }

        # Ensure environment variables take effect in subprocesses
        export CHITU_USE_CONTIGUOUS_DP_GROUPS=1

        # Display environment information
        echo '=== Environment Information ==='
        echo \"CUDA_VISIBLE_DEVICES: \$CUDA_VISIBLE_DEVICES\"
        echo \"SLURM_PROCID: \$SLURM_PROCID\"
        echo \"SLURM_LOCALID: \$SLURM_LOCALID\"
        echo \"CHITU_USE_CONTIGUOUS_DP_GROUPS: \$CHITU_USE_CONTIGUOUS_DP_GROUPS\"
        nvidia-smi --query-gpu=index,name,memory.used,memory.total --format=csv
        echo \"Available GPU count: \$(nvidia-smi -L | wc -l)\"
        echo \"Torch visible GPU count: \$(python -c 'import torch; print(torch.cuda.device_count())')\"

        # Step 1: Start independent Router process
        echo '=== Step 1: Starting Independent Router Process ==='
        echo 'Features: Provides HTTP service, load balances to DP groups'
        CHITU_INDEPENDENT_ROUTER=1 CHITU_ROUTER_PROCESS=1 python -m chitu \
                 models=${MODEL_CONFIG} \
                 models.ckpt_dir=${MODEL_CKPT_DIR} \
                 dp_config.router.host=0.0.0.0 \
                 dp_config.router.port=21003 \
                 dp_config.enabled=True \
                 dp_config.dp_size=2 \
                 dp_config.router.stats_port=29600 \
                 dp_config.router.token_port=29700 \
                 dp_config.router.dp_addresses.0.host=0.0.0.0 \
                 dp_config.router.dp_addresses.0.port=29610 \
                 dp_config.router.dp_addresses.1.host=0.0.0.0 \
                 dp_config.router.dp_addresses.1.port=29611 \
                 dp_config.router.is_router=True &

        ROUTER_PID=\$!
        # Cleanup: do not orphan background processes if any later step
        # fails under 'set -e'. Single quotes defer expansion to trap time;
        # kill errors are suppressed because PIDs may already be reaped.
        trap 'kill \$ROUTER_PID \${DP_GROUP_0_PID:-} 2>/dev/null || true' EXIT
        echo \"Router process started, PID: \$ROUTER_PID, HTTP port: 21003, ZMQ ports: 29600(stats), 29700(token)\"
        sleep 20

        # Check Router process is still alive (kill -0 only probes)
        if kill -0 \$ROUTER_PID 2>/dev/null; then
            echo \"Router process running normally\"
        else
            echo \"Router process startup failed\"
            exit 1
        fi

        # Check all three router ports are accepting connections
        if nc -z localhost 29600 && nc -z localhost 29700 && nc -z localhost 21003; then
            echo \"Router port check passed\"
        else
            echo \"Router port check failed\"
            exit 1
        fi

        # Step 2: Start unified DP groups (abstracted as single call)
        echo '=== Step 2: Starting Unified DP Groups (All 4 GPUs) ==='

        # Start DP Group 0 (background)
        start_dp_group 0 0 1 29502 29610 &
        DP_GROUP_0_PID=\$!
        echo \"DP Group 0 process started (background), PID: \$DP_GROUP_0_PID, using GPU 0,1\"
        sleep 30

        # Start DP Group 1 (foreground)
        start_dp_group 1 2 3 29503 29611

        # Wait for all background processes; 'wait PID' propagates their
        # exit status, so a failed group aborts the job via 'set -e'.
        echo \"Waiting for all processes to complete...\"
        wait \$DP_GROUP_0_PID
        wait \$ROUTER_PID
     "

# Print a copy-paste curl command for exercising the router's HTTP endpoint.
# Quoted here-doc delimiter: body is emitted literally, no expansion needed,
# which avoids the escape-heavy echo chain.
cat <<'USAGE'

=== Test Command ===
curl -X POST http://localhost:21003/v1/chat/completions \
  -H 'Content-Type: application/json' \
  -d '{"messages":[{"role":"user","content":"Hello, world!"}],"max_tokens":50}'
USAGE
