#!/bin/bash
DATETIME=$(date '+%Y-%m-%d-%H')
RUN_NAME="timesearch-r-retrain"
OUTPUT_DIR=/data/shuimu.chen/TimeSearch-R/experiment/$RUN_NAME/$DATETIME
mkdir -p "$OUTPUT_DIR"
export WANDB_PROJECT=TimeSearch-R-ColdStart
export WANDB_NAME=$RUN_NAME
export LOG_PATH=${OUTPUT_DIR}/log.txt
export DEBUG=true
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,6
# export TOKENIZERS_PARALLELISM=false

# export OMP_NUM_THREADS=2
# export MKL_NUM_THREADS=2


export PYTHONPATH=".:$PYTHONPATH"
export SIGLIP_URL=grpc://127.0.0.1:51000
# export LLM_AS_A_JUDGE_BASE=http://127.0.0.1:18901/v1
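# Optional reachability probe (not in the original script; assumes bash's
# /dev/tcp pseudo-device is available): warn early if the SigLIP gRPC
# server configured above is not up, instead of failing mid-training.
SIGLIP_HOSTPORT=${SIGLIP_URL#grpc://}
if ! (exec 3<>"/dev/tcp/${SIGLIP_HOSTPORT%%:*}/${SIGLIP_HOSTPORT##*:}") 2>/dev/null; then
    echo "Warning: SigLIP server at ${SIGLIP_URL} appears unreachable." >&2
fi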

# Local training configuration
NUM_GPUS=2
MASTER_PORT=29500
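
# Optional sanity check (added sketch): NUM_GPUS should normally match the
# number of devices listed in CUDA_VISIBLE_DEVICES above (6 are listed there,
# while NUM_GPUS is 2); warn on a mismatch rather than silently undersubscribing.
VISIBLE_GPU_COUNT=$(echo "$CUDA_VISIBLE_DEVICES" | tr ',' '\n' | wc -l)
if [ "$NUM_GPUS" -ne "$VISIBLE_GPU_COUNT" ]; then
    echo "Warning: NUM_GPUS=${NUM_GPUS} but CUDA_VISIBLE_DEVICES lists ${VISIBLE_GPU_COUNT} devices." >&2
fi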

echo "Local training mode: ${NUM_GPUS} GPUs on localhost:${MASTER_PORT}"

TRAIN_PATH=configs/dataset.yaml

VIDEO_ROOT=/data/shuimu.chen/LongVideoBench/LongVideoHaystack/videos

# MODEL_BASE=/data/shuimu.chen/Qwen2.5-VL-Instruct
MODEL_BASE=/data/shuimu.chen/Qwen2.5-VL-3B-Instruct
# MODEL_BASE=/data/shuimu.chen/Video-R1/Qwen2.5-VL_COT_SFT_offitial
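
# Optional fail-fast check (added sketch): verify the dataset config, video
# root, and base model checkpoint all exist before launching a multi-GPU job.
for REQUIRED_PATH in "$TRAIN_PATH" "$VIDEO_ROOT" "$MODEL_BASE"; do
    if [ ! -e "$REQUIRED_PATH" ]; then
        echo "Error: required path not found: $REQUIRED_PATH" >&2
        exit 1
    fi
done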

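# Note: --use_vllm is false below, so the vLLM-specific flags (--vllm_mode,
# --vllm_gpu_memory_utilization) are presumably ignored; they are kept so
# colocated vLLM generation can be re-enabled by flipping a single flag.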
torchrun --nproc_per_node=${NUM_GPUS} --nnodes=1 --node_rank=0 \
    --master_addr=localhost --master_port=${MASTER_PORT} \
    time_r1/train.py \
    --deepspeed scripts/zero3.json \
    --output_dir "$OUTPUT_DIR" \
    --model_name_or_path "$MODEL_BASE" \
    --train_data_path "$TRAIN_PATH" \
    --video_folder "$VIDEO_ROOT" \
    --reward_func v8 \
    --prompt_template v4 \
    --tool_name_list seek_video_frames \
    --max_interaction_turns 8 \
    --max_prompt_length 18000 \
    --max_completion_length 16000 \
    --max_completion_length_per_turn 256 \
    --total_video_tokens 10240 \
    --max_frames 30 \
    --min_per_frame_tokens 12 \
    --max_per_frame_tokens 128 \
    --num_generations 4 \
    --scale_rewards false \
    --beta 0.005 \
    --per_device_train_batch_size 2 \
    --gradient_accumulation_steps 2 \
    --steps_per_generation 1 \
    --dataloader_num_workers 2 \
    --logging_steps 1 \
    --bf16 \
    --torch_dtype bfloat16 \
    --data_seed 42 \
    --gradient_checkpointing true \
    --attn_implementation flash_attention_2 \
    --num_train_epochs 2 \
    --run_name "$RUN_NAME" \
    --report_to wandb \
    --save_steps 200 \
    --save_only_model true \
    --use_vllm false \
    --vllm_mode colocate \
    --vllm_gpu_memory_utilization 0.8 \
    --shuffle_dataset true \
    --replay_buffer_type dapo \
    --use_counterfactual_reasoning true
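
# Surface the exit status and output location once training finishes (added
# convenience, not part of the original launch command).
STATUS=$?
echo "torchrun exited with status ${STATUS}; outputs under ${OUTPUT_DIR}"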