#!/bin/bash
# SLURM batch script: LoRA fine-tuning of EasyAnimate (v5.1) on color+pose videos.
#SBATCH -J EAwPoseVideo
#SBATCH -N 1
#SBATCH -p a01
# NOTE(review): --nodelist names 4 nodes but -N 1 requests a single node; SLURM
# requires every node in --nodelist to be part of the allocation — confirm intent.
#SBATCH --nodelist=g46,g50,g78,g14
#SBATCH -o stdout.%j.txt
#SBATCH -e stderr.%j.txt
#SBATCH --no-requeue # do not requeue the job after it fails
#SBATCH --cpus-per-task=8 # 8 CPU cores per task (matches --dataloader_num_workers)
#SBATCH --ntasks-per-node=2 # tasks per node (one per GPU); this only tells SLURM how many tasks you want (task != process) — it does not run the Python script that many times
#SBATCH --gres=gpu:2 # set as needed; omit this line for CPU-only jobs

# Environment setup: load user profile and activate the project conda env.
source ~/.bashrc
source /home/fit/liuyebin/WORK/miniconda3/bin/activate
# Unwind any stacked/nested conda environments before activating the target one.
conda deactivate
conda deactivate
conda deactivate
conda activate multiviewcogvideo

# Avoid tokenizer-fork deadlock warnings in DataLoader worker processes.
export TOKENIZERS_PARALLELISM=false
# Reduce CUDA allocator fragmentation on long training runs.
export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True
# NOTE(review): CUDA_LAUNCH_BLOCKING=1 serializes every kernel launch — useful
# only for debugging CUDA errors; it slows training considerably. Drop for
# production runs.
export CUDA_LAUNCH_BLOCKING=1


# Model checkpoint, dataset root, metadata JSON, and output locations.
export MODEL_NAME="/WORK/PUBLIC/liuyebin_work/lingweidang/model_zoos/PAI/EasyAnimateV5.1-7b-zh-InP"
export DATASET_NAME="/home/fit/liuyebin/WORK/lingweidang/datas/TACO_Data_20250314/full_20k_plus"
# export DATASET_META_NAME="/WORK/PUBLIC/liuyebin_work/lingweidang/datas/TACO_Data_20250314/preprocessed_for_easy_animate/base_2500_train_VLMEnhanced.json"
export DATASET_META_NAME="/home/fit/liuyebin/WORK/lingweidang/datas/TACO_Data_20250314/preprocessed_for_easy_animate/ablation_train_50_color_video_and_corresponding_pose_video.json"
export output_dir="/home/fit/liuyebin/WORK/lingweidang/outputs/for_paper/rebuttal/easyanimate_lora_videoandpose"

# Disable InfiniBand and P2P transports for NCCL (single-node, problematic fabric).
export NCCL_IB_DISABLE=1
export NCCL_P2P_DISABLE=1
# Fix: was a plain assignment (NCCL_DEBUG=INFO) — without `export` the variable
# never reaches the accelerate/python subprocesses, so no NCCL debug output.
export NCCL_DEBUG=INFO

# When training with multiple machines, use "--config_file accelerate.yaml"
# instead of "--mixed_precision='bf16'".
# NOTE(review): --use_deepspeed appears twice — once as an accelerate-launch
# flag (below) and once as an argument to train_lora.py (near the end);
# confirm the training script actually consumes the second one.
# Fix: all variable expansions are now double-quoted (ShellCheck SC2086) so the
# command stays correct even if a path contains spaces or glob characters.
CUDA_VISIBLE_DEVICES=0,1 accelerate launch \
  --use_deepspeed --deepspeed_config_file config/zero_stage2_config.json --main_process_port 29504 --deepspeed_multinode_launcher standard \
  scripts/train_lora.py \
  --pretrained_model_name_or_path="$MODEL_NAME" \
  --train_data_dir="$DATASET_NAME" \
  --train_data_meta="$DATASET_META_NAME" \
  --config_path "config/easyanimate_video_v5.1_magvit_qwen.yaml" \
  --image_sample_size=384 \
  --video_sample_size=256 \
  --token_sample_size=512 \
  --video_sample_stride=3 \
  --video_sample_n_frames=49 \
  --train_batch_size=4 \
  --video_repeat=1 \
  --gradient_accumulation_steps=1 \
  --dataloader_num_workers=8 \
  --num_train_epochs=800 \
  --checkpointing_steps=250 \
  --learning_rate=1e-04 \
  --seed=42 \
  --low_vram \
  --output_dir="$output_dir" \
  --gradient_checkpointing \
  --mixed_precision="bf16" \
  --adam_weight_decay=5e-3 \
  --adam_epsilon=1e-10 \
  --vae_mini_batch=12 \
  --max_grad_norm=0.05 \
  --random_hw_adapt \
  --training_with_video_token_length \
  --loss_type="flow" \
  --enable_bucket \
  --use_deepspeed \
  --uniform_sampling \
  --train_mode="inpaint"
#    \
#   > log_lora.out 2>&1 &