#!/usr/bin/env bash
# Launch config for Kolors LoRA fine-tuning; consumed by the deepspeed call below.
# Usage: PYTHONPATH=./ sh hydit/train_kolors_lora.sh > logfile/base_style_draw_lora_textmask.log 2>&1
set -eu  # abort on command failure / unset variable ('pipefail' omitted: script may run under plain sh)

task_flag="base_style_draw_lora_check_uncond0"   # task flag: experiment tag used to name outputs
pretrained_model_name_or_path="/llmcapagroup1/test-bucket/xinyu/pretrained_weights/Kolors"       # checkpoint root for resume
index_file="/llmcapagroup1/test-bucket/xinyu/code/Hunyuan-1.1/dataset/style/caption_draw.json"   # training caption index
results_dir="./log_EXP_kolors_try"                       # save root for results
batch_size=2                                             # training batch size (per device)
image_size=1024                                          # training image resolution
grad_accu_steps=1                                        # gradient accumulation steps
warmup_num_steps=0                                       # warm-up steps
lr=0.00001                                               # learning rate
ckpt_every=10                                            # create a ckpt every N steps
ckpt_latest_every=5000                                   # refresh `latest.pt` every N steps
rank=64                                                  # rank of lora
max_training_steps=200000                                # maximum training iteration steps


# Single-resolution LoRA training, no text-mask variant.
# Alternative entry point with text masking (swap the script name to use it):
# PYTHONPATH=./ deepspeed hydit/train_deepspeed_kolors_textmask.py \
PYTHONPATH=./ deepspeed hydit/train_deepspeed_kolors.py \
    --task-flag "${task_flag}" \
    --training-parts lora \
    --rank "${rank}" \
    --lr "${lr}" \
    --noise-schedule scaled_linear --beta-start 0.00085 --beta-end 0.03 \
    --predict-type v_prediction \
    --uncond-p 0 \
    --index-file "${index_file}" \
    --random-flip \
    --batch-size "${batch_size}" \
    --image-size "${image_size}" \
    --global-seed 999 \
    --grad-accu-steps "${grad_accu_steps}" \
    --warmup-num-steps "${warmup_num_steps}" \
    --use-flash-attn \
    --use-fp16 \
    --results-dir "${results_dir}" \
    --pretrained_model_name_or_path "${pretrained_model_name_or_path}" \
    --ckpt-every "${ckpt_every}" \
    --max-training-steps "${max_training_steps}" \
    --ckpt-latest-every "${ckpt_latest_every}" \
    --log-every 10 \
    --deepspeed \
    --deepspeed-optimizer \
    --use-zero-stage 2 \
    "$@"
