# sd1.5-512-1card (default) : bash train_lora.sh

# sd1.5-512-1card : bash train_lora.sh -sp /workspace/br_stable_diffusion/examples/text_to_image/train_text_to_image_lora.py -v 1.5 -r 512 -c 1
# sd1.5-512-1card : bash train_lora.sh -sp /workspace/br_stable_diffusion/examples/text_to_image/train_text_to_image_lora.py -v 1.5 -r 512 -c 1 -hp 1
# sd2.1-512-1card : bash train_lora.sh -sp /workspace/br_stable_diffusion/examples/text_to_image/train_text_to_image_lora.py -v 2.1 -r 512 -c 1
# sd2.1-512-1card : bash train_lora.sh -sp /workspace/br_stable_diffusion/examples/text_to_image/train_text_to_image_lora.py -v 2.1 -r 512 -c 1 -hp 1

# sd1.5-256-1card : bash train_lora.sh -sp /workspace/br_stable_diffusion/examples/text_to_image/train_text_to_image_lora.py -v 1.5 -r 256 -c 1
# sd1.5-256-1card : bash train_lora.sh -sp /workspace/br_stable_diffusion/examples/text_to_image/train_text_to_image_lora.py -v 1.5 -r 256 -c 1 -hp 1
# sd2.1-256-1card : bash train_lora.sh -sp /workspace/br_stable_diffusion/examples/text_to_image/train_text_to_image_lora.py -v 2.1 -r 256 -c 1
# sd2.1-256-1card : bash train_lora.sh -sp /workspace/br_stable_diffusion/examples/text_to_image/train_text_to_image_lora.py -v 2.1 -r 256 -c 1 -hp 1

# sd1.5-960-1card : bash train_lora.sh -sp /workspace/br_stable_diffusion/examples/text_to_image/train_text_to_image_lora.py -v 1.5 -r 960 -c 1
# sd2.1-960-1card : bash train_lora.sh -sp /workspace/br_stable_diffusion/examples/text_to_image/train_text_to_image_lora.py -v 2.1 -r 960 -c 1

# Raise the open-file limit and enlarge /dev/shm: dataloader workers use many
# file descriptors and shared memory.
# NOTE(review): the remount requires root privileges (typical inside a
# container) — confirm before running as an unprivileged user.
ulimit -n 65535
mount -o remount,size=64G /dev/shm

# Default configuration. Each value can be overridden by a command-line flag;
# see usage()/get_args() below for the flag names.
NUM_NODES=1          # -n   number of machines
NUM_PROCESS=1        # -c   number of processes (cards) per machine
RESOLUTION=512       # -r   training image resolution
MASTER_ADDRESS=""    # -i   master node address for multi-node runs
MASTER_PORT=12345    # NOTE(review): defined but not referenced in this file — verify it is still needed
NODE_RANK=0          # -d   rank of this node in a multi-node run
MODEL_VERSION="1.5"  # -v   "1.5" or "2.1"
HIGH_PRECISION=0     # -hp  1 enables the high-precision (bf16 mixed) lora path
PROFILER=0           # -p   1 enables torch_supti_profiler
WRITE_LOG=1          # -w   1 tees output to a log file under ./output
VALIDATION=0         # -vl  1 runs validation with $user_prompt
EVAL_CACHE=1         # -ec  1 enables eval cache for clip encoder and vae
SCRIPT_PATH="/workspace/br_stable_diffusion/examples/text_to_image/train_text_to_image_lora.py"  # -sp

BATCH_SIZE=16        # -b   per-process train batch size
OP_MODE="fused"  # "eager" or "fused", different env variables
VISIBLE_DEVICE="0,1,2,3,4,5,6,7"  # -vd  value for SUPA_VISIBLE_DEVICES
user_prompt="cute dragon creature"  # validation prompt used when VALIDATION=1

usage() {
  # Print the help text for all supported options and exit.
  # Arguments: $1 - exit status (0 for normal help, non-zero for errors)
  echo -e "train_lora.sh [OPTION]"
  echo -e "\t-n       node number"
  echo -e "\t-sp      script path: path to script, default is /workspace/br_stable_diffusion/examples/text_to_image/train_text_to_image_lora.py"
  echo -e "\t-c       number of process(card number)"
  echo -e "\t-r       resolution: 256, 512 or 960 suggested"
  echo -e "\t-i       master address, need to be set when running multi nodes"
  echo -e "\t-d       node rank, default is 0, need to be set when running multi nodes"
  echo -e "\t-v       model version, only support sd1.5 and sd2.1 now"
  echo -e "\t-p       enable torch_supti_profiler, 0 or 1, default is 0"
  echo -e "\t-w       write log to log file, 0 or 1, default is 1"
  echo -e "\t-hp      high precision optimizer, 0 or 1, default is 0"
  echo -e "\t-vl      whether to run validation, 0 or 1, default is 0"
  echo -e "\t-ec      whether to use eval cache for clip encoder and vae"

  echo -e "\t-b       batch size: 15 or 16 suggested"
  echo -e "\t-m       op running mode, support eager or fused"
  echo -e "\t-vd      set supa visible devices, default is 0,1,2,3,4,5,6,7"
  echo -e "\n"

  echo -e "If you want to run 8-card training, you can execute:"
  echo -e "\t bash train_lora.sh -c 8"
  echo -e "If you want to run resolution 512, you can execute:"
  echo -e "\t bash train_lora.sh -r 512"
  echo -e "If you want to run sd2.1 with resolution 960, you can execute:"
  echo -e "\t bash train_lora.sh -v 2.1 -r 960"
  echo -e "If you want to profiler the training, you can execute:"
  echo -e "\t bash train_lora.sh -p 1"
  echo -e "If you don't want to write log to default log file, you can execute:"
  echo -e "\t bash train_lora.sh -w 0"
  echo -e "If you want to train model with fp32 lora, you can execute:"
  echo -e "\t bash train_lora.sh -hp 1"
  echo -e "If you want to run clip and vae part without cache, you can execute:"
  echo -e "\t bash train_lora.sh -ec 0"
  echo -e "If you want to run with eager mode, you can execute:"
  echo -e "\t bash train_lora.sh -m eager"
  echo -e "If you want to set supa visible devices, you can execute:"
  echo -e "\t bash train_lora.sh -vd 0,1,2,3,4,5,6,7"
  exit "$1"
}

get_args() {
  # Parse command-line flags into the script's global configuration variables.
  # Stops at "--" or at the first non-flag argument; an unrecognized flag
  # prints the help text and exits non-zero.
  while [ $# -gt 0 ]
  do
    case $1 in
    -sp) SCRIPT_PATH=$2 ; shift;;
    -n) NUM_NODES=$2 ; shift;;
    -c) NUM_PROCESS=$2 ; shift;;
    -r) RESOLUTION=$2 ; shift;;
    -i) MASTER_ADDRESS="$2" ; shift;;
    -d) NODE_RANK=$2 ; shift;;
    -v) MODEL_VERSION=$2 ; shift;;
    -p) PROFILER=$2 ; shift;;
    -hp) HIGH_PRECISION=$2 ; shift;;
    -w) WRITE_LOG=$2 ; shift;;
    -vl) VALIDATION=$2 ; shift;;
    -m) OP_MODE=$2 ; shift;;
    -b) BATCH_SIZE=$2 ; shift;;
    -vd) VISIBLE_DEVICE=$2 ; shift;;
    -ec) EVAL_CACHE=$2 ; shift;;

    # Unknown flag is an error: exit non-zero so callers/CI notice the typo
    # (previously exited 0, masking the failure).
    (-*) usage 1;;

    (--) shift; break;;
    (*) break;;
    esac
    shift
  done
}

set_env() {
  # Export the SUPA/sudnn runtime environment derived from the parsed options.
  # Reads: VISIBLE_DEVICE, OP_MODE, RESOLUTION, VALIDATION, MODEL_VERSION.
  # Exits non-zero on an unsupported MODEL_VERSION.
  # export SULIB_LOG_LEVEL=off
  export SUDNN_KERNEL_CACHE_CAPACITY=30000
  export SUDNN_KERNEL_CACHE_EXCLUDE_UID=1
  export SUDNN_KERNEL_CACHE_FOLDER=/tmp/kernel_cache  # input yourself kernel cache path
  export SUDNN_KERNEL_CACHE_DISK_LEVEL=3
  export SUDNN_KERNEL_CACHE_MAX_SIZE_MB=10240
  if [ ! -d "$SUDNN_KERNEL_CACHE_FOLDER" ]; then
    mkdir -p "$SUDNN_KERNEL_CACHE_FOLDER"
  fi

  export SUPA_VISIBLE_DEVICES="$VISIBLE_DEVICE"
  export ENABLE_FORCE_SYNC=1
  export BRTB_ENABLE_FORCE_SUDNN_CONV2d=1
  export DYNAMIC_SHAPE_CODEGEN=1
  export ENABLE_HIGH_PRECISION=0

  # env variables to save memory while running with numa
  export BRTB_ENABLE_NUMA_SPLIT=1
  export BRTB_ENABLE_NUMA_ALIGN_4K=1
  # export PYTORCH_NO_SUPA_MEMORY_CACHING=1
  # export PYTORCH_SUPA_ALLOC_CONF=max_split_size_mb:30

  if [[ $OP_MODE == "fused" ]]; then
    # TODO: test if these env variables can infect the performance of eager mode
    export BRTB_DISABLE_L2_FLUSH=1

    # disable op clean in frame to save time
    export BRTB_ENABLE_FUSED_BIASADD=1
    export BRTB_DISABLE_ZERO_OUTPUT_NUMA=1
    export BRTB_DISABLE_ZERO_WS=1
    export BRTB_DISABLE_ZERO_REORDER=1
    export BRTB_DISABLE_ZERO_OUTPUT_UMA=1
    export ENABLE_CLEAN_TENSOR=1
    # export DISABLE_WRITE_THROUGH=1
    # env variables to support attention lora bwd
  fi

  if [ "$RESOLUTION" -gt 512 ]; then
    export RESNET_BLOCK_EVAL_LEVEL=2
  else
    export RESNET_BLOCK_EVAL_LEVEL=1
  fi

  if [ "$VALIDATION" -eq 0 ]; then
    export ENABLE_VAE_DECODER=0
  else
    export ENABLE_VAE_DECODER=1 # decoder is needed for validation
  fi

  if [[ $RESOLUTION == 960 ]]; then
    export SULIB_KERNEL_MODE="fused_op"
    export PYTORCH_SUPA_ALLOC_CONF=max_split_size_mb:30 # need this to solve oom issue
    export SUDNN_KERNEL_CACHE_VERSION_CHECK=0 # use sudnn self kernel cache
    export BRTB_ENABLE_REGISTER_BEFORE_D2H=1 # for act offload async
  fi

  if [[ $MODEL_VERSION == "1.5" ]]; then
    export MODEL_NAME="runwayml/stable-diffusion-v1-5"
  elif [[ $MODEL_VERSION == "2.1" ]]; then
    export ENABLE_HIGH_PRECISION_ATTENTION=1
    export MODEL_NAME="stabilityai/stable-diffusion-2-1"
  else
    # Report to stderr and exit 1 explicitly: the previous `echo ... && exit`
    # exited with echo's status (0), so the failure looked like success.
    echo "unsupported model version: ${MODEL_VERSION}" >&2
    exit 1
  fi

  export DATASET_NAME="reach-vb/pokemon-blip-captions"
}

# Total optimizer steps; override by exporting MAX_TRAIN_STEPS before running.
MAX_TRAIN_STEPS=${MAX_TRAIN_STEPS:-15000}

run() {
  # Assemble the `accelerate launch` command line from the parsed options and
  # start training, optionally teeing stdout+stderr to a timestamped log file.
  # Reads: SCRIPT_PATH, MODEL_NAME, DATASET_NAME, RESOLUTION, BATCH_SIZE,
  #        MAX_TRAIN_STEPS, PROFILER, HIGH_PRECISION, OP_MODE, EVAL_CACHE,
  #        NUM_PROCESS, NUM_NODES, NODE_RANK, VALIDATION, WRITE_LOG,
  #        MODEL_VERSION, user_prompt.
  script_name=$SCRIPT_PATH
  launcher="accelerate launch"
  # launcher="gdb --args python3" # for debug

  # args is one multi-line string; it is intentionally expanded UNQUOTED below
  # so the shell word-splits it into individual options. None of the embedded
  # values may contain spaces.
  args="--pretrained_model_name_or_path=$MODEL_NAME
--dataset_name=$DATASET_NAME --caption_column=text
--resolution=$RESOLUTION --random_flip
--seed=42
--train_batch_size=$BATCH_SIZE
--gradient_accumulation_steps=1
--max_train_steps=$MAX_TRAIN_STEPS
--num_train_epochs=100 --checkpointing_steps=5000
--learning_rate=1e-04
--lr_scheduler=constant --lr_warmup_steps=0
--dataloader_num_workers=4
--output_dir=sd-pokemon-model-lora"

  if [ $PROFILER -gt 0 ]; then
    echo "torch_supti_profiler is enabled"
    args="$args --torch_supti_profiler"
  fi

  if [ $HIGH_PRECISION -gt 0 ]; then
    echo "enable high precision lora"
    args="$args --mixed_precision=bf16"
  fi

  if [[ $OP_MODE == "fused" ]]; then
    args="$args --use_br_fused_op"
  fi

  if [[ $RESOLUTION == 960 ]]; then
    # activation offload is only enabled at the largest resolution
    args="$args --use_act_offload"
  fi

  if [ $EVAL_CACHE -eq 1 ]; then
    echo "enable eval cache for clip encoder and vae."
    args="$args --enable_eval_cache"
  fi

  launcher_args="--mixed_precision=bf16"

  if [ $NUM_PROCESS -gt 1 ]; then
    launcher_args="$launcher_args --multi_gpu --num_processes=$NUM_PROCESS"
  fi

  if [ $NUM_NODES -gt 1 ]; then
    launcher_args="$launcher_args --num_machines=$NUM_NODES --machine_rank=$NODE_RANK"
  fi

  cmd="$launcher $launcher_args $script_name $args"

  # --validation_prompt= is always passed as a single (quoted) argument; the
  # value stays empty unless VALIDATION=1, in which case user_prompt is used.
  validation_prompt="--validation_prompt="

  if [ $VALIDATION -eq 1 ]; then
    validation_prompt="$validation_prompt$user_prompt"
  fi

  echo -e $cmd $validation_prompt

  if [ $WRITE_LOG -gt 0 ]; then
    mkdir -p output
    # |& pipes both stdout and stderr into tee (bash >= 4)
    $cmd "$validation_prompt" |& tee output/sd_lora_v${MODEL_VERSION}_${RESOLUTION}_N${NUM_NODES}C${NUM_PROCESS}_$(date +%Y%m%d_%H%M%S).log
  else
    $cmd "$validation_prompt"
  fi
}

# Parse CLI flags, export the runtime environment, then launch training.
# "$@" must be quoted so arguments containing spaces survive intact.
get_args "$@"

set_env

run

# other args for debug
# NOTICE: remove the dataloader_num_workers arg or set it to 1 during overfit, because golden is unset
# --max_train_steps=300
# --overfit
# --overfit_input=sd15_lora/bs15/res512/
# training
# --cpu_noise --disable_dataloader_shuffle
