#sd1.5-512-8card:  bash train_without_lora.sh -sp /workspace/br_stable_diffusion/examples/text_to_image/train_text_to_image.py -c 8 -r 512 -v 1.5
#sd2.1-512-8card:  bash train_without_lora.sh -sp /workspace/br_stable_diffusion/examples/text_to_image/train_text_to_image.py -c 8 -r 512 -v 2.1
#sd1.5-512-1card:  bash train_without_lora.sh -sp /workspace/br_stable_diffusion/examples/text_to_image/train_text_to_image.py -c 1 -r 512 -v 1.5
#sd2.1-512-1card:  bash train_without_lora.sh -sp /workspace/br_stable_diffusion/examples/text_to_image/train_text_to_image.py -c 1 -r 512 -v 2.1
#sd1.5-512-8card-hp:  bash train_without_lora.sh -sp /workspace/br_stable_diffusion/examples/text_to_image/train_text_to_image.py -c 8 -r 512 -v 1.5 -hp 1
#sd2.1-512-8card-hp:  bash train_without_lora.sh -sp /workspace/br_stable_diffusion/examples/text_to_image/train_text_to_image.py -c 8 -r 512 -v 2.1 -hp 1
#sd1.5-512 run with validation:  bash train_without_lora.sh -sp /workspace/br_stable_diffusion/examples/text_to_image/train_text_to_image.py -r 512 -v 1.5 -vl 1

# Raise the per-process open-file limit; the kernel cache and dataloaders can
# hold many files open at once.
ulimit -n 65535
# Enlarge /dev/shm for dataloader shared memory. Requires root; if it fails,
# the error is printed and the script continues with the existing size.
mount -o remount,size=64G /dev/shm
export SULIB_LOG_LEVEL=off
# On-disk SUDNN kernel-cache configuration.
export SUDNN_KERNEL_CACHE_CAPACITY=30000
export SUDNN_KERNEL_CACHE_EXCLUDE_UID=1
export SUDNN_KERNEL_CACHE_FOLDER=/tmp/kernel_cache
export SUDNN_KERNEL_CACHE_DISK_LEVEL=3
export SUDNN_KERNEL_CACHE_MAX_SIZE_MB=10240

# mkdir -p succeeds whether or not the directory already exists, so no
# explicit existence check is needed.
mkdir -p "$SUDNN_KERNEL_CACHE_FOLDER"

# Default configuration. Every value below can be overridden by a
# command-line flag parsed in get_args (see usage for details).
NUM_NODES=1       # -n: number of machines
NUM_PROCESS=1     # -c: number of processes (cards) per node
RESOLUTION=512    # -r: training image resolution
MASTER_ADDRESS="" # -i: main process IP; only needed for multi-node runs
MASTER_PORT=12345 # main process port used together with MASTER_ADDRESS
NODE_RANK=0       # -d: rank of this node; only needed for multi-node runs
MODEL_VERSION="1.5" # -v: "1.5" or "2.1"
PROFILER=0        # -p: 1 enables torch_supti_profiler
WRITE_LOG=1       # -w: 1 tees output to a log file under ./output
HIGH_PRECISION=0  # -hp: 1 enables the high-precision optimizer path
VALIDATION=0      # -vl: 1 runs validation with user_prompt below
EVAL_CACHE=1      # -ec: 1 enables eval cache for CLIP encoder and VAE
SCRIPT_PATH="/workspace/br_stable_diffusion/examples/text_to_image/train_text_to_image.py" # -sp: training script

# Prompt appended to --validation_prompts= when VALIDATION=1.
user_prompt="cute dragon creature"

usage() {
  # Print the help text and exit with the status passed as $1.
  echo -e "train_without_lora.sh [OPTION]"
  echo -e "\t-sp      script path: path to script, default is /workspace/br_stable_diffusion/examples/text_to_image/train_text_to_image.py"
  echo -e "\t-n       node number"
  echo -e "\t-c       number of process(card number)"
  echo -e "\t-r       resolution: 256, 512 or 960 suggested"
  echo -e "\t-i       master address, need to be set when running multi nodes"
  echo -e "\t-d       node rank, default is 0, need to be set when running multi nodes"
  echo -e "\t-v       model version, only support sd1.5 and sd2.1 now"
  echo -e "\t-p       enable torch_supti_profiler, 0 or 1, default is 0"
  echo -e "\t-w       write log to log file, 0 or 1, default is 1"
  echo -e "\t-hp      high precision optimizer, 0 or 1, default is 0"
  echo -e "\t-vl      whether to run with validation, 0 or 1, default is 0"
  echo -e "\t-ec      whether to use eval cache for clip encoder and vae, 0 or 1, default is 1"
  echo -e "\n"

  echo -e "If you want to run 8-card training, you can execute:"
  echo -e "\tbash train_without_lora.sh -c 8"
  echo -e "For example, if you want to run resolution 512, you can execute:"
  echo -e "\tbash train_without_lora.sh -r 512"
  echo -e "If you want to run sd2.1 with resolution 960, you can execute:"
  echo -e "\tbash train_without_lora.sh -v 2.1 -r 960"
  echo -e "If you want to profiler the training, you can execute:"
  echo -e "\tbash train_without_lora.sh -p 1"
  echo -e "If you want to train model with high precision optimizer, you can execute:"
  echo -e "\tbash train_without_lora.sh -hp 1"
  echo -e "If you don't want to write log to default log file, you can execute:"
  echo -e "\tbash train_without_lora.sh -w 0"
  echo -e "If you want to run clip and vae part with cache, you can execute:"
  echo -e "\tbash train_without_lora.sh -ec 1"
  echo -e "If you want to run multi-nodes training, please refer to run_cmd_in_multi_nodes_async.py"
  exit "$1"
}

get_args() {
  # Parse command-line options into the global configuration variables.
  # "--" terminates option parsing; any other unrecognized "-x" prints usage
  # and exits with a non-zero status.
  while [ $# -gt 0 ]
  do
    case $1 in
    -sp) SCRIPT_PATH=$2 ; shift;;
    -n) NUM_NODES=$2 ; shift;;
    -c) NUM_PROCESS=$2 ; shift;;
    -r) RESOLUTION=$2 ; shift;;
    -i) MASTER_ADDRESS="$2" ; shift;;
    -d) NODE_RANK=$2 ; shift;;
    -v) MODEL_VERSION=$2 ; shift;;
    -p) PROFILER=$2 ; shift;;
    -hp) HIGH_PRECISION=$2; shift;;
    -w) WRITE_LOG=$2;shift;;
    -vl) VALIDATION=$2;shift;;
    -ec) EVAL_CACHE=$2;shift;;

    # "--" must be matched before the catch-all "-*" arm; in the previous
    # ordering it was unreachable and triggered usage instead of ending
    # option parsing.
    (--) shift; break;;
    # Unknown option is an error: exit non-zero, not 0.
    (-*) usage 1;;

    (*) break;;
    esac
    shift
  done
}

set_env() {
  # Export device/runtime tuning variables, pick RESNET_BLOCK_EVAL_LEVEL from
  # resolution and precision, toggle the VAE decoder for validation, and map
  # MODEL_VERSION to the Hugging Face model name.
  export SULIB_KERNEL_MODE="fused_op" # performance is not affected obvious
  export ENABLE_FORCE_SYNC=1
  export GMBPASS=close
  export SUPA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
  export BRTB_ENABLE_FORCE_SUDNN_CONV2d=1
  export DYNAMIC_SHAPE_CODEGEN=1
  export SUCCL_BUFFSIZE=16777216
  export SUCCL_PARALLEL_NUM=16
  # workaround for suMemcpyAsync
  export BR_UMD_DEBUG_STAGING_MEMORY_POOL_SIZE=256
  #export ENABLE_HIGH_PRECISION_ATTENTION=1 # important for sd2.1 accuracy
  #export BR_UMD_DEBUG_P2P_ACCESS_CHECK=0 # use for pcie machine ddp

  #env variables to save memory while running with numa
  export BRTB_ENABLE_NUMA_SPLIT=1
  export BRTB_ENABLE_NUMA_ALIGN_4K=1
  #export PYTORCH_SUPA_ALLOC_CONF=max_split_size_mb:30
  export PYTORCH_SUPA_ALLOC_CONF=block_alignment_to_umd:1

  export BRTB_ENABLE_FUSED_BIASADD=1 # biasadd may cause error in validation
  export BRTB_DISABLE_ZERO_OUTPUT_NUMA=1 # no effect on accuracy
  export BRTB_DISABLE_ZERO_WS=1 # no effect on accuracy
  export BRTB_DISABLE_ZERO_REORDER=1 # no effect on accuracy
  export BRTB_DISABLE_ZERO_OUTPUT_UMA=1 # no effect on accuracy
  export ENABLE_CLEAN_TENSOR=1
  export BRTB_LOG_BACKEND=empty
  #export SUDNN_EAGER_ENABLE_CBA_FWD=false # improve performance in high precision
  export ENABLE_HIGH_PRECISION=0 # 30ms performance improve and no obvious accuracy drop
  export ENABLE_RESBLOCK_CHECKPOINTING=0 # default: 0
  # export BRTB_LOG_BACKEND=stdout
  # export BRTB_LOG_LEVEL=debug

  # Level 2: >=512 with high-precision optimizer; level 1: <=512 otherwise;
  # level 0: resolutions above 512 without high precision.
  if [ "$RESOLUTION" -ge 512 ] && [ "$HIGH_PRECISION" -gt 0 ]; then
    export RESNET_BLOCK_EVAL_LEVEL=2
  elif [ "$RESOLUTION" -le 512 ]; then
    export RESNET_BLOCK_EVAL_LEVEL=1
  else
    export RESNET_BLOCK_EVAL_LEVEL=0
  fi

  if [ "$VALIDATION" -eq 0 ]; then
    export ENABLE_VAE_DECODER=0
  else
    export ENABLE_VAE_DECODER=1 # decoder is needed for validation
  fi

  if [[ $MODEL_VERSION == "1.5" ]]; then
    export MODEL_NAME="runwayml/stable-diffusion-v1-5"
  elif [[ $MODEL_VERSION == "2.1" ]]; then
    export MODEL_NAME="stabilityai/stable-diffusion-2-1"
    export ENABLE_HIGH_PRECISION_ATTENTION=1 # important for sd2.1 accuracy
  else
    # Report to stderr and exit non-zero: the previous bare "exit" returned
    # the status of the preceding echo (0), masking the failure.
    echo "unsupported model version: ${MODEL_VERSION}" >&2
    exit 1
  fi

  export DATASET_NAME="reach-vb/pokemon-blip-captions"
}

MAX_TRAIN_STEPS=${MAX_TRAIN_STEPS:-5000}

run() {
  # Build the `accelerate launch` command line from the global configuration
  # and run it, optionally teeing output to a timestamped log under ./output.
  script_name=$SCRIPT_PATH
  launcher="accelerate launch"
  launcher_args=""
  # NOTE: args is a single whitespace-separated string that is deliberately
  # expanded UNQUOTED below so the shell word-splits it into individual
  # flags. None of these values may therefore contain spaces.
  args="--pretrained_model_name_or_path=$MODEL_NAME
--dataset_name=$DATASET_NAME
--resolution=$RESOLUTION --center_crop --random_flip
--seed=42
--train_batch_size=16
--max_train_steps=$MAX_TRAIN_STEPS
--gradient_accumulation_steps=2
--checkpointing_steps=50000
--learning_rate=1e-05
--max_grad_norm=1
--mixed_precision=bf16
--use_br_fused_op
--lr_scheduler=constant --lr_warmup_steps=0
--dataloader_num_workers=2
--output_dir=sd-pokemon-model"

  # -p: enable the profiler flag on the training script.
  if [ $PROFILER -gt 0 ]; then
    echo "torch_supti_profiler is enabled"
    args="$args --torch_supti_profiler"
  fi

  # -hp: high-precision optimizer; activation offload is added on top for
  # resolutions above 256.
  if [ $HIGH_PRECISION -gt 0 ]; then
    echo "use high precision optimizer"
    if [ $RESOLUTION -gt 256 ]; then
      args="$args --use_act_offload"
    fi
    args="$args --enable_high_precision_optim"
  fi

  # -ec: cache eval results for the CLIP encoder and VAE.
  if [ $EVAL_CACHE -eq 1 ]; then
    echo "enable eval cache for clip encoder and vae."
    args="$args --enable_eval_cache"
  fi

  # Multi-card: tell accelerate to use multiple processes on this machine.
  if [ $NUM_PROCESS -gt 1 ]; then
    launcher_args="$launcher_args --multi_gpu --num_processes=$NUM_PROCESS"
  fi

  # Multi-node: machine count and this node's rank.
  if [ $NUM_NODES -gt 1 ]; then
    launcher_args="$launcher_args --num_machines=$NUM_NODES --machine_rank=$NODE_RANK"
  fi

  # Rendezvous endpoint; only set when -i was given.
  if [[ "$MASTER_ADDRESS" != "" ]]; then
    launcher_args="$launcher_args --main_process_ip=$MASTER_ADDRESS --main_process_port=$MASTER_PORT"
  fi

  cmd="$launcher $launcher_args $script_name $args"

  # The validation flag is passed as ONE argv entry (quoted below) because
  # user_prompt contains spaces. NOTE(review): when VALIDATION=0 a bare
  # "--validation_prompts=" is still passed — presumably the training script
  # treats an empty value as "no validation"; verify against the script.
  validation_prompt="--validation_prompts="

  if [ $VALIDATION -eq 1 ]; then
    validation_prompt="$validation_prompt$user_prompt"
  fi

  # Unquoted on purpose: collapses the multi-line args string onto one line
  # so the full command can be copy-pasted from the log.
  echo -e $cmd $validation_prompt

  if [ $WRITE_LOG -gt 0 ]; then
    mkdir -p output
    # |& is bash-only: pipes both stdout and stderr into tee.
    $cmd "$validation_prompt" |& tee output/sd_without_lora_v${MODEL_VERSION}_${RESOLUTION}_N${NUM_NODES}_C${NUM_PROCESS}_$(date +%Y%m%d_%H%M%S).log
  else
    $cmd "$validation_prompt"
  fi
}

# Parse CLI flags, export the runtime environment, then launch training.
# "$@" must be quoted so arguments containing spaces (e.g. -sp paths)
# are passed through intact instead of being word-split.
get_args "$@"

set_env

run

# other args for debug
# --cpu_noise \
# --gradient_checkpointing \
# --use_ema \
# --use_flatten_optim \
# --torch_supti_profiler \
# --mixed_precision="bf16" \
# --gradient_accumulation_steps=2 \
# --overfit \
# --overfit_input="/root/overfit/fix_tensors_sd15_lora/bs16/res256" \
# --torch_supti_profiler \
# --enable_offload_optimizer_states
