###
# @Author: MosRat work@whl.moe
# @Date: 2025-05-07 13:53:45
# @LastEditors: MosRat work@whl.moe
# @LastEditTime: 2025-05-18 23:18:03
# @Description: Qat launcher
# 
# Copyright (c) 2025 by MosRat, All Rights Reserved. 
### 
# --- Runtime environment -----------------------------------------------------
# CPU / logging knobs.
export OMP_NUM_THREADS=16                  # cap OpenMP threads per process
export FORCE_COLOR=1                       # keep ANSI colors when piped to tee
export TF_CPP_MIN_LOG_LEVEL=3              # silence TensorFlow C++ log spam

# Hugging Face / Triton / PyTorch settings.
export HF_ENDPOINT="https://hf-mirror.com"                  # HF mirror endpoint
export TRITON_CACHE_DIR="/tmp/triton_cache"                 # Triton kernel cache
export PYTORCH_CUDA_ALLOC_CONF="expandable_segments:True"   # reduce CUDA allocator fragmentation
export TORCHINDUCTOR_SEARCH_AUTOTUNE_CACHE=1                # reuse inductor autotune results

# Kept for reference (previously used):
# export PYTHONDONTWRITEBYTECODE=1
# export LD_LIBRARY_PATH="/home/wenhongli/miniforge3/x86_64-conda-linux-gnu/lib/"
# export PYTHONOPTIMIZE=1

# --- Training hyperparameters ------------------------------------------------
learning_rate=1e-3    # peak LR for the cosine schedule
max_steps=750         # total optimizer steps
warmup_ratio=0.05     # fraction of max_steps spent warming up
# warm_steps=$((max_steps * 5 / 100))   # superseded by warmup_ratio

# --- Model / dataset selection -----------------------------------------------
# model_path="/home/wenhongli/workspace/openvla_q/model/casia/checkpoints/pick_banana_224/model/openvla-7b+casia_franka+b16+lr-0.0005+lora-r32+dropout-0.0--image_aug"
model_path="/home/wenhongli/workspace/vla-quant/model/casia/checkpoints/franka_kitchen_clip/openvla-7b+franka_kitchen+b16+lr-0.0005+lora-r32+dropout-0.0--image_aug"
# dataset_name="casia_franka"
dataset_name="franka_kitchen"
# --- Logging setup -----------------------------------------------------------
LOG_DIR="/home/wenhongli/workspace/vla-quant/logs"

# First positional argument: comma-separated CUDA device ids, e.g. "0,1,2".
first_arg=${1:-}

# Device count = number of comma-separated fields; used as --nproc_per_node.
# NOTE(review): an empty argument still counts as 1 (wc -l sees one blank line)
# — matches the original behavior; torchrun will fail loudly on a bad value.
count=$(echo "$first_arg" | tr ',' '\n' | wc -l)

# Create the log directory BEFORE creating the log file. (Bug fix: the
# original touched the file first and only then checked/created the
# directory, so the first run on a fresh machine failed.)
if [ ! -d "$LOG_DIR" ]; then
  mkdir -p "$LOG_DIR"
fi

LOG_FILE="${LOG_DIR}/whl_$(date +"%Y%m%d_%H%M%S").txt"
touch "$LOG_FILE"

# Initialize variables: extra flag forwarded verbatim to main.py when enabled.
TEST_FLAG=""

# Parse arguments (currently disabled).
# NOTE(review): if re-enabled, this loop shifts twice per iteration (once in
# the --test arm and once at the bottom), skipping every other argument —
# confirm before enabling.
# while [[ "$#" -gt 0 ]]; do
#     case $1 in
#         --test) TEST_FLAG="--test"; shift ;;
#         *) echo "Unknown parameter: $1"; exit 1 ;;
#     esac
#     shift
# done

# TEST_FLAG="--test"

# Launch QAT training. CUDA_VISIBLE_DEVICES restricts visible GPUs to the ids
# passed as $1; torchrun spawns one worker per listed GPU ($count). The c10d
# rendezvous on "localhost:0" picks a free port automatically. All output is
# mirrored into $LOG_FILE via tee.
# Fixes vs. original: variables quoted; the "True\" continuation (no space
# before the backslash) after --bf16_full_eval parsed correctly only by
# accident of the next line's leading whitespace — now written as "True \".
CUDA_VISIBLE_DEVICES=$first_arg uv run torchrun --nproc_per_node "$count" --rdzv_backend=c10d --rdzv_endpoint="localhost:0" main.py \
  --mode "qat" \
  --max_steps "$max_steps" \
  --max_grad_norm 1. \
  --per_device_train_batch_size 15 \
  --gradient_accumulation_steps 64 \
  --gradient_checkpointing False \
  --do_train True \
  --eval_fp False \
  --eval_quant_fp False \
  --eval_quant_int True \
  --save_int_tensors False \
  --warmup_ratio "$warmup_ratio" \
  --learning_rate "$learning_rate" \
  --lr_scheduler_type "cosine" \
  --bf16 True \
  --fp16 False \
  --logging_steps 2 \
  --remove_unused_columns True \
  --output_dir "./logs" \
  --optim "adamw_torch_fused" \
  --report_to "tensorboard" \
  --save_strategy "steps" \
  --save_safetensors False \
  --save_total_limit 2 \
  --save_steps 100 \
  --torch_compile False \
  --do_eval False \
  --bf16_full_eval True \
  --per_device_eval_batch_size 4 \
  --ddp_find_unused_parameters False \
  --dataloader_num_workers 8 \
  \
  --model_path "$model_path" \
  --dataset_name "$dataset_name" \
  $TEST_FLAG \
  2>&1 | tee -a "$LOG_FILE"

