#!/bin/sh
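# scripts/eval-mgtv-llama3_8b.sh
# Evaluates Llama3-8B-Chinese-Chat LoRA adapters on the MGTV logical-reasoning
# dataset, one checkpoint per epoch, for both the P1 and P2 prompt templates.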
BASEDIR=$(dirname "$0")
cd "$BASEDIR/.." || exit 1
echo "Current Directory:"
pwd
BASEDIR=$(pwd)
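
# Log GPU, kernel, OS, CPU, and memory details for reproducibility.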
nvidia-smi
uname -a
cat /etc/os-release
lscpu
grep MemTotal /proc/meminfo
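
# Uncomment on a fresh environment to install dependencies: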
#pip install -r requirements.txt
#cd ../LLaMA-Factory && pip install -e .[torch,bitsandbytes]
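
# Configuration is passed to llm_toolkit/eval_logical_reasoning_all_epochs.py
# via the environment variables exported below.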
export LOGICAL_REASONING_DATA_PATH=datasets/mgtv
export RESIZE_TOKEN_EMBEDDINGS=true
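# Epochs 1-6 are already evaluated (epoch 6 looked promising, so training was
# extended by 4 epochs); resume evaluation from epoch 7.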
export START_EPOCH=7
#export MODEL_NAME=FlagAlpha/Llama3-Chinese-8B-Instruct
export MODEL_NAME=shenzhi-wang/Llama3-8B-Chinese-Chat
export MODEL_PREFIX=llama3-8b_lora_sft_bf16
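
# Pass 1: adapters fine-tuned with the P1 prompt template.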
export LOGICAL_REASONING_RESULTS_PATH=results/$MODEL_PREFIX-p1.csv
export ADAPTER_PATH_BASE=llama-factory/saves/llama3-8b/lora/sft_bf16_p1_full
export USING_P1_PROMPT_TEMPLATE=true
echo "Eval $MODEL_NAME with $ADAPTER_PATH_BASE"
python llm_toolkit/eval_logical_reasoning_all_epochs.py
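
# Pass 2: adapters fine-tuned with the P2 prompt template.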
export LOGICAL_REASONING_RESULTS_PATH=results/$MODEL_PREFIX-p2.csv
export ADAPTER_PATH_BASE=llama-factory/saves/llama3-8b/lora/sft_bf16_p2_full
export USING_P1_PROMPT_TEMPLATE=false
echo "Eval $MODEL_NAME with $ADAPTER_PATH_BASE"
python llm_toolkit/eval_logical_reasoning_all_epochs.py