#!/bin/sh

BASEDIR=$(dirname "$0")
cd "$BASEDIR/.." || exit 1
echo "Current Directory:"
pwd

BASEDIR=$(pwd)

# Log hardware and OS details for the run record.
nvidia-smi
uname -a
cat /etc/os-release
lscpu
grep MemTotal /proc/meminfo

# One-time dependency setup (uncomment when needed).
# pip install -r requirements.txt
# cd ../LLaMA-Factory && pip install -e .[torch,bitsandbytes]

# Common evaluation settings.
export LOGICAL_REASONING_DATA_PATH=datasets/mgtv
export RESIZE_TOKEN_EMBEDDINGS=true
export START_EPOCH=0
export USING_LLAMA_FACTORY=true

export MODEL_NAME=shenzhi-wang/Llama3-8B-Chinese-Chat
# export MODEL_NAME=hfl/llama-3-chinese-8b-instruct-v3
export MODEL_PREFIX=llama3-8b_lora_sft_bf16

# Evaluate all epochs of the P1-prompt LoRA adapters.
export LOGICAL_REASONING_RESULTS_PATH=results/$MODEL_PREFIX-p1_r4.csv
export ADAPTER_PATH_BASE=llama-factory/saves/llama3-8b/lora/sft_bf16_p1_full_r4
export USING_P1_PROMPT_TEMPLATE=true
echo "Eval $MODEL_NAME with $ADAPTER_PATH_BASE"
python llm_toolkit/eval_logical_reasoning_all_epochs.py

# Uncomment to also evaluate the P2-prompt LoRA adapters.
# export LOGICAL_REASONING_RESULTS_PATH=results/$MODEL_PREFIX-p2_r4.csv
# export ADAPTER_PATH_BASE=llama-factory/saves/llama3-8b/lora/sft_bf16_p2_full_r4
# export USING_P1_PROMPT_TEMPLATE=false
# echo "Eval $MODEL_NAME with $ADAPTER_PATH_BASE"
# python llm_toolkit/eval_logical_reasoning_all_epochs.py
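
# --- Usage sketch (assumptions, not part of the original script) ---
# Assuming this file lives under scripts/ in the repo (hypothetical path and
# filename) and that llm_toolkit/eval_logical_reasoning_all_epochs.py reads
# the environment variables exported above, a typical run from anywhere is:
#
#   chmod +x scripts/eval_mgtv_llama3_8b.sh
#   nohup scripts/eval_mgtv_llama3_8b.sh > eval.log 2>&1 &
#
# Per-epoch results should then appear at the path given by
# LOGICAL_REASONING_RESULTS_PATH (results/llama3-8b_lora_sft_bf16-p1_r4.csv).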