File size: 1,152 Bytes
c81ec3a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4203a9b
40ad559
d7a6c9d
c81ec3a
fcc0071
 
dab24b4
c81ec3a
 
fcc0071
 
387046f
c81ec3a
c4388ab
c81ec3a
fcc0071
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
#!/bin/sh
# Evaluate LoRA-fine-tuned Llama3-8B checkpoints on the MGTV logical-reasoning
# dataset across all saved training epochs. Must be runnable from anywhere:
# it re-anchors the working directory to the repository root first.

# Resolve the repo root as the parent of this script's directory.
# Quote every expansion so paths with spaces survive, and abort if the
# cd fails — otherwise everything below would run in the wrong directory.
BASEDIR=$(dirname "$0")
cd "$BASEDIR/.." || exit 1
echo "Current Directory:"
pwd

# Re-bind BASEDIR to the absolute repo root for any later use.
# $( … ) over backticks: nests cleanly and is easier to read.
BASEDIR=$(pwd)

# Log hardware/OS diagnostics (GPU, kernel, distro, CPU, total RAM) so each
# run's environment is captured in the job output for reproducibility.
# NOTE(review): nvidia-smi will print an error on GPU-less hosts; there is no
# `set -e`, so the script continues regardless.
nvidia-smi
uname -a
cat /etc/os-release
lscpu
grep MemTotal /proc/meminfo

# One-time dependency setup — left commented; the environment is presumably
# pre-provisioned. Uncomment on a fresh machine.
#pip install -r requirements.txt
#cd ../LLaMA-Factory && pip install -e .[torch,bitsandbytes]

# ---------------------------------------------------------------------------
# Evaluation configuration — every setting is passed to the Python entry
# point through environment variables.
# ---------------------------------------------------------------------------
export LOGICAL_REASONING_DATA_PATH="datasets/mgtv"
export RESIZE_TOKEN_EMBEDDINGS="true"
export START_EPOCH="0"
export USING_LLAMA_FACTORY="true"

# Base model under evaluation (alternative checkpoint kept for reference).
export MODEL_NAME="shenzhi-wang/Llama3-8B-Chinese-Chat"
# export MODEL_NAME=hfl/llama-3-chinese-8b-instruct-v3

# Prefix shared by the results file and the adapter checkpoint directory.
export MODEL_PREFIX="llama3-8b_lora_sft_bf16"

# --- Run 1: prompt template P1, LoRA rank 4 --------------------------------
export LOGICAL_REASONING_RESULTS_PATH="results/${MODEL_PREFIX}-p1_r4.csv"
export ADAPTER_PATH_BASE="llama-factory/saves/llama3-8b/lora/sft_bf16_p1_full_r4"
export USING_P1_PROMPT_TEMPLATE="true"
echo "Eval $MODEL_NAME with $ADAPTER_PATH_BASE"
python llm_toolkit/eval_logical_reasoning_all_epochs.py

# --- Run 2 (disabled): prompt template P2, LoRA rank 4 ---------------------
# export LOGICAL_REASONING_RESULTS_PATH=results/$MODEL_PREFIX-p2_r4.csv
# export ADAPTER_PATH_BASE=llama-factory/saves/llama3-8b/lora/sft_bf16_p2_full_r4
# export USING_P1_PROMPT_TEMPLATE=false
# echo "Eval $MODEL_NAME with $ADAPTER_PATH_BASE"
# python llm_toolkit/eval_logical_reasoning_all_epochs.py