Tags: Text Generation · Transformers · PyTorch · English · mixtral · conversational · Inference Endpoints · text-generation-inference
ehartford committed
Commit 56e15b8
1 Parent(s): d5b78a3

Upload eval.sh with huggingface_hub

Files changed (1):
  1. eval.sh +41 -0
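
The commit message notes that eval.sh was pushed with the huggingface_hub library. As a point of reference only (the exact invocation used for this commit is not recorded on this page), the library's bundled CLI performs this kind of upload as sketched below; it mirrors the final line of the script itself and assumes an already-authenticated environment (huggingface-cli login or an HF_TOKEN variable):

  # Sketch only: upload eval.sh to the model repo via the huggingface_hub CLI.
  # Repo id taken from the script below; authentication is assumed to be configured.
  huggingface-cli upload cognitivecomputations/dolphin-2.7-mixtral-8x7b eval.sh eval.sh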
eval.sh ADDED
@@ -0,0 +1,41 @@
+ #!/bin/bash
+
+ MODEL_PATH="/workspace/text-generation-webui/models/dolphin-mixtral"
+ MODEL_NAME="dolphin-2.7-mixtral-8x7b"
+ RESULTS_PATH="/workspace/results/$MODEL_NAME"
+ mkdir -p "$RESULTS_PATH"
+
+ PRETRAINED_ARGS="$MODEL_PATH,tensor_parallel_size=4,dtype=auto,trust_remote_code=True,gpu_memory_utilization=0.8"
+ MODEL_ARGS="pretrained=$PRETRAINED_ARGS"
+
+ tasks=(
+     "mmlu"
+     "truthfulqa"
+     "gsm8k"
+     "hellaswag"
+     "arc_challenge"
+     "winogrande")
+
+ # Return the number of few-shot examples for a given task
+ get_num_fewshot() {
+     case "$1" in
+         "mmlu") echo 5 ;;
+         "truthfulqa") echo 0 ;;
+         "gsm8k") echo 5 ;;
+         "hellaswag") echo 10 ;;
+         "arc_challenge") echo 25 ;;
+         "winogrande") echo 5 ;;
+         *) echo 0 ;;
+     esac
+ }
+
+ for TASK in "${tasks[@]}"; do
+     echo lm_eval --model vllm --model_args "$MODEL_ARGS" --tasks "$TASK" --num_fewshot "$(get_num_fewshot "$TASK")" --batch_size 8 --output_path "$RESULTS_PATH/$TASK.json"
+     lm_eval --model vllm --model_args "$MODEL_ARGS" --tasks "$TASK" --num_fewshot "$(get_num_fewshot "$TASK")" --batch_size 8 --output_path "$RESULTS_PATH/$TASK.json"
+ done
+
+
+ jq -s '[.[]]' "$RESULTS_PATH"/*.json > "$RESULTS_PATH/eval_results.json"
+
+ huggingface-cli upload "cognitivecomputations/$MODEL_NAME" "$RESULTS_PATH/eval_results.json"
+ huggingface-cli upload "cognitivecomputations/$MODEL_NAME" eval.sh