#!/bin/bash
# Evaluate dolphin-2.8-mistral-7b-v02 with lm-evaluation-harness and upload the results to the Hugging Face Hub.

MODEL_PATH="cognitivecomputations/dolphin-2.8-mistral-7b-v02"
MODEL_NAME="dolphin-2.8-mistral-7b-v02"
RESULTS_PATH="/workspace/results/$MODEL_NAME"
mkdir -p "$RESULTS_PATH"

MODEL_ARGS="pretrained=$MODEL_PATH,dtype=auto"
# Benchmarks to run (the Open LLM Leaderboard task set).
tasks=(
  "truthfulqa"
  "winogrande"
  "gsm8k"
  "hellaswag"
  "arc_challenge"
  "mmlu"
)
# Return the number of few-shot examples for a given task (Open LLM Leaderboard settings).
get_num_fewshot() {
  case "$1" in
    "mmlu") echo 5 ;;
    "truthfulqa") echo 0 ;;
    "gsm8k") echo 5 ;;
    "hellaswag") echo 10 ;;
    "arc_challenge") echo 25 ;;
    "winogrande") echo 5 ;;
    *) echo 0 ;;
  esac
}
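
# A minimal sanity check of the mapping above (hypothetical example calls; the
# values mirror the standard Open LLM Leaderboard few-shot settings):
#   get_num_fewshot "arc_challenge"   # prints 25
#   get_num_fewshot "unknown_task"    # falls through to the default, prints 0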
# Run each benchmark with the Hugging Face backend and write per-task JSON results.
for TASK in "${tasks[@]}"; do
  lm_eval --model hf --model_args "$MODEL_ARGS" --tasks "$TASK" --num_fewshot "$(get_num_fewshot "$TASK")" --device cuda:0 --batch_size 8 --output_path "$RESULTS_PATH/$TASK.json"
  # Alternative: run the same evaluation through the vLLM backend.
  # lm_eval --model vllm --model_args "$MODEL_ARGS" --tasks "$TASK" --num_fewshot "$(get_num_fewshot "$TASK")" --batch_size 8 --output_path "$RESULTS_PATH/$TASK.json"
done
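
# To smoke-test a single benchmark before committing to the full loop, the same
# flags can be run directly; `--limit` caps the number of examples per task and
# is intended for debugging only, not for reportable scores (a sketch, not part
# of the original workflow):
# lm_eval --model hf --model_args "$MODEL_ARGS" --tasks mmlu --num_fewshot 5 \
#   --device cuda:0 --batch_size 8 --limit 32 --output_path "$RESULTS_PATH/mmlu_smoke.json"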

# Merge the per-task result files into a single JSON array.
jq -s '[.[]]' "$RESULTS_PATH"/*.json > "$RESULTS_PATH/eval_results.json"
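
# Optional: inspect the merged metrics locally before uploading. This is a
# sketch that assumes each per-task file follows the harness output layout,
# with a top-level "results" object keyed by task name:
# jq '.[].results' "$RESULTS_PATH/eval_results.json"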

# Upload the merged results and this script to the model repo on the Hub.
huggingface-cli upload "cognitivecomputations/$MODEL_NAME" "$RESULTS_PATH/eval_results.json"
huggingface-cli upload "cognitivecomputations/$MODEL_NAME" eval.sh

# For ROCm systems: run the evaluation inside the vLLM ROCm container with the model mounted at /app/model.
# docker run -it --network=host --group-add=video --ipc=host --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --device /dev/kfd --device /dev/dri -v /workspace/models/dolphin-phi-kensho:/app/model embeddedllminfo/vllm-rocm bash