Text Generation
Transformers
PyTorch
English
mixtral
conversational
Inference Endpoints
text-generation-inference
ehartford's picture
Upload eval.sh with huggingface_hub
56e15b8
#!/bin/bash
# Run the Open LLM Leaderboard eval suite against a local model with
# lm-eval-harness (vLLM backend) and upload the results to the Hub.
#
# -u: error on unset variables; pipefail: a pipeline fails if any stage fails.
# -e is deliberately omitted so one failed eval task does not abort the rest.
set -uo pipefail

readonly MODEL_PATH="/workspace/text-generation-webui/models/dolphin-mixtral"
readonly MODEL_NAME="dolphin-2.7-mixtral-8x7b"
readonly RESULTS_PATH="/workspace/results/$MODEL_NAME"
mkdir -p "$RESULTS_PATH"

# vLLM backend arguments passed through lm_eval's --model_args
# (4-way tensor parallelism, 80% of GPU memory reserved).
readonly PRETRAINED_ARGS="$MODEL_PATH,tensor_parallel_size=4,dtype=auto,trust_remote_code=True,gpu_memory_utilization=0.8"
readonly MODEL_ARGS="pretrained=$PRETRAINED_ARGS"
# Benchmark suite to run (the Open LLM Leaderboard task set).
tasks=(
  mmlu
  truthfulqa
  gsm8k
  hellaswag
  arc_challenge
  winogrande
)
#######################################
# Print the few-shot example count for a benchmark task.
# Arguments: $1 - task name
# Outputs:   the few-shot count on stdout (0 for unrecognized tasks)
#######################################
get_num_fewshot() {
  local task=$1
  local n=0
  case "$task" in
    mmlu|gsm8k|winogrande) n=5 ;;
    hellaswag)             n=10 ;;
    arc_challenge)         n=25 ;;
    truthfulqa)            n=0 ;;
  esac
  echo "$n"
}
# Run each benchmark, writing per-task results to $RESULTS_PATH/<task>.json.
for TASK in "${tasks[@]}"; do
  # Build the argv once so the logged command and the executed command
  # can never drift apart (the original duplicated the full line twice).
  cmd=(lm_eval --model vllm --model_args "$MODEL_ARGS"
    --task="$TASK" --num_fewshot "$(get_num_fewshot "$TASK")"
    --batch_size 8 --output_path "$RESULTS_PATH/$TASK.json")
  echo "${cmd[@]}"
  # Keep going on failure so the remaining tasks still run, but say so.
  "${cmd[@]}" || echo "WARNING: eval failed for task $TASK" >&2
done
# Merge all per-task result files into a single JSON array.
# Remove any stale merged file first: on a re-run it would otherwise match
# the *.json glob and be slurped into its own replacement.
rm -f -- "$RESULTS_PATH/eval_results.json"
jq -s '[.[]]' "$RESULTS_PATH"/*.json > "$RESULTS_PATH/eval_results.json"
# Publish the merged results and this script to the model repo on the Hub.
huggingface-cli upload "cognitivecomputations/$MODEL_NAME" "$RESULTS_PATH/eval_results.json"
huggingface-cli upload "cognitivecomputations/$MODEL_NAME" eval.sh