Datasets: leaderboard_dev / czechbench_leaderboard / Meta-Llama-3.1-8B-Instruct_eval_request.json
Commit 832fa3f (verified): Add Meta-Llama-3.1-8B-Instruct to eval queue
{"eval_name": "Meta-Llama-3.1-8B-Instruct", "precision": "bfloat16", "hf_model_id": "meta-llama/Meta-Llama-3.1-8B-Instruct", "contact_email": "jirkoada@cvut.cz", "agree_cs": 0.5311004784688995, "anli_cs": 0.48333333333333334, "arc_challenge_cs": 0.6544368600682594, "arc_easy_cs": 0.7882996632996633, "belebele_cs": 0.8279329608938547, "ctkfacts_cs": 0.6577060931899642, "czechnews_cs": 0.792, "fb_comments_cs": 0.745, "gsm8k_cs": 0.15390447308567096, "klokanek_cs": 0.24257425742574257, "mall_reviews_cs": 0.6523333333333333, "mmlu_cs": 0.5072533849129593, "sqad_cs": 0.7034400948991696, "subjectivity_cs": 0.845, "truthfulqa_cs": 0.4248768472906404}