jhao committed on
Commit
55aff67
1 Parent(s): 79769b6

Add GTBench results

.gitignore CHANGED
@@ -5,3 +5,4 @@ __pycache__/
5
  *.pyc
6
  *ipynb
7
  .vscode/
 
 
5
  *.pyc
6
  *ipynb
7
  .vscode/
8
+ .idea/
app.py CHANGED
@@ -24,14 +24,14 @@ from src.display.utils import (
24
  Precision
25
  )
26
  from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, H4_TOKEN, IS_PUBLIC, QUEUE_REPO, REPO_ID, RESULTS_REPO
27
- from src.populate import get_evaluation_queue_df, get_leaderboard_df
28
- from src.submission.submit import add_new_eval
29
- from src.tools.collections import update_collections
30
- from src.tools.plots import (
31
- create_metric_plot_obj,
32
- create_plot_df,
33
- create_scores_df,
34
- )
35
  from dummydatagen import dummy_data_for_plot, create_metric_plot_obj_1, dummydf
36
  import copy
37
 
@@ -222,7 +222,7 @@ with demo:
222
  gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
223
 
224
  with gr.Tabs(elem_classes="tab-buttons") as tabs:
225
- with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
226
  with gr.Row():
227
  with gr.Column():
228
  with gr.Row():
 
24
  Precision
25
  )
26
  from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, H4_TOKEN, IS_PUBLIC, QUEUE_REPO, REPO_ID, RESULTS_REPO
27
+ # from src.populate import get_evaluation_queue_df, get_leaderboard_df
28
+ # from src.submission.submit import add_new_eval
29
+ # from src.tools.collections import update_collections
30
+ # from src.tools.plots import (
31
+ # create_metric_plot_obj,
32
+ # create_plot_df,
33
+ # create_scores_df,
34
+ # )
35
  from dummydatagen import dummy_data_for_plot, create_metric_plot_obj_1, dummydf
36
  import copy
37
 
 
222
  gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
223
 
224
  with gr.Tabs(elem_classes="tab-buttons") as tabs:
225
+ with gr.TabItem("🏅 GTBench", elem_id="llm-benchmark-tab-table", id=0):
226
  with gr.Row():
227
  with gr.Column():
228
  with gr.Row():
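
For reference, a minimal sketch (not part of this commit) of how the renamed GTBench tab can surface the new results table. It assumes `dummydf()` returns the DataFrame loaded from `assets/gtbench_results.csv`, as updated later in this commit:

```python
# Minimal sketch (not from this commit): render the GTBench results table
# inside the renamed tab. Assumes dummydf() returns the DataFrame read from
# assets/gtbench_results.csv (see dummydatagen.py below).
import gradio as gr

from dummydatagen import dummydf

leaderboard_df = dummydf()

with gr.Blocks() as demo:
    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("🏅 GTBench", elem_id="llm-benchmark-tab-table", id=0):
            gr.Markdown("Normalized Relative Advantage (NRA) per game; higher is better.")
            gr.Dataframe(value=leaderboard_df, interactive=False)

if __name__ == "__main__":
    demo.launch()
```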
assets/gtbench_results.csv ADDED
@@ -0,0 +1,23 @@
1
+ Model,Agent,Opponent Model,Opponent Agent,Tic-Tac-Toe,Connect Four,Breakthrough,Liar's Dice,Blind Auction,Negotiation,Kuhn Poker,Nim,Pig,Iterated Prisoner's Dilemma
2
+ GPT-3.5-turbo,Prompt,GPT-3.5-turbo-1106,prompt agent,0,0,0,0,0,0,0,0,0,0
3
+ GPT-4,Prompt,GPT-3.5-turbo-1106,prompt agent,-0.1111111111,0.08,0.32,0.8,0.04,-0.2807881773,0.4,0.08,-0.04,0.004191114837
4
+ GPT-4,CoT,GPT-3.5-turbo-1106,prompt agent,-0.02222222222,-0.08,0.56,0.24,0.06896551724,0.1345565749,0.44,0.04,0.04,-0.1601239669
5
+ GPT-3.5-turbo,CoT,GPT-3.5-turbo-1106,prompt agent,0.2765957447,-0.32,-0.12,0.44,0.1153846154,-0.2069767442,0.12,-0.04,-0.16,0.1261829653
6
+ GPT-3.5-turbo,SC-CoT,GPT-3.5-turbo-1106,prompt agent,0.4090909091,-0.04,-0.16,0.52,-0.12,-0.3146796431,-0.08,0,-0.08,-0.1554828151
7
+ GPT-3.5-turbo,ToT,GPT-3.5-turbo-1106,prompt agent,-0.04545454545,0.24,0.16,0,-0.12,0.1827697262,0,0.12,-0.4,-0.1914893617
8
+ Codellama-34b-instruct,Prompt,GPT-3.5-turbo-1106,prompt agent,0.3333333333,-0.1,-0.8,-0.4,-0.25,0.2162849873,-0.16,0.36,0.12,0.6
9
+ Llama-2-70b-chat,SC-CoT,GPT-3.5-turbo-1106,prompt agent,-0.4693877551,-0.16,-0.68,0.16,-0.04,0.05194593714,0.12,0.04,0.04,0.2961783439
10
+ Codellama-34b-instruct,CoT,GPT-3.5-turbo-1106,prompt agent,0.3157894737,-0.36,-0.76,-0.32,-0.2682926829,0.0849338455,0,0.48,-0.08,0.03158844765
11
+ Llama-2-70b-chat,CoT,GPT-3.5-turbo-1106,prompt agent,-0.5,0.08,-0.8,0.2653061224,-0.08641975309,0.1280026324,-0.2,0.0612244898,-0.16,0.3242677824
12
+ Mistral-7b-Orca,CoT,GPT-3.5-turbo-1106,prompt agent,-0.07692307692,-0.12,-0.32,-0.56,0.1333333333,0.07843137255,0,0.36,-0.68,0.05470459519
13
+ Codellama-34b-instruct,SC-CoT,GPT-3.5-turbo-1106,prompt agent,0.1219512195,-0.6,-0.56,-0.28,-0.3483146067,0.09466811752,0,0.16,0.12,0.007955449483
14
+ Mistral-7b-Orca,SC-CoT,GPT-3.5-turbo-1106,prompt agent,-0.2,-0.08,-0.4,-0.64,0.08196721311,0.3636363636,-0.04,0.44,-0.84,0.01265822785
15
+ Codellama-34b-instruct,ToT,GPT-3.5-turbo-1106,prompt agent,-0.02127659574,-0.16,-0.6,-0.52,-0.3043478261,0.09764918626,0,-0.04,-0.16,0.2366609294
16
+ Llama-2-70b-chat,Prompt,GPT-3.5-turbo-1106,prompt agent,-0.3658536585,-1,-0.44,-0.16,-0.07462686567,-0.03333333333,-0.04,0.8,-0.02040816327,-0.7118834081
17
+ Mistral-7b-Orca,ToT,GPT-3.5-turbo-1106,prompt agent,-0.1794871795,-0.8,-0.32,-0.44,-0.04651162791,0.2987012987,-0.2,-0.08,-0.84,0.1615445232
18
+ Mistral-7b-Orca,Prompt,GPT-3.5-turbo-1106,prompt agent,-0.4285714286,-0.84,-0.68,-0.68,-0.06896551724,-0.1138211382,-0.04,-0.08,0,-0.1818181818
19
+ GPT-4,Prompt,GPT-4,prompt agent,0,0,0,0,0,0,0,0,0,0
20
+ Codellama-34b-instruct,Prompt,GPT-4,prompt agent,-0.06382978723,0.72,-0.6,-0.64,-0.1481481481,0,0.08,0.16,0.04,0.3424657534
21
+ Codellama-34b-instruct,CoT,GPT-4,prompt agent,0.02222222222,0.56,-1,-0.8,0.4489795918,-0.07765344184,0.08,0.2,-0.08,0.2237654321
22
+ Llama-2-70b-chat,Prompt,GPT-4,prompt agent,-0.9375,0.96,-0.92,-0.72,-0.25,0,-0.04,0.36,0.2,0.3333333333
23
+ Llama-2-70b-chat,CoT,GPT-4,prompt agent,-0.2857142857,0.2,-0.88,-0.9166666667,-0.4166666667,0.2011982027,0,-0.02564102564,-0.36,0.1729106628
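
Each row of the CSV is one (Model, Agent, Opponent) matchup, and the last ten columns hold the per-game NRA against that opponent. A quick, illustrative way to summarize it (the mean across games below is an assumption made for a rough overview, not GTBench's official aggregation):

```python
# Illustrative sketch (not part of this commit): load assets/gtbench_results.csv
# and rank (Model, Agent) pairs by their average NRA across the ten games.
import pandas as pd

GAME_COLUMNS = [
    "Tic-Tac-Toe", "Connect Four", "Breakthrough", "Liar's Dice",
    "Blind Auction", "Negotiation", "Kuhn Poker", "Nim", "Pig",
    "Iterated Prisoner's Dilemma",
]

results = pd.read_csv("./assets/gtbench_results.csv")
results["Avg NRA"] = results[GAME_COLUMNS].mean(axis=1)

# NRA > 0 means the row's model/agent outperforms its opponent on average.
summary = results.sort_values("Avg NRA", ascending=False)[
    ["Model", "Agent", "Opponent Model", "Avg NRA"]
]
print(summary.to_string(index=False))
```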
assets/logo.pdf ADDED
Binary file (149 kB).
 
dummydatagen.py CHANGED
@@ -95,65 +95,65 @@ def create_metric_plot_obj_1(
95
 
96
 
97
  def dummydf():
98
- data = [{"Model": "gpt-35-turbo-1106",
99
- "Agent": "prompt agent",
100
- "Opponent Model": "gpt-4",
101
- "Opponent Agent": "prompt agent",
102
- 'Breakthrough': 0,
103
- 'Connect Four': 0,
104
- 'Blind Auction': 0,
105
- 'Kuhn Poker': 0,
106
- "Liar's Dice": 0,
107
- 'Negotiation': 0,
108
- 'Nim': 0,
109
- 'Pig': 0,
110
- 'Iterated Prisoners Dilemma': 0,
111
- 'Tic-Tac-Toe': 0
112
- },
113
- {"Model": "Llama-2-70b-chat-hf",
114
- "Agent": "prompt agent",
115
- "Opponent Model": "gpt-4",
116
- "Opponent Agent": "prompt agent",
117
- 'Breakthrough': 1,
118
- 'Connect Four': 0,
119
- 'Blind Auction': 0,
120
- 'Kuhn Poker': 0,
121
- "Liar's Dice": 0,
122
- 'Negotiation': 0,
123
- 'Nim': 0,
124
- 'Pig': 0,
125
- 'Iterated Prisoners Dilemma': 0,
126
- 'Tic-Tac-Toe': 0
127
- },
128
- {"Model": "gpt-35-turbo-1106",
129
- "Agent": "ToT agent",
130
- "Opponent Model": "gpt-4",
131
- "Opponent Agent": "prompt agent",
132
- 'Breakthrough': 0,
133
- 'Connect Four': 0,
134
- 'Blind Auction': 0,
135
- 'Kuhn Poker': 0,
136
- "Liar's Dice": 0,
137
- 'Negotiation': 0,
138
- 'Nim': 0,
139
- 'Pig': 0,
140
- 'Iterated Prisoners Dilemma': 0,
141
- 'Tic-Tac-Toe': 0
142
- },
143
- {"Model": "Llama-2-70b-chat-hf",
144
- "Agent": "CoT agent",
145
- "Opponent Model": "gpt-4",
146
- "Opponent Agent": "prompt agent",
147
- 'Breakthrough': 0,
148
- 'Connect Four': 0,
149
- 'Blind Auction': 0,
150
- 'Kuhn Poker': 0,
151
- "Liar's Dice": 0,
152
- 'Negotiation': 0,
153
- 'Nim': 0,
154
- 'Pig': 0,
155
- 'Iterated Prisoners Dilemma': 0,
156
- 'Tic-Tac-Toe': 0
157
- }]
158
- df = pd.DataFrame(data)
159
  return df
 
95
 
96
 
97
  def dummydf():
98
+ # data = [{"Model": "gpt-35-turbo-1106",
99
+ # "Agent": "prompt agent",
100
+ # "Opponent Model": "gpt-4",
101
+ # "Opponent Agent": "prompt agent",
102
+ # 'Breakthrough': 0,
103
+ # 'Connect Four': 0,
104
+ # 'Blind Auction': 0,
105
+ # 'Kuhn Poker': 0,
106
+ # "Liar's Dice": 0,
107
+ # 'Negotiation': 0,
108
+ # 'Nim': 0,
109
+ # 'Pig': 0,
110
+ # 'Iterated Prisoners Dilemma': 0,
111
+ # 'Tic-Tac-Toe': 0
112
+ # },
113
+ # {"Model": "Llama-2-70b-chat-hf",
114
+ # "Agent": "prompt agent",
115
+ # "Opponent Model": "gpt-4",
116
+ # "Opponent Agent": "prompt agent",
117
+ # 'Breakthrough': 1,
118
+ # 'Connect Four': 0,
119
+ # 'Blind Auction': 0,
120
+ # 'Kuhn Poker': 0,
121
+ # "Liar's Dice": 0,
122
+ # 'Negotiation': 0,
123
+ # 'Nim': 0,
124
+ # 'Pig': 0,
125
+ # 'Iterated Prisoners Dilemma': 0,
126
+ # 'Tic-Tac-Toe': 0
127
+ # },
128
+ # {"Model": "gpt-35-turbo-1106",
129
+ # "Agent": "ToT agent",
130
+ # "Opponent Model": "gpt-4",
131
+ # "Opponent Agent": "prompt agent",
132
+ # 'Breakthrough': 0,
133
+ # 'Connect Four': 0,
134
+ # 'Blind Auction': 0,
135
+ # 'Kuhn Poker': 0,
136
+ # "Liar's Dice": 0,
137
+ # 'Negotiation': 0,
138
+ # 'Nim': 0,
139
+ # 'Pig': 0,
140
+ # 'Iterated Prisoners Dilemma': 0,
141
+ # 'Tic-Tac-Toe': 0
142
+ # },
143
+ # {"Model": "Llama-2-70b-chat-hf",
144
+ # "Agent": "CoT agent",
145
+ # "Opponent Model": "gpt-4",
146
+ # "Opponent Agent": "prompt agent",
147
+ # 'Breakthrough': 0,
148
+ # 'Connect Four': 0,
149
+ # 'Blind Auction': 0,
150
+ # 'Kuhn Poker': 0,
151
+ # "Liar's Dice": 0,
152
+ # 'Negotiation': 0,
153
+ # 'Nim': 0,
154
+ # 'Pig': 0,
155
+ # 'Iterated Prisoners Dilemma': 0,
156
+ # 'Tic-Tac-Toe': 0
157
+ # }]
158
+ df = pd.read_csv('./assets/gtbench_results.csv')
159
  return df
src/display/about.py CHANGED
@@ -1,171 +1,56 @@
1
  # from src.display.utils import ModelType
2
 
3
- TITLE = """<h1 align="center" id="space-title">Game Theory Bench</h1>"""
 
 
4
 
5
  INTRODUCTION_TEXT = """
6
- 📐 The 🤗 Open LLM Leaderboard aims to track, rank and evaluate open LLMs and chatbots.
 
7
 
8
- 🤗 Submit a model for automated evaluation on the 🤗 GPU cluster on the "Submit" page!
9
- The leaderboard's backend runs the great [Eleuther AI Language Model Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness) - read more details in the "About" page!
 
10
  """
11
 
12
  LLM_BENCHMARKS_TEXT = f"""
13
  # Context
14
- With the plethora of large language models (LLMs) and chatbots being released week upon week, often with grandiose claims of their performance, it can be hard to filter out the genuine progress that is being made by the open-source community and which model is the current state of the art.
15
 
16
  ## How it works
17
 
18
- 📈 We evaluate models on 7 key benchmarks using the <a href="https://github.com/EleutherAI/lm-evaluation-harness" target="_blank"> Eleuther AI Language Model Evaluation Harness </a>, a unified framework to test generative language models on a large number of different evaluation tasks.
19
-
20
- - <a href="https://arxiv.org/abs/1803.05457" target="_blank"> AI2 Reasoning Challenge </a> (25-shot) - a set of grade-school science questions.
21
- - <a href="https://arxiv.org/abs/1905.07830" target="_blank"> HellaSwag </a> (10-shot) - a test of commonsense inference, which is easy for humans (~95%) but challenging for SOTA models.
22
- - <a href="https://arxiv.org/abs/2009.03300" target="_blank"> MMLU </a> (5-shot) - a test to measure a text model's multitask accuracy. The test covers 57 tasks including elementary mathematics, US history, computer science, law, and more.
23
- - <a href="https://arxiv.org/abs/2109.07958" target="_blank"> TruthfulQA </a> (0-shot) - a test to measure a model's propensity to reproduce falsehoods commonly found online. Note: TruthfulQA in the Harness is actually a minima a 6-shots task, as it is prepended by 6 examples systematically, even when launched using 0 for the number of few-shot examples.
24
- - <a href="https://arxiv.org/abs/1907.10641" target="_blank"> Winogrande </a> (5-shot) - an adversarial and difficult Winograd benchmark at scale, for commonsense reasoning.
25
- - <a href="https://arxiv.org/abs/2110.14168" target="_blank"> GSM8k </a> (5-shot) - diverse grade school math word problems to measure a model's ability to solve multi-step mathematical reasoning problems.
26
-
27
- For all these evaluations, a higher score is a better score.
28
- We chose these benchmarks as they test a variety of reasoning and general knowledge across a wide variety of fields in 0-shot and few-shot settings.
29
-
30
- ## Details and logs
31
- You can find:
32
- - detailed numerical results in the `results` Hugging Face dataset: https://huggingface.co/datasets/open-llm-leaderboard/results
33
- - details on the input/outputs for the models in the `details` of each model, that you can access by clicking the 📄 emoji after the model name
34
- - community queries and running status in the `requests` Hugging Face dataset: https://huggingface.co/datasets/open-llm-leaderboard/requests
35
-
36
- ## Reproducibility
37
- To reproduce our results, here is the commands you can run, using [this version](https://github.com/EleutherAI/lm-evaluation-harness/tree/b281b0921b636bc36ad05c0b0b0763bd6dd43463) of the Eleuther AI Harness:
38
- `python main.py --model=hf-causal-experimental --model_args="pretrained=<your_model>,use_accelerate=True,revision=<your_model_revision>"`
39
- ` --tasks=<task_list> --num_fewshot=<n_few_shot> --batch_size=1 --output_path=<output_path>`
40
-
41
- The total batch size we get for models which fit on one A100 node is 8 (8 GPUs * 1). If you don't use parallelism, adapt your batch size to fit.
42
- *You can expect results to vary slightly for different batch sizes because of padding.*
43
-
44
- The tasks and few shots parameters are:
45
- - ARC: 25-shot, *arc-challenge* (`acc_norm`)
46
- - HellaSwag: 10-shot, *hellaswag* (`acc_norm`)
47
- - TruthfulQA: 0-shot, *truthfulqa-mc* (`mc2`)
48
- - MMLU: 5-shot, *hendrycksTest-abstract_algebra,hendrycksTest-anatomy,hendrycksTest-astronomy,hendrycksTest-business_ethics,hendrycksTest-clinical_knowledge,hendrycksTest-college_biology,hendrycksTest-college_chemistry,hendrycksTest-college_computer_science,hendrycksTest-college_mathematics,hendrycksTest-college_medicine,hendrycksTest-college_physics,hendrycksTest-computer_security,hendrycksTest-conceptual_physics,hendrycksTest-econometrics,hendrycksTest-electrical_engineering,hendrycksTest-elementary_mathematics,hendrycksTest-formal_logic,hendrycksTest-global_facts,hendrycksTest-high_school_biology,hendrycksTest-high_school_chemistry,hendrycksTest-high_school_computer_science,hendrycksTest-high_school_european_history,hendrycksTest-high_school_geography,hendrycksTest-high_school_government_and_politics,hendrycksTest-high_school_macroeconomics,hendrycksTest-high_school_mathematics,hendrycksTest-high_school_microeconomics,hendrycksTest-high_school_physics,hendrycksTest-high_school_psychology,hendrycksTest-high_school_statistics,hendrycksTest-high_school_us_history,hendrycksTest-high_school_world_history,hendrycksTest-human_aging,hendrycksTest-human_sexuality,hendrycksTest-international_law,hendrycksTest-jurisprudence,hendrycksTest-logical_fallacies,hendrycksTest-machine_learning,hendrycksTest-management,hendrycksTest-marketing,hendrycksTest-medical_genetics,hendrycksTest-miscellaneous,hendrycksTest-moral_disputes,hendrycksTest-moral_scenarios,hendrycksTest-nutrition,hendrycksTest-philosophy,hendrycksTest-prehistory,hendrycksTest-professional_accounting,hendrycksTest-professional_law,hendrycksTest-professional_medicine,hendrycksTest-professional_psychology,hendrycksTest-public_relations,hendrycksTest-security_studies,hendrycksTest-sociology,hendrycksTest-us_foreign_policy,hendrycksTest-virology,hendrycksTest-world_religions* (average of all the results `acc`)
49
- - Winogrande: 5-shot, *winogrande* (`acc`)
50
- - GSM8k: 5-shot, *gsm8k* (`acc`)
51
-
52
- Side note on the baseline scores:
53
- - for log-likelihood evaluation, we select the random baseline
54
- - for GSM8K, we select the score obtained in the paper after finetuning a 6B model on the full GSM8K training set for 50 epochs
55
-
56
- ## Icons
57
- - model: new, base models, trained on a given corpora
58
- - model: pretrained models finetuned on more data
59
- Specific fine-tune subcategories (more adapted to chat):
60
- - model: instruction fine-tunes, which are model fine-tuned specifically on datasets of task instruction
61
- - model: reinforcement fine-tunes, which usually change the model loss a bit with an added policy.
62
- If there is no icon, we have not uploaded the information on the model yet, feel free to open an issue with the model information!
63
-
64
- "Flagged" indicates that this model has been flagged by the community, and should probably be ignored! Clicking the link will redirect you to the discussion about the model.
65
-
66
- ## Quantization
67
- To get more information about quantization, see:
68
- - 8 bits: [blog post](https://huggingface.co/blog/hf-bitsandbytes-integration), [paper](https://arxiv.org/abs/2208.07339)
69
- - 4 bits: [blog post](https://huggingface.co/blog/4bit-transformers-bitsandbytes), [paper](https://arxiv.org/abs/2305.14314)
70
-
71
- ## Useful links
72
- - [Community resources](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/174)
73
- - [Collection of best models](https://huggingface.co/collections/open-llm-leaderboard/llm-leaderboard-best-models-652d6c7965a4619fb5c27a03)
74
- """
75
-
76
- FAQ_TEXT = """
77
- ---------------------------
78
- # FAQ
79
- Below are some common questions - if this FAQ does not answer you, feel free to create a new issue, and we'll take care of it as soon as we can!
80
-
81
- ## 1) Submitting a model
82
- My model requires `trust_remote_code=True`, can I submit it?
83
- - *We only support models that have been integrated in a stable version of the `transformers` library for automatic submission, as we don't want to run possibly unsage code on our cluster.*
84
-
85
- What about models of type X?
86
- - *We only support models that have been integrated in a stable version of the `transformers` library for automatic submission.*
87
-
88
- How can I follow when my model is launched?
89
- - *You can look for its request file [here](https://huggingface.co/datasets/open-llm-leaderboard/requests) and follow the status evolution, or directly in the queues above the submit form.*
90
-
91
- My model disappeared from all the queues, what happened?
92
- - *A model disappearing from all the queues usually means that there has been a failure. You can check if that is the case by looking for your model [here](https://huggingface.co/datasets/open-llm-leaderboard/requests).*
93
-
94
- What causes an evaluation failure?
95
- - *Most of the failures we get come from problems in the submissions (corrupted files, config problems, wrong parameters selected for eval ...), so we'll be grateful if you first make sure you have followed the steps in `About`. However, from time to time, we have failures on our side (hardware/node failures, problem with an update of our backend, connectivity problem ending up in the results not being saved, ...).*
96
-
97
- How can I report an evaluation failure?
98
- - *As we store the logs for all models, feel free to create an issue, **where you link to the requests file of your model** (look for it [here](https://huggingface.co/datasets/open-llm-leaderboard/requests/tree/main)), so we can investigate! If the model failed due to a problem on our side, we'll relaunch it right away!*
99
- *Note: Please do not re-upload your model under a different name, it will not help*
100
-
101
- ## 2) Model results
102
- What kind of information can I find?
103
- - *Let's imagine you are interested in the Yi-34B results. You have access to 3 different information categories:*
104
- - *The [request file](https://huggingface.co/datasets/open-llm-leaderboard/requests/blob/main/01-ai/Yi-34B_eval_request_False_bfloat16_Original.json): it gives you information about the status of the evaluation*
105
- - *The [aggregated results folder](https://huggingface.co/datasets/open-llm-leaderboard/results/tree/main/01-ai/Yi-34B): it gives you aggregated scores, per experimental run*
106
- - *The [details dataset](https://huggingface.co/datasets/open-llm-leaderboard/details_01-ai__Yi-34B/tree/main): it gives you the full details (scores and examples for each task and a given model)*
107
-
108
 
109
- Why do models appear several times in the leaderboard?
110
- - *We run evaluations with user selected precision and model commit. Sometimes, users submit specific models at different commits and at different precisions (for example, in float16 and 4bit to see how quantization affects performance). You should be able to verify this by displaying the `precision` and `model sha` columns in the display. If, however, you see models appearing several time with the same precision and hash commit, this is not normal.*
 
 
 
 
 
 
 
 
111
 
112
- What is this concept of "flagging"?
113
- - *This mechanism allows user to report models that have unfair performance on the leaderboard. This contains several categories: exceedingly good results on the leaderboard because the model was (maybe accidentally) trained on the evaluation data, models that are copy of other models not atrributed properly, etc.*
114
 
115
- My model has been flagged improperly, what can I do?
116
- - *Every flagged model has a discussion associated with it - feel free to plead your case there, and we'll see what to do together with the community.*
117
 
118
- ## 3) Editing a submission
119
- I upgraded my model and want to re-submit, how can I do that?
120
- - *Please open an issue with the precise name of your model, and we'll remove your model from the leaderboard so you can resubmit. You can also resubmit directly with the new commit hash!*
 
 
121
 
122
- I need to rename my model, how can I do that?
123
- - *You can use @Weyaxi 's [super cool tool](https://huggingface.co/spaces/Weyaxi/open-llm-leaderboard-renamer) to request model name changes, then open a discussion where you link to the created pull request, and we'll check them and merge them as needed.*
124
-
125
- ## 4) Other
126
- Why don't you display closed source model scores?
127
- - *This is a leaderboard for Open models, both for philosophical reasons (openness is cool) and for practical reasons: we want to ensure that the results we display are accurate and reproducible, but 1) commercial closed models can change their API thus rendering any scoring at a given time incorrect 2) we re-run everything on our cluster to ensure all models are run on the same setup and you can't do that for these models.*
128
 
129
- I have an issue about accessing the leaderboard through the Gradio API
130
- - *Since this is not the recommended way to access the leaderboard, we won't provide support for this, but you can look at tools provided by the community for inspiration!*
131
  """
132
 
133
 
134
  EVALUATION_QUEUE_TEXT = """
135
- # Evaluation Queue for the 🤗 Open LLM Leaderboard
136
-
137
- Agents added here will be automatically evaluated on the 🤗 cluster.
138
-
139
- ## First steps before submitting a agent
140
-
141
- ### 1) Make sure you can load your model and tokenizer using AutoClasses:
142
- ```python
143
- from transformers import AutoConfig, AutoModel, AutoTokenizer
144
- config = AutoConfig.from_pretrained("your model name", revision=revision)
145
- model = AutoModel.from_pretrained("your model name", revision=revision)
146
- tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
147
- ```
148
- If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.
149
-
150
- Note: make sure your model is public!
151
- Note: if your model needs `use_remote_code=True`, we do not support this option yet but we are working on adding it, stay posted!
152
-
153
- ### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
154
- It's a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!
155
-
156
- ### 3) Make sure your model has an open license!
157
- This is a leaderboard for Open LLMs, and we'd love for as many people as possible to know they can use your model 🤗
158
-
159
- ### 4) Fill up your model card
160
- When we add extra information about models to the leaderboard, it will be automatically taken from the model card
161
-
162
- ### 5) Select the correct precision
163
- Not all models are converted properly from `float16` to `bfloat16`, and selecting the wrong precision can sometimes cause evaluation error (as loading a `bf16` model in `fp16` can sometimes generate NaNs, depending on the weight range).
164
 
165
- ## In case of model failure
166
- If your model is displayed in the `FAILED` category, its execution stopped.
167
- Make sure you have followed the above steps first.
168
- If everything is done, check you can launch the EleutherAIHarness on your model locally, using the above command without modifications (you can add `--limit` to limit the number of examples per task).
169
  """
170
 
171
  CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
 
1
  # from src.display.utils import ModelType
2
 
3
+ TITLE = """
4
+ <embed src="../../assets/logo.pdf" width="200px" height="200px" />
5
+ <h1 align="center" id="space-title">GTBench: Uncovering the Strategic Reasoning Limitations of LLMs via Game-Theoretic Evaluations</h1>"""
6
 
7
  INTRODUCTION_TEXT = """
8
+ GTBench aims to evaluate and rank LLMs’ reasoning abilities in competitive environments through game-theoretic tasks, e.g., board and card games.
9
+ It utilizes 10 widely recognized games supported by <a href="https://github.com/google-deepmind/open_spiel" target="_blank">OpenSpiel</a> and evaluates LLM agents in a language-driven manner. The evaluation code and prompt templates can be found in the <a href="https://github.com/jinhaoduan/GTBench" target="_blank">GTBench repository</a>.
10
 
11
+ Please refer to the `About` tab for more details on the games and metrics.
12
+
13
+ The leaderboard template is borrowed from the <a href="https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard" target="_blank">Open LLM Leaderboard</a>.
14
  """
15
 
16
  LLM_BENCHMARKS_TEXT = f"""
17
  # Context
 
18
 
19
  ## How it works
20
 
21
+ We evaluate LLMs on 10 widely recognized game-theoretic tasks, including:
22
 
23
+ - <a href="https://en.wikipedia.org/wiki/Tic-tac-toe" target="_blank"> Tic-Tac-Toe</a>
24
+ - <a href="https://en.wikipedia.org/wiki/Connect_Four" target="_blank"> Connect-4 </a>
25
+ - <a href="https://en.wikipedia.org/wiki/Breakthrough_(board_game)" target="_blank"> Breakthrough</a>
26
+ - <a href="https://en.wikipedia.org/wiki/Nim" target="_blank"> Nim</a>
27
+ - <a href="https://en.wikipedia.org/wiki/First-price_sealed-bid_auction" target="_blank"> Blind Auction</a>
28
+ - <a href="https://en.wikipedia.org/wiki/Kuhn_poker" target="_blank"> Kuhn Poker</a>
29
+ - <a href="https://en.wikipedia.org/wiki/Liar\%27s_dice" target="_blank"> Liar's Dice</a>
30
+ - <a href="https://arxiv.org/pdf/1706.05125.pdf" target="_blank"> Negotiation</a>
31
+ - <a href="https://en.wikipedia.org/wiki/Pig_(dice_game)" target="_blank"> Pig</a>
32
+ - <a href="https://en.wikipedia.org/wiki/Prisoner\%27s_dilemma" target="_blank"> Prisoner's Dilemma</a>
33
 
34
+ ## Metric
35
+ We use Normalized Relative Advantage (NRA) to evaluate the performance of LLM agents. NRA(agent1, agent2) > 0 means agent1 achieves a higher win rate or earns more rewards than its opponent, agent2.
36
 
37
+ Please refer to the GTBench paper for more details.
 
38
 
39
+ ## Takeaways
40
+ - LLM agents fail in complete-information and deterministic games.
41
+ - LLM agents are competitive in probabilistic games.
42
+ - Code pretraining benefits game-theoretic tasks.
43
+ - Advanced reasoning methods do not always help.
44
 
45
+ """
46
 
47
+ FAQ_TEXT = """
 
48
  """
49
 
50
 
51
  EVALUATION_QUEUE_TEXT = """
52
+ # Evaluation for the GTBench leaderboard
53
 
54
  """
55
 
56
  CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
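
The Metric section above describes NRA only informally. As a rough sketch of the idea (the exact definition is in the GTBench paper; the per-match normalization used here is an assumption for illustration):

```python
# Hedged sketch of a Normalized Relative Advantage (NRA) style score.
# The exact GTBench definition is in the paper; averaging the per-match
# normalized reward difference, as done here, is an assumption.
def normalized_relative_advantage(rewards_a, rewards_b):
    """Positive -> agent A outperforms agent B over the matches; 0 -> even."""
    per_match = []
    for r_a, r_b in zip(rewards_a, rewards_b):
        denom = abs(r_a) + abs(r_b)
        per_match.append(0.0 if denom == 0 else (r_a - r_b) / denom)
    return sum(per_match) / len(per_match) if per_match else 0.0

# Example: agent A wins 3 of 5 zero-sum matches (per-match reward +1 / -1).
print(normalized_relative_advantage([1, 1, 1, -1, -1], [-1, -1, -1, 1, 1]))  # 0.2
```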
src/leaderboard/read_evals.py CHANGED
@@ -11,7 +11,7 @@ from huggingface_hub import ModelCard
11
 
12
  from src.display.formatting import make_clickable_model
13
  from src.display.utils import AutoEvalColumn, ModelType, Tasks, Precision, WeightType
14
- from src.submission.check_validity import is_model_on_hub
15
 
16
 
17
  @dataclass
 
11
 
12
  from src.display.formatting import make_clickable_model
13
  from src.display.utils import AutoEvalColumn, ModelType, Tasks, Precision, WeightType
14
+ # from src.submission.check_validity import is_model_on_hub
15
 
16
 
17
  @dataclass
src/submission/check_validity.py CHANGED
@@ -7,8 +7,8 @@ from datetime import datetime, timedelta, timezone
7
  import huggingface_hub
8
  from huggingface_hub import ModelCard
9
  from huggingface_hub.hf_api import ModelInfo
10
- from transformers import AutoConfig, AutoTokenizer
11
- from transformers.models.auto.tokenization_auto import tokenizer_class_from_name, get_tokenizer_config
12
 
13
  from src.envs import HAS_HIGHER_RATE_LIMIT
14
 
@@ -36,32 +36,32 @@ def check_model_card(repo_id: str) -> tuple[bool, str]:
36
 
37
  return True, ""
38
 
39
-
40
- def is_model_on_hub(model_name: str, revision: str, token: str = None, trust_remote_code=False, test_tokenizer=False) -> tuple[bool, str]:
41
- try:
42
- config = AutoConfig.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
43
- if test_tokenizer:
44
- try:
45
- tk = AutoTokenizer.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
46
- except ValueError as e:
47
- return (
48
- False,
49
- f"uses a tokenizer which is not in a transformers release: {e}",
50
- None
51
- )
52
- except Exception as e:
53
- return (False, "'s tokenizer cannot be loaded. Is your tokenizer class in a stable transformers release, and correctly configured?", None)
54
- return True, None, config
55
-
56
- except ValueError:
57
- return (
58
- False,
59
- "needs to be launched with `trust_remote_code=True`. For safety reason, we do not allow these models to be automatically submitted to the leaderboard.",
60
- None
61
- )
62
-
63
- except Exception as e:
64
- return False, "was not found on hub!", None
65
 
66
 
67
  def get_model_size(model_info: ModelInfo, precision: str):
 
7
  import huggingface_hub
8
  from huggingface_hub import ModelCard
9
  from huggingface_hub.hf_api import ModelInfo
10
+ # from transformers import AutoConfig, AutoTokenizer
11
+ # from transformers.models.auto.tokenization_auto import tokenizer_class_from_name, get_tokenizer_config
12
 
13
  from src.envs import HAS_HIGHER_RATE_LIMIT
14
 
 
36
 
37
  return True, ""
38
 
39
+ #
40
+ # def is_model_on_hub(model_name: str, revision: str, token: str = None, trust_remote_code=False, test_tokenizer=False) -> tuple[bool, str]:
41
+ # try:
42
+ # config = AutoConfig.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
43
+ # if test_tokenizer:
44
+ # try:
45
+ # tk = AutoTokenizer.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
46
+ # except ValueError as e:
47
+ # return (
48
+ # False,
49
+ # f"uses a tokenizer which is not in a transformers release: {e}",
50
+ # None
51
+ # )
52
+ # except Exception as e:
53
+ # return (False, "'s tokenizer cannot be loaded. Is your tokenizer class in a stable transformers release, and correctly configured?", None)
54
+ # return True, None, config
55
+ #
56
+ # except ValueError:
57
+ # return (
58
+ # False,
59
+ # "needs to be launched with `trust_remote_code=True`. For safety reason, we do not allow these models to be automatically submitted to the leaderboard.",
60
+ # None
61
+ # )
62
+ #
63
+ # except Exception as e:
64
+ # return False, "was not found on hub!", None
65
 
66
 
67
  def get_model_size(model_info: ModelInfo, precision: str):