Renming Zhang committed on
Commit 43feb65 • 1 Parent(s): 55aff67

added image

Files changed (3)
  1. app.py +11 -2
  2. assets/logo.png +0 -0
  3. src/display/about.py +0 -1
app.py CHANGED
@@ -24,6 +24,7 @@ from src.display.utils import (
     Precision
 )
 from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, H4_TOKEN, IS_PUBLIC, QUEUE_REPO, REPO_ID, RESULTS_REPO
+from PIL import Image
 # from src.populate import get_evaluation_queue_df, get_leaderboard_df
 # from src.submission.submit import add_new_eval
 # from src.tools.collections import update_collections
@@ -207,7 +208,7 @@ class LLM_Model:
 
 
 games = ["Breakthrough", "Connect Four", "Blind Auction", "Kuhn Poker",
-         "Liar's Dice", "Negotiation", "Nim", "Pig", "Iterated Prisoners Dilemma", "Tic-Tac-Toe"]
+         "Liar's Dice", "Negotiation", "Nim", "Pig", "Iterated Prisoner's Dilemma", "Tic-Tac-Toe"]
 
 # models = ["gpt-35-turbo-1106", "gpt-4", "Llama-2-70b-chat-hf", "CodeLlama-34b-Instruct-hf",
 #           "CodeLlama-70b-Instruct-hf", "Mistral-7B-Instruct-v01", "Mistral-7B-OpenOrca"]
@@ -217,10 +218,18 @@ games = ["Breakthrough", "Connect Four", "Blind Auction", "Kuhn Poker",
 
 demo = gr.Blocks(css=custom_css)
 
+
+def load_image(image_path):
+    image = Image.open(image_path)
+    return image
+
+
 with demo:
+    gr.Image("./assets/logo.png", height="200px", width="200px",
+             show_download_button=False, container=False)
     gr.HTML(TITLE)
-    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
 
+    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
     with gr.Tabs(elem_classes="tab-buttons") as tabs:
         with gr.TabItem("🏅 GTBench", elem_id="llm-benchmark-tab-table", id=0):
             with gr.Row():
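
For context, a minimal standalone sketch of the header this commit builds. This is not the Space's actual code: the TITLE and intro strings are stand-ins, and it assumes a Gradio 4.x install where gr.Image accepts CSS strings for height/width. Note that the load_image helper added above is not yet called in the hunks shown; gr.Image reads the path directly, though it would also accept the PIL image the helper returns.

import gradio as gr
from PIL import Image


def load_image(image_path):
    # Same shape as the helper added in this commit; opening the file eagerly
    # makes a missing asset fail at startup rather than at first page render.
    return Image.open(image_path)


with gr.Blocks() as demo:
    # Logo rendered as a plain image: no download button, no card container.
    # gr.Image accepts a file path (as the commit does) or a PIL image, so
    # gr.Image(load_image("./assets/logo.png"), ...) would work as well.
    gr.Image("./assets/logo.png", height="200px", width="200px",
             show_download_button=False, container=False)
    gr.HTML("<h1 align='center'>GTBench</h1>")  # stand-in for TITLE
    gr.Markdown("Intro text goes here.")  # stand-in for INTRODUCTION_TEXT

if __name__ == "__main__":
    demo.launch()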
assets/logo.png ADDED
src/display/about.py CHANGED
@@ -1,7 +1,6 @@
 # from src.display.utils import ModelType
 
 TITLE = """
-<embed src="../../assets/logo.pdf" width="200px" height="200px" />
 <h1 align="center" id="space-title">GTBench: Uncovering the Strategic Reasoning Limitation of LLMs via Game-Theoretic Evaluations</h1>"""
 
 INTRODUCTION_TEXT = """
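
The removed line embedded assets/logo.pdf directly in the TITLE HTML; after this commit the string carries only the <h1>, and the logo is supplied by the gr.Image call in app.py as a PNG, presumably because a PDF behind a ../../ relative <embed> renders inconsistently in the hosted Space. A minimal sketch of how the trimmed TITLE is consumed, with the string inlined here rather than imported from src.display.about:

import gradio as gr

TITLE = """
<h1 align="center" id="space-title">GTBench: Uncovering the Strategic Reasoning Limitation of LLMs via Game-Theoretic Evaluations</h1>"""

with gr.Blocks() as demo:
    # gr.HTML renders the raw heading markup; the logo is a separate
    # gr.Image component now, not an <embed> inside this string.
    gr.HTML(TITLE)

if __name__ == "__main__":
    demo.launch()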