fix submodule
- .gitignore +165 -1
- .gitmodules +3 -0
- README.md +5 -5
- app.py +138 -54
- formatted_data.csv +23 -8
- olas-predict-benchmark +1 -0
- requirements.txt +0 -1
- start.py +66 -0
- tabs/dashboard.py +15 -0
- tabs/faq.py +48 -0
- tabs/howto_benchmark.py +26 -0
- tabs/run_benchmark.py +37 -0
.gitignore CHANGED
@@ -1 +1,165 @@
-
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/#use-with-ide
+.pdm.toml
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
+
+flagged/
+results/
+uploads/
+__pycache__/
.gitmodules ADDED
@@ -0,0 +1,3 @@
+[submodule "olas-predict-benchmark"]
+    path = olas-predict-benchmark
+    url = https://github.com/valory-xyz/olas-predict-benchmark.git
README.md CHANGED
@@ -1,10 +1,10 @@
 ---
-title:
-emoji:
-colorFrom:
-colorTo:
+title: Leaderboard Gradio
+emoji: 🦉
+colorFrom: pink
+colorTo: blue
 sdk: gradio
-sdk_version: 4.
+sdk_version: 4.25.0
 app_file: app.py
 pinned: false
 license: apache-2.0
app.py CHANGED
@@ -1,61 +1,145 @@
+import start
 import gradio as gr
 import pandas as pd
+from glob import glob
+from pathlib import Path
+from tabs.dashboard import df
+from tabs.faq import (
+    about_olas_predict_benchmark,
+    about_olas_predict,
+    about_the_dataset,
+    about_the_tools
+)
+from tabs.howto_benchmark import how_to_run
+from tabs.run_benchmark import run_benchmark_main
 
 
-The content of the filtered URLs has been scraped using various libraries, depending on the source:
-- `pypdf2` for PDF URLs.
-- `wikipediaapi` for Wikipedia pages.
-- `requests`, `readability-lxml`, and `html2text` for most other sources.
-- `requests`, `beautifulsoup`, and `html2text` for BBC links.
-"""
-
-with gr.Blocks() as demo:
-    gr.Markdown("# Olas Predict Benchmark")
+demo = gr.Blocks()
+
+
+def run_benchmark_gradio(tool_name, model_name, num_questions, openai_api_key, anthropic_api_key):
+    """Run the benchmark using inputs."""
+    if tool_name is None:
+        return "Please enter the name of your tool."
+    if openai_api_key is None and anthropic_api_key is None:
+        return "Please enter either an OpenAI or an Anthropic API key."
+
+    result = run_benchmark_main(tool_name, model_name, num_questions, openai_api_key, anthropic_api_key)
+    if result == "completed":
+        # get the results files in the results directory
+        fns = glob("results/*.csv")
+
+        print(f"Number of files in results directory: {len(fns)}")
+
+        # convert to Path
+        files = [Path(file) for file in fns]
+
+        # get results and summary files
+        results_files = [file for file in files if "results" in file.name]
+
+        # the other file is the summary file
+        summary_files = [file for file in files if "summary" in file.name]
+
+        print(results_files, summary_files)
+
+        # load both CSVs
+        results_df = pd.read_csv(results_files[0])
+        summary_df = pd.read_csv(summary_files[0])
+
+        # make sure all df float values are rounded to 4 decimal places
+        results_df = results_df.round(4)
+        summary_df = summary_df.round(4)
+
+        return gr.Dataframe(value=results_df), gr.Dataframe(value=summary_df)
+
+    return gr.Textbox(label="Benchmark Result", value=result, interactive=False), gr.Textbox(label="Summary", value="")
+
+
+with demo:
+    gr.HTML("<h1>Olas Predict Benchmark</h1>")
     gr.Markdown("Leaderboard showing the performance of Olas Predict tools on the Autocast dataset and overview of the project.")
-    gr.DataFrame(df)
-    gr.Markdown(markdown_text)
 
+    with gr.Tabs() as tabs:
+        # first tab - leaderboard
+        with gr.TabItem("🏅 Benchmark Leaderboard", id=0):
+            gr.components.Dataframe(
+                value=df,
+            )
+
+        # second tab - about
+        with gr.TabItem("ℹ️ About"):
+            with gr.Row():
+                with gr.Accordion("About the Benchmark", open=False):
+                    gr.Markdown(about_olas_predict_benchmark)
+            with gr.Row():
+                with gr.Accordion("About the Tools", open=False):
+                    gr.Markdown(about_the_tools)
+            with gr.Row():
+                with gr.Accordion("About the Autocast Dataset", open=False):
+                    gr.Markdown(about_the_dataset)
+            with gr.Row():
+                with gr.Accordion("About Olas", open=False):
+                    gr.Markdown(about_olas_predict)
+
+        # third tab - how to run the benchmark
+        with gr.TabItem("🚀 Contribute"):
+            gr.Markdown(how_to_run)
+
+        def update_dropdown(tool):
+            if "claude" in tool:
+                return ["claude-3-haiku-20240307", "claude-3-sonnet-20240229", "claude-3-opus-20240229"]
+            else:
+                return ["gpt-3.5-turbo-0125", "gpt-4-0125-preview"]
+
+        # fourth tab - run the benchmark
+        with gr.TabItem("🔥 Run the Benchmark"):
+            with gr.Row():
+                tool_name = gr.Dropdown(
+                    [
+                        "prediction-offline",
+                        "prediction-online",
+                        "prediction-offline-sme",
+                        "prediction-online-sme",
+                        "claude-prediction-offline",
+                        "claude-prediction-online",
+                        "prediction-request-rag",
+                        "prediction-with-research-conservative",
+                        "prediction-with-research-bold",
+                        "prediction-request-reasoning-claude",
+                        "prediction-request-rag-claude",
+                        "prediction-url-cot-claude",
+                    ], label="Tool Name", info="Choose the tool to run")
+                model_name = gr.Dropdown([
+                    "gpt-3.5-turbo-0125",
+                    "gpt-4-0125-preview",
+                    "claude-3-haiku-20240307",
+                    "claude-3-sonnet-20240229",
+                    "claude-3-opus-20240229",
+                ], label="Model Name", info="Choose the model to use")
+            with gr.Row():
+                openai_api_key = gr.Textbox(label="OpenAI API Key", placeholder="Enter your OpenAI API key here", type="password")
+                anthropic_api_key = gr.Textbox(label="Anthropic API Key", placeholder="Enter your Anthropic API key here", type="password")
+            with gr.Row():
+                num_questions = gr.Slider(
+                    minimum=1,
+                    maximum=340,
+                    value=10,
+                    label="Number of questions to run the benchmark on",
+                )
+            with gr.Row():
+                run_button = gr.Button("Run Benchmark")
+            with gr.Row():
+                with gr.Accordion("Results", open=True):
+                    result = gr.Dataframe()
+            with gr.Row():
+                with gr.Accordion("Summary", open=False):
+                    summary = gr.Dataframe()
+
+            run_button.click(run_benchmark_gradio,
+                             inputs=[tool_name, model_name, num_questions, openai_api_key, anthropic_api_key],
+                             outputs=[result, summary])
+
+demo.queue(default_concurrency_limit=40).launch()
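One loose end in app.py above: update_dropdown is defined but never attached to an event, so the model list does not react to the chosen tool in this commit. A minimal sketch of how it could be wired, assuming Gradio 4's change event and component-update pattern (the handler name and placement are hypothetical, not part of the diff):

# hypothetical wiring, to sit next to the dropdowns inside the Blocks context
def sync_models(tool):
    choices = update_dropdown(tool)
    # returning an updated component swaps out the dropdown's choices
    return gr.Dropdown(choices=choices, value=choices[0])

tool_name.change(sync_models, inputs=tool_name, outputs=model_name)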
formatted_data.csv CHANGED
@@ -1,8 +1,23 @@
-Tool,Accuracy,Correct,Total,Mean Tokens Used,Mean Cost ($)
-claude-prediction-offline,0.
-claude-prediction-
-prediction-
-prediction-offline,0.
-prediction-
-prediction-
-prediction-online-
+Tool,Model,Accuracy,Correct,Total,Mean Tokens Used,Mean Cost ($)
+claude-prediction-offline,claude-3-sonnet-20240229,0.756838905775076,249,329,920.5987841945289,0.0033739787234042555
+claude-prediction-offline,claude-3-opus-20240229,0.7558823529411764,257,340,920.45,0.016866044117647028
+prediction-request-reasoning-claude,claude-3-sonnet-20240229,0.753125,241,320,2645.509375,0.019254515624999982
+prediction-offline,gpt-4-0125-preview,0.7507692307692307,244,325,727.1846153846154,0.008048953846153844
+prediction-offline-sme,gpt-4-0125-preview,0.7484848484848485,247,330,1416.8484848484848,0.018169212121212114
+prediction-request-reasoning,gpt-4-0125-preview,0.7483221476510067,223,298,1980.7281879194632,0.02567674496644293
+claude-prediction-online,claude-3-sonnet-20240229,0.7411764705882353,252,340,2832.7617647058823,0.00959039117647058
+prediction-url-cot-claude,claude-3-sonnet-20240229,0.7355623100303952,242,329,14789.27963525836,0.0510609574468085
+prediction-request-reasoning-claude,claude-3-opus-20240229,0.7337278106508875,248,338,2773.284023668639,0.10624464497041416
+prediction-request-rag-claude,claude-3-sonnet-20240229,0.7331288343558282,239,326,2850.1196319018404,0.01465865337423311
+claude-prediction-offline,claude-2,0.7201834862385321,157,218,779.4770642201835,0.006891669724770637
+prediction-request-rag,gpt-4-0125-preview,0.7161716171617162,217,303,1240.980198019802,0.013809207920792065
+prediction-request-reasoning-claude,claude-3-haiku-20240307,0.6982248520710059,236,338,2700.6508875739646,0.0016877189349112328
+prediction-with-research-bold,gpt-4-1106-preview,0.6938775510204082,34,49,9319.244897959185,0.11741489795918365
+prediction-online,gpt-4-0125-preview,0.713855421686747,237,332,1549.8524096385543,0.017273584337349383
+prediction-online-sme,gpt-4-0125-preview,0.7012195121951219,230,328,2237.868902439024,0.027385884146341445
+claude-prediction-online,claude-2,0.6600660066006601,200,303,1505.3135313531352,0.013348171617161701
+prediction-offline,gpt-3.5-turbo-0125,0.6578171091445427,223,339,730.1740412979351,0.0007721681415928988
+prediction-request-reasoning,gpt-3.5-turbo-0125,0.6506410256410257,203,312,1871.173076923077,0.002112727564102551
+prediction-offline-sme,gpt-3.5-turbo-0125,0.6294117647058823,214,340,1341.8323529411764,0.0014778852941176408
+prediction-online,gpt-3.5-turbo-0125,0.551622418879056,187,339,1576.684365781711,0.0016928525073746164
+prediction-online-sme,gpt-3.5-turbo-0125,0.49411764705882355,168,340,2189.1882352941175,0.002402523529411752
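tabs/dashboard.py (added below) loads this file verbatim for the leaderboard tab. A small sketch (not part of the commit) of slicing the same CSV with pandas, for example to rank tools by accuracy:

import pandas as pd

df = pd.read_csv("formatted_data.csv")
# rank rows by accuracy and keep the columns shown on the leaderboard
top = df.sort_values("Accuracy", ascending=False)
print(top[["Tool", "Model", "Accuracy", "Mean Cost ($)"]].head())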
olas-predict-benchmark ADDED
@@ -0,0 +1 @@
+Subproject commit 04f6cf738bc2fcae0416761ce3648f6dfecd08fc
requirements.txt DELETED
@@ -1 +0,0 @@
-gradio
start.py ADDED
@@ -0,0 +1,66 @@
+import os
+import sys
+import subprocess
+from huggingface_hub import hf_hub_download
+
+
+def run_command(command: str, cwd: str = None) -> tuple:
+    """Run a shell command in the specified directory and return the output."""
+    process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)
+    stdout, stderr = process.communicate()
+    if process.returncode != 0:
+        print(f"Error: {stderr.decode()}")
+    else:
+        print(f"Output: {stdout.decode()}")
+    return stdout, stderr
+
+def download_dataset():
+    """Download the dataset."""
+    print("Downloading the dataset...")
+    repo_id = "valory/autocast"
+    base_dir = os.getcwd()
+    output_dir = os.path.join(base_dir, "olas-predict-benchmark", "benchmark", "data", "autocast")
+    if not os.path.exists(output_dir):
+        os.makedirs(output_dir, exist_ok=True)
+    filenames = [
+        "autocast_questions_filtered.json",
+        "autocast_questions_filtered.pkl",
+    ]
+    for filename in filenames:
+        hf_hub_download(repo_id=repo_id, filename=filename, local_dir=output_dir, repo_type="dataset")
+    print("Dataset downloaded successfully.")
+
+def start():
+    """Start commands."""
+    print("Starting commands...")
+    base_dir = os.getcwd()
+    olas_dir = os.path.join(base_dir, "olas-predict-benchmark")
+    mech_dir = os.path.join(olas_dir, "benchmark", "mech")
+
+    commands = [
+        ("git submodule init", base_dir),
+        ("git submodule update --init --recursive", base_dir),
+        ("git submodule update --remote --recursive", base_dir),
+        ('git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*"', olas_dir),
+        ("git remote update", olas_dir),
+        ("git fetch --all", olas_dir),
+        ('git show-ref --verify --quiet "refs/remotes/origin/fix/mech-packages"', olas_dir),
+        ("git checkout fix/mech-packages", olas_dir),
+        ("git pull origin fix/mech-packages", olas_dir),
+        ("git checkout main", mech_dir),
+        ("git pull origin main", mech_dir),
+        ("pip install -e .", os.path.join(olas_dir, "benchmark")),
+        ("pip install -e .", mech_dir),
+        ("pip install lxml[html_clean]", base_dir),
+    ]
+
+    for command, cwd in commands:
+        run_command(command, cwd=cwd)
+
+    # add benchmark to the path
+    sys.path.append(os.path.join(olas_dir, "benchmark"))
+
+    # Download the dataset
+    download_dataset()
+
+start()  # runs at import time: app.py triggers this setup with "import start"
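run_command also works as a general-purpose helper outside start(); a trivial usage sketch (hypothetical command, not in the commit):

# both values are raw bytes from the subprocess pipes
stdout, stderr = run_command("git rev-parse HEAD", cwd=".")
print(stdout.decode().strip())  # e.g. the current commit hash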
tabs/dashboard.py ADDED
@@ -0,0 +1,15 @@
+import gradio as gr
+import pandas as pd
+
+csv_file_path = "formatted_data.csv"
+
+def return_df():
+    # Reading the CSV file
+    df = pd.read_csv(csv_file_path)
+
+    # all floats to be rounded to 4 decimal places
+    df = df.round(4)
+    return df
+
+
+df = return_df()
tabs/faq.py ADDED
@@ -0,0 +1,48 @@
+about_olas_predict_benchmark = """\
+How good are LLMs at making predictions about events in the future? This is a topic that hasn't been well explored to date.
+[Olas Predict](https://olas.network/services/prediction-agents) aims to rectify this by incentivizing the creation of agents that predict the future (through prediction markets).
+This is a leaderboard showing the performance of LLM tools for making predictions (event forecasting) on a dataset refined from Autocast. \
+The leaderboard shows tool performance in terms of accuracy and cost. \
+
+🤖 Pick a tool and run it on the benchmark using the "🔥 Run the Benchmark" page!
+"""
+
+about_the_tools = """\
+- [Prediction Offline](https://github.com/valory-xyz/mech/blob/main/packages/valory/customs/prediction_request/prediction_request.py) - Uses prompt engineering, but no web crawling, to make predictions.
+- [Prediction Online](https://github.com/valory-xyz/mech/blob/main/packages/valory/customs/prediction_request/prediction_request.py) - Uses prompt engineering, as well as web crawling, to make predictions.
+- [Prediction SME](https://github.com/valory-xyz/mech/blob/main/packages/nickcom007/customs/prediction_request_sme/prediction_request_sme.py) - Uses prompt engineering to get the LLM to act as a Subject Matter Expert (SME) in making a prediction.
+- [Prediction with RAG](https://github.com/valory-xyz/mech/blob/main/packages/napthaai/customs/prediction_request_rag/prediction_request_rag.py) - Uses retrieval-augmented generation (RAG) over extracted search results to make predictions.
+- [Prediction with Research Report](https://github.com/valory-xyz/mech/blob/main/packages/polywrap/customs/prediction_with_research_report/prediction_with_research_report.py) - Generates a research report before making a prediction.
+- [Prediction with Reasoning](https://github.com/valory-xyz/mech/blob/main/packages/napthaai/customs/prediction_request_reasoning/prediction_request_reasoning.py) - Incorporates an additional call to the LLM to reason over retrieved data.
+- [Prediction with CoT](https://github.com/valory-xyz/mech/blob/main/packages/napthaai/customs/prediction_url_cot/prediction_url_cot.py) - Uses Chain of Thought (CoT) to make predictions.
+"""
+
+about_the_dataset = """\
+## Dataset Overview
+This project leverages the Autocast dataset from the research paper titled ["Forecasting Future World Events with Neural Networks"](https://arxiv.org/abs/2206.15474).
+The dataset has undergone further refinement to enhance the performance evaluation of Olas mech prediction tools.
+Both the original and refined datasets are hosted on HuggingFace.
+### Refined Dataset Files
+- You can find the refined dataset on HuggingFace [here](https://huggingface.co/datasets/valory/autocast).
+- `autocast_questions_filtered.json`: A JSON subset of the initial autocast dataset.
+- `autocast_questions_filtered.pkl`: A pickle file mapping URLs to their respective scraped documents within the filtered dataset.
+- `retrieved_docs.pkl`: Contains all the scraped texts.
+### Filtering Criteria
+To refine the dataset, we applied the following criteria to ensure the reliability of the URLs:
+- URLs not returning HTTP 200 status codes are excluded.
+- Difficult-to-scrape sites, such as Twitter and Bloomberg, are omitted.
+- Links with fewer than 1000 words are removed.
+- Only samples with a minimum of 5 and a maximum of 20 working URLs are retained.
+### Scraping Approach
+The content of the filtered URLs has been scraped using various libraries, depending on the source:
+- `pypdf2` for PDF URLs.
+- `wikipediaapi` for Wikipedia pages.
+- `requests`, `readability-lxml`, and `html2text` for most other sources.
+- `requests`, `beautifulsoup`, and `html2text` for BBC links.
+"""
+
+about_olas_predict = """\
+Olas is a network of autonomous services that can run complex logic in a decentralized manner, interacting with on- and off-chain data autonomously and continuously. For other use cases check out [olas.network](https://olas.network/).
+Since 'Olas' means 'waves' in Spanish, it is sometimes referred to as the 'ocean of services' 🌊.
+The project is co-created by [Valory](https://www.valory.xyz/). Valory aspires to enable communities, organizations and countries to co-own AI systems, beginning with decentralized autonomous agents.
+"""
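The generic scraping path described in about_the_dataset maps to a small helper. A minimal sketch (illustrative, not part of the commit) using the libraries the text names for most sources — requests, readability-lxml, and html2text:

import requests
import html2text
from readability import Document  # provided by readability-lxml

def scrape_url(url: str) -> str:
    """Illustrative: fetch a page and reduce it to plain text."""
    resp = requests.get(url, timeout=30)
    resp.raise_for_status()  # mirrors the HTTP-200 filtering criterion
    main_html = Document(resp.text).summary()  # extract the main article block
    return html2text.html2text(main_html)  # HTML -> markdown-ish plain text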
tabs/howto_benchmark.py ADDED
@@ -0,0 +1,26 @@
+how_to_run = """\
+## Create a new Tool
+
+- Fork the repo [here](https://github.com/valory-xyz/mech).
+- Create your tool in mech/packages/{your-name-here}/{your-tool}.py
+- Submit a PR
+
+## Test your Tool on the Benchmark on HF
+
+- Navigate to the "🔥 Run the Benchmark" page
+- Choose your tool and model
+- Enter your API key
+- Press "Run Benchmark"
+- Inspect the results
+
+## Test your Tool on the Benchmark locally
+
+- Fork the repo [here](https://github.com/valory-xyz/olas-predict-benchmark).
+- Follow the instructions in the readme.
+
+## Run a Prediction Agent that uses these Tools to place real bets
+
+- Fork the repo [here](https://github.com/valory-xyz/trader-quickstart).
+- Follow the instructions in the readme.
+
+"""
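For the "Create a new Tool" step, the mech tools linked in tabs/faq.py each expose a module-level run entry point. A hypothetical skeleton follows; copy the exact signature and response format from an existing tool such as prediction_request.py rather than relying on this sketch:

from typing import Any, Dict, Optional, Tuple

def run(**kwargs) -> Tuple[Optional[str], Optional[str], Optional[Dict[str, Any]], Any]:
    """Hypothetical tool skeleton; mirror an existing mech tool for the real contract."""
    prompt = kwargs["prompt"]  # assumed key: the market question to forecast
    # ... call your model of choice here and turn its answer into probabilities ...
    response = '{"p_yes": 0.5, "p_no": 0.5, "confidence": 0.5, "info_utility": 0.5}'
    return response, prompt, None, None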
tabs/run_benchmark.py ADDED
@@ -0,0 +1,37 @@
+import os
+from benchmark.run_benchmark import run_benchmark
+
+
+def run_benchmark_main(tool_name, model_name, num_questions, openai_api_key, anthropic_api_key):
+    """Run the benchmark using the provided function and API key."""
+    # Empty the results directory
+    os.system("rm -rf results/*")
+
+    # Set the benchmark parameters
+    kwargs = {}
+    if not num_questions:
+        num_questions = 10
+    kwargs["num_questions"] = num_questions
+    kwargs["tools"] = [tool_name]
+    if model_name:
+        kwargs["model"] = model_name
+    kwargs["api_keys"] = {}
+    if openai_api_key:
+        kwargs["api_keys"]["openai"] = openai_api_key
+    if anthropic_api_key:
+        kwargs["api_keys"]["anthropic"] = anthropic_api_key
+
+    kwargs["num_urls"] = 3
+    kwargs["num_words"] = 300
+    kwargs["provide_source_links"] = True
+
+    print(f"Running benchmark with the following parameters: {kwargs}")
+
+    # Run the benchmark
+    try:
+        run_benchmark(kwargs=kwargs)
+        return "completed"
+    except Exception as e:
+        return f"Error running benchmark: {e}"
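A quick smoke test of run_benchmark_main outside Gradio (hypothetical values; assumes the benchmark package installed by start.py and a valid key):

from tabs.run_benchmark import run_benchmark_main

status = run_benchmark_main(
    tool_name="prediction-offline",
    model_name="gpt-3.5-turbo-0125",
    num_questions=5,
    openai_api_key="sk-...",  # placeholder, supply your own
    anthropic_api_key=None,
)
print(status)  # "completed" on success, otherwise the error string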