arshy committed on
Commit
52938ea
•
1 Parent(s): 4cd139e

initial commit

Files changed (12)
  1. .gitignore +165 -0
  2. .gitmodules +4 -0
  3. Dockerfile +30 -0
  4. app.py +139 -0
  5. formatted_data.csv +23 -0
  6. poetry.lock +0 -0
  7. pyproject.toml +17 -0
  8. start.sh +22 -0
  9. tabs/dashboard.py +15 -0
  10. tabs/faq.py +48 -0
  11. tabs/howto_benchmark.py +26 -0
  12. tabs/run_benchmark.py +44 -0
.gitignore ADDED
@@ -0,0 +1,165 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/#use-with-ide
+ .pdm.toml
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
+
+ flagged/
+ results/
+ uploads/
+ __pycache__/
.gitmodules ADDED
@@ -0,0 +1,4 @@
+ [submodule "olas-predict-benchmark"]
+ path = olas-predict-benchmark
+ url = https://github.com/valory-xyz/olas-predict-benchmark.git
+ branch = fix/mech-packages
Dockerfile ADDED
@@ -0,0 +1,30 @@
+ # Use an official Python runtime as a parent image
+ FROM python:3.10-slim-buster
+
+ # Set the working directory in the container to /app
+ WORKDIR /app
+
+ # Copy the current directory contents into the container at /app
+ COPY . /app
+
+ # Install git and any other dependencies you might need
+ RUN apt-get update && \
+     apt-get install -y --no-install-recommends git && \
+     apt-get clean && \
+     rm -rf /var/lib/apt/lists/*
+
+ # Install Poetry
+ RUN pip install poetry
+
+
+ # Run submodule update and checkout commands
+ RUN git submodule update --init --recursive && \
+     cd olas-predict-benchmark/benchmark && git checkout fix/mech-packages && cd ../.. && \
+     cd olas-predict-benchmark/benchmark/mech && git checkout main && cd ../../..
+
+ # Install project dependencies
+ RUN poetry install --no-interaction --no-ansi
+
+ EXPOSE 7860
+
+ CMD ["poetry", "run", "python", "app.py"]
app.py ADDED
@@ -0,0 +1,139 @@
+ import gradio as gr
+ import pandas as pd
+ from glob import glob
+ from pathlib import Path
+ from tabs.dashboard import df
+ from tabs.faq import (
+     about_olas_predict_benchmark,
+     about_olas_predict,
+     about_the_dataset,
+     about_the_tools
+ )
+
+ from tabs.howto_benchmark import how_to_run
+ from tabs.run_benchmark import run_benchmark_main
+
+
+ demo = gr.Blocks()
+
+
+ def run_benchmark_gradio(tool_name, model_name, openai_api_key, anthropic_api_key):
+     """Run the benchmark using inputs."""
+     if tool_name is None:
+         return "Please enter the name of your tool."
+     if openai_api_key is None and anthropic_api_key is None:
+         return "Please enter either an OpenAI or Anthropic API key."
+
+     result = run_benchmark_main(tool_name, model_name, openai_api_key, anthropic_api_key)
+     if result == 'completed':
+         # get the results files in the results directory
+         fns = glob('results/*.csv')
+
+         print(f"Number of files in results directory: {len(fns)}")
+
+         # convert to Path
+         files = [Path(file) for file in fns]
+
+         # get results and summary files
+         results_files = [file for file in files if 'results' in file.name]
+
+         # the other file is the summary file
+         summary_files = [file for file in files if 'summary' in file.name]
+
+         print(results_files, summary_files)
+
+         # read the results
+         results_df = pd.read_csv(results_files[0])
+         summary_df = pd.read_csv(summary_files[0])
+
+         # make sure all float values are rounded to 4 decimal places
+         results_df = results_df.round(4)
+         summary_df = summary_df.round(4)
+
+         return gr.Dataframe(value=results_df), gr.Dataframe(value=summary_df)
+
+     return gr.Textbox(label="Benchmark Result", value=result, interactive=False), gr.Textbox(label="Summary", value="")
+
+
+ with demo:
+     gr.HTML("<h1>Olas Predict Benchmark</h1>")
+     gr.Markdown("Leaderboard showing the performance of Olas Predict tools on the Autocast dataset and an overview of the project.")
+
+     with gr.Tabs() as tabs:
+         # first tab - leaderboard
+         with gr.TabItem("🏅 Benchmark Leaderboard", id=0):
+
+             gr.components.Dataframe(
+                 value=df,
+             )
+
+         # second tab - about
+         with gr.TabItem("ℹ️ About"):
+             with gr.Row():
+                 with gr.Accordion("About the Benchmark", open=False):
+                     gr.Markdown(about_olas_predict_benchmark)
+             with gr.Row():
+                 with gr.Accordion("About the Tools", open=False):
+                     gr.Markdown(about_the_tools)
+             with gr.Row():
+                 with gr.Accordion("About the Autocast Dataset", open=False):
+                     gr.Markdown(about_the_dataset)
+             with gr.Row():
+                 with gr.Accordion("About Olas", open=False):
+                     gr.Markdown(about_olas_predict)
+
+
+         # third tab - how to run the benchmark
+         with gr.TabItem("🚀 Contribute"):
+             gr.Markdown(how_to_run)
+
+         def update_dropdown(tool):
+             if "claude" in tool:
+                 return ["claude-3-haiku-20240307", "claude-3-sonnet-20240229", "claude-3-opus-20240229"]
+             else:
+                 return ["gpt-3.5-turbo-0125", "gpt-4-0125-preview"]
+
+
+         # fourth tab - run the benchmark
+         with gr.TabItem("🔥 Run the Benchmark"):
+             with gr.Row():
+                 tool_name = gr.Dropdown(
+                     [
+                         "prediction-offline",
+                         "prediction-online",
+                         # "prediction-online-summarized-info",
+                         "prediction-offline-sme",
+                         "prediction-online-sme",
+                         "claude-prediction-offline",
+                         "claude-prediction-online",
+                         "prediction-request-rag",
+                         "prediction-with-research-conservative",
+                         "prediction-with-research-bold",
+                         "prediction-request-reasoning-claude",
+                         "prediction-request-rag-claude",
+                         "prediction-url-cot-claude",
+                     ], label="Tool Name", info="Choose the tool to run")
+                 model_name = gr.Dropdown([
+                     "gpt-3.5-turbo-0125",
+                     "gpt-4-0125-preview",
+                     "claude-3-haiku-20240307",
+                     "claude-3-sonnet-20240229",
+                     "claude-3-opus-20240229",
+                 ], label="Model Name", info="Choose the model to use")
+             with gr.Row():
+                 openai_api_key = gr.Textbox(label="OpenAI API Key", placeholder="Enter your OpenAI API key here", type="password")
+                 anthropic_api_key = gr.Textbox(label="Anthropic API Key", placeholder="Enter your Anthropic API key here", type="password")
+             with gr.Row():
+                 run_button = gr.Button("Run Benchmark")
+             with gr.Row():
+                 with gr.Accordion("Results", open=True):
+                     result = gr.Dataframe()
+             with gr.Row():
+                 with gr.Accordion("Summary", open=False):
+                     summary = gr.Dataframe()
+
+             run_button.click(run_benchmark_gradio,
+                              inputs=[tool_name, model_name, openai_api_key, anthropic_api_key],
+                              outputs=[result, summary])
+
+ demo.queue(default_concurrency_limit=40).launch(server_port=7860)
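Note that app.py defines update_dropdown but never attaches it to a component event, so the model list does not currently react to the chosen tool. Below is a minimal sketch of how it could be wired, assuming Gradio 4's .change() event and the convention of returning a gr.Dropdown(...) to update the existing component; this wiring is not part of the commit.

import gradio as gr

def update_dropdown(tool):
    # Offer Claude models for claude-* tools, OpenAI models otherwise.
    if "claude" in tool:
        choices = ["claude-3-haiku-20240307", "claude-3-sonnet-20240229", "claude-3-opus-20240229"]
    else:
        choices = ["gpt-3.5-turbo-0125", "gpt-4-0125-preview"]
    return gr.Dropdown(choices=choices, label="Model Name", info="Choose the model to use")

# Inside the Blocks context, after both dropdowns have been created:
tool_name.change(update_dropdown, inputs=tool_name, outputs=model_name)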
formatted_data.csv ADDED
@@ -0,0 +1,23 @@
+ Tool,Model,Accuracy,Correct,Total,Mean Tokens Used,Mean Cost ($)
+ claude-prediction-offline,claude-3-sonnet-20240229,0.756838905775076,249,329,920.5987841945289,0.0033739787234042555
+ claude-prediction-offline,claude-3-opus-20240229,0.7558823529411764,257,340,920.45,0.016866044117647028
+ prediction-request-reasoning-claude,claude-3-sonnet-20240229,0.753125,241,320,2645.509375,0.019254515624999982
+ prediction-offline,gpt-4-0125-preview,0.7507692307692307,244,325,727.1846153846154,0.008048953846153844
+ prediction-offline-sme,gpt-4-0125-preview,0.7484848484848485,247,330,1416.8484848484848,0.018169212121212114
+ prediction-request-reasoning,gpt-4-0125-preview,0.7483221476510067,223,298,1980.7281879194632,0.02567674496644293
+ claude-prediction-online,claude-3-sonnet-20240229,0.7411764705882353,252,340,2832.7617647058823,0.00959039117647058
+ prediction-url-cot-claude,claude-3-sonnet-20240229,0.7355623100303952,242,329,14789.27963525836,0.0510609574468085
+ prediction-request-reasoning-claude,claude-3-opus-20240229,0.7337278106508875,248,338,2773.284023668639,0.10624464497041416
+ prediction-request-rag-claude,claude-3-sonnet-20240229,0.7331288343558282,239,326,2850.1196319018404,0.01465865337423311
+ claude-prediction-offline,claude-2,0.7201834862385321,157,218,779.4770642201835,0.006891669724770637
+ prediction-request-rag,gpt-4-0125-preview,0.7161716171617162,217,303,1240.980198019802,0.013809207920792065
+ prediction-request-reasoning-claude,claude-3-haiku-20240307,0.6982248520710059,236,338,2700.6508875739646,0.0016877189349112328
+ prediction-with-research-bold,gpt-4-1106-preview,0.6938775510204082,34,49,9319.244897959185,0.11741489795918365
+ prediction-online,gpt-4-0125-preview,0.713855421686747,237,332,1549.8524096385543,0.017273584337349383
+ prediction-online-sme,gpt-4-0125-preview,0.7012195121951219,230,328,2237.868902439024,0.027385884146341445
+ claude-prediction-online,claude-2,0.6600660066006601,200,303,1505.3135313531352,0.013348171617161701
+ prediction-offline,gpt-3.5-turbo-0125,0.6578171091445427,223,339,730.1740412979351,0.0007721681415928988
+ prediction-request-reasoning,gpt-3.5-turbo-0125,0.6506410256410257,203,312,1871.173076923077,0.002112727564102551
+ prediction-offline-sme,gpt-3.5-turbo-0125,0.6294117647058823,214,340,1341.8323529411764,0.0014778852941176408
+ prediction-online,gpt-3.5-turbo-0125,0.551622418879056,187,339,1576.684365781711,0.0016928525073746164
+ prediction-online-sme,gpt-3.5-turbo-0125,0.49411764705882355,168,340,2189.1882352941175,0.002402523529411752
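formatted_data.csv is the static leaderboard that tabs/dashboard.py loads at startup. A quick sketch for inspecting it outside the app, assuming pandas is available in the environment (it is imported by app.py):

import pandas as pd

df = pd.read_csv("formatted_data.csv")
# Highest accuracy first; lower mean cost breaks ties.
top = df.sort_values(["Accuracy", "Mean Cost ($)"], ascending=[False, True])
print(top[["Tool", "Model", "Accuracy", "Mean Cost ($)"]].head(10).to_string(index=False))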
poetry.lock ADDED
The diff for this file is too large to render. See raw diff
 
pyproject.toml ADDED
@@ -0,0 +1,17 @@
+ [tool.poetry]
+ name = "leaderboard-gradio"
+ version = "0.1.0"
+ description = ""
+ authors = ["Richard Blythman <richardblythman@gmail.com>"]
+ readme = "README.md"
+
+ [tool.poetry.dependencies]
+ python = "^3.10"
+ benchmark = { path = "./olas-predict-benchmark/benchmark", develop = true }
+ mech = { path = "./olas-predict-benchmark/benchmark/mech", develop = true }
+ gradio = "^4.25.0"
+
+
+ [build-system]
+ requires = ["poetry-core"]
+ build-backend = "poetry.core.masonry.api"
start.sh ADDED
@@ -0,0 +1,22 @@
+ #!/bin/bash
+
+ # Update and initialize submodules recursively
+ git submodule update --init --recursive
+
+ # Checkout specific branches for the submodules
+ cd olas-predict-benchmark/benchmark
+ git checkout fix/mech-packages
+ cd ../..
+
+ cd olas-predict-benchmark/benchmark/mech
+ git checkout main
+ cd ../../..
+
+ # Configure poetry to create virtual environments within the project directory
+ poetry config virtualenvs.in-project true
+
+ # Install dependencies as specified in the poetry.lock file
+ poetry install
+
+ # Run the Gradio app
+ poetry run python app.py
tabs/dashboard.py ADDED
@@ -0,0 +1,15 @@
+ import gradio as gr
+ import pandas as pd
+
+ csv_file_path = "formatted_data.csv"
+
+ def return_df():
+     # Read the CSV file
+     df = pd.read_csv(csv_file_path)
+
+     # round all floats to 4 decimal places
+     df = df.round(4)
+     return df
+
+
+ df = return_df()
tabs/faq.py ADDED
@@ -0,0 +1,48 @@
+ about_olas_predict_benchmark = """\
+ How good are LLMs at making predictions about events in the future? This is a topic that hasn't been well explored to date.
+ [Olas Predict](https://olas.network/services/prediction-agents) aims to rectify this by incentivizing the creation of agents that predict the future (through prediction markets).
+ This is a leaderboard showing the performance of LLM tools for making predictions (event forecasting) on a dataset refined from Autocast.\
+ The leaderboard shows tool performance in terms of accuracy and cost. \
+
+ 🤗 Pick a tool and run it on the benchmark using the "🔥 Run the Benchmark" page!
+ """
+
+ about_the_tools = """\
+ - [Prediction Offline](https://github.com/valory-xyz/mech/blob/main/packages/valory/customs/prediction_request/prediction_request.py) - Uses prompt engineering, but no web crawling, to make predictions.
+ - [Prediction Online](https://github.com/valory-xyz/mech/blob/main/packages/valory/customs/prediction_request/prediction_request.py) - Uses prompt engineering, as well as web crawling, to make predictions.
+ - [Prediction SME](https://github.com/valory-xyz/mech/blob/main/packages/nickcom007/customs/prediction_request_sme/prediction_request_sme.py) - Uses prompt engineering to get the LLM to act as a Subject Matter Expert (SME) when making a prediction.
+ - [Prediction with RAG](https://github.com/valory-xyz/mech/blob/main/packages/napthaai/customs/prediction_request_rag/prediction_request_rag.py) - Uses retrieval-augmented generation (RAG) over extracted search results to make predictions.
+ - [Prediction with Research Report](https://github.com/valory-xyz/mech/blob/main/packages/polywrap/customs/prediction_with_research_report/prediction_with_research_report.py) - Generates a research report before making a prediction.
+ - [Prediction with Reasoning](https://github.com/valory-xyz/mech/blob/main/packages/napthaai/customs/prediction_request_reasoning/prediction_request_reasoning.py) - Incorporates an additional call to the LLM to do reasoning over the retrieved data.
+ - [Prediction with CoT](https://github.com/valory-xyz/mech/blob/main/packages/napthaai/customs/prediction_url_cot/prediction_url_cot.py) - Uses Chain of Thought (CoT) prompting to make predictions.
+ """
+
+ about_the_dataset = """\
+ ## Dataset Overview
+ This project leverages the Autocast dataset from the research paper titled ["Forecasting Future World Events with Neural Networks"](https://arxiv.org/abs/2206.15474).
+ The dataset has undergone further refinement to enhance the performance evaluation of Olas mech prediction tools.
+ Both the original and refined datasets are hosted on HuggingFace.
+ ### Refined Dataset Files
+ - You can find the refined dataset on HuggingFace [here](https://huggingface.co/datasets/valory/autocast).
+ - `autocast_questions_filtered.json`: A JSON subset of the initial Autocast dataset.
+ - `autocast_questions_filtered.pkl`: A pickle file mapping URLs to their respective scraped documents within the filtered dataset.
+ - `retrieved_docs.pkl`: Contains all the scraped texts.
+ ### Filtering Criteria
+ To refine the dataset, we applied the following criteria to ensure the reliability of the URLs:
+ - URLs not returning HTTP 200 status codes are excluded.
+ - Difficult-to-scrape sites, such as Twitter and Bloomberg, are omitted.
+ - Links with fewer than 1000 words are removed.
+ - Only samples with a minimum of 5 and a maximum of 20 working URLs are retained.
+ ### Scraping Approach
+ The content of the filtered URLs has been scraped using various libraries, depending on the source:
+ - `pypdf2` for PDF URLs.
+ - `wikipediaapi` for Wikipedia pages.
+ - `requests`, `readability-lxml`, and `html2text` for most other sources.
+ - `requests`, `beautifulsoup`, and `html2text` for BBC links.
+ """
+
+ about_olas_predict = """\
+ Olas is a network of autonomous services that can run complex logic in a decentralized manner, interacting with on- and off-chain data autonomously and continuously. For other use cases, check out [olas.network](https://olas.network/).
+ Since 'Olas' means 'waves' in Spanish, it is sometimes referred to as the 'ocean of services' 🌊.
+ The project is co-created by [Valory](https://www.valory.xyz/). Valory aspires to enable communities, organizations and countries to co-own AI systems, beginning with decentralized autonomous agents.
+ """
tabs/howto_benchmark.py ADDED
@@ -0,0 +1,26 @@
+ how_to_run = """\
+ ## Create a new Tool
+
+ - Fork the repo [here](https://github.com/valory-xyz/mech).
+ - Create your tool in mech/packages/{your-name-here}/{your-tool}.py
+ - Submit a PR
+
+ ## Test your Tool on the Benchmark on HF
+
+ - Navigate to the "🔥 Run the Benchmark" page
+ - Choose your tool and model
+ - Enter your API key
+ - Press "Run Benchmark"
+ - Inspect the results
+
+ ## Test your Tool on the Benchmark locally
+
+ - Fork the repo [here](https://github.com/valory-xyz/olas-predict-benchmark).
+ - Follow the instructions in the readme.
+
+ ## Run a Prediction Agent that uses these Tools to place real bets
+
+ - Fork the repo [here](https://github.com/valory-xyz/trader-quickstart).
+ - Follow the instructions in the readme.
+
+ """
tabs/run_benchmark.py ADDED
@@ -0,0 +1,44 @@
+ import os
+ import importlib
+ import sys
+ # from pathlib import Path
+ from benchmark.run_benchmark import run_benchmark
+ # from dotenv import load_dotenv
+
+
+ # file_path = Path(__file__).resolve()
+ # parent_path = file_path.parent.parent
+ # load_dotenv(parent_path / ".env")
+
+
+ def run_benchmark_main(tool_name, model_name, openai_api_key, anthropic_api_key):
+     """Run the benchmark using the provided function and API key."""
+     # Empty the results directory
+     os.system("rm -rf results/*")
+
+     # Set the benchmark parameters
+     kwargs = {}
+     kwargs["num_questions"] = 2
+     kwargs["tools"] = [tool_name]
+     if model_name:
+         kwargs["model"] = [model_name]
+     kwargs["api_keys"] = {}
+     if openai_api_key:
+         kwargs["api_keys"]["openai"] = openai_api_key
+     if anthropic_api_key:
+         kwargs["api_keys"]["anthropic"] = anthropic_api_key
+
+     kwargs["num_urls"] = 3
+     kwargs["num_words"] = 300
+     kwargs["provide_source_links"] = True
+
+     print(f"Running benchmark with the following parameters: {kwargs}")
+
+     # Run the benchmark
+     try:
+         run_benchmark(kwargs=kwargs)
+         return "completed"
+     except Exception as e:
+         return f"Error running benchmark: {e}"
+
+
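run_benchmark_main wraps benchmark.run_benchmark with a deliberately small configuration (num_questions=2, num_urls=3, num_words=300), so the "Run the Benchmark" tab is a quick smoke test rather than a full leaderboard run. A sketch of calling it directly, outside Gradio; the tool and model names come from the app.py dropdowns and the key is a placeholder:

from tabs.run_benchmark import run_benchmark_main

status = run_benchmark_main(
    tool_name="prediction-offline",
    model_name="gpt-3.5-turbo-0125",
    openai_api_key="sk-...",  # placeholder, use your own key
    anthropic_api_key=None,
)
print(status)  # "completed" on success, otherwise an error message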