nouamanetazi HF staff committed on
Commit
e893bbb
1 Parent(s): 9b3b05a

add cache and instructions

Browse files
Files changed (1) hide show
  1. app.py +11 -1
app.py CHANGED
@@ -16,6 +16,7 @@ def download_submissions():
16
  )
17
  return submissions
18
 
 
19
  def format_submissions(submissions):
20
  submission_data = {**{"Submitter": []}, **{"Submission Name": []}, **{"Submission Date": []}, **{t: [] for t in TASKS}}
21
  # The following picks the latest submissions which adhere to the model card schema
@@ -52,7 +53,16 @@ st.set_page_config(layout="wide")
52
  st.title("MTEB: Massive Text Embedding Benchmark")
53
  st.markdown(
54
  """
55
- ...
 
 
 
 
 
 
 
 
 
56
  """
57
  )
58
  submissions = download_submissions()
 
16
  )
17
  return submissions
18
 
19
+ @st.cache
20
  def format_submissions(submissions):
21
  submission_data = {**{"Submitter": []}, **{"Submission Name": []}, **{"Submission Date": []}, **{t: [] for t in TASKS}}
22
  # The following picks the latest submissions which adhere to the model card schema
 
53
  st.title("MTEB: Massive Text Embedding Benchmark")
54
  st.markdown(
55
  """
56
+ To submit to MTEB, please follow the following instructions:
57
+ - Publish your .csv MTEB scores to a public Hugging Face Hub Dataset. The .csv files must be at the root of the repo.
58
+ - Add the following to the top of your model card:
59
+ ```
60
+ ---
61
+ benchmark: mteb
62
+ type: evaluation
63
+ ---
64
+ ```
65
+ That's all! [Here's an example](https://huggingface.co/datasets/mteb/mteb-example-submission/tree/main) of how your repo should look like. You should now be able to see your results in the leaderboard below.
66
  """
67
  )
68
  submissions = download_submissions()