meg-huggingface committed
Commit ee330de
2 Parent(s): 9bd728a b85f63e

Merge branch 'main' of hf.co:spaces/Bias-Leaderboard/leaderboard into main

Files changed (2)
  1. requirements.txt +1 -1
  2. src/about.py +2 -1
requirements.txt CHANGED
@@ -13,6 +13,6 @@ requests==2.28.2
  tqdm==4.65.0
  transformers==4.35.2
  tokenizers>=0.15.0
- git+https://github.com/EleutherAI/lm-evaluation-harness.git@b281b0921b636bc36ad05c0b0b0763bd6dd43463#egg=lm-eval
+ git+https://github.com/meg-huggingface/lm-evaluation-harness.git@b281b0921b636bc36ad05c0b0b0763bd6dd43463#egg=lm-eval
  accelerate==0.24.1
  sentencepiece
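
For reference, the added requirement pins the Space to the meg-huggingface fork of lm-evaluation-harness at the same commit as before. It is equivalent to installing the fork directly with pip (a minimal sketch, assuming pip with VCS-URL support is available in the Space's build environment):

```pip install "git+https://github.com/meg-huggingface/lm-evaluation-harness.git@b281b0921b636bc36ad05c0b0b0763bd6dd43463#egg=lm-eval"```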
src/about.py CHANGED
@@ -33,7 +33,8 @@ LLM_BENCHMARKS_TEXT = f"""
 
  ## Reproducibility
  To reproduce these results, here is the command you can run:
- ```python main.py --model=hf-causal-experimental --model_args="pretrained=<your_model>,use_accelerate=True,revision=<your_model_revision>" --tasks=toxigen --num_fewshot=<n_few_shot> --batch_size=1 --output_path=<output_path>```
+
+ ```python main.py --model=hf-causal-experimental --model_args="pretrained=<your_model>,use_accelerate=True" --tasks=toxigen --batch_size=1 --output_path=<output_path>```
 
  """
 
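
For context, here is a sketch of how this section of src/about.py reads after the change, reconstructed from the hunk above. Only the lines shown in the diff are certain; anything between the LLM_BENCHMARKS_TEXT assignment (named in the hunk header) and the "## Reproducibility" heading is omitted.

````python
# Sketch reconstructed from the hunk above, not the full file: the changed lines
# sit inside the LLM_BENCHMARKS_TEXT f-string whose assignment appears in the
# hunk header. Content between the assignment and the heading is omitted here.
LLM_BENCHMARKS_TEXT = f"""
## Reproducibility
To reproduce these results, here is the command you can run:

```python main.py --model=hf-causal-experimental --model_args="pretrained=<your_model>,use_accelerate=True" --tasks=toxigen --batch_size=1 --output_path=<output_path>```

"""
````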