eduagarcia committed
Commit d0a1998 • 1 Parent(s): eb67d80

New eval_version 1.1.0, updating 131 models

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. 01-ai/Yi-34B-200K_eval_request_False_bfloat16_Original.json +1 -1
  2. 01-ai/Yi-34B_eval_request_False_bfloat16_Original.json +1 -1
  3. 01-ai/Yi-6B-200K_eval_request_False_bfloat16_Original.json +1 -1
  4. 01-ai/Yi-6B_eval_request_False_bfloat16_Original.json +1 -1
  5. 22h/cabrita_7b_pt_850000_eval_request_False_float16_Original.json +1 -1
  6. 22h/open-cabrita3b_eval_request_False_float16_Original.json +1 -1
  7. AI-Sweden-Models/gpt-sw3-20b_eval_request_False_float16_Original.json +1 -1
  8. AI-Sweden-Models/gpt-sw3-40b_eval_request_False_float16_Original.json +1 -1
  9. AI-Sweden-Models/gpt-sw3-6.7b-v2_eval_request_False_float16_Original.json +1 -1
  10. AI-Sweden-Models/gpt-sw3-6.7b_eval_request_False_float16_Original.json +1 -1
  11. BAAI/Aquila-7B_eval_request_False_float16_Original.json +1 -1
  12. BAAI/Aquila2-34B_eval_request_False_bfloat16_Original.json +1 -1
  13. BAAI/Aquila2-7B_eval_request_False_float16_Original.json +1 -1
  14. DAMO-NLP-MT/polylm-1.7b_eval_request_False_float16_Original.json +1 -1
  15. Deci/DeciLM-7B_eval_request_False_bfloat16_Original.json +1 -1
  16. EleutherAI/gpt-j-6b_eval_request_False_float16_Original.json +1 -1
  17. EleutherAI/gpt-neo-1.3B_eval_request_False_float16_Original.json +1 -1
  18. EleutherAI/gpt-neo-125m_eval_request_False_float16_Original.json +1 -1
  19. EleutherAI/gpt-neo-2.7B_eval_request_False_float16_Original.json +1 -1
  20. EleutherAI/gpt-neox-20b_eval_request_False_float16_Original.json +1 -1
  21. EleutherAI/polyglot-ko-12.8b_eval_request_False_float16_Original.json +1 -1
  22. EleutherAI/pythia-12b-deduped_eval_request_False_float16_Original.json +1 -1
  23. EleutherAI/pythia-14m_eval_request_False_float16_Original.json +1 -1
  24. EleutherAI/pythia-160m-deduped_eval_request_False_float16_Original.json +1 -1
  25. EleutherAI/pythia-1b-deduped_eval_request_False_float16_Original.json +1 -1
  26. EleutherAI/pythia-2.8b-deduped_eval_request_False_float16_Original.json +1 -1
  27. EleutherAI/pythia-410m-deduped_eval_request_False_float16_Original.json +1 -1
  28. EleutherAI/pythia-6.9b-deduped_eval_request_False_float16_Original.json +1 -1
  29. EleutherAI/pythia-70m-deduped_eval_request_False_float16_Original.json +1 -1
  30. HeyLucasLeao/gpt-neo-small-portuguese_eval_request_False_float16_Original.json +1 -1
  31. NucleusAI/nucleus-22B-token-500B_eval_request_False_float16_Original.json +1 -1
  32. OpenLLM-France/Claire-7B-0.1_eval_request_False_bfloat16_Original.json +1 -1
  33. OpenLLM-France/Claire-Mistral-7B-0.1_eval_request_False_bfloat16_Original.json +1 -1
  34. OrionStarAI/Orion-14B-Base_eval_request_False_bfloat16_Original.json +1 -1
  35. Qwen/Qwen-1_8B_eval_request_False_bfloat16_Original.json +1 -1
  36. Qwen/Qwen-72B_eval_request_False_bfloat16_Original.json +1 -1
  37. Qwen/Qwen-7B_eval_request_False_bfloat16_Original.json +1 -1
  38. Skywork/Skywork-13B-base_eval_request_False_bfloat16_Original.json +1 -1
  39. THUDM/chatglm3-6b-base_eval_request_False_float16_Original.json +1 -1
  40. TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T_eval_request_False_float16_Original.json +1 -1
  41. Unbabel/TowerBase-7B-v0.1_eval_request_False_bfloat16_Original.json +1 -1
  42. baichuan-inc/Baichuan-7B_eval_request_False_float16_Original.json +1 -1
  43. baichuan-inc/Baichuan2-13B-Base_eval_request_False_bfloat16_Original.json +1 -1
  44. baichuan-inc/Baichuan2-7B-Base_eval_request_False_bfloat16_Original.json +1 -1
  45. bigscience/bloom-1b7_eval_request_False_float16_Original.json +1 -1
  46. bigscience/bloom-3b_eval_request_False_float16_Original.json +1 -1
  47. bigscience/bloom-560m_eval_request_False_float16_Original.json +1 -1
  48. bigscience/bloom-7b1_eval_request_False_float16_Original.json +1 -1
  49. deepseek-ai/deepseek-llm-67b-base_eval_request_False_bfloat16_Original.json +1 -1
  50. deepseek-ai/deepseek-llm-7b-base_eval_request_False_bfloat16_Original.json +1 -1
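Every diff below makes the same one-line change, flipping "status" from "FINISHED" to "PENDING_NEW_EVAL" so the model is re-queued under the new eval_version 1.1.0. For illustration only, a minimal Python sketch of the kind of batch script that could produce such a commit (hypothetical; the directory layout, glob pattern, and JSON indentation are assumptions, not part of this commit):

# Hypothetical batch-update sketch (not part of this commit): re-queue every
# finished eval request so it is re-evaluated under the new eval_version.
import json
from pathlib import Path

requests_dir = Path(".")  # assumption: repo root holding <org>/<model>_eval_request_*.json

for path in sorted(requests_dir.glob("*/*_eval_request_*.json")):
    data = json.loads(path.read_text(encoding="utf-8"))
    if data.get("status") == "FINISHED":
        data["status"] = "PENDING_NEW_EVAL"  # picked up again by the eval queue
        path.write_text(json.dumps(data, indent=2, ensure_ascii=False) + "\n", encoding="utf-8")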
01-ai/Yi-34B-200K_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 34.389,
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:18:19Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
01-ai/Yi-34B_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 34.389,
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:05:39Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
01-ai/Yi-6B-200K_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 6.061,
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:18:12Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
01-ai/Yi-6B_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 6.061,
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:04:05Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
22h/cabrita_7b_pt_850000_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 7.0,
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-11T13:34:40Z",
  "model_type": "🆎 : language adapted models (FP, FT, ...)",
  "source": "script",
22h/open-cabrita3b_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 3.0,
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-11T13:34:36Z",
  "model_type": "🆎 : language adapted models (FP, FT, ...)",
  "source": "script",
AI-Sweden-Models/gpt-sw3-20b_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 20.918,
  "architectures": "GPT2LMHeadModel",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:15:38Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
AI-Sweden-Models/gpt-sw3-40b_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 39.927,
  "architectures": "GPT2LMHeadModel",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:15:47Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
AI-Sweden-Models/gpt-sw3-6.7b-v2_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 7.111,
  "architectures": "GPT2LMHeadModel",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:15:31Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
AI-Sweden-Models/gpt-sw3-6.7b_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 7.111,
  "architectures": "GPT2LMHeadModel",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:15:23Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
BAAI/Aquila-7B_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 7.0,
  "architectures": "AquilaModel",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:09:00Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
BAAI/Aquila2-34B_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 34.0,
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:10:17Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
BAAI/Aquila2-7B_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 7.0,
  "architectures": "AquilaModel",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:09:07Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
DAMO-NLP-MT/polylm-1.7b_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 1.7,
  "architectures": "GPT2LMHeadModel",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-11T13:34:48Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
Deci/DeciLM-7B_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 7.044,
  "architectures": "DeciLMForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:06:34Z",
  "model_type": "🔶 : fine-tuned",
  "source": "script",
EleutherAI/gpt-j-6b_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 6.0,
  "architectures": "GPTJForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:12:19Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
EleutherAI/gpt-neo-1.3B_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 1.366,
  "architectures": "GPTNeoForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:12:06Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
EleutherAI/gpt-neo-125m_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 0.15,
  "architectures": "GPTNeoForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:11:59Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
EleutherAI/gpt-neo-2.7B_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 2.718,
  "architectures": "GPTNeoForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:12:14Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
EleutherAI/gpt-neox-20b_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 20.739,
  "architectures": "GPTNeoXForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:12:26Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
EleutherAI/polyglot-ko-12.8b_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 13.061,
  "architectures": "GPTNeoXForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:15:01Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
EleutherAI/pythia-12b-deduped_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 12.0,
  "architectures": "GPTNeoXForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:11:53Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
EleutherAI/pythia-14m_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 0.039,
  "architectures": "GPTNeoXForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:11:12Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
EleutherAI/pythia-160m-deduped_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 0.213,
  "architectures": "GPTNeoXForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:11:23Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
EleutherAI/pythia-1b-deduped_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 1.079,
  "architectures": "GPTNeoXForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:11:36Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
EleutherAI/pythia-2.8b-deduped_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 2.909,
  "architectures": "GPTNeoXForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:11:43Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
EleutherAI/pythia-410m-deduped_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 0.506,
  "architectures": "GPTNeoXForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:11:30Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
EleutherAI/pythia-6.9b-deduped_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 6.9,
  "architectures": "GPTNeoXForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:11:48Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
EleutherAI/pythia-70m-deduped_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 0.096,
  "architectures": "GPTNeoXForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:11:17Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
HeyLucasLeao/gpt-neo-small-portuguese_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 0,
  "architectures": "GPTNeoForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:14:26Z",
  "model_type": "🆎 : language adapted models (FP, FT, ...)",
  "source": "script",
NucleusAI/nucleus-22B-token-500B_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 21.828,
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:11:04Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
OpenLLM-France/Claire-7B-0.1_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 7.0,
  "architectures": "FalconForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:16:00Z",
  "model_type": "🆎 : language adapted models (FP, FT, ...)",
  "source": "script",
OpenLLM-France/Claire-Mistral-7B-0.1_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 7.0,
  "architectures": "MistralForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:15:54Z",
  "model_type": "🆎 : language adapted models (FP, FT, ...)",
  "source": "script",
OrionStarAI/Orion-14B-Base_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 14.0,
  "architectures": "OrionForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:08:40Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
Qwen/Qwen-1_8B_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 1.837,
  "architectures": "QWenLMHeadModel",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:07:14Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
Qwen/Qwen-72B_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 72.288,
  "architectures": "QWenLMHeadModel",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:09:47Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
Qwen/Qwen-7B_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 7.721,
  "architectures": "QWenLMHeadModel",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:07:22Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
Skywork/Skywork-13B-base_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 13.0,
  "architectures": "SkyworkForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:08:06Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
THUDM/chatglm3-6b-base_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 6.0,
  "architectures": "ChatGLMModel",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:09:16Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 1.1,
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T22:59:37Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
Unbabel/TowerBase-7B-v0.1_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 6.738,
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:04:12Z",
  "model_type": "🆎 : language adapted models (FP, FT, ...)",
  "source": "script",
baichuan-inc/Baichuan-7B_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 7.0,
  "architectures": "BaiChuanForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:08:13Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
baichuan-inc/Baichuan2-13B-Base_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 13.0,
  "architectures": "BaichuanForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:08:33Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
baichuan-inc/Baichuan2-7B-Base_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 7.0,
  "architectures": "BaichuanForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:08:25Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
bigscience/bloom-1b7_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 1.722,
  "architectures": "BloomForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:04:30Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
bigscience/bloom-3b_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 3.003,
  "architectures": "BloomForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:04:36Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
bigscience/bloom-560m_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 0.559,
  "architectures": "BloomForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:04:24Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
bigscience/bloom-7b1_eval_request_False_float16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 7.069,
  "architectures": "BloomForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:04:44Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
deepseek-ai/deepseek-llm-67b-base_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 67.0,
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:10:09Z",
  "model_type": "🟢 : pretrained",
  "source": "script",
deepseek-ai/deepseek-llm-7b-base_eval_request_False_bfloat16_Original.json CHANGED
@@ -7,7 +7,7 @@
  "params": 7.0,
  "architectures": "LlamaForCausalLM",
  "weight_type": "Original",
- "status": "FINISHED",
+ "status": "PENDING_NEW_EVAL",
  "submitted_time": "2024-02-05T23:08:46Z",
  "model_type": "🟢 : pretrained",
  "source": "script",