Files changed (1)
  1. README.md +154 -41
README.md CHANGED
@@ -7,16 +7,16 @@ language:
  - vi
  - ms
  - lo
+ license: apache-2.0
+ tags:
+ - multilingual
+ - sea
+ - sailor
  datasets:
  - cerebras/SlimPajama-627B
  - Skywork/SkyPile-150B
  - allenai/MADLAD-400
  - cc100
- tags:
- - multilingual
- - sea
- - sailor
- license: apache-2.0
  base_model: Qwen/Qwen1.5-7B
  model-index:
  - name: Sailor-7B
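
The hunk above only moves the `license` and `tags` fields ahead of `datasets`; no values change, and YAML mappings are order-insensitive, so the parsed metadata is identical. As a quick check, here is a minimal sketch (assuming the `huggingface_hub` Python client and network access; `sail/Sailor-7B` is the repository this card belongs to) that loads the card and reads those fields back:

```python
# Minimal sketch: read the card metadata back after the front-matter reordering.
# Assumes `pip install huggingface_hub` and access to the Hugging Face Hub.
from huggingface_hub import ModelCard

card = ModelCard.load("sail/Sailor-7B")  # downloads README.md and parses its YAML front matter
meta = card.data.to_dict()

print(meta["license"])     # apache-2.0
print(meta["tags"])        # ['multilingual', 'sea', 'sailor']
print(meta["base_model"])  # Qwen/Qwen1.5-7B
```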
@@ -27,117 +27,217 @@ model-index:
        name: XQuAD-Thai
        type: XQuAD-Thai
      metrics:
-     - name: EM (3-Shot)
-       type: EM (3-Shot)
+     - type: EM (3-Shot)
        value: 57.88
+       name: EM (3-Shot)
-     - name: F1 (3-Shot)
-       type: F1 (3-Shot)
+     - type: F1 (3-Shot)
        value: 71.06
+       name: F1 (3-Shot)
    - task:
        type: text-generation
      dataset:
        name: TyDiQA-Indonesian
        type: TyDiQA-Indonesian
      metrics:
-     - name: EM (3-Shot)
-       type: EM (3-Shot)
+     - type: EM (3-Shot)
        value: 60.53
+       name: EM (3-Shot)
-     - name: F1 (3-Shot)
-       type: F1 (3-Shot)
+     - type: F1 (3-Shot)
        value: 75.42
+       name: F1 (3-Shot)
    - task:
        type: text-generation
      dataset:
        name: XQuAD-Vietnamese
        type: XQuAD-Vietnamese
      metrics:
-     - name: EM (3-Shot)
-       type: EM (3-Shot)
+     - type: EM (3-Shot)
        value: 53.81
+       name: EM (3-Shot)
-     - name: F1 (3-Shot)
-       type: F1 (3-Shot)
+     - type: F1 (3-Shot)
        value: 74.62
+       name: F1 (3-Shot)
    - task:
        type: text-generation
      dataset:
        name: XCOPA-Thai
        type: XCOPA-Thai
      metrics:
-     - name: EM (3-Shot)
-       type: EM (3-Shot)
-       value: 59.00
+     - type: EM (3-Shot)
+       value: 59.0
+       name: EM (3-Shot)
    - task:
        type: text-generation
      dataset:
        name: XCOPA-Indonesian
        type: XCOPA-Indonesian
      metrics:
-     - name: EM (3-Shot)
-       type: EM (3-Shot)
-       value: 72.20
+     - type: EM (3-Shot)
+       value: 72.2
+       name: EM (3-Shot)
    - task:
        type: text-generation
      dataset:
        name: XCOPA-Vietnamese
        type: XCOPA-Vietnamese
      metrics:
-     - name: EM (3-Shot)
-       type: EM (3-Shot)
-       value: 72.20
+     - type: EM (3-Shot)
+       value: 72.2
+       name: EM (3-Shot)
    - task:
        type: text-generation
      dataset:
        name: M3Exam-Thai
        type: M3Exam-Thai
      metrics:
-     - name: EM (3-Shot)
-       type: EM (3-Shot)
-       value: 30.00
+     - type: EM (3-Shot)
+       value: 30.0
+       name: EM (3-Shot)
    - task:
        type: text-generation
      dataset:
        name: M3Exam-Indonesian
        type: M3Exam-Indonesian
      metrics:
-     - name: EM (3-Shot)
-       type: EM (3-Shot)
+     - type: EM (3-Shot)
        value: 32.88
+       name: EM (3-Shot)
    - task:
        type: text-generation
      dataset:
        name: M3Exam-Vietnamese
        type: M3Exam-Vietnamese
      metrics:
-     - name: EM (3-Shot)
-       type: EM (3-Shot)
-       value: 44.10
+     - type: EM (3-Shot)
+       value: 44.1
+       name: EM (3-Shot)
    - task:
        type: text-generation
      dataset:
        name: BELEBELE-Thai
        type: BELEBELE-Thai
      metrics:
-     - name: EM (3-Shot)
-       type: EM (3-Shot)
+     - type: EM (3-Shot)
        value: 41.56
+       name: EM (3-Shot)
    - task:
        type: text-generation
      dataset:
        name: BELEBELE-Indonesian
        type: BELEBELE-Indonesian
      metrics:
-     - name: EM (3-Shot)
-       type: EM (3-Shot)
+     - type: EM (3-Shot)
        value: 44.33
+       name: EM (3-Shot)
    - task:
        type: text-generation
      dataset:
        name: BELEBELE-Vietnamese
        type: BELEBELE-Vietnamese
      metrics:
-     - name: EM (3-Shot)
-       type: EM (3-Shot)
+     - type: EM (3-Shot)
        value: 45.33
+       name: EM (3-Shot)
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: AI2 Reasoning Challenge (25-Shot)
+       type: ai2_arc
+       config: ARC-Challenge
+       split: test
+       args:
+         num_few_shot: 25
+     metrics:
+     - type: acc_norm
+       value: 49.83
+       name: normalized accuracy
+     source:
+       url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=sail/Sailor-7B
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: HellaSwag (10-Shot)
+       type: hellaswag
+       split: validation
+       args:
+         num_few_shot: 10
+     metrics:
+     - type: acc_norm
+       value: 76.21
+       name: normalized accuracy
+     source:
+       url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=sail/Sailor-7B
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: MMLU (5-Shot)
+       type: cais/mmlu
+       config: all
+       split: test
+       args:
+         num_few_shot: 5
+     metrics:
+     - type: acc
+       value: 54.65
+       name: accuracy
+     source:
+       url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=sail/Sailor-7B
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: TruthfulQA (0-shot)
+       type: truthful_qa
+       config: multiple_choice
+       split: validation
+       args:
+         num_few_shot: 0
+     metrics:
+     - type: mc2
+       value: 40.08
+     source:
+       url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=sail/Sailor-7B
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: Winogrande (5-shot)
+       type: winogrande
+       config: winogrande_xl
+       split: validation
+       args:
+         num_few_shot: 5
+     metrics:
+     - type: acc
+       value: 69.14
+       name: accuracy
+     source:
+       url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=sail/Sailor-7B
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: GSM8k (5-shot)
+       type: gsm8k
+       config: main
+       split: test
+       args:
+         num_few_shot: 5
+     metrics:
+     - type: acc
+       value: 33.36
+       name: accuracy
+     source:
+       url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=sail/Sailor-7B
+       name: Open LLM Leaderboard
  ---

  <div align="center">
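
Within the pre-existing `metrics` entries, the hunk above only reorders the `name`/`type`/`value` keys and trims trailing zeros (e.g. `59.00` becomes `59.0`); a YAML parser produces the same data either way. The genuinely new content is the six Open LLM Leaderboard task entries appended at the end of the hunk. A minimal sketch (plain PyYAML, with the before/after snippets copied from the hunk) confirming the reordering is a semantic no-op:

```python
# Minimal sketch: the old and new orderings of a metric entry parse to the same data.
# Assumes PyYAML (`pip install pyyaml`); the snippets are taken from the hunk above.
import yaml

before = """
- name: EM (3-Shot)
  type: EM (3-Shot)
  value: 57.88
"""

after = """
- type: EM (3-Shot)
  value: 57.88
  name: EM (3-Shot)
"""

# Key order is irrelevant in YAML mappings, so the two snippets are equivalent.
assert yaml.safe_load(before) == yaml.safe_load(after)
print(yaml.safe_load(after))  # [{'type': 'EM (3-Shot)', 'value': 57.88, 'name': 'EM (3-Shot)'}]
```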
@@ -210,4 +310,17 @@ No restrict on the research and the commercial use, but should comply with the [

  # Contact Us

- If you have any questions, please raise an issue or contact us at [doulx@sea.com](mailto:doulx@sea.com) or [liuqian@sea.com](mailto:liuqian@sea.com).
+ If you have any questions, please raise an issue or contact us at [doulx@sea.com](mailto:doulx@sea.com) or [liuqian@sea.com](mailto:liuqian@sea.com).
+ # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
+ Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_sail__Sailor-7B)
+
+ | Metric |Value|
+ |---------------------------------|----:|
+ |Avg. |53.88|
+ |AI2 Reasoning Challenge (25-Shot)|49.83|
+ |HellaSwag (10-Shot) |76.21|
+ |MMLU (5-Shot) |54.65|
+ |TruthfulQA (0-shot) |40.08|
+ |Winogrande (5-shot) |69.14|
+ |GSM8k (5-shot) |33.36|
+
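
The `Avg.` row in the table added above is simply the arithmetic mean of the six leaderboard scores. A quick sketch verifying the reported value:

```python
# Minimal sketch: recompute the Avg. row of the Open LLM Leaderboard table above.
scores = {
    "AI2 Reasoning Challenge (25-Shot)": 49.83,
    "HellaSwag (10-Shot)": 76.21,
    "MMLU (5-Shot)": 54.65,
    "TruthfulQA (0-shot)": 40.08,
    "Winogrande (5-shot)": 69.14,
    "GSM8k (5-shot)": 33.36,
}

average = sum(scores.values()) / len(scores)
print(round(average, 2))  # 53.88, matching the Avg. entry in the table
```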