Update README.md
README.md
CHANGED
@@ -27,152 +27,151 @@ datasets:
 - nickrosh/Evol-Instruct-Code-80k-v1
 - open-phi/textbooks
 - open-phi/programming_books_llama
-
+- LLM360/CrystalCoderDatasets
 model-index:
 - name: CrystalChat
   results:
   - task:
       type: text-generation
     dataset:
       type: openai_humanneval
       name: OpenAI HumanEval
     metrics:
-
-
-
-
-
-
+    - name: pass@1 (t=0.01)
+      type: pass@1
+      value: 31.707
+    - name: pass@10 (t=0.8)
+      type: pass@10
+      value: 65.755
   - task:
       type: text-generation
     dataset:
       type: mbpp
       name: Mostly Basic Python Problems (mbpp)
     metrics:
-
-
-
-
-
-
-
+    - name: pass@1 (t=0.01)
+      type: pass@1
+      value: 39.4
+    - name: pass@10 (t=0.8)
+      type: pass@10
+      value: 59.895
   - task:
       type: multiple-choice
     dataset:
       type: race
       name: RACE
     metrics:
-
-
-
+    - name: accuracy
+      type: accuracy
+      value: 41.148
   - task:
       type: multiple-choice
     dataset:
       type: mmlu
       name: Measuring Massive Multitask Language Understanding (MMLU)
     metrics:
-
-
-
+    - name: accuracy
+      type: accuracy
+      value: 52.789
   - task:
       type: multiple-choice
     dataset:
       type: truthful_qa
       name: Truthful QA
     metrics:
-
-
-
+    - name: accuracy
+      type: accuracy
+      value: 47.29
   - task:
       type: multiple-choice
     dataset:
-      type:
-      name:
+      type: winogrande
+      name: Winogrande
     metrics:
-
-
-
-
-
-
+    - name: accuracy (5 shot)
+      type: accuracy
+      value: 70.639
+    - name: accuracy (0 shot)
+      type: accuracy
+      value: 68.114
   - task:
       type: multiple-choice
     dataset:
-      type:
-      name:
+      type: copa
+      name: COPA
     metrics:
-
-
-
+    - name: accuracy
+      type: accuracy
+      value: 85
   - task:
       type: text-classification
     dataset:
       type: boolq
       name: Boolq
     metrics:
-
-
-
+    - name: accuracy
+      type: accuracy
+      value: 82.783
   - task:
       type: question-answering
     dataset:
       type: openbookqa
       name: Openbook QA
     metrics:
-
-
-
+    - name: accuracy
+      type: accuracy
+      value: 42
   - task:
       type: multiple-choice
     dataset:
       type: hellaSwag
       name: HellaSwag
     metrics:
-
-
-
-
-
-
+    - name: accuracy (10-shot)
+      type: accuracy
+      value: 76.12
+    - name: accuracy (0-shot)
+      type: accuracy
+      value: 73.312
   - task:
       type: question-answering
     dataset:
       type: piqa
       name: PIQA
     metrics:
-
-
-
+    - name: accuracy
+      type: accuracy
+      value: 77.856
   - task:
       type: question-answering
     dataset:
       type: ai2_arc
       name: ARC (Easy)
     metrics:
-
-
-
+    - name: accuracy
+      type: accuracy
+      value: 70.328
   - task:
       type: question-answering
     dataset:
       type: ai2_arc
       name: ARC (Challenge)
     metrics:
-
-
-
-
-
-
+    - name: accuracy (25-shot)
+      type: accuracy
+      value: 51.706
+    - name: accuracy (0-shot)
+      type: accuracy
+      value: 44.625
   - task:
       type: text-generation
     dataset:
       type: gsm8k
       name: GSM8K (Grade School Math 8K)
     metrics:
-
-
-
+    - name: Accuracy (5 shot)
+      type: accuracy
+      value: 28.052
 ---

 # CrystalChat
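The `model-index` block filled in above follows the Hugging Face model card metadata schema, so the reported benchmark scores are machine-readable. A minimal sketch of reading them back, assuming a local copy of this README.md and PyYAML installed (the file path and variable names are illustrative, not part of this commit):

```python
import yaml

# Read the model card and split off the YAML front matter, which sits
# between the opening and closing "---" fences (path is hypothetical).
with open("README.md", encoding="utf-8") as f:
    _, front_matter, _ = f.read().split("---", 2)

card = yaml.safe_load(front_matter)

# Walk the model-index schema: each entry holds a list of per-task
# results, and each result holds a dataset plus one or more metrics.
for entry in card["model-index"]:
    for result in entry["results"]:
        dataset = result["dataset"]["name"]
        for metric in result["metrics"]:
            print(f"{dataset}: {metric['name']} = {metric['value']}")
```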