albertvillanova committed
Commit c302a00
Parent: 0e05c2a

Add ner data files
README.md CHANGED
@@ -124,13 +124,13 @@ dataset_info:
           '12': O
   splits:
   - name: train
-    num_bytes: 19891953
+    num_bytes: 19891905
     num_examples: 21008
   - name: validation
-    num_bytes: 4937579
+    num_bytes: 4937563
     num_examples: 5000
-  download_size: 4308644
-  dataset_size: 24829532
+  download_size: 5265887
+  dataset_size: 24829468
 - config_name: nli
   features:
   - name: guid
@@ -313,6 +313,12 @@ dataset_info:
   download_size: 5012303
   dataset_size: 12148765
 configs:
+- config_name: ner
+  data_files:
+  - split: train
+    path: ner/train-*
+  - split: validation
+    path: ner/validation-*
 - config_name: nli
   data_files:
   - split: train
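With the configs entry added above, the ner split files resolve directly to the Parquet shards under ner/, so the config can be loaded without the legacy loading script. A minimal sketch, assuming the datasets library is installed and that "klue" is the repository id this card belongs to (substitute the actual repo id if it differs):

from datasets import load_dataset

# Load the ner config; its data_files point at ner/train-* and ner/validation-*.
# The dataset id "klue" is an assumption; adjust if the repo id is different.
ds = load_dataset("klue", "ner")
print(ds["train"].num_rows)       # 21008 examples per the updated metadata
print(ds["validation"].num_rows)  # 5000 examples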
dataset_infos.json CHANGED
@@ -197,22 +197,17 @@
     "features": {
       "sentence": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "tokens": {
         "feature": {
           "dtype": "string",
-          "id": null,
           "_type": "Value"
         },
-        "length": -1,
-        "id": null,
         "_type": "Sequence"
       },
       "ner_tags": {
         "feature": {
-          "num_classes": 13,
           "names": [
             "B-DT",
             "I-DT",
@@ -228,19 +223,13 @@
             "I-TI",
             "O"
           ],
-          "names_file": null,
-          "id": null,
           "_type": "ClassLabel"
         },
-        "length": -1,
-        "id": null,
         "_type": "Sequence"
       }
     },
-    "post_processed": null,
-    "supervised_keys": null,
-    "task_templates": null,
     "builder_name": "klue",
+    "dataset_name": "klue",
     "config_name": "ner",
     "version": {
       "version_str": "1.0.0",
@@ -252,27 +241,20 @@
     "splits": {
       "train": {
         "name": "train",
-        "num_bytes": 19891953,
+        "num_bytes": 19891905,
         "num_examples": 21008,
-        "dataset_name": "klue"
+        "dataset_name": null
       },
       "validation": {
         "name": "validation",
-        "num_bytes": 4937579,
+        "num_bytes": 4937563,
         "num_examples": 5000,
-        "dataset_name": "klue"
-      }
-    },
-    "download_checksums": {
-      "http://klue-benchmark.com.s3.amazonaws.com/app/Competitions/000069/data/klue-ner-v1.tar.gz": {
-        "num_bytes": 4308644,
-        "checksum": "848a89759ac6b7c149c9a00d820726fe2a140c22782201f1a40d856672e7ea8e"
+        "dataset_name": null
       }
     },
-    "download_size": 4308644,
-    "post_processing_size": null,
-    "dataset_size": 24829532,
-    "size_in_bytes": 29138176
+    "download_size": 5265887,
+    "dataset_size": 24829468,
+    "size_in_bytes": 30095355
   },
   "re": {
     "description": "KLUE (Korean Language Understanding Evaluation)\nKorean Language Understanding Evaluation (KLUE) benchmark is a series of datasets to evaluate natural language\nunderstanding capability of Korean language models. KLUE consists of 8 diverse and representative tasks, which are accessible\nto anyone without any restrictions. With ethical considerations in mind, we deliberately design annotation guidelines to obtain\nunambiguous annotations for all datasets. Futhermore, we build an evaluation system and carefully choose evaluations metrics\nfor every task, thus establishing fair comparison across Korean language models.\n",
ner/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:291ded874432fa413e5007ec3770b41bb2e76f44a4e42cc7cc22cb4ebd8c27b6
+size 4209983
ner/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:db2129c2ae38fc9a7de630d902cd41f80ac6174f620a8318b4d80569e31b9f84
+size 1055904
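Both files are committed as Git LFS pointers; the actual Parquet payloads are 4209983 and 1055904 bytes, which sum to the download_size of 5265887 recorded in the metadata above. A short sketch to inspect the shards locally, assuming pyarrow is installed and the LFS objects have been pulled:

import pyarrow.parquet as pq

# Row counts should line up with num_examples in the updated metadata.
# Paths are relative to a local checkout of this repository.
train = pq.ParquetFile("ner/train-00000-of-00001.parquet")
validation = pq.ParquetFile("ner/validation-00000-of-00001.parquet")
print(train.metadata.num_rows)       # expected 21008
print(validation.metadata.num_rows)  # expected 5000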