Dataset card metadata:
- Languages: English
- Multilinguality: monolingual
- Size Categories: 10K<n<100K
- Language Creators: found
- Annotations Creators: expert-generated
- Source Datasets: original
Commit 15ef643 (parent: f80dda9), committed by albertvillanova

Convert dataset to Parquet (#6)

- Convert dataset to Parquet (98ee45b75b2cb1d467f3ba11b7fcb16c0ba0e10b)
- Delete loading script (2ca24aa853751f3c5c72576177e7e9c42db56c10)
README.md CHANGED
@@ -45,8 +45,17 @@ dataset_info:
   - name: test
     num_bytes: 987712
     num_examples: 1750
-  download_size: 8556464
+  download_size: 2071007
   dataset_size: 9733172
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
+  - split: validation
+    path: data/validation-*
+  - split: test
+    path: data/test-*
 train-eval-index:
 - config: default
   task: token-classification
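The added `configs` block points the `datasets` library at the Parquet shards under `data/`, so the dataset can be loaded without a loading script. A minimal sketch of loading the converted dataset, assuming the repository id on the Hub is `acronym_identification`:

from datasets import load_dataset

# Load the Parquet-backed dataset; splits are resolved via the `configs` mapping above.
ds = load_dataset("acronym_identification")

print(ds)                        # DatasetDict with train/validation/test splits
print(ds["train"][0]["tokens"])  # token list for the first training example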
acronym_identification.py DELETED
@@ -1,93 +0,0 @@
-# coding=utf-8
-# Copyright 2020 HuggingFace Datasets Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Lint as: python3
-import json
-
-import datasets
-
-
-_DESCRIPTION = """\
-Acronym identification training and development sets for the acronym identification task at SDU@AAAI-21.
-"""
-_HOMEPAGE_URL = "https://github.com/amirveyseh/AAAI-21-SDU-shared-task-1-AI"
-_CITATION = """\
-@inproceedings{veyseh-et-al-2020-what,
-    title={{What Does This Acronym Mean? Introducing a New Dataset for Acronym Identification and Disambiguation}},
-    author={Amir Pouran Ben Veyseh and Franck Dernoncourt and Quan Hung Tran and Thien Huu Nguyen},
-    year={2020},
-    booktitle={Proceedings of COLING},
-    link={https://arxiv.org/pdf/2010.14678v1.pdf}
-}
-"""
-
-_TRAIN_URL = "https://raw.githubusercontent.com/amirveyseh/AAAI-21-SDU-shared-task-1-AI/master/dataset/train.json"
-_VALID_URL = "https://raw.githubusercontent.com/amirveyseh/AAAI-21-SDU-shared-task-1-AI/master/dataset/dev.json"
-_TEST_URL = "https://raw.githubusercontent.com/amirveyseh/AAAI-21-SDU-shared-task-1-AI/master/dataset/test.json"
-
-
-class AcronymIdentification(datasets.GeneratorBasedBuilder):
-    VERSION = datasets.Version("1.0.0")
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "id": datasets.Value("string"),
-                    "tokens": datasets.Sequence(datasets.Value("string")),
-                    "labels": datasets.Sequence(
-                        datasets.ClassLabel(names=["B-long", "B-short", "I-long", "I-short", "O"])
-                    ),
-                },
-            ),
-            supervised_keys=None,
-            homepage=_HOMEPAGE_URL,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        train_path = dl_manager.download_and_extract(_TRAIN_URL)
-        valid_path = dl_manager.download_and_extract(_VALID_URL)
-        test_path = dl_manager.download_and_extract(_TEST_URL)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={"datapath": train_path, "datatype": "train"},
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={"datapath": valid_path, "datatype": "valid"},
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={"datapath": test_path, "datatype": "test"},
-            ),
-        ]
-
-    def _generate_examples(self, datapath, datatype):
-        with open(datapath, encoding="utf-8") as f:
-            data = json.load(f)
-
-        for sentence_counter, d in enumerate(data):
-            resp = {
-                "id": d["id"],
-                "tokens": d["tokens"],
-            }
-            if datatype != "test":
-                resp["labels"] = d["labels"]
-            else:
-                resp["labels"] = ["O"] * len(d["tokens"])
-            yield sentence_counter, resp
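For reference, the deleted builder fetched the shared-task JSON files, emitted one example per sentence, and filled the unlabeled test split with "O" tags. A rough standalone sketch of the same fetch-and-parse step, assuming the GitHub raw URLs above are still reachable:

import json
import urllib.request

_TRAIN_URL = "https://raw.githubusercontent.com/amirveyseh/AAAI-21-SDU-shared-task-1-AI/master/dataset/train.json"

# Download and parse the raw training JSON, mirroring what _generate_examples consumed.
with urllib.request.urlopen(_TRAIN_URL) as f:
    data = json.load(f)

for i, d in enumerate(data[:3]):
    # Each record carries an id, its tokens, and BIO-style labels.
    print(i, d["id"], list(zip(d["tokens"], d["labels"]))[:5])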
data/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:36ea099f3d85fee1094dfcb67d54d1c695a1943e6ae0d88056b3f7bb697c6ad5
+size 206100
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cb4abbdd5ef83e0debbc89e65b129aef90a28272beeafb5f97a98413480c2883
+size 1657991
data/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:395e208ee3f995fd50665dcf25669c565d1014b38d1489914407320287198ab4
+size 206916
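The three added files are Git LFS pointers; the actual Parquet shards live in LFS storage and are resolved on download. A hedged sketch of inspecting one shard locally, assuming `huggingface_hub` and `pandas` (with `pyarrow`) are installed and the repository id is `acronym_identification`:

import pandas as pd
from huggingface_hub import hf_hub_download

# Resolve the LFS pointer to the real Parquet file and cache it locally.
path = hf_hub_download(
    repo_id="acronym_identification",
    filename="data/validation-00000-of-00001.parquet",
    repo_type="dataset",
)

df = pd.read_parquet(path)  # columns: id, tokens, labels
print(df.shape)
print(df.head())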