albertvillanova committed
Commit 2ca24aa
1 Parent(s): 98ee45b

Delete loading script

Files changed (1)
  1. acronym_identification.py +0 -93
acronym_identification.py DELETED
@@ -1,93 +0,0 @@
- # coding=utf-8
- # Copyright 2020 HuggingFace Datasets Authors.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- # Lint as: python3
- import json
-
- import datasets
-
-
- _DESCRIPTION = """\
- Acronym identification training and development sets for the acronym identification task at SDU@AAAI-21.
- """
- _HOMEPAGE_URL = "https://github.com/amirveyseh/AAAI-21-SDU-shared-task-1-AI"
- _CITATION = """\
- @inproceedings{veyseh-et-al-2020-what,
-     title={{What Does This Acronym Mean? Introducing a New Dataset for Acronym Identification and Disambiguation}},
-     author={Amir Pouran Ben Veyseh and Franck Dernoncourt and Quan Hung Tran and Thien Huu Nguyen},
-     year={2020},
-     booktitle={Proceedings of COLING},
-     link={https://arxiv.org/pdf/2010.14678v1.pdf}
- }
- """
-
- _TRAIN_URL = "https://raw.githubusercontent.com/amirveyseh/AAAI-21-SDU-shared-task-1-AI/master/dataset/train.json"
- _VALID_URL = "https://raw.githubusercontent.com/amirveyseh/AAAI-21-SDU-shared-task-1-AI/master/dataset/dev.json"
- _TEST_URL = "https://raw.githubusercontent.com/amirveyseh/AAAI-21-SDU-shared-task-1-AI/master/dataset/test.json"
-
-
- class AcronymIdentification(datasets.GeneratorBasedBuilder):
-     VERSION = datasets.Version("1.0.0")
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "id": datasets.Value("string"),
-                     "tokens": datasets.Sequence(datasets.Value("string")),
-                     "labels": datasets.Sequence(
-                         datasets.ClassLabel(names=["B-long", "B-short", "I-long", "I-short", "O"])
-                     ),
-                 },
-             ),
-             supervised_keys=None,
-             homepage=_HOMEPAGE_URL,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         train_path = dl_manager.download_and_extract(_TRAIN_URL)
-         valid_path = dl_manager.download_and_extract(_VALID_URL)
-         test_path = dl_manager.download_and_extract(_TEST_URL)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={"datapath": train_path, "datatype": "train"},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 gen_kwargs={"datapath": valid_path, "datatype": "valid"},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={"datapath": test_path, "datatype": "test"},
-             ),
-         ]
-
-     def _generate_examples(self, datapath, datatype):
-         with open(datapath, encoding="utf-8") as f:
-             data = json.load(f)
-
-         for sentence_counter, d in enumerate(data):
-             resp = {
-                 "id": d["id"],
-                 "tokens": d["tokens"],
-             }
-             if datatype != "test":
-                 resp["labels"] = d["labels"]
-             else:
-                 resp["labels"] = ["O"] * len(d["tokens"])
-             yield sentence_counter, resp
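
For reference, `_generate_examples` in the deleted script parses each downloaded JSON file as a flat list of records. A hedged sketch of the expected shape, where the field names come from the script itself but the values are invented for illustration:

record = {
    # Fields read by _generate_examples; these example values are made up.
    "id": "TR-0",
    "tokens": ["What", "does", "CNN", "stand", "for", "?"],
    "labels": ["O", "O", "B-short", "O", "O", "O"],  # absent in test.json
}

Records in test.json carry no gold labels, which is why the script filled the test split with "O" placeholders of matching length.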
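With the script removed, `datasets` no longer executes repository code to build this dataset; the Hub serves it from data files instead (typically the auto-converted Parquet copies — an assumption here, since this commit only shows the deletion). A minimal loading sketch using the standard `datasets` API, with the split and feature names taken from the deleted script:

from datasets import load_dataset

# Load by repo id; without a loading script, `datasets` reads the repo's
# data files directly rather than running Python fetched from the Hub.
ds = load_dataset("acronym_identification")

example = ds["train"][0]
print(example["id"], example["tokens"][:8])

# "labels" is a Sequence(ClassLabel); decode integer ids back to BIO tags.
label_feature = ds["train"].features["labels"].feature
print([label_feature.int2str(i) for i in example["labels"][:8]])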