albertvillanova (HF staff) committed
Commit
ba956bc
1 Parent(s): 018cc32

Convert dataset to Parquet (#4)


- Convert dataset to Parquet (660cb7ffe80f230d9c26cbe6fdb2cea0337bb5e2)
- Delete loading script (41d9710294cfd6f09fdeb22b534f9cb319b710dc)
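
With the loading script gone, `datasets` resolves this dataset from the Parquet shards declared in the README `configs` block below. A minimal usage sketch, assuming the repository id on the Hub is `ehealth_kd` (the full repository path is not shown on this page):

```python
from datasets import load_dataset

# Loads the "ehealth_kd" config; splits map to the Parquet shards added
# in this commit under ehealth_kd/ (train / validation / test).
ds = load_dataset("ehealth_kd", "ehealth_kd")
print(ds)                          # DatasetDict with 800 / 199 / 100 examples
print(ds["train"][0]["sentence"])  # first annotated Spanish sentence
```

Because `default: true` is set on the config, the config-name argument can also be omitted.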

README.md CHANGED
@@ -21,6 +21,7 @@ pretty_name: eHealth-KD
 tags:
 - relation-prediction
 dataset_info:
+  config_name: ehealth_kd
   features:
   - name: sentence
     dtype: string
@@ -67,19 +68,28 @@ dataset_info:
       dtype: string
     - name: arg2
       dtype: string
-  config_name: ehealth_kd
   splits:
   - name: train
-    num_bytes: 425713
+    num_bytes: 425681
     num_examples: 800
   - name: validation
-    num_bytes: 108154
+    num_bytes: 108122
     num_examples: 199
   - name: test
-    num_bytes: 47314
+    num_bytes: 47282
     num_examples: 100
-  download_size: 565900
-  dataset_size: 581181
+  download_size: 349989
+  dataset_size: 581085
+configs:
+- config_name: ehealth_kd
+  data_files:
+  - split: train
+    path: ehealth_kd/train-*
+  - split: validation
+    path: ehealth_kd/validation-*
+  - split: test
+    path: ehealth_kd/test-*
+  default: true
 ---
 
 # Dataset Card for eHealth-KD
ehealth_kd.py DELETED
@@ -1,185 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""The eHealth-KD 2020 Corpus."""
-
-
-import datasets
-
-
-_CITATION = """\
-@inproceedings{overview_ehealthkd2020,
-    author = {Piad{-}Morffis, Alejandro and
-              Guti{\'{e}}rrez, Yoan and
-              Cañizares-Diaz, Hian and
-              Estevez{-}Velarde, Suilan and
-              Almeida{-}Cruz, Yudivi{\'{a}}n and
-              Muñoz, Rafael and
-              Montoyo, Andr{\'{e}}s},
-    title = {Overview of the eHealth Knowledge Discovery Challenge at IberLEF 2020},
-    booktitle = ,
-    year = {2020},
-}
-"""
-
-_DESCRIPTION = """\
-Dataset of the eHealth Knowledge Discovery Challenge at IberLEF 2020. It is designed for
-the identification of semantic entities and relations in Spanish health documents.
-"""
-
-_HOMEPAGE = "https://knowledge-learning.github.io/ehealthkd-2020/"
-
-_LICENSE = "https://creativecommons.org/licenses/by-nc-sa/4.0/"
-
-_URL = "https://raw.githubusercontent.com/knowledge-learning/ehealthkd-2020/master/data/"
-_TRAIN_DIR = "training/"
-_DEV_DIR = "development/main/"
-_TEST_DIR = "testing/scenario3-taskB/"
-_TEXT_FILE = "scenario.txt"
-_ANNOTATIONS_FILE = "scenario.ann"
-
-
-class EhealthKD(datasets.GeneratorBasedBuilder):
-    """The eHealth-KD 2020 Corpus."""
-
-    VERSION = datasets.Version("1.1.0")
-
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="ehealth_kd", version=VERSION, description="eHealth-KD Corpus"),
-    ]
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "sentence": datasets.Value("string"),
-                    "entities": [
-                        {
-                            "ent_id": datasets.Value("string"),
-                            "ent_text": datasets.Value("string"),
-                            "ent_label": datasets.ClassLabel(names=["Concept", "Action", "Predicate", "Reference"]),
-                            "start_character": datasets.Value("int32"),
-                            "end_character": datasets.Value("int32"),
-                        }
-                    ],
-                    "relations": [
-                        {
-                            "rel_id": datasets.Value("string"),
-                            "rel_label": datasets.ClassLabel(
-                                names=[
-                                    "is-a",
-                                    "same-as",
-                                    "has-property",
-                                    "part-of",
-                                    "causes",
-                                    "entails",
-                                    "in-time",
-                                    "in-place",
-                                    "in-context",
-                                    "subject",
-                                    "target",
-                                    "domain",
-                                    "arg",
-                                ]
-                            ),
-                            "arg1": datasets.Value("string"),
-                            "arg2": datasets.Value("string"),
-                        }
-                    ],
-                }
-            ),
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        urls_to_download = {
-            k: [f"{_URL}{v}{_TEXT_FILE}", f"{_URL}{v}{_ANNOTATIONS_FILE}"]
-            for k, v in zip(["train", "dev", "test"], [_TRAIN_DIR, _DEV_DIR, _TEST_DIR])
-        }
-
-        downloaded_files = dl_manager.download_and_extract(urls_to_download)
-
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={"txt_path": downloaded_files["train"][0], "ann_path": downloaded_files["train"][1]},
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={"txt_path": downloaded_files["dev"][0], "ann_path": downloaded_files["dev"][1]},
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={"txt_path": downloaded_files["test"][0], "ann_path": downloaded_files["test"][1]},
-            ),
-        ]
-
-    def _generate_examples(self, txt_path, ann_path):
-        """Yields examples."""
-        with open(txt_path, encoding="utf-8") as txt_file, open(ann_path, encoding="utf-8") as ann_file:
-            _id = 0
-            entities = []
-            relations = []
-
-            annotations = ann_file.readlines()
-            last = annotations[-1]
-
-            # Create a variable to keep track of the last annotation (entity or relation) to know when a sentence is fully annotated
-            # In the annotations file, the entities are before the relations
-            last_annotation = ""
-
-            for annotation in annotations:
-                if annotation == last:
-                    sentence = txt_file.readline().strip()
-                    yield _id, {"sentence": sentence, "entities": entities, "relations": relations}
-
-                if annotation.startswith("T"):
-                    if last_annotation == "relation":
-                        sentence = txt_file.readline().strip()
-                        yield _id, {"sentence": sentence, "entities": entities, "relations": relations}
-                        _id += 1
-                        entities = []
-                        relations = []
-
-                    ent_id, mid, ent_text = annotation.strip().split("\t")
-                    ent_label, spans = mid.split(" ", 1)
-                    start_character = spans.split(" ")[0]
-                    end_character = spans.split(" ")[-1]
-
-                    entities.append(
-                        {
-                            "ent_id": ent_id,
-                            "ent_text": ent_text,
-                            "ent_label": ent_label,
-                            "start_character": start_character,
-                            "end_character": end_character,
-                        }
-                    )
-
-                    last_annotation = "entity"
-
-                else:
-                    rel_id, rel_label, arg1, arg2 = annotation.strip().split()
-                    if annotation.startswith("R"):
-                        arg1 = arg1.split(":")[1]
-                        arg2 = arg2.split(":")[1]
-
-                    relations.append({"rel_id": rel_id, "rel_label": rel_label, "arg1": arg1, "arg2": arg2})
-
-                    last_annotation = "relation"
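
For reference, the deleted `_generate_examples` above parsed brat-style `.ann` files, where entity lines are tab-separated (id, label plus character spans, surface text) and relation lines are space-separated. A minimal sketch of that entity parsing on a hypothetical annotation line (the text and offsets are invented for illustration):

```python
# Hypothetical brat-style entity line, mirroring what the deleted
# _generate_examples parsed; text and offsets are made up for illustration.
annotation = "T1\tConcept 3 7\tasma"

ent_id, mid, ent_text = annotation.strip().split("\t")  # "T1", "Concept 3 7", "asma"
ent_label, spans = mid.split(" ", 1)                     # "Concept", "3 7"
start_character = spans.split(" ")[0]                    # "3" (first offset of the span)
end_character = spans.split(" ")[-1]                     # "7" (last offset of the span)

print(ent_id, ent_label, start_character, end_character, ent_text)
# -> T1 Concept 3 7 asma
```
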
ehealth_kd/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ceec65dacc5e13f07c08ec5ecbe8a349a9a59cfadb0c275af003f7dc9c6da06
+size 35298
ehealth_kd/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a67e4e45fbb06e05d986580f1276459397732674d9ed212cd943bc337860c1f2
+size 245584
ehealth_kd/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:743183f1be4c074dab31050516c8ac070cb5d112abcc2a9ddccb678e0b722d61
+size 69107
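
The three `version`/`oid`/`size` stanzas above are Git LFS pointer files; the actual Parquet shards live in LFS storage. A minimal sketch of inspecting one shard directly with `pyarrow`, assuming the repository has been checked out locally with LFS objects fetched:

```python
import pyarrow.parquet as pq

# Path assumes a local checkout where the LFS pointer has been replaced by
# the real Parquet shard added in this commit.
table = pq.read_table("ehealth_kd/train-00000-of-00001.parquet")
print(table.num_rows)      # 800, matching num_examples for the train split in the README
print(table.column_names)  # expected: ['sentence', 'entities', 'relations']
```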