mvarma committed
Commit
fbe6a44
1 Parent(s): f5dc7f3

Create medwiki.py

Files changed (1)
  1. medwiki.py +123 -0
medwiki.py ADDED
@@ -0,0 +1,123 @@

# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MedWiki is a large-scale sentence dataset collected from Wikipedia with medical entity (UMLS) annotations. This dataset is intended for pretraining."""


import json
import os

import datasets


_CITATION = """\
@inproceedings{medwiki,
  title={Cross-Domain Data Integration for Named Entity Disambiguation in Biomedical Text},
  author={Maya Varma and Laurel Orr and Sen Wu and Megan Leszczynski and Xiao Ling and Christopher Ré},
  year={2021},
  booktitle={Findings of the Association for Computational Linguistics: EMNLP 2021}
}
"""

_DESCRIPTION = """\
MedWiki is a large-scale sentence dataset collected from Wikipedia with medical entity (UMLS) annotations. This dataset is intended for pretraining.
"""

_HOMEPAGE = ""

_LICENSE = ""

# Download URL for each builder config, keyed by config name so that
# _split_generators can look up the right archive. The raw zip files are served
# from the resolve/ path of the repository.
_URLs = {
    "medwiki_full": "https://huggingface.co/datasets/mvarma/medwiki/resolve/main/medwiki_full.zip",
    "medwiki_hq": "https://huggingface.co/datasets/mvarma/medwiki/resolve/main/medwiki_hq.zip",
}


class MedWiki(datasets.GeneratorBasedBuilder):
    """MedWiki: A Large-Scale Sentence Dataset with Medical Entity (UMLS) Annotations"""

    VERSION = datasets.Version("1.1.0")
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="medwiki_full",
            version=VERSION,
            description="MedWiki (Full): A Large-Scale Sentence Dataset with Medical Entity (UMLS) Annotations.",
        ),
        datasets.BuilderConfig(
            name="medwiki_hq",
            version=VERSION,
            description=(
                "MedWiki (HQ): A Large-Scale Sentence Dataset with Medical Entity (UMLS) Annotations. "
                "The HQ (high-quality) subset of MedWiki includes the portion of the dataset with "
                "higher-quality entity annotations."
            ),
        ),
    ]

    def _info(self):
        features = datasets.Features(
            {
                "mentions": datasets.Sequence(datasets.Value("string")),
                "entities": datasets.Sequence(datasets.Value("string")),
                "entity_titles": datasets.Sequence(datasets.Value("string")),
                "types": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
                "spans": datasets.Sequence(datasets.Sequence(datasets.Value("int32"))),
                "sentence": datasets.Value("string"),
                "sent_idx_unq": datasets.Value("int32"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        my_urls = _URLs[self.config.name]
        data_dir = dl_manager.download_and_extract(my_urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "train.jsonl"),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "test.jsonl"),
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "dev.jsonl"),
                    "split": "dev",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yields examples as (key, example) tuples."""
        # Each line of the JSONL file is one annotated sentence.
        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(f):
                data = json.loads(row)
                yield id_, {
                    "mentions": data["mentions"],
                    "entities": data["entities"],
                    "entity_titles": data["entity_titles"],
                    "types": data["types"],
                    "spans": data["spans"],
                    "sentence": data["sentence"],
                    "sent_idx_unq": data["sent_idx_unq"],
                }
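
For reference, a minimal sketch of how the dataset could be loaded once this script is published, using the standard datasets.load_dataset API. The repository id mvarma/medwiki and the config names medwiki_full / medwiki_hq come from the script above; the fields accessed in the example are the ones declared in _info.

from datasets import load_dataset

# Load the high-quality subset defined by the "medwiki_hq" config;
# "medwiki_full" selects the complete dataset instead.
medwiki = load_dataset("mvarma/medwiki", name="medwiki_hq")

# Each example is a sentence with aligned lists of mentions, UMLS entity IDs,
# entity titles, semantic types, and character spans.
example = medwiki["train"][0]
print(example["sentence"])
print(list(zip(example["mentions"], example["entities"], example["spans"])))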