albertvillanova (HF staff) committed
Commit 637834e
Parent: 75b5654

Convert dataset to Parquet (#3)


- Convert dataset to Parquet (99d06ea2779a1f7f260133f38fa9cfee329f703e)
- Add 'lt' config data files (f885922d7574102b5b0b211182515f0a8f63671f)
- Add '1177' config data files (20192f0e24c3842ce43ae100244f9d88b5c7a212)
- Delete loading script (711bc70b277cd4fc9364abe5c8bc86656733e50f)
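
After this change the three configs are served straight from the Parquet shards below instead of by executing the deleted loading script. A minimal usage sketch (the bare dataset name is an assumption; it may need a namespace prefix on the Hub):

```python
from datasets import load_dataset

# Loads the '1177' config from its Parquet shard; '1177', 'lt' and 'wiki'
# are the three configs declared in the README diff below.
ds = load_dataset("swedish_medical_ner", "1177", split="train")

print(ds[0]["sentence"])   # annotated Swedish sentence
print(ds[0]["entities"])   # start/end offsets, surface text, entity type
```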

1177/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af8d4cb45b64896e0fd4724e0a4b9766b879a577ce9b2d2a337f6d84e40b181b
+size 77472
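
The pointer records only the SHA-256 and byte size of the shard stored in Git LFS, so a downloaded copy can be checked against it. A small sketch (the local filename is an assumption):

```python
import hashlib
from pathlib import Path

data = Path("train-00000-of-00001.parquet").read_bytes()  # assumed local copy

# Both expected values come from the LFS pointer above.
assert len(data) == 77472
assert hashlib.sha256(data).hexdigest() == (
    "af8d4cb45b64896e0fd4724e0a4b9766b879a577ce9b2d2a337f6d84e40b181b"
)
```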
README.md CHANGED
@@ -22,7 +22,7 @@ pretty_name: SwedMedNER
 language_bcp47:
 - sv-SE
 dataset_info:
-- config_name: wiki
+- config_name: '1177'
   features:
   - name: sid
     dtype: string
@@ -45,10 +45,10 @@ dataset_info:
           '2': Body Structure
   splits:
   - name: train
-    num_bytes: 7044714
-    num_examples: 48720
-  download_size: 52272712
-  dataset_size: 7044714
+    num_bytes: 158979
+    num_examples: 927
+  download_size: 77472
+  dataset_size: 158979
 - config_name: lt
   features:
   - name: sid
@@ -72,11 +72,11 @@ dataset_info:
           '2': Body Structure
   splits:
   - name: train
-    num_bytes: 97955287
+    num_bytes: 97953187
     num_examples: 745753
-  download_size: 52272712
-  dataset_size: 97955287
-- config_name: '1177'
+  download_size: 52246351
+  dataset_size: 97953187
+- config_name: wiki
   features:
   - name: sid
     dtype: string
@@ -99,10 +99,23 @@ dataset_info:
           '2': Body Structure
   splits:
   - name: train
-    num_bytes: 159007
-    num_examples: 927
-  download_size: 52272712
-  dataset_size: 159007
+    num_bytes: 7044574
+    num_examples: 48720
+  download_size: 2571416
+  dataset_size: 7044574
+configs:
+- config_name: '1177'
+  data_files:
+  - split: train
+    path: 1177/train-*
+- config_name: lt
+  data_files:
+  - split: train
+    path: lt/train-*
+- config_name: wiki
+  data_files:
+  - split: train
+    path: wiki/train-*
 ---
 
 # Dataset Card for swedish_medical_ner
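
The new `configs` section maps each config to a Parquet glob inside the repo, so the shards can also be read directly without the `datasets` library. A sketch assuming a recent `pandas` plus `huggingface_hub` with `hf://` filesystem support (`<namespace>` is a placeholder for the actual repo owner):

```python
import pandas as pd

# "1177/train-*" from the configs section resolves to this single shard.
df = pd.read_parquet(
    "hf://datasets/<namespace>/swedish_medical_ner/1177/train-00000-of-00001.parquet"
)

print(len(df))  # should match num_examples: 927 from dataset_info above
```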
lt/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:390ccd01ff87fdf2011cbaffe7023deadc61f8e97daae99c7241cf77ee21779b
+size 52246351
swedish_medical_ner.py DELETED
@@ -1,202 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""SwedMedNER: A Named Entity Recognition Dataset on medical texts in Swedish"""
-
-
-import re
-
-import datasets
-
-
-_CITATION = """\
-@inproceedings{almgrenpavlovmogren2016bioner,
-    title={Named Entity Recognition in Swedish Medical Journals with Deep Bidirectional Character-Based LSTMs},
-    author={Simon Almgren, Sean Pavlov, Olof Mogren},
-    booktitle={Proceedings of the Fifth Workshop on Building and Evaluating Resources for Biomedical Text Mining (BioTxtM 2016)},
-    pages={1},
-    year={2016}
-}
-"""
-
-
-_DESCRIPTION = """\
-SwedMedNER is a dataset for training and evaluating Named Entity Recognition systems on medical texts in Swedish.
-It is derived from medical articles on the Swedish Wikipedia, Läkartidningen, and 1177 Vårdguiden.
-"""
-
-
-_LICENSE = """\
-Creative Commons Attribution-ShareAlike 4.0 International Public License (CC BY-SA 4.0)
-See http://creativecommons.org/licenses/by-sa/4.0/ for the summary of the license.
-"""
-
-
-_URL = "https://github.com/olofmogren/biomedical-ner-data-swedish"
-
-
-_DATA_URL = "https://raw.githubusercontent.com/olofmogren/biomedical-ner-data-swedish/master/"
-
-
-class SwedishMedicalNerConfig(datasets.BuilderConfig):
-    """BuilderConfig for SwedMedNER"""
-
-    def __init__(self, **kwargs):
-        """
-        Args:
-            **kwargs: keyword arguments forwarded to super.
-        """
-        super(SwedishMedicalNerConfig, self).__init__(**kwargs)
-
-
-class SwedishMedicalNer(datasets.GeneratorBasedBuilder):
-    """SwedMedNER: A Named Entity Recognition Dataset on medical texts in Swedish"""
-
-    VERSION = datasets.Version("1.0.0")
-
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="wiki", version=VERSION, description="The Swedish Wikipedia part of the dataset"),
-        datasets.BuilderConfig(name="lt", version=VERSION, description="The Läkartidningen part of the dataset"),
-        datasets.BuilderConfig(name="1177", version=VERSION, description="The 1177 Vårdguiden part of the dataset"),
-    ]
-
-    def _info(self):
-        if self.config.name == "wiki":
-            features = datasets.Features(
-                {
-                    "sid": datasets.Value("string"),
-                    "sentence": datasets.Value("string"),
-                    "entities": datasets.Sequence(
-                        {
-                            "start": datasets.Value("int32"),
-                            "end": datasets.Value("int32"),
-                            "text": datasets.Value("string"),
-                            "type": datasets.ClassLabel(
-                                names=["Disorder and Finding", "Pharmaceutical Drug", "Body Structure"]
-                            ),
-                        }
-                    ),
-                }
-            )
-        elif self.config.name == "lt":
-            features = datasets.Features(
-                {
-                    "sid": datasets.Value("string"),
-                    "sentence": datasets.Value("string"),
-                    "entities": datasets.Sequence(
-                        {
-                            "start": datasets.Value("int32"),
-                            "end": datasets.Value("int32"),
-                            "text": datasets.Value("string"),
-                            "type": datasets.ClassLabel(
-                                names=["Disorder and Finding", "Pharmaceutical Drug", "Body Structure"]
-                            ),
-                        }
-                    ),
-                }
-            )
-        elif self.config.name == "1177":
-            features = datasets.Features(
-                {
-                    "sid": datasets.Value("string"),
-                    "sentence": datasets.Value("string"),
-                    "entities": datasets.Sequence(
-                        {
-                            "start": datasets.Value("int32"),
-                            "end": datasets.Value("int32"),
-                            "text": datasets.Value("string"),
-                            "type": datasets.ClassLabel(
-                                names=["Disorder and Finding", "Pharmaceutical Drug", "Body Structure"]
-                            ),
-                        }
-                    ),
-                }
-            )
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
-            supervised_keys=None,
-            homepage=_URL,
-            license=_LICENSE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        urls_to_download = {
-            "wiki": _DATA_URL + "Wiki_annotated_60.txt",
-            "lt": _DATA_URL + "LT_annotated_60.txt",
-            "1177": _DATA_URL + "1177_annotated_sentences.txt",
-        }
-        downloaded_files = dl_manager.download_and_extract(urls_to_download)
-
-        if self.config.name == "wiki":
-            return [
-                datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["wiki"]})
-            ]
-        elif self.config.name == "lt":
-            return [
-                datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["lt"]})
-            ]
-        elif self.config.name == "1177":
-            return [
-                datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["1177"]})
-            ]
-
-    def _generate_examples(self, filepath):
-        """Yields examples as (key, example) tuples."""
-
-        def find_type(s, e):
-            if (s == "(") and (e == ")"):
-                return "Disorder and Finding"
-            elif (s == "[") and (e == "]"):
-                return "Pharmaceutical Drug"
-            elif (s == "{") and (e == "}"):
-                return "Body Structure"
-
-        pattern = r"\[([^\[\]()]+)\]|\(([^\[\]()]+)\)|\{([^\[\]()]+)\}"
-        with open(filepath, encoding="utf-8") as f:
-            for id_, row in enumerate(f):
-                sentence = row.replace("\n", "")
-
-                if self.config.name == "1177":
-                    targets = [
-                        {
-                            "start": m.start(0),
-                            "end": m.end(0),
-                            "text": sentence[m.start(0) + 2 : m.end(0) - 2],
-                            "type": find_type(sentence[m.start(0)], sentence[m.end(0) - 1]),
-                        }
-                        for m in re.finditer(pattern, sentence)
-                    ]
-                    yield id_, {
-                        "sid": self.config.name + "_" + str(id_),
-                        "sentence": sentence,
-                        "entities": targets if targets else [],
-                    }
-                else:
-                    targets = [
-                        {
-                            "start": m.start(0),
-                            "end": m.end(0),
-                            "text": sentence[m.start(0) + 1 : m.end(0) - 1],
-                            "type": find_type(sentence[m.start(0)], sentence[m.end(0) - 1]),
-                        }
-                        for m in re.finditer(pattern, sentence)
-                    ]
-                    yield id_, {
-                        "sid": self.config.name + "_" + str(id_),
-                        "sentence": sentence,
-                        "entities": targets if targets else [],
-                    }
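
For reference, the core of the deleted script is its bracket-based annotation format: parentheses, square brackets, and curly braces mark the three entity types in the raw text files. A standalone sketch of that parsing logic, lifted from `_generate_examples` above (the example sentence is made up):

```python
import re

# Bracket conventions used by the raw annotation files:
#   ( ... ) -> Disorder and Finding
#   [ ... ] -> Pharmaceutical Drug
#   { ... } -> Body Structure
PATTERN = r"\[([^\[\]()]+)\]|\(([^\[\]()]+)\)|\{([^\[\]()]+)\}"
TYPE_BY_BRACKET = {
    "(": "Disorder and Finding",
    "[": "Pharmaceutical Drug",
    "{": "Body Structure",
}


def extract_entities(sentence):
    """Replicates the parsing for the 'wiki'/'lt' configs; the '1177'
    files pad brackets with spaces, hence the +2/-2 slices in the
    original script's '1177' branch."""
    return [
        {
            "start": m.start(0),
            "end": m.end(0),
            "text": sentence[m.start(0) + 1 : m.end(0) - 1],
            "type": TYPE_BY_BRACKET[sentence[m.start(0)]],
        }
        for m in re.finditer(PATTERN, sentence)
    ]


# Made-up Swedish sentence with one annotation per bracket type.
print(extract_entities("Patienten fick [ibuprofen] mot (huvudvärk) i {huvudet}."))
```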
wiki/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c651896521ab3226834c37aa1ecc68c06d2c237ce8502acd9122737224dedc85
+size 2571416