system HF staff committed on
Commit
c4217ba
1 Parent(s): d629bf3

Update files from the datasets library (from 1.16.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0
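
For context, 1.16.0 is the release that reworked loading scripts so archives can be streamed instead of downloaded and extracted to disk. A minimal sketch of exercising the updated script, assuming a datasets>=1.16.0 install and that the dataset resolves on the Hub as turku_ner_corpus:

from datasets import load_dataset

# Streaming avoids materializing the archive locally; it relies on the
# download + iter_archive pattern introduced by this update.
ds = load_dataset("turku_ner_corpus", split="train", streaming=True)
print(next(iter(ds)))  # one example dict: {"id": ..., "tokens": [...], "ner_tags": [...]}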

Files changed (2)
  1. README.md +1 -0
  2. turku_ner_corpus.py +39 -38
README.md CHANGED
@@ -1,4 +1,5 @@
 ---
+pretty_name: Turku NER corpus
 annotations_creators:
 - expert-generated
 language_creators:
turku_ner_corpus.py CHANGED
@@ -14,7 +14,6 @@
 # limitations under the License.
 
 # Lint as: python3
-import os
 
 import datasets
 
@@ -73,47 +72,63 @@ class TurkuNERCorpus(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-        path = dl_manager.download_and_extract(_URL)
+        archive = dl_manager.download(_URL)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
-                gen_kwargs={"data_path": path, "data_type": "train"},
+                gen_kwargs={"files": dl_manager.iter_archive(archive), "data_type": "train"},
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
-                gen_kwargs={"data_path": path, "data_type": "valid"},
+                gen_kwargs={"files": dl_manager.iter_archive(archive), "data_type": "valid"},
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
-                gen_kwargs={"data_path": path, "data_type": "test"},
+                gen_kwargs={"files": dl_manager.iter_archive(archive), "data_type": "test"},
             ),
         ]
 
-    def _generate_examples(self, data_path, data_type):
+    def _generate_examples(self, files, data_type):
         if data_type == "train":
-            data_path = os.path.join(data_path, "turku-ner-corpus-1.0/data/conll/train.tsv")
+            data_path = "turku-ner-corpus-1.0/data/conll/train.tsv"
         elif data_type == "valid":
-            data_path = os.path.join(data_path, "turku-ner-corpus-1.0/data/conll/dev.tsv")
+            data_path = "turku-ner-corpus-1.0/data/conll/dev.tsv"
         elif data_type == "test":
-            data_path = os.path.join(data_path, "turku-ner-corpus-1.0/data/conll/test.tsv")
+            data_path = "turku-ner-corpus-1.0/data/conll/test.tsv"
         else:
             raise Exception("data_type not understood")
 
         sentence_counter = 0
-        with open(data_path, encoding="utf-8") as f:
-            current_words = []
-            current_labels = []
-            for row in f:
-                row = row.rstrip()
-                row_split = row.split("\t")
-                if len(row_split) == 2:
-                    token, label = row_split
-                    current_words.append(token)
-                    current_labels.append(label)
-                else:
-                    if not current_words:
-                        continue
-                    assert len(current_words) == len(current_labels), "word len doesnt match label length"
+        for path, f in files:
+            if path == data_path:
+                current_words = []
+                current_labels = []
+                for row in f:
+                    row = row.decode("utf-8").rstrip()
+                    row_split = row.split("\t")
+                    if len(row_split) == 2:
+                        token, label = row_split
+                        current_words.append(token)
+                        current_labels.append(label)
+                    else:
+                        if not current_words:
+                            continue
+                        assert len(current_words) == len(current_labels), "word len doesnt match label length"
+                        sentence = (
+                            sentence_counter,
+                            {
+                                "id": str(sentence_counter),
+                                "tokens": current_words,
+                                "ner_tags": current_labels,
+                            },
+                        )
+                        sentence_counter += 1
+                        current_words = []
+                        current_labels = []
+                        yield sentence
+
+                # if something remains:
+                if current_words:
                     sentence = (
                         sentence_counter,
                         {
@@ -122,19 +137,5 @@ class TurkuNERCorpus(datasets.GeneratorBasedBuilder):
                             "ner_tags": current_labels,
                         },
                     )
-                    sentence_counter += 1
-                    current_words = []
-                    current_labels = []
                     yield sentence
-
-            # if something remains:
-            if current_words:
-                sentence = (
-                    sentence_counter,
-                    {
-                        "id": str(sentence_counter),
-                        "tokens": current_words,
-                        "ner_tags": current_labels,
-                    },
-                )
-                yield sentence
+                break
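
A note on the new pattern: dl_manager.iter_archive yields (path, file object) pairs, and the file objects produce raw bytes, which is why each row is now decoded with .decode("utf-8") before parsing. As a rough local stand-in for illustration (a sketch only; iter_archive_local is a hypothetical helper, not part of the datasets API, and it assumes the archive is a tarball):

import tarfile

def iter_archive_local(archive_path):
    # Hypothetical stand-in for dl_manager.iter_archive: stream
    # (member path, binary file object) pairs without extracting to disk.
    with tarfile.open(archive_path) as tar:
        for member in tar:
            if member.isfile():
                yield member.name, tar.extractfile(member)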