system HF staff committed on
Commit 526e6ec
Parent: 8c6d57c

Update files from the datasets library (from 1.1.3)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.1.3

Files changed (2)
  1. conll2000.py +92 -15
  2. dataset_infos.json +1 -1
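
In practice this update renames the columns "words"/"pos"/"chunk" to "tokens"/"pos_tags"/"chunk_tags" and turns the two tag columns into ClassLabel sequences, so they now hold integer class ids rather than raw strings. A minimal sketch of how the new schema surfaces through the library (assumes datasets >= 1.1.3 and network access; printed values are illustrative):

from datasets import load_dataset

ds = load_dataset("conll2000", split="train")

example = ds[0]
print(example["tokens"][:3])    # e.g. ['Confidence', 'in', 'the']
print(example["pos_tags"][:3])  # integer class ids, e.g. [19, 14, 11]

# The ClassLabel feature inside each Sequence maps ids back to tag strings.
pos_labels = ds.features["pos_tags"].feature
print(pos_labels.int2str(example["pos_tags"][0]))  # e.g. 'NN'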
conll2000.py CHANGED
@@ -77,9 +77,86 @@ class Conll2000(datasets.GeneratorBasedBuilder):
             features=datasets.Features(
                 {
                     "id": datasets.Value("string"),
-                    "words": datasets.Sequence(datasets.Value("string")),
-                    "pos": datasets.Sequence(datasets.Value("string")),
-                    "chunk": datasets.Sequence(datasets.Value("string")),
+                    "tokens": datasets.Sequence(datasets.Value("string")),
+                    "pos_tags": datasets.Sequence(
+                        datasets.features.ClassLabel(
+                            names=[
+                                "''",
+                                "#",
+                                "$",
+                                "(",
+                                ")",
+                                ",",
+                                ".",
+                                ":",
+                                "``",
+                                "CC",
+                                "CD",
+                                "DT",
+                                "EX",
+                                "FW",
+                                "IN",
+                                "JJ",
+                                "JJR",
+                                "JJS",
+                                "MD",
+                                "NN",
+                                "NNP",
+                                "NNPS",
+                                "NNS",
+                                "PDT",
+                                "POS",
+                                "PRP",
+                                "PRP$",
+                                "RB",
+                                "RBR",
+                                "RBS",
+                                "RP",
+                                "SYM",
+                                "TO",
+                                "UH",
+                                "VB",
+                                "VBD",
+                                "VBG",
+                                "VBN",
+                                "VBP",
+                                "VBZ",
+                                "WDT",
+                                "WP",
+                                "WP$",
+                                "WRB",
+                            ]
+                        )
+                    ),
+                    "chunk_tags": datasets.Sequence(
+                        datasets.features.ClassLabel(
+                            names=[
+                                "O",
+                                "B-ADJP",
+                                "I-ADJP",
+                                "B-ADVP",
+                                "I-ADVP",
+                                "B-CONJP",
+                                "I-CONJP",
+                                "B-INTJ",
+                                "I-INTJ",
+                                "B-LST",
+                                "I-LST",
+                                "B-NP",
+                                "I-NP",
+                                "B-PP",
+                                "I-PP",
+                                "B-PRT",
+                                "I-PRT",
+                                "B-SBAR",
+                                "I-SBAR",
+                                "B-UCP",
+                                "I-UCP",
+                                "B-VP",
+                                "I-VP",
+                            ]
+                        )
+                    ),
                 }
             ),
             supervised_keys=None,
@@ -104,22 +181,22 @@ class Conll2000(datasets.GeneratorBasedBuilder):
         logging.info("⏳ Generating examples from = %s", filepath)
         with open(filepath, encoding="utf-8") as f:
             guid = 0
-            words = []
-            pos = []
-            chunk = []
+            tokens = []
+            pos_tags = []
+            chunk_tags = []
             for line in f:
                 if line == "" or line == "\n":
-                    if words:
-                        yield guid, {"id": str(guid), "words": words, "pos": pos, "chunk": chunk}
+                    if tokens:
+                        yield guid, {"id": str(guid), "tokens": tokens, "pos_tags": pos_tags, "chunk_tags": chunk_tags}
                     guid += 1
-                    words = []
-                    pos = []
-                    chunk = []
+                    tokens = []
+                    pos_tags = []
+                    chunk_tags = []
                 else:
                     # conll2000 tokens are space separated
                     splits = line.split(" ")
-                    words.append(splits[0])
-                    pos.append(splits[1])
-                    chunk.append(splits[2].rstrip())
+                    tokens.append(splits[0])
+                    pos_tags.append(splits[1])
+                    chunk_tags.append(splits[2].rstrip())
             # last example
-            yield guid, {"id": str(guid), "words": words, "pos": pos, "chunk": chunk}
+            yield guid, {"id": str(guid), "tokens": tokens, "pos_tags": pos_tags, "chunk_tags": chunk_tags}
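
The _generate_examples loop above is a blank-line-delimited CoNLL reader: it accumulates one sentence's tokens and tags, emits an example whenever it reaches an empty line, and flushes the last sentence after the loop. A self-contained sketch of the same logic (parse_conll is a hypothetical helper, not part of the script):

def parse_conll(lines):
    tokens, pos_tags, chunk_tags = [], [], []
    for line in lines:
        if line.strip() == "":
            if tokens:  # a blank line ends the current sentence
                yield tokens, pos_tags, chunk_tags
                tokens, pos_tags, chunk_tags = [], [], []
        else:
            # each CoNLL-2000 line is "word POS chunk", space separated
            word, pos, chunk = line.split(" ")
            tokens.append(word)
            pos_tags.append(pos)
            chunk_tags.append(chunk.rstrip())
    if tokens:  # flush the last sentence if the file has no trailing blank line
        yield tokens, pos_tags, chunk_tags

sample = "He PRP B-NP\nreckons VBZ B-VP\n\n"
for sent in parse_conll(sample.splitlines(keepends=True)):
    print(sent)  # (['He', 'reckons'], ['PRP', 'VBZ'], ['B-NP', 'B-VP'])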
dataset_infos.json CHANGED
@@ -1 +1 @@
- {"conll2000": {"description": " Text chunking consists of dividing a text in syntactically correlated parts of words. For example, the sentence\n He reckons the current account deficit will narrow to only # 1.8 billion in September . can be divided as follows:\n[NP He ] [VP reckons ] [NP the current account deficit ] [VP will narrow ] [PP to ] [NP only # 1.8 billion ]\n[PP in ] [NP September ] .\n\nText chunking is an intermediate step towards full parsing. It was the shared task for CoNLL-2000. Training and test\ndata for this task is available. This data consists of the same partitions of the Wall Street Journal corpus (WSJ)\nas the widely used data for noun phrase chunking: sections 15-18 as training data (211727 tokens) and section 20 as\ntest data (47377 tokens). The annotation of the data has been derived from the WSJ corpus by a program written by\nSabine Buchholz from Tilburg University, The Netherlands.\n", "citation": "@inproceedings{tksbuchholz2000conll,\n author = \"Tjong Kim Sang, Erik F. and Sabine Buchholz\",\n title = \"Introduction to the CoNLL-2000 Shared Task: Chunking\",\n editor = \"Claire Cardie and Walter Daelemans and Claire\n Nedellec and Tjong Kim Sang, Erik\",\n booktitle = \"Proceedings of CoNLL-2000 and LLL-2000\",\n publisher = \"Lisbon, Portugal\",\n pages = \"127--132\",\n year = \"2000\"\n}\n", "homepage": "https://www.clips.uantwerpen.be/conll2000/chunking/", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "words": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "chunk": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "conll2000", "config_name": "conll2000", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 4916429, "num_examples": 8937, "dataset_name": "conll2000"}, "test": {"name": "test", "num_bytes": 1102955, "num_examples": 2013, "dataset_name": "conll2000"}}, "download_checksums": {"https://github.com/teropa/nlp/raw/master/resources/corpora/conll2000/train.txt": {"num_bytes": 2842164, "checksum": "82033cd7a72b209923a98007793e8f9de3abc1c8b79d646c50648eb949b87cea"}, "https://github.com/teropa/nlp/raw/master/resources/corpora/conll2000/test.txt": {"num_bytes": 639396, "checksum": "73b7b1e565fa75a1e22fe52ecdf41b6624d6f59dacb591d44252bf4d692b1628"}}, "download_size": 3481560, "post_processing_size": null, "dataset_size": 6019384, "size_in_bytes": 9500944}}
+ {"conll2000": {"description": " Text chunking consists of dividing a text in syntactically correlated parts of words. For example, the sentence\n He reckons the current account deficit will narrow to only # 1.8 billion in September . can be divided as follows:\n[NP He ] [VP reckons ] [NP the current account deficit ] [VP will narrow ] [PP to ] [NP only # 1.8 billion ]\n[PP in ] [NP September ] .\n\nText chunking is an intermediate step towards full parsing. It was the shared task for CoNLL-2000. Training and test\ndata for this task is available. This data consists of the same partitions of the Wall Street Journal corpus (WSJ)\nas the widely used data for noun phrase chunking: sections 15-18 as training data (211727 tokens) and section 20 as\ntest data (47377 tokens). The annotation of the data has been derived from the WSJ corpus by a program written by\nSabine Buchholz from Tilburg University, The Netherlands.\n", "citation": "@inproceedings{tksbuchholz2000conll,\n author = \"Tjong Kim Sang, Erik F. and Sabine Buchholz\",\n title = \"Introduction to the CoNLL-2000 Shared Task: Chunking\",\n editor = \"Claire Cardie and Walter Daelemans and Claire\n Nedellec and Tjong Kim Sang, Erik\",\n booktitle = \"Proceedings of CoNLL-2000 and LLL-2000\",\n publisher = \"Lisbon, Portugal\",\n pages = \"127--132\",\n year = \"2000\"\n}\n", "homepage": "https://www.clips.uantwerpen.be/conll2000/chunking/", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"num_classes": 44, "names": ["''", "#", "$", "(", ")", ",", ".", ":", "``", "CC", "CD", "DT", "EX", "FW", "IN", "JJ", "JJR", "JJS", "MD", "NN", "NNP", "NNPS", "NNS", "PDT", "POS", "PRP", "PRP$", "RB", "RBR", "RBS", "RP", "SYM", "TO", "UH", "VB", "VBD", "VBG", "VBN", "VBP", "VBZ", "WDT", "WP", "WP$", "WRB"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "chunk_tags": {"feature": {"num_classes": 23, "names": ["O", "B-ADJP", "I-ADJP", "B-ADVP", "I-ADVP", "B-CONJP", "I-CONJP", "B-INTJ", "I-INTJ", "B-LST", "I-LST", "B-NP", "I-NP", "B-PP", "I-PP", "B-PRT", "I-PRT", "B-SBAR", "I-SBAR", "B-UCP", "I-UCP", "B-VP", "I-VP"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "conll2000", "config_name": "conll2000", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 5356965, "num_examples": 8937, "dataset_name": "conll2000"}, "test": {"name": "test", "num_bytes": 1201151, "num_examples": 2013, "dataset_name": "conll2000"}}, "download_checksums": {"https://github.com/teropa/nlp/raw/master/resources/corpora/conll2000/train.txt": {"num_bytes": 2842164, "checksum": "82033cd7a72b209923a98007793e8f9de3abc1c8b79d646c50648eb949b87cea"}, "https://github.com/teropa/nlp/raw/master/resources/corpora/conll2000/test.txt": {"num_bytes": 639396, "checksum": "73b7b1e565fa75a1e22fe52ecdf41b6624d6f59dacb591d44252bf4d692b1628"}}, "download_size": 3481560, "post_processing_size": null, "dataset_size": 6558116, "size_in_bytes": 10039676}}