Commit 8c6d57c (0 parents), committed by system (HF staff)

Update files from the datasets library (from 1.0.2)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.2

.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
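
Each rule above routes files matching the pattern through Git LFS, so only a small pointer file is committed while the blob lives in LFS storage. As a rough illustration only (git's .gitattributes matching differs from fnmatch in some edge cases), a minimal Python sketch of which declared patterns a repo path would hit:

import fnmatch

# A subset of the LFS patterns declared above.
LFS_PATTERNS = ["*.7z", "*.arrow", "*.bin", "*.parquet", "*.zip", "*tfevents*"]

def matching_lfs_patterns(path):
    """Return the declared LFS patterns that a repo-relative path matches."""
    return [p for p in LFS_PATTERNS if fnmatch.fnmatch(path, p)]

print(matching_lfs_patterns("dummy/conll2000/1.0.0/dummy_data.zip"))  # ['*.zip']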
conll2000.py ADDED
@@ -0,0 +1,127 @@
+ # coding=utf-8
+ # Copyright 2020 HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """Introduction to the CoNLL-2000 Shared Task: Chunking"""
+
+ import logging
+
+ import datasets
+
+
+ _CITATION = """\
+ @inproceedings{tksbuchholz2000conll,
+     author = "Tjong Kim Sang, Erik F. and Sabine Buchholz",
+     title = "Introduction to the CoNLL-2000 Shared Task: Chunking",
+     editor = "Claire Cardie and Walter Daelemans and Claire
+               Nedellec and Tjong Kim Sang, Erik",
+     booktitle = "Proceedings of CoNLL-2000 and LLL-2000",
+     publisher = "Lisbon, Portugal",
+     pages = "127--132",
+     year = "2000"
+ }
+ """
+
+ _DESCRIPTION = """\
+ Text chunking consists of dividing a text in syntactically correlated parts of words. For example, the sentence
+ He reckons the current account deficit will narrow to only # 1.8 billion in September . can be divided as follows:
+ [NP He ] [VP reckons ] [NP the current account deficit ] [VP will narrow ] [PP to ] [NP only # 1.8 billion ]
+ [PP in ] [NP September ] .
+
+ Text chunking is an intermediate step towards full parsing. It was the shared task for CoNLL-2000. Training and test
+ data for this task is available. This data consists of the same partitions of the Wall Street Journal corpus (WSJ)
+ as the widely used data for noun phrase chunking: sections 15-18 as training data (211727 tokens) and section 20 as
+ test data (47377 tokens). The annotation of the data has been derived from the WSJ corpus by a program written by
+ Sabine Buchholz from Tilburg University, The Netherlands.
+ """
+
+ _URL = "https://github.com/teropa/nlp/raw/master/resources/corpora/conll2000/"
+ _TRAINING_FILE = "train.txt"
+ _TEST_FILE = "test.txt"
+
+
+ class Conll2000Config(datasets.BuilderConfig):
+     """BuilderConfig for Conll2000."""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for Conll2000.
+
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(Conll2000Config, self).__init__(**kwargs)
+
+
+ class Conll2000(datasets.GeneratorBasedBuilder):
+     """Conll2000 dataset."""
+
+     BUILDER_CONFIGS = [
+         Conll2000Config(name="conll2000", version=datasets.Version("1.0.0"), description="Conll2000 dataset"),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "words": datasets.Sequence(datasets.Value("string")),
+                     "pos": datasets.Sequence(datasets.Value("string")),
+                     "chunk": datasets.Sequence(datasets.Value("string")),
+                 }
+             ),
+             supervised_keys=None,
+             homepage="https://www.clips.uantwerpen.be/conll2000/chunking/",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         urls_to_download = {
+             "train": f"{_URL}{_TRAINING_FILE}",
+             "test": f"{_URL}{_TEST_FILE}",
+         }
+         downloaded_files = dl_manager.download_and_extract(urls_to_download)
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
+         ]
+
+     def _generate_examples(self, filepath):
+         logging.info("⏳ Generating examples from = %s", filepath)
+         with open(filepath, encoding="utf-8") as f:
+             guid = 0
+             words = []
+             pos = []
+             chunk = []
+             for line in f:
+                 if line == "" or line == "\n":
+                     # A blank line ends a sentence; emit it if tokens were collected.
+                     if words:
+                         yield guid, {"id": str(guid), "words": words, "pos": pos, "chunk": chunk}
+                         guid += 1
+                         words = []
+                         pos = []
+                         chunk = []
+                 else:
+                     # conll2000 tokens are space separated: word POS chunk
+                     splits = line.split(" ")
+                     words.append(splits[0])
+                     pos.append(splits[1])
+                     chunk.append(splits[2].rstrip())
+             # Last example; guard so a trailing blank line does not yield an empty record.
+             if words:
+                 yield guid, {"id": str(guid), "words": words, "pos": pos, "chunk": chunk}
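
With this script in place, the dataset is consumed through the library's standard entry point. A minimal usage sketch, assuming a datasets 1.0.x-era install (the field names match the Features declared above):

from datasets import load_dataset

# Downloads train.txt / test.txt via _split_generators and parses
# them with _generate_examples.
conll = load_dataset("conll2000")
first = conll["train"][0]
print(first["words"][:5])
print(first["pos"][:5])
print(first["chunk"][:5])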
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"conll2000": {"description": " Text chunking consists of dividing a text in syntactically correlated parts of words. For example, the sentence\n He reckons the current account deficit will narrow to only # 1.8 billion in September . can be divided as follows:\n[NP He ] [VP reckons ] [NP the current account deficit ] [VP will narrow ] [PP to ] [NP only # 1.8 billion ]\n[PP in ] [NP September ] .\n\nText chunking is an intermediate step towards full parsing. It was the shared task for CoNLL-2000. Training and test\ndata for this task is available. This data consists of the same partitions of the Wall Street Journal corpus (WSJ)\nas the widely used data for noun phrase chunking: sections 15-18 as training data (211727 tokens) and section 20 as\ntest data (47377 tokens). The annotation of the data has been derived from the WSJ corpus by a program written by\nSabine Buchholz from Tilburg University, The Netherlands.\n", "citation": "@inproceedings{tksbuchholz2000conll,\n author = \"Tjong Kim Sang, Erik F. and Sabine Buchholz\",\n title = \"Introduction to the CoNLL-2000 Shared Task: Chunking\",\n editor = \"Claire Cardie and Walter Daelemans and Claire\n Nedellec and Tjong Kim Sang, Erik\",\n booktitle = \"Proceedings of CoNLL-2000 and LLL-2000\",\n publisher = \"Lisbon, Portugal\",\n pages = \"127--132\",\n year = \"2000\"\n}\n", "homepage": "https://www.clips.uantwerpen.be/conll2000/chunking/", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "words": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "chunk": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "conll2000", "config_name": "conll2000", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 4916429, "num_examples": 8937, "dataset_name": "conll2000"}, "test": {"name": "test", "num_bytes": 1102955, "num_examples": 2013, "dataset_name": "conll2000"}}, "download_checksums": {"https://github.com/teropa/nlp/raw/master/resources/corpora/conll2000/train.txt": {"num_bytes": 2842164, "checksum": "82033cd7a72b209923a98007793e8f9de3abc1c8b79d646c50648eb949b87cea"}, "https://github.com/teropa/nlp/raw/master/resources/corpora/conll2000/test.txt": {"num_bytes": 639396, "checksum": "73b7b1e565fa75a1e22fe52ecdf41b6624d6f59dacb591d44252bf4d692b1628"}}, "download_size": 3481560, "post_processing_size": null, "dataset_size": 6019384, "size_in_bytes": 9500944}}
dummy/conll2000/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:52caa4d43585fd5fe542c2d2d4d05baae27bdcc9f40e2826b92904eb20ab8e3c
+ size 994
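
Because dummy_data.zip is LFS-tracked (per the .gitattributes rules above), only this three-line pointer is versioned; the `oid` is the SHA-256 of the actual blob. A hedged sketch for verifying a fetched file against that digest (the local path is an assumption):

import hashlib

def file_sha256(path, chunk_size=1 << 20):
    """Stream a file and return its hex SHA-256, the digest LFS records as `oid`."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

EXPECTED = "52caa4d43585fd5fe542c2d2d4d05baae27bdcc9f40e2826b92904eb20ab8e3c"
assert file_sha256("dummy_data.zip") == EXPECTED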