system HF staff committed on
Commit
639daf1
0 Parent(s):

Update files from the datasets library (from 1.2.0)

Browse files

Release notes: https://github.com/huggingface/datasets/releases/tag/1.2.0

.gitattributes ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
5
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.model filter=lfs diff=lfs merge=lfs -text
12
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
13
+ *.onnx filter=lfs diff=lfs merge=lfs -text
14
+ *.ot filter=lfs diff=lfs merge=lfs -text
15
+ *.parquet filter=lfs diff=lfs merge=lfs -text
16
+ *.pb filter=lfs diff=lfs merge=lfs -text
17
+ *.pt filter=lfs diff=lfs merge=lfs -text
18
+ *.pth filter=lfs diff=lfs merge=lfs -text
19
+ *.rar filter=lfs diff=lfs merge=lfs -text
20
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
22
+ *.tflite filter=lfs diff=lfs merge=lfs -text
23
+ *.tgz filter=lfs diff=lfs merge=lfs -text
24
+ *.xz filter=lfs diff=lfs merge=lfs -text
25
+ *.zip filter=lfs diff=lfs merge=lfs -text
26
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
27
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,147 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ annotations_creators:
3
+ - found
4
+ language_creators:
5
+ - found
6
+ languages:
7
+ - ig
8
+ licenses:
9
+ - unknown
10
+ multilinguality:
11
+ - monolingual
12
+ size_categories:
13
+ - 10K<n<100K
14
+ source_datasets:
15
+ - original
16
+ task_categories:
17
+ - structure-prediction
18
+ task_ids:
19
+ - named-entity-recognition
20
+ ---
21
+
22
+ # Dataset Card for Igbo NER dataset
23
+
24
+ ## Table of Contents
25
+ - [Dataset Description](#dataset-description)
26
+ - [Dataset Summary](#dataset-summary)
27
+ - [Supported Tasks](#supported-tasks-and-leaderboards)
28
+ - [Languages](#languages)
29
+ - [Dataset Structure](#dataset-structure)
30
+ - [Data Instances](#data-instances)
31
+ - [Data Fields](#data-fields)
32
+ - [Data Splits](#data-splits)
33
+ - [Dataset Creation](#dataset-creation)
34
+ - [Curation Rationale](#curation-rationale)
35
+ - [Source Data](#source-data)
36
+ - [Annotations](#annotations)
37
+ - [Personal and Sensitive Information](#personal-and-sensitive-information)
38
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
39
+ - [Social Impact of Dataset](#social-impact-of-dataset)
40
+ - [Discussion of Biases](#discussion-of-biases)
41
+ - [Other Known Limitations](#other-known-limitations)
42
+ - [Additional Information](#additional-information)
43
+ - [Dataset Curators](#dataset-curators)
44
+ - [Licensing Information](#licensing-information)
45
+ - [Citation Information](#citation-information)
46
+
47
+ ## Dataset Description
48
+
49
+ - **Homepage:** https://github.com/IgnatiusEzeani/IGBONLP/tree/master/ig_ner
50
+ - **Repository:** https://github.com/IgnatiusEzeani/IGBONLP/tree/master/ig_ner
51
+ - **Paper:** https://arxiv.org/abs/2004.00648
52
+
53
+ ### Dataset Summary
54
+
55
+ [More Information Needed]
56
+
57
+ ### Supported Tasks and Leaderboards
58
+
59
+ [More Information Needed]
60
+
61
+ ### Languages
62
+
63
+ [More Information Needed]
64
+
65
+ ## Dataset Structure
66
+
67
+ ### Data Instances
68
+
69
+ Here is an example from the dataset:
70
+ ```
71
+ {'content_n': 'content_0', 'named_entity': 'Ike Ekweremmadụ', 'sentences': ['Ike Ekweremmadụ', "Ike ịda jụụ otụ nkeji banyere oke ogbugbu na-eme n'ala Naijiria agwụla Ekweremmadụ"]}
72
+ ```
73
+
74
+ ### Data Fields
75
+
76
+ - content_n : ID
77
+ - named_entity : Name of the entity
78
+ - sentences : List of sentences for the entity
79
+
80
+ ### Data Splits
81
+
82
+ [More Information Needed]
83
+
84
+ ## Dataset Creation
85
+
86
+ ### Curation Rationale
87
+
88
+ [More Information Needed]
89
+
90
+ ### Source Data
91
+
92
+ #### Initial Data Collection and Normalization
93
+
94
+ [More Information Needed]
95
+
96
+ #### Who are the source language producers?
97
+
98
+ [More Information Needed]
99
+
100
+ ### Annotations
101
+
102
+ #### Annotation process
103
+
104
+ [More Information Needed]
105
+
106
+ #### Who are the annotators?
107
+
108
+ [More Information Needed]
109
+
110
+ ### Personal and Sensitive Information
111
+
112
+ [More Information Needed]
113
+
114
+ ## Considerations for Using the Data
115
+
116
+ ### Social Impact of Dataset
117
+
118
+ [More Information Needed]
119
+
120
+ ### Discussion of Biases
121
+
122
+ [More Information Needed]
123
+
124
+ ### Other Known Limitations
125
+
126
+ [More Information Needed]
127
+
128
+ ## Additional Information
129
+
130
+ ### Dataset Curators
131
+
132
+ [More Information Needed]
133
+
134
+ ### Licensing Information
135
+
136
+ [More Information Needed]
137
+
138
+ ### Citation Information
139
+
140
+ @misc{ezeani2020igboenglish,
141
+ title={Igbo-English Machine Translation: An Evaluation Benchmark},
142
+ author={Ignatius Ezeani and Paul Rayson and Ikechukwu Onyenwe and Chinedu Uchechukwu and Mark Hepple},
143
+ year={2020},
144
+ eprint={2004.00648},
145
+ archivePrefix={arXiv},
146
+ primaryClass={cs.CL}
147
+ }
dataset_infos.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"ner_data": {"description": "Igbo Named Entity Recognition Dataset\n", "citation": "@misc{ezeani2020igboenglish,\n title={Igbo-English Machine Translation: An Evaluation Benchmark},\n author={Ignatius Ezeani and Paul Rayson and Ikechukwu Onyenwe and Chinedu Uchechukwu and Mark Hepple},\n year={2020},\n eprint={2004.00648},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://github.com/IgnatiusEzeani/IGBONLP/tree/master/ig_ner", "license": "", "features": {"content_n": {"dtype": "string", "id": null, "_type": "Value"}, "named_entity": {"dtype": "string", "id": null, "_type": "Value"}, "sentences": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "igbo_ner", "config_name": "ner_data", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 60315228, "num_examples": 30715, "dataset_name": "igbo_ner"}}, "download_checksums": {"https://raw.githubusercontent.com/IgnatiusEzeani/IGBONLP/master/ig_ner/igbo_data.txt": {"num_bytes": 3311204, "checksum": "788141de3c325937d478ad4c960e759816dd324670333aed370916a06d8262eb"}}, "download_size": 3311204, "post_processing_size": null, "dataset_size": 60315228, "size_in_bytes": 63626432}, "free_text": {"description": "Igbo Named Entity Recognition Dataset\n", "citation": "@misc{ezeani2020igboenglish,\n title={Igbo-English Machine Translation: An Evaluation Benchmark},\n author={Ignatius Ezeani and Paul Rayson and Ikechukwu Onyenwe and Chinedu Uchechukwu and Mark Hepple},\n year={2020},\n eprint={2004.00648},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://github.com/IgnatiusEzeani/IGBONLP/tree/master/ig_ner", "license": "", "features": {"sentences": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": 
"igbo_ner", "config_name": "free_text", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1172152, "num_examples": 10000, "dataset_name": "igbo_ner"}}, "download_checksums": {"https://raw.githubusercontent.com/IgnatiusEzeani/IGBONLP/master/ig_ner/igbo_data10000.txt": {"num_bytes": 1132151, "checksum": "6f664460a5beb3928063463041bde5a8b31fe4ac02d96f77094ef099a8e4f5c2"}}, "download_size": 1132151, "post_processing_size": null, "dataset_size": 1172152, "size_in_bytes": 2304303}}
dummy/free_text/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1bbb471167b318f1215e58a177dda3c2e2236b94e6e402a280769ffef7774472
3
+ size 702
dummy/ner_data/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:432b8f38303b64c3902e984e147f794fb350efb7c3dd0f880b049276a1298357
3
+ size 628
igbo_ner.py ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Igbo Named Entity Recognition Dataset"""
16
+
17
+ from __future__ import absolute_import, division, print_function
18
+
19
+ import datasets
20
+
21
+
22
# BibTeX entry for the paper that released the Igbo NLP resources
# (Ezeani et al., 2020, arXiv:2004.00648).
_CITATION = """\
@misc{ezeani2020igboenglish,
      title={Igbo-English Machine Translation: An Evaluation Benchmark},
      author={Ignatius Ezeani and Paul Rayson and Ikechukwu Onyenwe and Chinedu Uchechukwu and Mark Hepple},
      year={2020},
      eprint={2004.00648},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
"""

# Short human-readable summary surfaced through DatasetInfo.
_DESCRIPTION = """\
Igbo Named Entity Recognition Dataset
"""

# Project homepage; the same GitHub tree also hosts the raw data files.
_HOMEPAGE = "https://github.com/IgnatiusEzeani/IGBONLP/tree/master/ig_ner"

# One raw source file per builder config:
#   - "ner_data":  tab-separated entity/sentence lines
#   - "free_text": plain sentences, one per line
_URLs = {
    "ner_data": "https://raw.githubusercontent.com/IgnatiusEzeani/IGBONLP/master/ig_ner/igbo_data.txt",
    "free_text": "https://raw.githubusercontent.com/IgnatiusEzeani/IGBONLP/master/ig_ner/igbo_data10000.txt",
}
43
+
44
+
45
class IgboNer(datasets.GeneratorBasedBuilder):
    """Dataset builder for the Igbo NER Project.

    Two configs are exposed:
      * ``ner_data``  — named entities plus the sentences containing them.
      * ``free_text`` — the raw sentences used for NER, one per example.
    """

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="ner_data",
            version=VERSION,
            description="This dataset contains the named entity and all the sentences containing that entity.",
        ),
        datasets.BuilderConfig(
            name="free_text", version=VERSION, description="This dataset contains all sentences used for NER."
        ),
    ]

    DEFAULT_CONFIG_NAME = "ner_data"

    def _info(self):
        """Return the DatasetInfo; the feature schema depends on the active config."""
        if self.config.name == "ner_data":
            features = datasets.Features(
                {
                    "content_n": datasets.Value("string"),
                    "named_entity": datasets.Value("string"),
                    "sentences": datasets.Sequence(datasets.Value("string")),
                }
            )
        else:
            # "free_text": a single plain sentence per example.
            features = datasets.Features(
                {
                    "sentences": datasets.Value("string"),
                }
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the config's source file and expose a single train split."""
        data_file = dl_manager.download_and_extract(_URLs[self.config.name])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_file,
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield (key, example) pairs from the downloaded text file.

        For "ner_data" each input line is "<content_n><TAB><text>". The first
        line seen for a given content_n carries the named entity itself and
        seeds its sentence list; later lines append sentences. One example is
        yielded per input line, so successive examples for the same entity
        carry a growing "sentences" list (this matches the example count
        recorded in dataset_infos.json, so it is preserved deliberately).

        ``split`` is unused — there is only a train split — but kept because
        it is supplied via gen_kwargs.
        """
        dictionary = {}
        # utf-8-sig transparently strips a leading BOM if present.
        with open(filepath, "r", encoding="utf-8-sig") as f:
            if self.config.name == "ner_data":
                for id_, row in enumerate(f):
                    row = row.strip().split("\t")
                    content_n = row[0]
                    if content_n in dictionary:
                        dictionary[content_n]["sentences"].append(row[1])
                    else:
                        # First occurrence: row[1] is the entity name and
                        # also the first entry of its sentence list.
                        dictionary[content_n] = {
                            "named_entity": row[1],
                            "sentences": [row[1]],
                        }
                    yield id_, {
                        "content_n": content_n,
                        "named_entity": dictionary[content_n]["named_entity"],
                        "sentences": dictionary[content_n]["sentences"],
                    }
            else:
                for id_, row in enumerate(f):
                    yield id_, {
                        "sentences": row.strip(),
                    }