Datasets:
afrikaans_ner_corpus
Languages:
Afrikaans
Multilinguality:
monolingual
Size Categories:
1K<n<10K
Language Creators:
expert-generated
Annotations Creators:
expert-generated
Source Datasets:
original
Tags:
License:
albertvillanova (HF staff) committed
Commit: d08fe25
Parent: 87ad41f

Delete loading script
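
With the loading script deleted, the datasets library resolves this dataset from data files hosted on the Hub (typically the auto-converted Parquet files) instead of executing repository code. A minimal loading sketch, assuming the dataset remains reachable under the id "afrikaans_ner_corpus" (inferred from the deleted file name, not confirmed by this commit page):

# Minimal sketch; the Hub id "afrikaans_ner_corpus" is an assumption inferred
# from the deleted script's name, not stated on this commit page.
from datasets import load_dataset

ds = load_dataset("afrikaans_ner_corpus", split="train")

print(ds[0]["tokens"])    # list of Afrikaans tokens for one sentence
print(ds[0]["ner_tags"])  # integer ids for OUT, B-PERS, I-PERS, B-ORG, ...
print(ds.features["ner_tags"].feature.names)  # map the ids back to tag names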

Files changed (1):
  1. afrikaans_ner_corpus.py +0 -135
afrikaans_ner_corpus.py DELETED
@@ -1,135 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """ Named entity annotated data from the NCHLT Text Resource Development: Phase II Project for Afrikaans"""
-
-
- import os
-
- import datasets
-
-
- logger = datasets.logging.get_logger(__name__)
-
-
- _CITATION = """\
- @inproceedings{afrikaans_ner_corpus,
-   author    = {Gerhard van Huyssteen and
-                Martin Puttkammer and
-                E.B. Trollip and
-                J.C. Liversage and
-                Roald Eiselen},
-   title     = {NCHLT Afrikaans Named Entity Annotated Corpus},
-   booktitle = {Eiselen, R. 2016. Government domain named entity recognition for South African languages. Proceedings of the 10th Language Resource and Evaluation Conference, Portorož, Slovenia.},
-   year      = {2016},
-   url       = {https://repo.sadilar.org/handle/20.500.12185/299},
- }
- """
-
- _DESCRIPTION = """\
- Named entity annotated data from the NCHLT Text Resource Development: Phase II Project, annotated with PERSON, LOCATION, ORGANISATION and MISCELLANEOUS tags.
- """
-
- _URL = "https://repo.sadilar.org/bitstream/handle/20.500.12185/299/nchlt_afrikaans_named_entity_annotated_corpus.zip?sequence=3&isAllowed=y"
-
-
- _EXTRACTED_FILE = "NCHLT Afrikaans Named Entity Annotated Corpus/Dataset.NCHLT-II.AF.NER.Full.txt"
-
-
- class AfrikaansNerCorpusConfig(datasets.BuilderConfig):
-     """BuilderConfig for AfrikaansNerCorpus"""
-
-     def __init__(self, **kwargs):
-         """BuilderConfig for AfrikaansNerCorpus.
-         Args:
-             **kwargs: keyword arguments forwarded to super.
-         """
-         super(AfrikaansNerCorpusConfig, self).__init__(**kwargs)
-
-
- class AfrikaansNerCorpus(datasets.GeneratorBasedBuilder):
-     """Afrikaans Ner dataset"""
-
-     BUILDER_CONFIGS = [
-         AfrikaansNerCorpusConfig(
-             name="afrikaans_ner_corpus",
-             version=datasets.Version("1.0.0"),
-             description="AfrikaansNerCorpus dataset",
-         ),
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "id": datasets.Value("string"),
-                     "tokens": datasets.Sequence(datasets.Value("string")),
-                     "ner_tags": datasets.Sequence(
-                         datasets.features.ClassLabel(
-                             names=[
-                                 "OUT",
-                                 "B-PERS",
-                                 "I-PERS",
-                                 "B-ORG",
-                                 "I-ORG",
-                                 "B-LOC",
-                                 "I-LOC",
-                                 "B-MISC",
-                                 "I-MISC",
-                             ]
-                         )
-                     ),
-                 }
-             ),
-             supervised_keys=None,
-             homepage="https://repo.sadilar.org/handle/20.500.12185/299",
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         data_dir = dl_manager.download_and_extract(_URL)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={"filepath": os.path.join(data_dir, _EXTRACTED_FILE)},
-             ),
-         ]
-
-     def _generate_examples(self, filepath):
-         logger.info("⏳ Generating examples from = %s", filepath)
-         with open(filepath, encoding="utf-8") as f:
-             guid = 0
-             tokens = []
-             ner_tags = []
-             for line in f:
-                 if line == "" or line == "\n":
-                     if tokens:
-                         yield guid, {
-                             "id": str(guid),
-                             "tokens": tokens,
-                             "ner_tags": ner_tags,
-                         }
-                         guid += 1
-                         tokens = []
-                         ner_tags = []
-                 else:
-                     splits = line.split("\t")
-                     tokens.append(splits[0])
-                     ner_tags.append(splits[1].rstrip())
-             yield guid, {
-                 "id": str(guid),
-                 "tokens": tokens,
-                 "ner_tags": ner_tags,
-             }
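
For reference, the deleted _generate_examples expected the NCHLT corpus file to contain one token and its NER tag per line, separated by a tab, with a blank line between sentences. The following is a self-contained sketch of that parsing logic run on an in-memory example; the Afrikaans tokens are invented for illustration only.

# Standalone sketch of the sentence format the deleted _generate_examples parsed:
# one "token<TAB>tag" pair per line, blank line between sentences.
# The sample tokens below are invented for illustration only.
sample = (
    "Kaapstad\tB-LOC\n"
    "is\tOUT\n"
    "mooi\tOUT\n"
    "\n"
    "Jan\tB-PERS\n"
    "praat\tOUT\n"
)

sentences = []
tokens, ner_tags = [], []
for line in sample.splitlines(keepends=True):
    if line == "" or line == "\n":
        # blank line: flush the sentence collected so far
        if tokens:
            sentences.append({"tokens": tokens, "ner_tags": ner_tags})
            tokens, ner_tags = [], []
    else:
        token, tag = line.split("\t")
        tokens.append(token)
        ner_tags.append(tag.rstrip())
if tokens:
    sentences.append({"tokens": tokens, "ner_tags": ner_tags})

print(sentences)  # two sentences, mirroring the "tokens"/"ner_tags" features above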