Languages: Arabic
Multilinguality: monolingual
Size Categories: 100K<n<1M
Language Creators: expert-generated
Annotations Creators: expert-generated
Source Datasets: original
Tags:
License:
albertvillanova (HF staff) committed on
Commit d80ee8d
1 Parent(s): 480cf0f

Delete loading script

Files changed (1)
  1. caner.py +0 -122
caner.py DELETED
@@ -1,122 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """A new corpus of tagged data that can be useful for handling the issues in recognition of Classical Arabic named entities"""
-
-
- import csv
- import os
-
- import datasets
-
-
- _CITATION = """\
- @article{article,
- author = {Salah, Ramzi and Zakaria, Lailatul},
- year = {2018},
- month = {12},
- pages = {},
- title = {BUILDING THE CLASSICAL ARABIC NAMED ENTITY RECOGNITION CORPUS (CANERCORPUS)},
- volume = {96},
- journal = {Journal of Theoretical and Applied Information Technology}
- }
- """
-
- _DESCRIPTION = """\
- Classical Arabic Named Entity Recognition corpus as a new corpus of tagged data that can be useful for handling the issues in recognition of Arabic named entities.
- """
-
- _HOMEPAGE = "https://github.com/RamziSalah/Classical-Arabic-Named-Entity-Recognition-Corpus"
-
- # TODO: Add the licence for the dataset here if you can find it
- _LICENSE = ""
-
- _URL = "https://raw.githubusercontent.com/RamziSalah/Classical-Arabic-Named-Entity-Recognition-Corpus/master/CANERCorpus.csv"
-
-
- class Caner(datasets.GeneratorBasedBuilder):
-     """Classical Arabic Named Entity Recognition corpus as a new corpus of tagged data that can be useful for handling the issues in recognition of Arabic named entities"""
-
-     VERSION = datasets.Version("1.1.0")
-
-     def _info(self):
-
-         features = datasets.Features(
-             {
-                 "token": datasets.Value("string"),
-                 "ner_tag": datasets.ClassLabel(
-                     names=[
-                         "Allah",
-                         "Book",
-                         "Clan",
-                         "Crime",
-                         "Date",
-                         "Day",
-                         "Hell",
-                         "Loc",
-                         "Meas",
-                         "Mon",
-                         "Month",
-                         "NatOb",
-                         "Number",
-                         "O",
-                         "Org",
-                         "Para",
-                         "Pers",
-                         "Prophet",
-                         "Rlig",
-                         "Sect",
-                         "Time",
-                     ]
-                 ),
-             }
-         )
-
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             supervised_keys=None,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-
-         data_path = dl_manager.download(_URL)
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": data_path,
-                 },
-             )
-         ]
-
-     def _generate_examples(self, filepath):
-         """Yields examples."""
-
-         with open(filepath, encoding="utf-8") as csv_file:
-             reader = csv.reader(csv_file, delimiter=",")
-             next(reader, None)
-
-             for id_, row in enumerate(reader):
-
-                 yield id_, {
-                     "token": row[0],
-                     "ner_tag": row[1],
-                 }
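
For context, the deleted builder only streamed the corpus CSV from GitHub and yielded (token, ner_tag) pairs. The sketch below reproduces that parsing logic as a standalone script, outside the datasets builder machinery; the _URL and the two-column layout come from the script above, while the helper name read_caner and the rest of the scaffolding are illustrative.

import csv
import io
import urllib.request

# Same source file the deleted loading script downloaded.
_URL = "https://raw.githubusercontent.com/RamziSalah/Classical-Arabic-Named-Entity-Recognition-Corpus/master/CANERCorpus.csv"


def read_caner(url=_URL):
    """Yield (token, ner_tag) pairs, mirroring _generate_examples above."""
    with urllib.request.urlopen(url) as response:
        text = io.TextIOWrapper(response, encoding="utf-8")
        reader = csv.reader(text, delimiter=",")
        next(reader, None)  # skip the CSV header row, as the original script did
        for row in reader:
            yield row[0], row[1]  # token, ner_tag


if __name__ == "__main__":
    # Print the first few rows as a smoke test.
    for i, (token, tag) in enumerate(read_caner()):
        print(token, tag)
        if i >= 4:
            break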