albertvillanova HF staff commited on
Commit
b5ad955
1 Parent(s): 94d13ff

Delete loading script

Browse files
Files changed (1) hide show
  1. labr.py +0 -116
labr.py DELETED
@@ -1,116 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- # Lint as: python3
17
- """Arabic Book Reviews."""
18
-
19
-
20
- import csv
21
-
22
- import datasets
23
- from datasets.tasks import TextClassification
24
-
25
-
26
- _DESCRIPTION = """\
27
- This dataset contains over 63,000 book reviews in Arabic.\
28
- It is the largest sentiment analysis dataset for Arabic to-date.\
29
- The book reviews were harvested from the website Goodreads during the month or March 2013.\
30
- Each book review comes with the goodreads review id, the user id, the book id, the rating (1 to 5) and the text of the review.
31
- """
32
-
33
- _CITATION = """\
34
- @inproceedings{aly2013labr,
35
- title={Labr: A large scale arabic book reviews dataset},
36
- author={Aly, Mohamed and Atiya, Amir},
37
- booktitle={Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)},
38
- pages={494--498},
39
- year={2013}
40
- }
41
- """
42
-
43
- _URL = "https://raw.githubusercontent.com/mohamedadaly/LABR/master/data/"
44
- _URLS = {
45
- "train": _URL + "5class-balanced-train.txt",
46
- "test": _URL + "5class-balanced-test.txt",
47
- "reviews": _URL + "reviews.tsv",
48
- }
49
-
50
-
51
class LabrConfig(datasets.BuilderConfig):
    """BuilderConfig for the LABR dataset.

    Pins the dataset version to 1.0.0; every other setting is forwarded
    unchanged to ``datasets.BuilderConfig``.
    """

    def __init__(self, **kwargs):
        """Create a LABR builder config.

        Args:
            **kwargs: keyword arguments forwarded to the parent class.
        """
        super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)
61
-
62
-
63
class Labr(datasets.GeneratorBasedBuilder):
    """LABR (Large-scale Arabic Book Reviews) dataset builder.

    The upstream repository ships a single ``reviews.tsv`` holding all
    review rows, plus per-split text files that list, one per line, the
    row numbers of the reviews belonging to that split.
    """

    BUILDER_CONFIGS = [
        LabrConfig(
            name="plain_text",
            description="Plain text",
        )
    ]

    def _info(self):
        """Return dataset metadata: features, homepage, citation, task."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    # The 1-5 star rating, kept as string class names.
                    "label": datasets.features.ClassLabel(
                        names=[
                            "1",
                            "2",
                            "3",
                            "4",
                            "5",
                        ]
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/mohamedadaly/LABR",
            citation=_CITATION,
            task_templates=[TextClassification(text_column="text", label_column="label")],
        )

    def _split_generators(self, dl_manager):
        """Download all files and declare the train/test splits.

        The reviews file path is passed explicitly through ``gen_kwargs``
        rather than only via instance state, so each split generator is
        self-contained.
        """
        data_dir = dl_manager.download(_URLS)
        # Kept for backward compatibility with any code that read this
        # attribute; _generate_examples also falls back to it.
        self.reviews_path = data_dir["reviews"]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"directory": data_dir["train"], "reviews_path": data_dir["reviews"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"directory": data_dir["test"], "reviews_path": data_dir["reviews"]},
            ),
        ]

    def _generate_examples(self, directory, reviews_path=None):
        """Yield ``(key, example)`` pairs for one split.

        Args:
            directory: path to the split's index file; each line holds the
                row number of a review in ``reviews.tsv``.
            reviews_path: path to ``reviews.tsv``. Defaults to the path
                recorded by ``_split_generators`` (backward compatibility).
        """
        if reviews_path is None:
            reviews_path = self.reviews_path
        # Load every review row once. Rows unpack as
        # (rating, _, _, _, review_text); per the dataset description the
        # middle columns are review/user/book ids — TODO confirm the order.
        with open(reviews_path, encoding="utf-8") as tsvfile:
            # NOTE(review): csv.reader's default quoting could mis-split a
            # row whose text contains quote characters; behavior kept as-is
            # to match the published dataset.
            reviews = list(csv.reader(tsvfile, delimiter="\t"))

        with open(directory, encoding="utf-8") as f:
            for id_, record in enumerate(f.read().splitlines()):
                rating, _, _, _, review_text = reviews[int(record)]
                yield str(id_), {"text": review_text, "label": rating}