albertvillanova HF staff committed on
Commit
170054b
1 Parent(s): ab88c38

Convert dataset to Parquet (#4)

Browse files

- Convert dataset to Parquet (94d13ffc005b2156fc9a95c7e4bdd3efdeeaf4be)
- Delete loading script (b5ad95592e5e8d5ed507ce163940d9b065634a2d)

README.md CHANGED
@@ -20,6 +20,7 @@ task_ids:
20
  paperswithcode_id: labr
21
  pretty_name: LABR
22
  dataset_info:
 
23
  features:
24
  - name: text
25
  dtype: string
@@ -32,16 +33,23 @@ dataset_info:
32
  '2': '3'
33
  '3': '4'
34
  '4': '5'
35
- config_name: plain_text
36
  splits:
37
  - name: train
38
- num_bytes: 7051103
39
  num_examples: 11760
40
  - name: test
41
- num_bytes: 1703399
42
  num_examples: 2935
43
- download_size: 39953712
44
- dataset_size: 8754502
 
 
 
 
 
 
 
 
45
  ---
46
 
47
  # Dataset Card for LABR
 
20
  paperswithcode_id: labr
21
  pretty_name: LABR
22
  dataset_info:
23
+ config_name: plain_text
24
  features:
25
  - name: text
26
  dtype: string
 
33
  '2': '3'
34
  '3': '4'
35
  '4': '5'
 
36
  splits:
37
  - name: train
38
+ num_bytes: 7051095
39
  num_examples: 11760
40
  - name: test
41
+ num_bytes: 1703395
42
  num_examples: 2935
43
+ download_size: 4745822
44
+ dataset_size: 8754490
45
+ configs:
46
+ - config_name: plain_text
47
+ data_files:
48
+ - split: train
49
+ path: plain_text/train-*
50
+ - split: test
51
+ path: plain_text/test-*
52
+ default: true
53
  ---
54
 
55
  # Dataset Card for LABR
labr.py DELETED
@@ -1,116 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- # Lint as: python3
17
- """Arabic Book Reviews."""
18
-
19
-
20
- import csv
21
-
22
- import datasets
23
- from datasets.tasks import TextClassification
24
-
25
-
26
- _DESCRIPTION = """\
27
- This dataset contains over 63,000 book reviews in Arabic.\
28
- It is the largest sentiment analysis dataset for Arabic to-date.\
29
- The book reviews were harvested from the website Goodreads during the month of March 2013.\
30
- Each book review comes with the goodreads review id, the user id, the book id, the rating (1 to 5) and the text of the review.
31
- """
32
-
33
- _CITATION = """\
34
- @inproceedings{aly2013labr,
35
- title={Labr: A large scale arabic book reviews dataset},
36
- author={Aly, Mohamed and Atiya, Amir},
37
- booktitle={Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)},
38
- pages={494--498},
39
- year={2013}
40
- }
41
- """
42
-
43
- _URL = "https://raw.githubusercontent.com/mohamedadaly/LABR/master/data/"
44
- _URLS = {
45
- "train": _URL + "5class-balanced-train.txt",
46
- "test": _URL + "5class-balanced-test.txt",
47
- "reviews": _URL + "reviews.tsv",
48
- }
49
-
50
-
51
- class LabrConfig(datasets.BuilderConfig):
52
- """BuilderConfig for Labr."""
53
-
54
- def __init__(self, **kwargs):
55
- """BuilderConfig for Labr.
56
-
57
- Args:
58
- **kwargs: keyword arguments forwarded to super.
59
- """
60
- super(LabrConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
61
-
62
-
63
- class Labr(datasets.GeneratorBasedBuilder):
64
- """Labr dataset."""
65
-
66
- BUILDER_CONFIGS = [
67
- LabrConfig(
68
- name="plain_text",
69
- description="Plain text",
70
- )
71
- ]
72
-
73
- def _info(self):
74
- return datasets.DatasetInfo(
75
- description=_DESCRIPTION,
76
- features=datasets.Features(
77
- {
78
- "text": datasets.Value("string"),
79
- "label": datasets.features.ClassLabel(
80
- names=[
81
- "1",
82
- "2",
83
- "3",
84
- "4",
85
- "5",
86
- ]
87
- ),
88
- }
89
- ),
90
- supervised_keys=None,
91
- homepage="https://github.com/mohamedadaly/LABR",
92
- citation=_CITATION,
93
- task_templates=[TextClassification(text_column="text", label_column="label")],
94
- )
95
-
96
- def _split_generators(self, dl_manager):
97
- data_dir = dl_manager.download(_URLS)
98
- self.reviews_path = data_dir["reviews"]
99
- return [
100
- datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"directory": data_dir["train"]}),
101
- datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"directory": data_dir["test"]}),
102
- ]
103
-
104
- def _generate_examples(self, directory):
105
- """Generate examples."""
106
- # For labeled examples, extract the label from the path.
107
- reviews = []
108
- with open(self.reviews_path, encoding="utf-8") as tsvfile:
109
- tsvreader = csv.reader(tsvfile, delimiter="\t")
110
- for line in tsvreader:
111
- reviews.append(line)
112
-
113
- with open(directory, encoding="utf-8") as f:
114
- for id_, record in enumerate(f.read().splitlines()):
115
- rating, _, _, _, review_text = reviews[int(record)]
116
- yield str(id_), {"text": review_text, "label": rating}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
plain_text/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8de908fbbe05a9c5ba9c6b30f18acd0cea29a550d91a32b359280b5e5551ac8b
3
+ size 919405
plain_text/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ea9c358b8ae0062086e118cd9365dfe881415d0a9e3954cd4c343473cc30add6
3
+ size 3826417