Languages: Arabic
Multilinguality: monolingual
Size Categories: 1K<n<10K
Language Creators: found
Annotations Creators: found
Source Datasets: original
Commit d51bf24 by albertvillanova (HF staff)
1 parent: c00a78a

Convert dataset to Parquet (#2)


- Convert dataset to Parquet (0d5b85c0d50a3d72cf267e0a300785b2cfb842b1)
- Delete loading script (8759c493bb016cf1ad19a875f4d39877402108db)
- Delete legacy dataset_infos.json (ac899f2f728c92427cec81896b787db5ac9f1449)
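
For reference, a minimal sketch of loading the converted dataset with the datasets library; the repo id "ar_res_reviews" is assumed from the script name, and the expected row count comes from the split metadata in the README diff below.

from datasets import load_dataset

# After the Parquet conversion, load_dataset reads the shards under data/train-*
# directly; the deleted Python loading script is no longer executed.
ds = load_dataset("ar_res_reviews", split="train")  # repo id assumed
print(ds.num_rows)  # expected: 8364
print(ds.features)  # polarity (ClassLabel), text, restaurant_id, user_id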

README.md CHANGED
@@ -34,10 +34,15 @@ dataset_info:
     dtype: string
   splits:
   - name: train
-    num_bytes: 3617097
+    num_bytes: 3617085
     num_examples: 8364
-  download_size: 3503230
-  dataset_size: 3617097
+  download_size: 1887029
+  dataset_size: 3617085
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
 ---
 
 # Dataset Card for ArRestReviews
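
The new configs block is what maps the default config's train split to the Parquet shards under data/train-*. As a hedged illustration (the hf:// repo path is an assumption, and it requires huggingface_hub to be installed for pandas' fsspec integration), a shard can also be read directly:

import pandas as pd

# Read the train shard straight from the Hub; repo path assumed.
df = pd.read_parquet("hf://datasets/ar_res_reviews/data/train-00000-of-00001.parquet")
print(len(df))  # expected: 8364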
ar_res_reviews.py DELETED
@@ -1,85 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Dataset of 8364 restaurant reviews scrapped from qaym.com in Arabic for sentiment analysis"""
-
-
-import csv
-
-import datasets
-
-
-_CITATION = """\
-@InProceedings{10.1007/978-3-319-18117-2_2,
-author="ElSahar, Hady
-and El-Beltagy, Samhaa R.",
-editor="Gelbukh, Alexander",
-title="Building Large Arabic Multi-domain Resources for Sentiment Analysis",
-booktitle="Computational Linguistics and Intelligent Text Processing",
-year="2015",
-publisher="Springer International Publishing",
-address="Cham",
-pages="23--34",
-isbn="978-3-319-18117-2"
-}
-"""
-
-_DESCRIPTION = """\
-Dataset of 8364 restaurant reviews scrapped from qaym.com in Arabic for sentiment analysis
-"""
-
-_HOMEPAGE = "https://github.com/hadyelsahar/large-arabic-sentiment-analysis-resouces"
-
-_DOWNLOAD_URL = (
-    "https://raw.githubusercontent.com/hadyelsahar/large-arabic-sentiment-analysis-resouces/master/datasets/RES1.csv"
-)
-
-
-class ArResReviews(datasets.GeneratorBasedBuilder):
-    """Dataset of 8364 restaurant reviews in Arabic for sentiment analysis"""
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "polarity": datasets.ClassLabel(names=["negative", "positive"]),
-                    "text": datasets.Value("string"),
-                    "restaurant_id": datasets.Value("string"),
-                    "user_id": datasets.Value("string"),
-                }
-            ),
-            homepage=_HOMEPAGE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-
-        data_dir = dl_manager.download_and_extract(_DOWNLOAD_URL)
-        return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir}),
-        ]
-
-    def _generate_examples(self, filepath):
-        """Generate arabic restaurant reviews examples."""
-        with open(filepath, encoding="utf-8") as csv_file:
-            next(csv_file)
-            csv_reader = csv.reader(
-                csv_file, quotechar='"', delimiter=",", quoting=csv.QUOTE_ALL, skipinitialspace=True
-            )
-            for id_, row in enumerate(csv_reader):
-                polarity, text, restaurant_id, user_id = row
-                polarity = "negative" if polarity == "-1" else "positive"
-                yield id_, {"polarity": polarity, "text": text, "restaurant_id": restaurant_id, "user_id": user_id}
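
The deleted script's download-and-parse step is easy to reproduce without the builder machinery; a minimal standalone sketch, with the URL, column order, and CSV dialect copied from the script above:

import csv
import urllib.request

URL = "https://raw.githubusercontent.com/hadyelsahar/large-arabic-sentiment-analysis-resouces/master/datasets/RES1.csv"

with urllib.request.urlopen(URL) as resp:
    lines = resp.read().decode("utf-8").splitlines()

# Skip the header row, then parse exactly as _generate_examples did:
# polarity "-1" maps to "negative", anything else to "positive".
reader = csv.reader(lines[1:], quotechar='"', delimiter=",", quoting=csv.QUOTE_ALL, skipinitialspace=True)
for polarity, text, restaurant_id, user_id in reader:
    label = "negative" if polarity == "-1" else "positive"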
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:140cf35289bed629d676a9db236fa9955a1609794816090defd7350c8abc9e43
+size 1887029
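
The repository stores only this Git LFS pointer; oid is the SHA-256 of the actual Parquet shard and size its byte count. A sketch for verifying a downloaded copy (the local filename is an assumption):

import hashlib

path = "train-00000-of-00001.parquet"  # assumed local download location
data = open(path, "rb").read()
assert len(data) == 1887029  # size from the LFS pointer
assert hashlib.sha256(data).hexdigest() == (
    "140cf35289bed629d676a9db236fa9955a1609794816090defd7350c8abc9e43"
)  # oid from the LFS pointer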
dataset_infos.json DELETED
@@ -1 +0,0 @@
-{"default": {"description": "Dataset of 8364 restaurant reviews scrapped from qaym.com in Arabic for sentiment analysis\n", "citation": "@InProceedings{10.1007/978-3-319-18117-2_2,\nauthor=\"ElSahar, Hady\nand El-Beltagy, Samhaa R.\",\neditor=\"Gelbukh, Alexander\",\ntitle=\"Building Large Arabic Multi-domain Resources for Sentiment Analysis\",\nbooktitle=\"Computational Linguistics and Intelligent Text Processing\",\nyear=\"2015\",\npublisher=\"Springer International Publishing\",\naddress=\"Cham\",\npages=\"23--34\",\nisbn=\"978-3-319-18117-2\"\n}\n", "homepage": "https://github.com/hadyelsahar/large-arabic-sentiment-analysis-resouces", "license": "", "features": {"polarity": {"num_classes": 2, "names": ["negative", "positive"], "names_file": null, "id": null, "_type": "ClassLabel"}, "text": {"dtype": "string", "id": null, "_type": "Value"}, "restaurant_id": {"dtype": "string", "id": null, "_type": "Value"}, "user_id": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "ar_res_reviews", "config_name": "default", "version": {"version_str": "0.0.0", "description": null, "major": 0, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3617097, "num_examples": 8364, "dataset_name": "ar_res_reviews"}}, "download_checksums": {"https://raw.githubusercontent.com/hadyelsahar/large-arabic-sentiment-analysis-resouces/master/datasets/RES1.csv": {"num_bytes": 3503230, "checksum": "afdb587d41310302372ed154a91a7231f566c137cadeea9df166e7326c2c4b19"}}, "download_size": 3503230, "post_processing_size": null, "dataset_size": 3617097, "size_in_bytes": 7120327}}
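
With dataset_infos.json deleted, the same metadata (features, split sizes, download size) now lives in the README's YAML header and remains reachable programmatically; a sketch, repo id assumed:

from datasets import load_dataset_builder

builder = load_dataset_builder("ar_res_reviews")  # repo id assumed
info = builder.info
print(info.splits["train"].num_examples)  # expected: 8364
print(info.dataset_size)                  # expected: 3617085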