Languages: Arabic
Multilinguality: monolingual
Size Categories: 1K<n<10K
Language Creators: found
Annotations Creators: found
Source Datasets: original
Commit 0d5b85c
albertvillanova (HF staff) committed
1 parent: c00a78a

Convert dataset to Parquet.
README.md CHANGED
@@ -34,10 +34,15 @@ dataset_info:
     dtype: string
   splits:
   - name: train
-    num_bytes: 3617097
+    num_bytes: 3617085
     num_examples: 8364
-  download_size: 3503230
-  dataset_size: 3617097
+  download_size: 1887029
+  dataset_size: 3617085
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
 ---
 
 # Dataset Card for ArRestReviews
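With the new configs block in the README front matter, the datasets library resolves the train split directly from the Parquet shard instead of running the original loading script. A minimal loading sketch, assuming the Hub repo id matches the builder name ar_res_reviews:

from datasets import load_dataset

# Load the single train split; data files are resolved from data/train-* per the configs block.
ds = load_dataset("ar_res_reviews", split="train")
print(ds.num_rows)      # expected: 8364
print(ds.column_names)  # ['polarity', 'text', 'restaurant_id', 'user_id']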
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:140cf35289bed629d676a9db236fa9955a1609794816090defd7350c8abc9e43
+size 1887029
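The committed file above is only a Git LFS pointer; the actual ~1.9 MB Parquet shard lives in LFS storage. A hedged sketch of inspecting the shard directly with pandas (pyarrow backend), assuming it has been pulled locally:

import pandas as pd

# Read the single train shard referenced by data/train-* in the README configs.
df = pd.read_parquet("data/train-00000-of-00001.parquet")
print(len(df))              # expected: 8364 rows
print(df.columns.tolist())  # ['polarity', 'text', 'restaurant_id', 'user_id']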
dataset_infos.json CHANGED
@@ -1 +1,49 @@
-{"default": {"description": "Dataset of 8364 restaurant reviews scrapped from qaym.com in Arabic for sentiment analysis\n", "citation": "@InProceedings{10.1007/978-3-319-18117-2_2,\nauthor=\"ElSahar, Hady\nand El-Beltagy, Samhaa R.\",\neditor=\"Gelbukh, Alexander\",\ntitle=\"Building Large Arabic Multi-domain Resources for Sentiment Analysis\",\nbooktitle=\"Computational Linguistics and Intelligent Text Processing\",\nyear=\"2015\",\npublisher=\"Springer International Publishing\",\naddress=\"Cham\",\npages=\"23--34\",\nisbn=\"978-3-319-18117-2\"\n}\n", "homepage": "https://github.com/hadyelsahar/large-arabic-sentiment-analysis-resouces", "license": "", "features": {"polarity": {"num_classes": 2, "names": ["negative", "positive"], "names_file": null, "id": null, "_type": "ClassLabel"}, "text": {"dtype": "string", "id": null, "_type": "Value"}, "restaurant_id": {"dtype": "string", "id": null, "_type": "Value"}, "user_id": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "ar_res_reviews", "config_name": "default", "version": {"version_str": "0.0.0", "description": null, "major": 0, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3617097, "num_examples": 8364, "dataset_name": "ar_res_reviews"}}, "download_checksums": {"https://raw.githubusercontent.com/hadyelsahar/large-arabic-sentiment-analysis-resouces/master/datasets/RES1.csv": {"num_bytes": 3503230, "checksum": "afdb587d41310302372ed154a91a7231f566c137cadeea9df166e7326c2c4b19"}}, "download_size": 3503230, "post_processing_size": null, "dataset_size": 3617097, "size_in_bytes": 7120327}}
+{
+  "default": {
+    "description": "Dataset of 8364 restaurant reviews scrapped from qaym.com in Arabic for sentiment analysis\n",
+    "citation": "@InProceedings{10.1007/978-3-319-18117-2_2,\nauthor=\"ElSahar, Hady\nand El-Beltagy, Samhaa R.\",\neditor=\"Gelbukh, Alexander\",\ntitle=\"Building Large Arabic Multi-domain Resources for Sentiment Analysis\",\nbooktitle=\"Computational Linguistics and Intelligent Text Processing\",\nyear=\"2015\",\npublisher=\"Springer International Publishing\",\naddress=\"Cham\",\npages=\"23--34\",\nisbn=\"978-3-319-18117-2\"\n}\n",
+    "homepage": "https://github.com/hadyelsahar/large-arabic-sentiment-analysis-resouces",
+    "license": "",
+    "features": {
+      "polarity": {
+        "names": [
+          "negative",
+          "positive"
+        ],
+        "_type": "ClassLabel"
+      },
+      "text": {
+        "dtype": "string",
+        "_type": "Value"
+      },
+      "restaurant_id": {
+        "dtype": "string",
+        "_type": "Value"
+      },
+      "user_id": {
+        "dtype": "string",
+        "_type": "Value"
+      }
+    },
+    "builder_name": "parquet",
+    "dataset_name": "ar_res_reviews",
+    "config_name": "default",
+    "version": {
+      "version_str": "0.0.0",
+      "major": 0,
+      "minor": 0,
+      "patch": 0
+    },
+    "splits": {
+      "train": {
+        "name": "train",
+        "num_bytes": 3617085,
+        "num_examples": 8364,
+        "dataset_name": null
+      }
+    },
+    "download_size": 1887029,
+    "dataset_size": 3617085,
+    "size_in_bytes": 5504114
+  }
+}
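The regenerated metadata keeps the two-class polarity ClassLabel, so integer labels can still be mapped back to their string names after the Parquet conversion. A small sketch, assuming the dataset loads as shown earlier:

from datasets import load_dataset

ds = load_dataset("ar_res_reviews", split="train")
polarity = ds.features["polarity"]           # ClassLabel with names ['negative', 'positive']
print(polarity.names)                        # ['negative', 'positive']
print(polarity.int2str(ds[0]["polarity"]))   # string label of the first review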