albertvillanova committed
Commit 489fda2
1 Parent(s): c090a3d

Convert dataset to Parquet

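As context for the change below, here is a minimal sketch of what such a conversion amounts to with the `datasets` library; the Hub's automated converter may work differently, and the target repo name here is only illustrative.

```python
from datasets import load_dataset

# Load the original, loading-script-based dataset.
ds = load_dataset("allocine")

# Re-upload it: push_to_hub writes one Parquet shard per split
# (e.g. allocine/train-00000-of-00001.parquet) and exports the split
# metadata into the README YAML. The target repo is illustrative only.
ds.push_to_hub("allocine", config_name="allocine")
```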
README.md CHANGED
@@ -20,6 +20,7 @@ task_ids:
 paperswithcode_id: allocine
 pretty_name: Allociné
 dataset_info:
+  config_name: allocine
   features:
   - name: review
     dtype: string
@@ -29,19 +30,28 @@ dataset_info:
         names:
           '0': neg
           '1': pos
-  config_name: allocine
   splits:
   - name: train
-    num_bytes: 91330696
+    num_bytes: 91330632
     num_examples: 160000
   - name: validation
-    num_bytes: 11546250
+    num_bytes: 11546242
     num_examples: 20000
   - name: test
-    num_bytes: 11547697
+    num_bytes: 11547689
     num_examples: 20000
-  download_size: 66625305
-  dataset_size: 114424643
+  download_size: 75125954
+  dataset_size: 114424563
+configs:
+- config_name: allocine
+  data_files:
+  - split: train
+    path: allocine/train-*
+  - split: validation
+    path: allocine/validation-*
+  - split: test
+    path: allocine/test-*
+  default: true
 train-eval-index:
 - config: allocine
   task: text-classification
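With the `configs` block added above, the dataset resolves straight to the Parquet shards and no loading script is executed. A short usage sketch (output details vary with the `datasets` version):

```python
from datasets import load_dataset

# "allocine" is marked `default: true` above, so no config name is required.
ds = load_dataset("allocine")
print(ds)                             # DatasetDict with train/validation/test splits
print(ds["train"].features["label"])  # ClassLabel with names ['neg', 'pos']
```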
allocine/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0e313c31e2db65eae40072f525eb0bc3567817baad70a85f798836b1e5be5a88
+size 7580549
allocine/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5cdabde7b62d2d56a2bc24e790cb9697057645103b607c281d82092bc5d53307
+size 59970147
allocine/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c2a25489c7f923475a11756071acc20df1a967b58042ca853698800164e731aa
+size 7575258
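The three files above are Git LFS pointers to the new Parquet shards. As a sketch, a single shard can be inspected directly via the hf:// filesystem, assuming `huggingface_hub` and `pyarrow` are installed alongside pandas:

```python
import pandas as pd

# hf:// resolution is provided by huggingface_hub's HfFileSystem; the path
# mirrors the repo layout added in this commit.
df = pd.read_parquet("hf://datasets/allocine/allocine/test-00000-of-00001.parquet")
print(df.shape)             # expected: (20000, 2)
print(df.columns.tolist())  # expected: ['review', 'label']
```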
dataset_infos.json CHANGED
@@ -1 +1,60 @@
-{"allocine": {"description": " Allocine Dataset: A Large-Scale French Movie Reviews Dataset.\n This is a dataset for binary sentiment classification, made of user reviews scraped from Allocine.fr.\n It contains 100k positive and 100k negative reviews divided into 3 balanced splits: train (160k reviews), val (20k) and test (20k).\n", "citation": "@misc{blard2019allocine,\n author = {Blard, Theophile},\n title = {french-sentiment-analysis-with-bert},\n year = {2020},\n publisher = {GitHub},\n journal = {GitHub repository},\n howpublished={\\url{https://github.com/TheophileBlard/french-sentiment-analysis-with-bert}},\n}\n", "homepage": "https://github.com/TheophileBlard/french-sentiment-analysis-with-bert", "license": "", "features": {"review": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["neg", "pos"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "task_templates": [{"task": "text-classification", "text_column": "review", "label_column": "label", "labels": ["neg", "pos"]}], "builder_name": "allocine_dataset", "config_name": "allocine", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 91330696, "num_examples": 160000, "dataset_name": "allocine_dataset"}, "validation": {"name": "validation", "num_bytes": 11546250, "num_examples": 20000, "dataset_name": "allocine_dataset"}, "test": {"name": "test", "num_bytes": 11547697, "num_examples": 20000, "dataset_name": "allocine_dataset"}}, "download_checksums": {"https://github.com/TheophileBlard/french-sentiment-analysis-with-bert/raw/master/allocine_dataset/data.tar.bz2": {"num_bytes": 66625305, "checksum": "8c49a8cac783da201697ed1a91b36d2f6618222b3b7ea1c2996f2a3fbc37dfb4"}}, "download_size": 66625305, "post_processing_size": null, "dataset_size": 114424643, "size_in_bytes": 181049948}}
+{
+    "allocine": {
+        "description": " Allocine Dataset: A Large-Scale French Movie Reviews Dataset.\n This is a dataset for binary sentiment classification, made of user reviews scraped from Allocine.fr.\n It contains 100k positive and 100k negative reviews divided into 3 balanced splits: train (160k reviews), val (20k) and test (20k).\n",
+        "citation": "@misc{blard2019allocine,\n author = {Blard, Theophile},\n title = {french-sentiment-analysis-with-bert},\n year = {2020},\n publisher = {GitHub},\n journal = {GitHub repository},\n howpublished={\\url{https://github.com/TheophileBlard/french-sentiment-analysis-with-bert}},\n}\n",
+        "homepage": "https://github.com/TheophileBlard/french-sentiment-analysis-with-bert",
+        "license": "",
+        "features": {
+            "review": {
+                "dtype": "string",
+                "_type": "Value"
+            },
+            "label": {
+                "names": [
+                    "neg",
+                    "pos"
+                ],
+                "_type": "ClassLabel"
+            }
+        },
+        "task_templates": [
+            {
+                "task": "text-classification",
+                "text_column": "review",
+                "label_column": "label"
+            }
+        ],
+        "builder_name": "allocine",
+        "dataset_name": "allocine",
+        "config_name": "allocine",
+        "version": {
+            "version_str": "1.0.0",
+            "major": 1,
+            "minor": 0,
+            "patch": 0
+        },
+        "splits": {
+            "train": {
+                "name": "train",
+                "num_bytes": 91330632,
+                "num_examples": 160000,
+                "dataset_name": null
+            },
+            "validation": {
+                "name": "validation",
+                "num_bytes": 11546242,
+                "num_examples": 20000,
+                "dataset_name": null
+            },
+            "test": {
+                "name": "test",
+                "num_bytes": 11547689,
+                "num_examples": 20000,
+                "dataset_name": null
+            }
+        },
+        "download_size": 75125954,
+        "dataset_size": 114424563,
+        "size_in_bytes": 189550517
+    }
+}
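The split sizes recorded above can be cross-checked without downloading the data, for example through the builder metadata (a sketch; the numbers should match the Parquet revision of the repo):

```python
from datasets import load_dataset_builder

builder = load_dataset_builder("allocine")
info = builder.info

# Split-level counts as exported in the metadata above.
for split in ("train", "validation", "test"):
    print(split, info.splits[split].num_examples, info.splits[split].num_bytes)

# Overall sizes.
print(info.download_size, info.dataset_size)
```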