Languages: French
Multilinguality: monolingual
Size Categories: 100K<n<1M
Language Creators: found
Annotations Creators: no-annotation
Source Datasets: original
albertvillanova (HF staff) committed
Commit a4654f4 (1 parent: c090a3d)

Convert dataset to Parquet (#3)

- Convert dataset to Parquet (489fda293fc3ce54d5c72cabc0ede3d73e707968)
- Delete loading script (56ded245e6d2fa47a30cca2f218f2088cdaca64b)
- Delete legacy dataset_infos.json (d6237325328b5b8c387793a5cd6ec8c29a6dec61)

README.md CHANGED
@@ -20,6 +20,7 @@ task_ids:
 paperswithcode_id: allocine
 pretty_name: Allociné
 dataset_info:
+  config_name: allocine
   features:
   - name: review
     dtype: string
@@ -29,19 +30,28 @@ dataset_info:
         names:
           '0': neg
           '1': pos
-  config_name: allocine
   splits:
   - name: train
-    num_bytes: 91330696
+    num_bytes: 91330632
     num_examples: 160000
   - name: validation
-    num_bytes: 11546250
+    num_bytes: 11546242
     num_examples: 20000
   - name: test
-    num_bytes: 11547697
+    num_bytes: 11547689
     num_examples: 20000
-  download_size: 66625305
-  dataset_size: 114424643
+  download_size: 75125954
+  dataset_size: 114424563
+configs:
+- config_name: allocine
+  data_files:
+  - split: train
+    path: allocine/train-*
+  - split: validation
+    path: allocine/validation-*
+  - split: test
+    path: allocine/test-*
+  default: true
 train-eval-index:
 - config: allocine
   task: text-classification
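The added `configs` block is what maps each split to its Parquet shard(s). A rough sketch of the equivalent explicit call, assuming the three shards have been copied into a local `allocine/` directory (the generic `parquet` builder is used here purely for illustration):

from datasets import load_dataset

# Same split-to-file mapping as declared in the YAML `configs` entry above.
data_files = {
    "train": "allocine/train-*",
    "validation": "allocine/validation-*",
    "test": "allocine/test-*",
}
ds = load_dataset("parquet", data_files=data_files)
print(ds)  # DatasetDict with train / validation / test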
allocine.py DELETED
"""Allocine Dataset: A Large-Scale French Movie Reviews Dataset."""


import json

import datasets
from datasets.tasks import TextClassification


_CITATION = """\
@misc{blard2019allocine,
  author = {Blard, Theophile},
  title = {french-sentiment-analysis-with-bert},
  year = {2020},
  publisher = {GitHub},
  journal = {GitHub repository},
  howpublished={\\url{https://github.com/TheophileBlard/french-sentiment-analysis-with-bert}},
}
"""

_DESCRIPTION = """\
Allocine Dataset: A Large-Scale French Movie Reviews Dataset.
This is a dataset for binary sentiment classification, made of user reviews scraped from Allocine.fr.
It contains 100k positive and 100k negative reviews divided into 3 balanced splits: train (160k reviews), val (20k) and test (20k).
"""


class AllocineConfig(datasets.BuilderConfig):
    """BuilderConfig for Allocine."""

    def __init__(self, **kwargs):
        """BuilderConfig for Allocine.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(AllocineConfig, self).__init__(**kwargs)


class AllocineDataset(datasets.GeneratorBasedBuilder):
    """Allocine Dataset: A Large-Scale French Movie Reviews Dataset."""

    _DOWNLOAD_URL = "https://github.com/TheophileBlard/french-sentiment-analysis-with-bert/raw/master/allocine_dataset/data.tar.bz2"
    _TRAIN_FILE = "train.jsonl"
    _VAL_FILE = "val.jsonl"
    _TEST_FILE = "test.jsonl"

    BUILDER_CONFIGS = [
        AllocineConfig(
            name="allocine",
            version=datasets.Version("1.0.0"),
            description="Allocine Dataset: A Large-Scale French Movie Reviews Dataset",
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "review": datasets.Value("string"),
                    "label": datasets.features.ClassLabel(names=["neg", "pos"]),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/TheophileBlard/french-sentiment-analysis-with-bert",
            citation=_CITATION,
            task_templates=[TextClassification(text_column="review", label_column="label")],
        )

    def _split_generators(self, dl_manager):
        archive_path = dl_manager.download(self._DOWNLOAD_URL)
        data_dir = "data"
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": f"{data_dir}/{self._TRAIN_FILE}",
                    "files": dl_manager.iter_archive(archive_path),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": f"{data_dir}/{self._VAL_FILE}",
                    "files": dl_manager.iter_archive(archive_path),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": f"{data_dir}/{self._TEST_FILE}",
                    "files": dl_manager.iter_archive(archive_path),
                },
            ),
        ]

    def _generate_examples(self, filepath, files):
        """Generate Allocine examples."""
        for path, file in files:
            if path == filepath:
                for id_, row in enumerate(file):
                    data = json.loads(row.decode("utf-8"))
                    review = data["review"]
                    label = "neg" if data["polarity"] == 0 else "pos"
                    yield id_, {"review": review, "label": label}
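The deleted script above downloaded a tar.bz2 archive, iterated the JSON Lines files inside it, and mapped the integer `polarity` field to the neg/pos labels. Below is a rough standalone sketch of that conversion to Parquet; it illustrates what the automated Hub conversion replaces rather than the tool actually used for this commit, and it assumes `pandas` and `pyarrow` are installed.

import json
import tarfile
import urllib.request

import pandas as pd

URL = (
    "https://github.com/TheophileBlard/french-sentiment-analysis-with-bert"
    "/raw/master/allocine_dataset/data.tar.bz2"
)
# Member paths inside the archive, as referenced by the deleted script.
SPLITS = {"train": "data/train.jsonl", "validation": "data/val.jsonl", "test": "data/test.jsonl"}

archive_path, _ = urllib.request.urlretrieve(URL)
with tarfile.open(archive_path, "r:bz2") as tar:
    for split, member in SPLITS.items():
        rows = [json.loads(line) for line in tar.extractfile(member)]
        df = pd.DataFrame(
            {
                "review": [r["review"] for r in rows],
                # polarity == 0 -> neg (0), otherwise pos (1), matching the ClassLabel order.
                "label": [0 if r["polarity"] == 0 else 1 for r in rows],
            }
        )
        df.to_parquet(f"{split}-00000-of-00001.parquet", index=False)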
allocine/test-00000-of-00001.parquet ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:0e313c31e2db65eae40072f525eb0bc3567817baad70a85f798836b1e5be5a88
size 7580549
allocine/train-00000-of-00001.parquet ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:5cdabde7b62d2d56a2bc24e790cb9697057645103b607c281d82092bc5d53307
size 59970147
allocine/validation-00000-of-00001.parquet ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:c2a25489c7f923475a11756071acc20df1a967b58042ca853698800164e731aa
size 7575258
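Individual shards can also be inspected directly. A small sketch using `huggingface_hub` and `pandas` (both assumed installed, along with `pyarrow`), pointed at the validation shard added above:

import pandas as pd
from huggingface_hub import hf_hub_download

# Fetch one Parquet shard from the dataset repository and read it locally.
path = hf_hub_download(
    repo_id="allocine",
    repo_type="dataset",
    filename="allocine/validation-00000-of-00001.parquet",
)
df = pd.read_parquet(path)
print(len(df), list(df.columns))  # 20000 ['review', 'label']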
dataset_infos.json DELETED
{"allocine": {"description": " Allocine Dataset: A Large-Scale French Movie Reviews Dataset.\n This is a dataset for binary sentiment classification, made of user reviews scraped from Allocine.fr.\n It contains 100k positive and 100k negative reviews divided into 3 balanced splits: train (160k reviews), val (20k) and test (20k).\n", "citation": "@misc{blard2019allocine,\n author = {Blard, Theophile},\n title = {french-sentiment-analysis-with-bert},\n year = {2020},\n publisher = {GitHub},\n journal = {GitHub repository},\n howpublished={\\url{https://github.com/TheophileBlard/french-sentiment-analysis-with-bert}},\n}\n", "homepage": "https://github.com/TheophileBlard/french-sentiment-analysis-with-bert", "license": "", "features": {"review": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["neg", "pos"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "task_templates": [{"task": "text-classification", "text_column": "review", "label_column": "label", "labels": ["neg", "pos"]}], "builder_name": "allocine_dataset", "config_name": "allocine", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 91330696, "num_examples": 160000, "dataset_name": "allocine_dataset"}, "validation": {"name": "validation", "num_bytes": 11546250, "num_examples": 20000, "dataset_name": "allocine_dataset"}, "test": {"name": "test", "num_bytes": 11547697, "num_examples": 20000, "dataset_name": "allocine_dataset"}}, "download_checksums": {"https://github.com/TheophileBlard/french-sentiment-analysis-with-bert/raw/master/allocine_dataset/data.tar.bz2": {"num_bytes": 66625305, "checksum": "8c49a8cac783da201697ed1a91b36d2f6618222b3b7ea1c2996f2a3fbc37dfb4"}}, "download_size": 66625305, "post_processing_size": null, "dataset_size": 114424643, "size_in_bytes": 181049948}}