parquet-converter committed on
Commit
093723c
•
1 Parent(s): 5686df4

Update parquet files

Browse files
imdb_pt.py DELETED
@@ -1,86 +0,0 @@
1
- """IMDB movie reviews dataset translated to Portuguese."""
2
-
3
- import csv
4
-
5
- import datasets
6
- from datasets.tasks import TextClassification
7
-
8
# Human-readable dataset summary; surfaced via DatasetInfo.description in _info().
_DESCRIPTION = """\
Large Movie Review Dataset.
This is a dataset for binary sentiment classification containing substantially \
more data than previous benchmark datasets. We provide a set of 25,000 highly \
polar movie reviews for training, and 25,000 for testing. There is additional \
unlabeled data for use as well.\
"""
15
-
16
# BibTeX citation for the original English IMDB dataset (Maas et al., ACL 2011);
# surfaced via DatasetInfo.citation in _info().
_CITATION = """\
@InProceedings{maas-EtAl:2011:ACL-HLT2011,
  author    = {Maas, Andrew L.  and  Daly, Raymond E.  and  Pham, Peter T.  and  Huang, Dan  and  Ng, Andrew Y.  and  Potts, Christopher},
  title     = {Learning Word Vectors for Sentiment Analysis},
  booktitle = {Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies},
  month     = {June},
  year      = {2011},
  address   = {Portland, Oregon, USA},
  publisher = {Association for Computational Linguistics},
  pages     = {142--150},
  url       = {http://www.aclweb.org/anthology/P11-1015}
}
"""
29
-
30
# Base URL of the Hugging Face Hub repo hosting the translated train.csv/test.csv.
_DOWNLOAD_URL = "https://huggingface.co/datasets/maritaca-ai/imdb_pt/resolve/main"
31
-
32
class IMDBReviewsConfig(datasets.BuilderConfig):
    """Builder configuration for the Portuguese IMDB reviews dataset."""

    def __init__(self, **kwargs):
        """Create a config with the dataset version pinned to 1.0.0.

        Args:
            **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``
                (e.g. ``name``, ``description``).
        """
        pinned_version = datasets.Version("1.0.0", "")
        super().__init__(version=pinned_version, **kwargs)
41
-
42
class Imdb(datasets.GeneratorBasedBuilder):
    """IMDB movie reviews dataset translated to Portuguese.

    Downloads two CSV files (train/test) from the Hub and yields
    ``{"text", "label"}`` examples for binary sentiment classification.
    """

    BUILDER_CONFIGS = [
        IMDBReviewsConfig(
            name="plain_text",
            description="Plain text",
        )
    ]

    def _info(self):
        """Return dataset metadata: features, supervised keys, citation, homepage."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    # Fixed typo: original spelled the positive class "posisivo";
                    # the correct Portuguese spelling is "positivo".
                    # NOTE(review): if the CSVs encode labels as the misspelled
                    # string rather than as integer ids, they must be updated to
                    # match — verify against the hosted files.
                    "label": datasets.features.ClassLabel(names=["negativo", "positivo"]),
                }
            ),
            supervised_keys=None,
            homepage="http://ai.stanford.edu/~amaas/data/sentiment/",
            citation=_CITATION,
            task_templates=[TextClassification(text_column="text", label_column="label")],
        )

    def _split_generators(self, dl_manager):
        """Download train/test CSVs and declare the two splits.

        Args:
            dl_manager: the ``datasets`` download manager.

        Returns:
            list[datasets.SplitGenerator]: TRAIN and TEST split generators whose
            ``gen_kwargs`` feed ``_generate_examples``.
        """
        train_path = dl_manager.download_and_extract(f"{_DOWNLOAD_URL}/train.csv")
        test_path = dl_manager.download_and_extract(f"{_DOWNLOAD_URL}/test.csv")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path, "split": "train"}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": test_path, "split": "test"}
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield ``(id, {"text", "label"})`` pairs from a two-column CSV.

        Args:
            filepath: local path of the downloaded CSV (header row + ``text,label`` rows).
            split: split name ("train"/"test"); unused here, kept for the
                ``gen_kwargs`` contract from ``_split_generators``.

        Raises:
            ValueError: if a data row does not have exactly two columns
            (same behavior as the original tuple unpacking).
        """
        with open(filepath, encoding="utf-8") as csv_file:
            csv_reader = csv.reader(
                csv_file, quotechar='"', delimiter=",", quoting=csv.QUOTE_ALL, skipinitialspace=True
            )
            # Skip the header row up front instead of testing the index on
            # every iteration; start=1 preserves the original 1-based ids.
            next(csv_reader, None)
            for id_, row in enumerate(csv_reader, start=1):
                text, label = row
                yield id_, {"text": text, "label": label}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
test.csv → plain_text/imdb_pt-test.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:67a549c2f0d5f9f06db7c59c43728f224a707ddb2e93698eb6bb99c23a81bb7f
3
- size 32799465
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8d4cd262e652734b1ab93e063051d1dca65cb42d0437c7ec7f06939c726d5f27
3
+ size 20279716
train.csv → plain_text/imdb_pt-train.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:2fcdc5f90812858d4bd9ce8d3107583c60fcdef6ab071f649058c016e2c2966d
3
- size 33342600
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aa94b8de669ec7c9e8924a99b41cdff454e075b77746d0ac6ecdcd00d65aefb9
3
+ size 20607461