Hugo Abonizio committed on
Commit
fda4f25
1 Parent(s): 5664973

Initial commit

Browse files
Files changed (4) hide show
  1. .gitattributes +1 -0
  2. sst2_pt.py +65 -0
  3. train.csv +3 -0
  4. validation.csv +3 -0
.gitattributes CHANGED
@@ -52,3 +52,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
52
  *.jpg filter=lfs diff=lfs merge=lfs -text
53
  *.jpeg filter=lfs diff=lfs merge=lfs -text
54
  *.webp filter=lfs diff=lfs merge=lfs -text
 
 
52
  *.jpg filter=lfs diff=lfs merge=lfs -text
53
  *.jpeg filter=lfs diff=lfs merge=lfs -text
54
  *.webp filter=lfs diff=lfs merge=lfs -text
55
+ *.csv filter=lfs diff=lfs merge=lfs -text
sst2_pt.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """IMDB movie reviews dataset translated to Portuguese."""
2
+
3
+ import csv
4
+
5
+ import datasets
6
+ from datasets.tasks import TextClassification
7
+
8
+ _DESCRIPTION = """\
9
+ The Stanford Sentiment Treebank consists of sentences from movie reviews and
10
+ human annotations of their sentiment. The task is to predict the sentiment of a
11
+ given sentence. We use the two-way (positive/negative) class split, and use only
12
+ sentence-level labels.
13
+ """
14
+
15
+ _CITATION = """\
16
+ @inproceedings{socher2013recursive,
17
+ title={Recursive deep models for semantic compositionality over a sentiment treebank},
18
+ author={Socher, Richard and Perelygin, Alex and Wu, Jean and Chuang, Jason and Manning, Christopher D and Ng, Andrew and Potts, Christopher},
19
+ booktitle={Proceedings of the 2013 conference on empirical methods in natural language processing},
20
+ pages={1631--1642},
21
+ year={2013}
22
+ }
23
+ """
24
+
25
+ _HOMEPAGE = "https://nlp.stanford.edu/sentiment/"
26
+
27
+ _DOWNLOAD_URL = "https://huggingface.co/datasets/maritaca-ai/sst2_pt/resolve/main"
28
+
29
+ class Imdb(datasets.GeneratorBasedBuilder):
30
+ """The Stanford Sentiment Treebank to Portuguese."""
31
+
32
+ def _info(self):
33
+ return datasets.DatasetInfo(
34
+ description=_DESCRIPTION,
35
+ features=datasets.Features(
36
+ {"text": datasets.Value("string"), "label": datasets.features.ClassLabel(names=["negativo", "positivo"])}
37
+ ),
38
+ supervised_keys=None,
39
+ homepage=_HOMEPAGE,
40
+ citation=_CITATION,
41
+ task_templates=[TextClassification(text_column="text", label_column="label")],
42
+ )
43
+
44
+ def _split_generators(self, dl_manager):
45
+ train_path = dl_manager.download_and_extract(f"{_DOWNLOAD_URL}/train.csv")
46
+ test_path = dl_manager.download_and_extract(f"{_DOWNLOAD_URL}/test.csv")
47
+ return [
48
+ datasets.SplitGenerator(
49
+ name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path, "split": "train"}
50
+ ),
51
+ datasets.SplitGenerator(
52
+ name=datasets.Split.TEST, gen_kwargs={"filepath": test_path, "split": "test"}
53
+ ),
54
+ ]
55
+
56
+ def _generate_examples(self, filepath, split):
57
+ with open(filepath, encoding="utf-8") as csv_file:
58
+ csv_reader = csv.reader(
59
+ csv_file, quotechar='"', delimiter=",", quoting=csv.QUOTE_ALL, skipinitialspace=True
60
+ )
61
+ for row in csv_reader:
62
+ if id_ == 0:
63
+ continue
64
+ idx, text, label = row
65
+ yield idx, {"text": text, "label": label}
train.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:71cced99a8182ce47e314b20eb91b843ca24b95fa0b96aa723a59f4778c7cbaf
3
+ size 4406612
validation.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:30b859576d8f5e60283669869344c7ee1db64ccec80b28dd76abbd246c727b91
3
+ size 103379