Convert dataset to Parquet #6
by greghinch - opened
README.md CHANGED
@@ -23,8 +23,16 @@ dataset_info:
   - name: test
     num_bytes: 72971
     num_examples: 498
-  download_size: 81363704
+  download_size: 123652057
   dataset_size: 224615661
+configs:
+- config_name: sentiment140
+  data_files:
+  - split: train
+    path: sentiment140/train-*
+  - split: test
+    path: sentiment140/test-*
+  default: true
 train-eval-index:
 - config: sentiment140
   task: text-classification
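With the `configs:` block in place, the `datasets` library resolves each split straight from the Parquet shards instead of executing a loading script, and `default: true` makes `sentiment140` the config used when none is named. A minimal sketch of loading the converted data, assuming the repo id is `sentiment140` (adjust if the dataset lives under an organization):

```python
from datasets import load_dataset

# No loading script runs here: the `configs:` mapping in the card
# points each split at its Parquet shard(s) under sentiment140/.
ds = load_dataset("sentiment140")

print(ds)                   # DatasetDict with train and test splits
print(ds["test"].num_rows)  # 498, matching num_examples in the card
```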
sentiment140.py DELETED
@@ -1,115 +0,0 @@
-import csv
-import os
-
-import datasets
-
-
-_CITATION = """\
-@article{go2009twitter,
-  title={Twitter sentiment classification using distant supervision},
-  author={Go, Alec and Bhayani, Richa and Huang, Lei},
-  journal={CS224N project report, Stanford},
-  volume={1},
-  number={12},
-  pages={2009},
-  year={2009}
-}
-"""
-
-_DESCRIPTION = """\
-Sentiment140 consists of Twitter messages with emoticons, which are used as noisy labels for
-sentiment classification. For more detailed information please refer to the paper.
-"""
-_URL = "http://help.sentiment140.com/home"
-_DATA_URL = "https://cs.stanford.edu/people/alecmgo/trainingandtestdata.zip"
-
-_TEST_FILE_NAME = "testdata.manual.2009.06.14.csv"
-_TRAIN_FILE_NAME = "training.1600000.processed.noemoticon.csv"
-
-
-class Sentiment140Config(datasets.BuilderConfig):
-
-    """BuilderConfig for Break"""
-
-    def __init__(self, data_url, **kwargs):
-        """BuilderConfig for BlogAuthorship
-
-        Args:
-          data_url: `string`, url to the dataset (word or raw level)
-          **kwargs: keyword arguments forwarded to super.
-        """
-        super(Sentiment140Config, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
-        self.data_url = data_url
-
-
-class Sentiment140(datasets.GeneratorBasedBuilder):
-
-    VERSION = datasets.Version("0.1.0")
-    BUILDER_CONFIGS = [
-        Sentiment140Config(
-            name="sentiment140",
-            data_url=_DATA_URL,
-            description="sentiment classification dataset. Twitter messages are classified as either 'positive'=0, 'neutral'=1 or 'negative'=2.",
-        )
-    ]
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # datasets.features.FeatureConnectors
-            features=datasets.Features(
-                {
-                    "text": datasets.Value("string"),
-                    "date": datasets.Value("string"),
-                    "user": datasets.Value("string"),
-                    "sentiment": datasets.Value("int32"),
-                    "query": datasets.Value("string"),
-                }
-            ),
-            # If there's a common (input, target) tuple from the features,
-            # specify them here. They'll be used if as_supervised=True in
-            # builder.as_dataset.
-            supervised_keys=None,
-            # Homepage of the dataset for documentation
-            homepage=_URL,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        data_dir = dl_manager.download_and_extract(_DATA_URL)
-
-        test_csv_file = os.path.join(data_dir, _TEST_FILE_NAME)
-        train_csv_file = os.path.join(data_dir, _TRAIN_FILE_NAME)
-
-        if self.config.name == "sentiment140":
-            return [
-                datasets.SplitGenerator(
-                    name=datasets.Split.TRAIN,
-                    # These kwargs will be passed to _generate_examples
-                    gen_kwargs={"file_path": train_csv_file},
-                ),
-                datasets.SplitGenerator(
-                    name=datasets.Split.TEST,
-                    # These kwargs will be passed to _generate_examples
-                    gen_kwargs={"file_path": test_csv_file},
-                ),
-            ]
-        else:
-            raise NotImplementedError(f"{self.config.name} does not exist")
-
-    def _generate_examples(self, file_path):
-        """Yields examples."""
-
-        with open(file_path, encoding="ISO-8859-1") as f:
-            data = csv.reader(f, delimiter=",", quotechar='"')
-            for row_id, row in enumerate(data):
-                sentiment, tweet_id, date, query, user_name, message = row
-                yield f"{row_id}_{tweet_id}", {
-                    "text": message,
-                    "date": date,
-                    "user": user_name,
-                    "sentiment": int(sentiment),
-                    "query": query,
-                }
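The script above is obsolete once the Parquet shards exist, but its parsing logic is worth keeping on record for anyone who still works from the raw Stanford CSVs. A self-contained sketch equivalent to the deleted `_generate_examples`, with the column order and Latin-1 encoding taken verbatim from it:

```python
import csv

def read_sentiment140(file_path):
    """Yield example dicts matching the deleted loader's schema."""
    with open(file_path, encoding="ISO-8859-1") as f:
        reader = csv.reader(f, delimiter=",", quotechar='"')
        for row in reader:
            # Raw CSV column order: polarity, tweet id, date, query, user, text
            sentiment, tweet_id, date, query, user_name, message = row
            yield {
                "text": message,
                "date": date,
                "user": user_name,
                "sentiment": int(sentiment),
                "query": query,
            }

# e.g. rows = list(read_sentiment140("testdata.manual.2009.06.14.csv"))
```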
sentiment140/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1e0b33d3a0459e33b71adb3dff4daa2c0b4d9f873eaff415d63d73139a352569
+size 46124
sentiment140/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2af69cd9e640bc5cf04d4fc5c504832df388fa0b00fff2f790d74593e0f2eea6
+size 123605933
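Both entries above are git-lfs pointer stubs; the actual shards (46 kB for test, roughly 124 MB for train) live in LFS storage. The shards are plain Parquet, so they can also be read directly, for example with pandas over the `hf://` fsspec protocol registered by `huggingface_hub`. A sketch, again assuming the repo id `sentiment140`:

```python
import pandas as pd

# Requires huggingface_hub and pyarrow; the path mirrors the repo layout,
# with the shard sitting inside the sentiment140/ folder added by this PR.
df = pd.read_parquet(
    "hf://datasets/sentiment140/sentiment140/test-00000-of-00001.parquet"
)

print(len(df))              # 498 rows, per the card metadata
print(df.columns.tolist())  # ['text', 'date', 'user', 'sentiment', 'query']
```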