Yijun Xiao committed on
Commit
6f241aa
1 Parent(s): 892731e

Added initial script

Files changed (1)
  1. amazon.py +208 -0
amazon.py ADDED
@@ -0,0 +1,208 @@
+import csv
+import os
+
+import datasets
+
+logger = datasets.logging.get_logger(__name__)
+
+_DESCRIPTION = """\
+Amazon product reviews stored as monthly CSV files per category
+(clothing, electronics, office). Each config defines a temporally
+separated train/test split (24 training months followed by 6 test
+months); labels are the 1-5 star ratings.
+"""
+
+# Google Drive archives, one per product category.
+_URLS = {
+    "clothing": "https://drive.google.com/u/0/uc?id=1HP3EPX9Q8JffUUZz2czXD7qudzvitscq&export=download",
+    "electronics": "https://drive.google.com/u/0/uc?id=1W50FNd0707qK1CCktEF30nlDqsImLg3X&export=download",
+    "office": "https://drive.google.com/u/0/uc?id=1lsttnBIjFD4nQw9idZYQNUWKSzj5VibD&export=download",
+}
+
+# Column order of the raw monthly CSV files (no header row); the
+# "summary" column is parsed but not emitted as a feature.
+_FIELDS = ["date", "rating", "reviewText", "summary"]
+_RATINGS = ["1", "2", "3", "4", "5"]
+
+
+class AmazonConfig(datasets.BuilderConfig):
+    """BuilderConfig carrying the monthly file lists for one train/test split."""
+
+    def __init__(
+        self,
+        training_files,
+        testing_files,
+        url,
+        label_classes=_RATINGS,
+        **kwargs,
+    ):
+        super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)
+        self.label_classes = label_classes
+        self.training_files = training_files
+        self.testing_files = testing_files
+        self.url = url
+
+
+class Amazon(datasets.GeneratorBasedBuilder):
+    # Each config trains on 24 consecutive months and tests on the
+    # following 6 months, placed around a major distribution shift.
+    BUILDER_CONFIGS = [
+        AmazonConfig(
+            name="clothing_majorshift01",
+            description="",
+            url=_URLS["clothing"],
+            training_files=[
+                "201011.csv",
+                "201012.csv",
+                "201101.csv",
+                "201102.csv",
+                "201103.csv",
+                "201104.csv",
+                "201105.csv",
+                "201106.csv",
+                "201107.csv",
+                "201108.csv",
+                "201109.csv",
+                "201110.csv",
+                "201111.csv",
+                "201112.csv",
+                "201201.csv",
+                "201202.csv",
+                "201203.csv",
+                "201204.csv",
+                "201205.csv",
+                "201206.csv",
+                "201207.csv",
+                "201208.csv",
+                "201209.csv",
+                "201210.csv",
+            ],
+            testing_files=[
+                "201211.csv",
+                "201212.csv",
+                "201301.csv",
+                "201302.csv",
+                "201303.csv",
+                "201304.csv",
+            ],
+        ),
+        AmazonConfig(
+            name="clothing_majorshift02",
+            description="",
+            url=_URLS["clothing"],
+            training_files=[
+                "200808.csv",
+                "200809.csv",
+                "200810.csv",
+                "200811.csv",
+                "200812.csv",
+                "200901.csv",
+                "200902.csv",
+                "200903.csv",
+                "200904.csv",
+                "200905.csv",
+                "200906.csv",
+                "200907.csv",
+                "200908.csv",
+                "200909.csv",
+                "200910.csv",
+                "200911.csv",
+                "200912.csv",
+                "201001.csv",
+                "201002.csv",
+                "201003.csv",
+                "201004.csv",
+                "201005.csv",
+                "201006.csv",
+                "201007.csv",
+            ],
+            testing_files=[
+                "201008.csv",
+                "201009.csv",
+                "201010.csv",
+                "201011.csv",
+                "201012.csv",
+                "201101.csv",
+            ],
+        ),
+        AmazonConfig(
+            name="clothing_majorshift03",
+            description="",
+            url=_URLS["clothing"],
+            training_files=[
+                "201602.csv",
+                "201603.csv",
+                "201604.csv",
+                "201605.csv",
+                "201606.csv",
+                "201607.csv",
+                "201608.csv",
+                "201609.csv",
+                "201610.csv",
+                "201611.csv",
+                "201612.csv",
+                "201701.csv",
+                "201702.csv",
+                "201703.csv",
+                "201704.csv",
+                "201705.csv",
+                "201706.csv",
+                "201707.csv",
+                "201708.csv",
+                "201709.csv",
+                "201710.csv",
+                "201711.csv",
+                "201712.csv",
+                "201801.csv",
+            ],
+            testing_files=[
+                "201802.csv",
+                "201803.csv",
+                "201804.csv",
+                "201805.csv",
+                "201806.csv",
+                "201807.csv",
+            ],
+        ),
+    ]
+
+    def _info(self):
+        features = {
+            "date": datasets.Value("string"),
+            "id": datasets.Value("int32"),
+            "label": datasets.features.ClassLabel(names=self.config.label_classes),
+            "text": datasets.Value("string"),
+        }
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(features),
+        )
+
+    def _split_generators(self, dl_manager):
+        dirname = dl_manager.download_and_extract(self.config.url)
+        logger.info(str(dirname))
+        # Extract the category name ("clothing", ...) from the config name.
+        category = self.config.name.split("_")[0]
+        train_filepaths = tuple(
+            os.path.join(dirname, category, fname)
+            for fname in self.config.training_files
+        )
+        test_filepaths = tuple(
+            os.path.join(dirname, category, fname)
+            for fname in self.config.testing_files
+        )
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={"filepaths": train_filepaths},
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={"filepaths": test_filepaths},
+            ),
+        ]
+
+    def _generate_examples(self, filepaths):
+        logger.info(f"generating examples from {len(filepaths)} files")
+        # Examples share a single running index across all monthly files.
+        idx = 0
+        for filepath in filepaths:
+            with open(filepath, encoding="utf-8") as f:
+                reader = csv.DictReader(f, fieldnames=_FIELDS)
+                for row in reader:
+                    yield idx, {
+                        "date": row["date"],
+                        "id": idx,
+                        "label": row["rating"],
+                        "text": row["reviewText"],
+                    }
+                    idx += 1