frankier committed
Commit
627a1c0
1 Parent(s): 6993016

Add dataset script

Files changed (2)
  1. README.md +33 -1
  2. multiscale_rt_critics.py +360 -0
README.md CHANGED
@@ -1,3 +1,35 @@
  ---
- license: unknown
+ language:
+ - en
+ language_creators:
+ - found
+ license: cc0-1.0
+ multilinguality:
+ - monolingual
+ size_categories:
+ - 100K<n<1M
+ tags:
+ - reviews
+ - ratings
+ - ordinal
+ - text
+ task_categories:
+ - text-classification
+ task_ids:
+ - text-scoring
+ - sentiment-scoring
  ---
+
+ Cleaned-up version of the Rotten Tomatoes critic reviews dataset. The original
+ is obtained from Kaggle:
+ https://www.kaggle.com/datasets/stefanoleone992/rotten-tomatoes-movies-and-critic-reviews-dataset
+
+ Data has been scraped from the publicly available website
+ https://www.rottentomatoes.com as of 2020-10-31.
+
+ The clean-up process drops anything without both a review and a rating, and
+ standardises the ratings onto several integer, ordinal scales.
+
+ Requires the `kaggle` library to be installed, and Kaggle API keys passed
+ through environment variables or in ~/.kaggle/kaggle.json. See [the Kaggle
+ docs](https://www.kaggle.com/docs/api#authentication).
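+
+ Once credentials are configured, the dataset can be loaded with the `datasets`
+ library. A minimal sketch (the repository id `frankier/multiscale_rt_critics`
+ is assumed here):
+
+ ```python
+ import os
+
+ # Kaggle credentials can be supplied via environment variables instead of
+ # ~/.kaggle/kaggle.json (the values below are placeholders).
+ os.environ.setdefault("KAGGLE_USERNAME", "<your-kaggle-username>")
+ os.environ.setdefault("KAGGLE_KEY", "<your-kaggle-api-key>")
+
+ from datasets import load_dataset
+
+ ds = load_dataset("frankier/multiscale_rt_critics")  # assumed repository id
+ print(ds)
+ ```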
multiscale_rt_critics.py ADDED
@@ -0,0 +1,360 @@
+ # Copyright 2022 Frankie Robertson and The HuggingFace Datasets Authors
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ Cleaned-up version of the Rotten Tomatoes critic reviews dataset. The original
+ is obtained from Kaggle:
+ https://www.kaggle.com/datasets/stefanoleone992/rotten-tomatoes-movies-and-critic-reviews-dataset
+
+ Data has been scraped from the publicly available website
+ https://www.rottentomatoes.com as of 2020-10-31.
+
+ The clean-up process drops anything without both a review and a rating, and
+ standardises the ratings onto several integer, ordinal scales.
+ """
+
+ import datasets
+ from dataclasses import dataclass
+ from fractions import Fraction
+ import shutil
+ import os
+ import pandas
+ import numpy
+ from os.path import join as pjoin
+ from datasets import Dataset
+ # Needed for the per-publisher train/test split in get_datasets()
+ from sklearn.model_selection import train_test_split
+
+
+ KAGGLE_REPO = "stefanoleone992/rotten-tomatoes-movies-and-critic-reviews-dataset"
+ SHORT_LETTER_SCALE = ["F", "E", "D", "C", "B", "A"]
+ LONG_LETTER_SCALE = ["F-", "F", "F+", "E-", "E", "E+", "D-", "D", "D+", "C-", "C", "C+", "B-", "B", "B+", "A-", "A", "A+"]
+
+
+ _kaggle_api = None
+
+
+ def get_kaggle_api():
+     """Create and authenticate the Kaggle API client lazily, caching it for reuse."""
+     global _kaggle_api
+     if _kaggle_api is not None:
+         return _kaggle_api
+     from kaggle.api.kaggle_api_extended import KaggleApi
+     _kaggle_api = KaggleApi()
+     _kaggle_api.authenticate()
+     return _kaggle_api
+
+
+ @dataclass
+ class KaggleSrc:
+     """A single file within a Kaggle dataset, loaded as a pandas DataFrame."""
+
+     name: str
+     file: str
+
+     def load(self):
+         if hasattr(self, "_cached"):
+             return self._cached
+         kaggle_api = get_kaggle_api()
+         dir_name = self.name.replace("/", "__")
+         if os.path.exists(dir_name):
+             shutil.rmtree(dir_name)
+         os.mkdir(dir_name)
+         try:
+             file_path = pjoin(dir_name, self.file.rsplit("/", 1)[-1])
+             kaggle_api.dataset_download_file(self.name, self.file, path=dir_name)
+             # Cache the parsed DataFrame so repeated load() calls skip the download
+             self._cached = pandas.read_csv(file_path + ".zip")
+             return self._cached
+         finally:
+             shutil.rmtree(dir_name)
+
+
+ def is_floatable(f):
+     try:
+         float(f)
+         return True
+     except ValueError:
+         return False
+
+
+ def is_frac_str(s):
+     bits = s.split("/")
+     return len(bits) == 2 and is_floatable(bits[0]) and is_floatable(bits[1])
+
+
+ def is_barenum_str(s):
+     return s.count("/") == 0 and is_floatable(s)
+
+
+ def is_dec_denom(s):
+     bits = s.split("/")
+     return len(bits) == 2 and "." in bits[1]
+
+
+ def drop_because(df, pred, reason):
+     print(f"Dropping {pred.sum()} ({pred.mean() * 100:.2f}%) of reviews with {reason}")
+     return df[~pred]
+
+
+ def drop_unrated(df):
+     df = drop_because(df, df["review_score"].isna(), "no rating")
+     df = drop_because(df, df["review_content"].isna(), "missing review")
+     return df
+
+
+ def drop_odd_grade_types(df):
+     is_any_letter = df["review_score"].isin(LONG_LETTER_SCALE)
+     is_frac = df["review_score"].map(is_frac_str)
+     is_barenum = df["review_score"].map(is_barenum_str)
+     assert len(df[~is_frac & ~is_any_letter & ~is_barenum]) == 0
+     df = drop_because(df, is_barenum, "bare number rating (i.e. no denominator)")
+     is_frac_denom = df["review_score"].map(is_dec_denom)
+     return drop_because(df, is_frac_denom, "fractional denominator")
+
+
+ # Sentinel used for scores with no fractional form (i.e. letter grades)
+ nan = float("nan")
+
+
+ def split_scores(df):
+     """Split fractional review scores like "3/4" into orig_num and orig_denom columns."""
+     nums = numpy.empty(len(df))
+     denoms = numpy.empty(len(df))
+     for idx, score in enumerate(df["review_score"]):
+         if "/" in score:
+             num, denom = score.split("/", 1)
+             nums[idx] = float(num)
+             denoms[idx] = float(denom)
+         else:
+             nums[idx] = nan
+             denoms[idx] = nan
+     df.insert(len(df.columns), "orig_num", nums)
+     df.insert(len(df.columns), "orig_denom", denoms)
+
+
+ def np_round(arr):
+     # Round-half-up for the non-negative values used here, then cast to int
+     return (arr + 0.5).astype(numpy.int32)
+
+
+ def common_denom_grades(group_df):
+     """Put every score in a (publisher, grade type) group onto one integer scale.
+
+     Letter grades become positions on the short or long letter scale; fractional
+     grades are multiplied by the least common multiple of their fractional parts'
+     denominators so that numerator and denominator are integers.
+     """
+     if group_df.iloc[0]["is_any_letter"]:
+         group_df["multiplier"] = 1
+         group_df["non_neg_error"] = False
+         if group_df.iloc[0]["letter_implies_short"]:
+             scale = SHORT_LETTER_SCALE
+         else:
+             scale = LONG_LETTER_SCALE
+         # Each row gets its own letter's (1-based) position on the scale
+         group_df["num"] = group_df["review_score"].map(lambda score: scale.index(score) + 1)
+         group_df["denom"] = len(scale)
+         return group_df
+     denoms = numpy.empty(len(group_df), dtype=numpy.int32)
+     for idx, num in enumerate(group_df["orig_num"]):
+         frac = Fraction.from_float(num)
+         denoms[idx] = frac.limit_denominator(100).denominator
+     common_denom = numpy.lcm.reduce(denoms)
+     group_df["multiplier"] = common_denom
+     num = common_denom * group_df["orig_num"].to_numpy()
+     denom = common_denom * group_df["orig_denom"].to_numpy()
+     group_df["num"] = np_round(num)
+     group_df["denom"] = np_round(denom)
+     # Flag any non-negligible rounding error introduced by the rescaling
+     group_df["non_neg_error"] = (abs(group_df["num"] - num) >= 0.05) | (abs(group_df["denom"] - denom) >= 0.05)
+     return group_df
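+ # For example, the rescaling above turns scores of 3.5/5, 4/5 and 2.5/5 within
+ # one group into 7/10, 8/10 and 5/10: the fractional parts have denominators
+ # 2, 1 and 2, so the common multiplier is lcm(2, 1, 2) = 2.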
+
+
+ def normalize_reviews(review_df):
+     # Drop unrated
+     review_df = drop_unrated(review_df)
+
+     # Strip whitespace from grades
+     review_df["review_score"] = review_df["review_score"].str.replace(r"\s+", "", regex=True)
+
+     # Copy to get version to do calculations with
+     working_review_df = review_df.copy()
+
+     # Drop all rows where the review score occurs 2 or fewer times in the whole data set
+     working_review_df = working_review_df.groupby("review_score").filter(lambda x: len(x) > 2)
+
+     # Check/ensure that all grades are short letter, long letter, fraction or barenum
+     working_review_df = drop_odd_grade_types(working_review_df)
+
+     # Split fraction scores into numerator and denominator
+     split_scores(working_review_df)
+
+     # Divide letter scales into short and long
+     # If a publisher has a mix of short and long, they're using long, otherwise short
+     is_any_letter = working_review_df["review_score"].isin(LONG_LETTER_SCALE)
+     is_short_letter = working_review_df["review_score"].isin(SHORT_LETTER_SCALE)
+     # is_long_letter = is_any_letter & ~is_short_letter
+     publisher_letter_implies_short = pandas.DataFrame.from_dict(dict(
+         publisher_name=working_review_df["publisher_name"],
+         letter_implies_short=is_short_letter | ~is_any_letter
+     )).groupby("publisher_name").all()
+     working_review_df = working_review_df.join(publisher_letter_implies_short, on="publisher_name")
+     working_review_df["is_any_letter"] = is_any_letter
+
+     # Now divide everything into grade types: either short letter, long letter
+     # or the denominator of the fraction
+     def get_orig_grade_type(row):
+         if row["is_any_letter"]:
+             if row["letter_implies_short"]:
+                 return "short_letter"
+             else:
+                 return "long_letter"
+         else:
+             return str(int(row["orig_denom"]))
+
+     working_review_df["orig_grade_type"] = working_review_df.apply(get_orig_grade_type, axis="columns")
+
+     # Now we can filter out rare grade types
+     working_review_df = working_review_df.join(working_review_df["orig_grade_type"].value_counts().rename("grade_type_count"), on="orig_grade_type")
+     working_review_df = drop_because(working_review_df, working_review_df["grade_type_count"] < 50, "grade type with less than 50 reviews")
+
+     # Print out some summary stats
+     print("grade type counts", working_review_df["orig_grade_type"].value_counts())
+     print("unique grade types", working_review_df["orig_grade_type"].nunique())
+     print("unique publishers", working_review_df["publisher_name"].nunique())
+     print("unique grade type/publisher combinations", working_review_df.groupby(["orig_grade_type", "publisher_name"]).ngroups)
+
+     # Now we can find common denominators on a (publisher, grade type) combination basis
+     working_review_df = working_review_df.groupby(["publisher_name", "orig_grade_type"], group_keys=False).apply(common_denom_grades)
+     working_review_df = drop_because(working_review_df, working_review_df["multiplier"] > 500, "multiplier > 500")
+     assert working_review_df["non_neg_error"].sum() == 0
+
+     # More summary stats
+     print("non-neg error count", working_review_df["non_neg_error"].sum())
+     print("multipliers", working_review_df["multiplier"].value_counts())
+
+     # TODO: Add back in rare review_scores dropped at the beginning when they
+     # are compatible with some common denominator + grade type from the same
+     # publisher
+
+     print("number of reviews left", len(working_review_df))
+     print("reviews per publisher/grade type", working_review_df.groupby(["publisher_name", "orig_grade_type"]).size())
+
+     # Delete working columns
+     del working_review_df["letter_implies_short"]
+     del working_review_df["is_any_letter"]
+     del working_review_df["grade_type_count"]
+     del working_review_df["non_neg_error"]
+
+     return working_review_df
+
+
+ def save_normalised(output_path):
+     review_df = KaggleSrc(KAGGLE_REPO, "rotten_tomatoes_critic_reviews.csv").load()
+     review_df = normalize_reviews(review_df)
+     review_df.to_csv(output_path)
+
+
+ def get_datasets():
+     movies_df = KaggleSrc(KAGGLE_REPO, "rotten_tomatoes_movies.csv").load()
+     review_df = KaggleSrc(KAGGLE_REPO, "rotten_tomatoes_critic_reviews.csv").load()
+     review_df = normalize_reviews(review_df)
+     joined_df = review_df.join(movies_df.set_index("rotten_tomatoes_link"), "rotten_tomatoes_link")
+     train_dfs = []
+     test_dfs = []
+     # Split within each (publisher, grade type) group; groups that are too
+     # small go entirely into the training split
+     for _, group_df in review_df.groupby(["publisher_name", "orig_grade_type"]):
+         if len(group_df) >= 50:
+             train_df, test_df = train_test_split(group_df, test_size=0.2)
+             train_dfs.append(train_df)
+             test_dfs.append(test_df)
+         else:
+             train_dfs.append(group_df)
+     return (
+         Dataset.from_pandas(joined_df),
+         Dataset.from_pandas(pandas.concat(train_dfs)),
+         Dataset.from_pandas(pandas.concat(test_dfs)),
+     )
+
+
+ _DESCRIPTION = __doc__
+
+ _HOMEPAGE = ""
+
+ _LICENSE = "CC0"
+
+
+ class MultiscaleRTCritics(datasets.GeneratorBasedBuilder):
+     """Cleaned-up, multi-scale version of the Rotten Tomatoes critic reviews dataset."""
+
+     VERSION = datasets.Version("1.0.0")
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 # "rotten_tomatoes_link": datasets.Value("string"),
+                 # "rating": datasets.Value("uint8"),
+                 # "out_of": datasets.Value("uint8"),
+                 # "dataset": datasets.Value("string"),
+                 # The original listed only the column names; string/bool types
+                 # are assumed here.
+                 "movie_title": datasets.Value("string"),
+                 "movie_info": datasets.Value("string"),
+                 "critics_consensus": datasets.Value("string"),
+                 "content_rating": datasets.Value("string"),
+                 "genres": datasets.Value("string"),
+                 "directors": datasets.Value("string"),
+                 "authors": datasets.Value("string"),
+                 "actors": datasets.Value("string"),
+                 "original_release_date": datasets.Value("string"),
+                 "critic_name": datasets.Value("string"),
+                 "top_critic": datasets.Value("bool"),
+                 "publisher_name": datasets.Value("string"),
+                 "review_type": datasets.Value("string"),
+                 "review_score": datasets.Value("string"),
+                 "review_date": datasets.Value("string"),
+                 "review_content": datasets.Value("string"),
+             }
+         )
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=features,
+             # There is no single (input, target) tuple common to all the grade
+             # scales, so supervised_keys is left unset.
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=_LICENSE,
+             # Citation for the dataset
+             citation="",
+         )
+
+     def _split_generators(self, dl_manager):
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "split": "test",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.ALL,
+                 gen_kwargs={
+                     "split": "all",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, split):
+         if not hasattr(self, "_datasets"):
+             self._datasets = get_datasets()
+         all_dataset, train_dataset, test_dataset = self._datasets
+         if split == "train":
+             dataset = train_dataset
+         elif split == "test":
+             dataset = test_dataset
+         else:
+             dataset = all_dataset
+         # GeneratorBasedBuilder expects (key, example) pairs rather than bare examples
+         for idx, example in enumerate(dataset):
+             yield idx, example
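
To sanity-check the builder locally, something like the following should work once Kaggle credentials are configured as described in the README (the relative script path is an assumption about the working directory):

```python
from datasets import load_dataset

# Point load_dataset at the local script; the splits are built from the Kaggle
# download, so this needs network access and Kaggle credentials.
ds = load_dataset("./multiscale_rt_critics.py")
print(ds)
print(ds["train"][0])
```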