frankier committed
Commit 5c54202
1 Parent(s): 627a1c0

Add train/test splits to dataset, also:


* Print info from normalizing process more neatly
* Keep only selected columns from dataset
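
(For context, a rough sketch of how the new splits could be consumed once this
commit lands; the split names come from the diff below, while the local script
path is illustrative.)

    # Hypothetical usage (assumes a datasets version that still supports
    # loading a dataset from a local script file):
    from datasets import load_dataset

    train = load_dataset("multiscale_rt_critics.py", split="train")
    test = load_dataset("multiscale_rt_critics.py", split="test")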

Files changed (1)
  1. multiscale_rt_critics.py +89 -62
multiscale_rt_critics.py CHANGED
@@ -36,6 +36,7 @@ import pandas
 import numpy
 from os.path import join as pjoin
 from datasets import Dataset
+from sklearn.model_selection import train_test_split
 
 
 KAGGLE_REPO = "stefanoleone992/rotten-tomatoes-movies-and-critic-reviews-dataset"
@@ -200,7 +201,7 @@ def normalize_reviews(review_df):
 
     # Now divide everything into grade types: either short letter, long letter
     # or the denominator of the fraction
-    def get_orig_grade_type(row):
+    def get_grade_type(row):
         if row["is_any_letter"]:
             if row["letter_implies_short"]:
                 return "short_letter"
@@ -209,33 +210,36 @@ def normalize_reviews(review_df):
         else:
             return str(int(row["orig_denom"]))
 
-    working_review_df["orig_grade_type"] = working_review_df.apply(get_orig_grade_type, axis="columns")
+    working_review_df["grade_type"] = working_review_df.apply(get_grade_type, axis="columns")
 
     # Now we can filter out rare grade types
-    working_review_df = working_review_df.join(working_review_df["orig_grade_type"].value_counts().rename("grade_type_count"), on="orig_grade_type")
+    working_review_df = working_review_df.join(working_review_df["grade_type"].value_counts().rename("grade_type_count"), on="grade_type")
     working_review_df = drop_because(working_review_df, working_review_df["grade_type_count"] < 50, "grade type with less than 50 reviews")
 
     # Print out some summary stats
-    print("grades type counts", working_review_df["orig_grade_type"].value_counts())
-    print("unique grades", working_review_df["orig_grade_type"].nunique())
+    print("grade type counts")
+    print(working_review_df["grade_type"].value_counts())
+    print("unique grades", working_review_df["grade_type"].nunique())
     print("unique publishers", working_review_df["publisher_name"].nunique())
-    print("unique grade/publisher combinations", working_review_df.groupby(["orig_grade_type", "publisher_name"]).ngroups)
+    print("unique grade/publisher combinations", working_review_df.groupby(["grade_type", "publisher_name"]).ngroups)
 
     # Now we can find common denominators on a (publisher, grade type) combination basis
-    working_review_df = working_review_df.groupby(["publisher_name", "orig_grade_type"], group_keys=False).apply(common_denom_grades)
+    working_review_df = working_review_df.groupby(["publisher_name", "grade_type"], group_keys=False).apply(common_denom_grades)
     working_review_df = drop_because(working_review_df, working_review_df["multiplier"] > 500, "multiplier > 500")
     assert working_review_df["non_neg_error"].sum() == 0
 
     # More summary stats
     print("non-neg error count", working_review_df["non_neg_error"].sum())
-    print("multipliers", working_review_df["multiplier"].value_counts())
+    print("multipliers")
+    print(working_review_df["multiplier"].value_counts())
 
     # TODO: Add back in rare review_scores dropped at the beginning when they
     # are compatible with some common denominator + grade type from the same
     # publisher
 
     print("number of reviews left", len(working_review_df))
-    print("reviews per publisher", working_review_df.groupby(["publisher_name", "orig_grade_type"].value_counts()))
+    print("reviews per publisher")
+    print(working_review_df.value_counts(["publisher_name", "grade_type"]))
 
     # Delete working columns
     del working_review_df["letter_implies_short"]
@@ -252,24 +256,57 @@ def save_normalised(output_path):
     review_df.to_csv(output_path)
 
 
+def split_dfs(df):
+    train_dfs = []
+    test_dfs = []
+    split_groups = []
+    small_groups = []
+    for (publisher_name, grade_type), group_df in df.groupby(["publisher_name", "grade_type"]):
+        if len(group_df) < 50:
+            small_groups.append((publisher_name, grade_type, group_df))
+        else:
+            split_groups.append((publisher_name, grade_type, group_df))
+    group_id = 0
+    group_cols = {"publisher_name": [], "grade_type": [], "group_id": [], "scale_points": []}
+
+    def add_group(group_df, publisher_name, grade_type):
+        nonlocal group_id
+        group_cols["publisher_name"].append(publisher_name)
+        group_cols["grade_type"].append(grade_type)
+        group_cols["group_id"].append(group_id)
+        group_cols["scale_points"].append(group_df.iloc[0]["denom"])
+        group_id += 1
+
+    for publisher_name, grade_type, group_df in split_groups:
+        train_df, test_df = train_test_split(group_df, test_size=0.2)
+        train_dfs.append(train_df)
+        test_dfs.append(test_df)
+        add_group(group_df, publisher_name, grade_type)
+    for publisher_name, grade_type, group_df in small_groups:
+        train_dfs.append(group_df)
+        add_group(group_df, publisher_name, grade_type)
+    train_df = pandas.concat(train_dfs)
+    test_df = pandas.concat(test_dfs)
+    group_id_df = pandas.DataFrame.from_dict({k: v for k, v in group_cols.items() if k != "scale_points"})
+    group_id_df.set_index(["publisher_name", "grade_type"], inplace=True)
+    train_df = train_df.join(group_id_df, on=["publisher_name", "grade_type"])
+    test_df = test_df.join(group_id_df, on=["publisher_name", "grade_type"])
+    df = df.join(group_id_df, on=["publisher_name", "grade_type"])
+    group_df = pandas.DataFrame.from_dict(group_cols)
+    return df, train_df, test_df, group_df
+
+
 def get_datasets():
     movies_df = KaggleSrc(KAGGLE_REPO, "rotten_tomatoes_movies.csv").load()
     review_df = KaggleSrc(KAGGLE_REPO, "rotten_tomatoes_critic_reviews.csv").load()
     review_df = normalize_reviews(review_df)
     joined_df = review_df.join(movies_df.set_index("rotten_tomatoes_link"), "rotten_tomatoes_link")
-    train_dfs = []
-    test_dfs = []
-    for group_df in review_df.groupby("publisher_name", "orig_grade_type"):
-        if len(group_df) >= 50:
-            train_df, test_df = train_test_split(group_df, test_size=0.2)
-            train_dfs.append(train_df)
-            test_dfs.append(test_df)
-        else:
-            train_dfs.append(group_df)
+    all_df, train_df, test_df, group_df = split_dfs(joined_df)
     return (
-        Dataset.from_pandas(joined_df),
-        Dataset.from_pandas(pandas.concat(train_dfs)),
-        Dataset.from_pandas(pandas.concat(test_dfs)),
+        all_df,
+        train_df,
+        test_df,
+        group_df,
     )
 
 
@@ -280,44 +317,37 @@ _HOMEPAGE = ""
 _LICENSE = "CC0"
 
 
+def iter_pandas_df(df, cols):
+    for tpl in df.itertuples():
+        yield tpl.Index, {k: v for k, v in tpl._asdict().items() if k in cols}
+
+
+NORMAL_FEATURES = datasets.Features({
+    "movie_title": datasets.Value("string"),
+    "publisher_name": datasets.Value("string"),
+    "critic_name": datasets.Value("string"),
+    "review_content": datasets.Value("string"),
+    "review_score": datasets.Value("string"),
+    "grade_type": datasets.Value("string"),
+    "orig_num": datasets.Value("float"),
+    "orig_denom": datasets.Value("float"),
+    "num": datasets.Value("uint8"),
+    "denom": datasets.Value("uint8"),
+    "multiplier": datasets.Value("uint8"),
+    "group_id": datasets.Value("uint32"),
+})
+
+
 class MultiscaleRTCritics(datasets.GeneratorBasedBuilder):
     _DESCRIPTION
 
     VERSION = datasets.Version("1.0.0")
 
     def _info(self):
-        features = datasets.Features(
-            {
-                #"rotten_tomatoes_link": datasets.Value("string"),
-                #"rating": datasets.Value("uint8"),
-                #"out_of": datasets.Value("uint8"),
-                #"dataset": datasets.Value("string")
-                "movie_title"
-                "movie_info"
-                "critics_consensus"
-                "content_rating"
-                "genres"
-                "directors"
-                "authors"
-                "actors"
-                "original_release_date"
-                "critic_name"
-                "top_critic"
-                "publisher_name"
-                "review_type"
-                "review_score"
-                "review_date"
-                "review_content"
-            }
-        )
         return datasets.DatasetInfo(
             # This is the description that will appear on the datasets page.
             description=_DESCRIPTION,
-            # This defines the different columns of the dataset and their types
-            features=features, # Here we define them above because they are different between the two configurations
-            # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
-            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
-            supervised_keys=("text", "rating"),
+            features=NORMAL_FEATURES,
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
@@ -340,21 +370,18 @@ class MultiscaleRTCritics(datasets.GeneratorBasedBuilder):
                     "split": "test"
                 },
             ),
-            datasets.SplitGenerator(
-                name=datasets.Split.ALL,
-                gen_kwargs={
-                    "split": "all",
-                },
-            ),
         ]
 
     def _generate_examples(self, split):
        if not hasattr(self, "_datasets"):
            self._datasets = get_datasets()
-        all_dataset, train_dataset, test_dataset = self._datasets
-        if split == "train":
-            yield from train_dataset
+        all_dataset, train_dataset, test_dataset, group_df = self._datasets
+        cols = set(NORMAL_FEATURES.keys())
+        if split == "all":
+            yield from iter_pandas_df(all_dataset, cols)
+        elif split == "train":
+            yield from iter_pandas_df(train_dataset, cols)
        elif split == "test":
-            yield from test_dataset
-        else:
-            yield from all_dataset
+            yield from iter_pandas_df(test_dataset, cols)
+        #else:
+            #yield from iter_pandas_df(group_df)
 
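The heart of the change is the per-(publisher, grade type) split policy in
split_dfs(): groups with at least 50 reviews are split 80/20 into train/test,
while smaller groups go entirely to train. A self-contained sketch of just that
policy (column names taken from the diff; the function name, thresholds and
fixed seed are illustrative):

    import pandas
    from sklearn.model_selection import train_test_split

    def grouped_split(df, min_group=50, test_size=0.2, seed=0):
        train_parts, test_parts = [], []
        for _, group in df.groupby(["publisher_name", "grade_type"]):
            if len(group) >= min_group:
                # Large enough: hold out a test fraction within the group, so
                # every (publisher, grade type) rating scale appears in both splits
                tr, te = train_test_split(group, test_size=test_size, random_state=seed)
                train_parts.append(tr)
                test_parts.append(te)
            else:
                # Rare combination: keep it whole in train rather than
                # fragmenting an already small group
                train_parts.append(group)
        return pandas.concat(train_parts), pandas.concat(test_parts)

Splitting within each group rather than over the whole frame means the test set
never contains a rating scale that was unseen at training time.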
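
A note on the new iter_pandas_df() helper: GeneratorBasedBuilder expects
_generate_examples to yield (key, example) pairs, which the helper produces
from DataFrame rows via itertuples(), keeping only the whitelisted feature
columns. A toy run (the data here is made up for illustration):

    import pandas

    def iter_pandas_df(df, cols):
        # The row index becomes the example key; only whitelisted columns
        # survive into the example dict
        for tpl in df.itertuples():
            yield tpl.Index, {k: v for k, v in tpl._asdict().items() if k in cols}

    df = pandas.DataFrame({"review_content": ["Great!", "Meh"], "num": [9, 5], "extra": [0, 1]})
    for key, example in iter_pandas_df(df, {"review_content", "num"}):
        print(key, example)
    # 0 {'review_content': 'Great!', 'num': 9}
    # 1 {'review_content': 'Meh', 'num': 5}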