frankier committed on
Commit
6a9536b
1 Parent(s): f2e1c02

Add in compensation for whether numerical grading scales include zero or not

Browse files
Files changed (1) hide show
  1. multiscale_rt_critics.py +44 -17
multiscale_rt_critics.py CHANGED
@@ -34,6 +34,7 @@ import os
34
  import sys
35
  import pandas
36
  import numpy
 
37
  from os.path import join as pjoin
38
  from datasets import Dataset
39
  from sklearn.model_selection import train_test_split
@@ -143,17 +144,38 @@ def np_round(arr):
143
  return (arr + 0.5).astype(numpy.int32)
144
 
145
 
146
- def common_denom_grades(group_df):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
147
  if group_df.iloc[0]["is_any_letter"]:
148
- group_df["multiplier"] = 1
149
- group_df["non_neg_error"] = False
150
- if group_df.iloc[0]["letter_implies_short"]:
151
- group_df["num"] = SHORT_LETTER_SCALE.index(group_df.iloc[0]["review_score"]) + 1
152
- group_df["denom"] = len(SHORT_LETTER_SCALE)
153
- else:
154
- group_df["num"] = LONG_LETTER_SCALE.index(group_df.iloc[0]["review_score"]) + 1
155
- group_df["denom"] = len(LONG_LETTER_SCALE)
156
- return group_df
157
  denoms = numpy.empty(len(group_df), dtype=numpy.int32)
158
  for idx, num in enumerate(group_df["orig_num"]):
159
  frac = Fraction.from_float(num)
@@ -162,13 +184,14 @@ def common_denom_grades(group_df):
162
  group_df["multiplier"] = common_denom
163
  num = common_denom * group_df["orig_num"].to_numpy()
164
  denom = common_denom * group_df["orig_denom"].to_numpy()
165
- group_df["num"] = np_round(num)
166
- group_df["denom"] = np_round(denom)
167
- group_df["non_neg_error"] = (abs(group_df["num"] - num) >= 0.05) | (abs(group_df["denom"] - denom) >= 0.05)
168
  return group_df
169
 
170
 
171
  def normalize_reviews(review_df):
 
172
  # Drop unrated
173
  review_df = drop_unrated(review_df)
174
 
@@ -224,7 +247,7 @@ def normalize_reviews(review_df):
224
  print("unique grade/publisher combinations", working_review_df.groupby(["grade_type", "publisher_name"]).ngroups)
225
 
226
  # Now we can find common denominators on a (publisher, grade type) combination basis
227
- working_review_df = working_review_df.groupby(["publisher_name", "grade_type"], group_keys=False).apply(common_denom_grades)
228
  working_review_df = drop_because(working_review_df, working_review_df["multiplier"] > 500, "multiplier > 500")
229
  assert working_review_df["non_neg_error"].sum() == 0
230
 
@@ -232,6 +255,10 @@ def normalize_reviews(review_df):
232
  print("non-neg error count", working_review_df["non_neg_error"].sum())
233
  print("multipliers")
234
  print(working_review_df["multiplier"].value_counts())
 
 
 
 
235
 
236
  # TODO: Add back in rare review_scores dropped at the beginning when they
237
  # are compatible with some common denominator + grade type from the same
@@ -274,7 +301,7 @@ def split_dfs(df):
274
  group_cols["publisher_name"].append(publisher_name)
275
  group_cols["grade_type"].append(grade_type)
276
  group_cols["group_id"].append(group_id)
277
- group_cols["scale_points"].append(group_df.iloc[0]["denom"])
278
  group_id += 1
279
 
280
  for publisher_name, grade_type, group_df in split_groups:
@@ -331,8 +358,8 @@ NORMAL_FEATURES = datasets.Features({
331
  "grade_type": datasets.Value("string"),
332
  "orig_num": datasets.Value("float"),
333
  "orig_denom": datasets.Value("float"),
334
- "num": datasets.Value("uint8"),
335
- "denom": datasets.Value("uint8"),
336
  "multiplier": datasets.Value("uint8"),
337
  "group_id": datasets.Value("uint32"),
338
  })
34
  import sys
35
  import pandas
36
  import numpy
37
+ import math
38
  from os.path import join as pjoin
39
  from datasets import Dataset
40
  from sklearn.model_selection import train_test_split
144
  return (arr + 0.5).astype(numpy.int32)
145
 
146
 
147
def process_letter_grade_group(group_df):
    """Attach ordinal labels and scale metadata to a letter-graded review group.

    Letter scales are treated as unit (1x) scales that never include a zero
    grade, so ``includes_zero``/``multiplier``/``non_neg_error`` are fixed.
    The scale length (short vs. long) is read from the group's first row,
    since ``letter_implies_short`` is constant within a
    (publisher_name, grade_type) group.

    Fix: previously ``label`` was computed from the FIRST row's review_score
    and broadcast to every row in the group; a group keyed only by publisher
    and grade type contains many distinct scores, so each row must be mapped
    to the index of its own score on the scale.
    """
    group_df["includes_zero"] = False
    group_df["multiplier"] = 1
    group_df["non_neg_error"] = False
    # Choose the scale once per group; letter_implies_short is group-constant.
    if group_df.iloc[0]["letter_implies_short"]:
        scale = SHORT_LETTER_SCALE
    else:
        scale = LONG_LETTER_SCALE
    # 0-based position of each row's own score on the chosen scale.
    group_df["label"] = [scale.index(score) for score in group_df["review_score"]]
    group_df["scale_points"] = len(scale)
    return group_df
158
+
159
+
160
def process_includes_zero(group_df):
    """Detect whether a numeric grading scale includes zero and renormalize.

    On a scale scaled up by ``multiplier``, the smallest expressible non-zero
    grade equals ``multiplier``; any label below that implies the scale has a
    zero point. When no zero point exists, labels and scale_points are shifted
    down by ``multiplier`` so labels start at 0.
    """
    step = group_df.iloc[0]["multiplier"]
    # A label strictly below one step can only be a zero grade.
    has_zero = bool((group_df["label"] < step).any())
    group_df["includes_zero"] = has_zero
    if not has_zero:
        group_df["label"] = group_df["label"] - step
        group_df["scale_points"] = group_df["scale_points"] - step
    return group_df
168
+
169
+
170
def find_effective_nom_denom(group_df):
    """Dispatch a (publisher, grade type) group to letter or numeric handling.

    Letter-graded groups get fixed unit scales; numeric groups are first put
    over a common denominator and then compensated for whether their scale
    includes zero.
    """
    first_row = group_df.iloc[0]
    if not first_row["is_any_letter"]:
        return process_includes_zero(common_denom_grades(group_df))
    return process_letter_grade_group(group_df)
176
+
177
+
178
+ def common_denom_grades(group_df):
 
 
179
  denoms = numpy.empty(len(group_df), dtype=numpy.int32)
180
  for idx, num in enumerate(group_df["orig_num"]):
181
  frac = Fraction.from_float(num)
184
  group_df["multiplier"] = common_denom
185
  num = common_denom * group_df["orig_num"].to_numpy()
186
  denom = common_denom * group_df["orig_denom"].to_numpy()
187
+ group_df["label"] = np_round(num)
188
+ group_df["scale_points"] = np_round(denom)
189
+ group_df["non_neg_error"] = (abs(group_df["label"] - num) >= 0.05) | (abs(group_df["scale_points"] - denom) >= 0.05)
190
  return group_df
191
 
192
 
193
  def normalize_reviews(review_df):
194
+ print()
195
  # Drop unrated
196
  review_df = drop_unrated(review_df)
197
 
247
  print("unique grade/publisher combinations", working_review_df.groupby(["grade_type", "publisher_name"]).ngroups)
248
 
249
  # Now we can find common denominators on a (publisher, grade type) combination basis
250
+ working_review_df = working_review_df.groupby(["publisher_name", "grade_type"], group_keys=False).apply(find_effective_nom_denom)
251
  working_review_df = drop_because(working_review_df, working_review_df["multiplier"] > 500, "multiplier > 500")
252
  assert working_review_df["non_neg_error"].sum() == 0
253
 
255
  print("non-neg error count", working_review_df["non_neg_error"].sum())
256
  print("multipliers")
257
  print(working_review_df["multiplier"].value_counts())
258
+ print("includes_zero")
259
+ print(working_review_df["includes_zero"].value_counts())
260
+ print("grade breakdown")
261
+ print(working_review_df.value_counts(["grade_type", "multiplier", "includes_zero", "scale_points"]))
262
 
263
  # TODO: Add back in rare review_scores dropped at the beginning when they
264
  # are compatible with some common denominator + grade type from the same
301
  group_cols["publisher_name"].append(publisher_name)
302
  group_cols["grade_type"].append(grade_type)
303
  group_cols["group_id"].append(group_id)
304
+ group_cols["scale_points"].append(group_df.iloc[0]["scale_points"])
305
  group_id += 1
306
 
307
  for publisher_name, grade_type, group_df in split_groups:
358
  "grade_type": datasets.Value("string"),
359
  "orig_num": datasets.Value("float"),
360
  "orig_denom": datasets.Value("float"),
361
+ "label": datasets.Value("uint8"),
362
+ "scale_points": datasets.Value("uint8"),
363
  "multiplier": datasets.Value("uint8"),
364
  "group_id": datasets.Value("uint32"),
365
  })