helloai0 committed
Commit ef24038 · verified · 1 Parent(s): c5d9519

Update metric.py

Files changed (1)
  1. metric.py +39 -38
metric.py CHANGED
@@ -46,53 +46,54 @@ def compute(params):
    private_solution_df = private_solution_df.sort_values(params.submission_id_col).reset_index(drop=True)
    private_submission_df = private_submission_df.sort_values(params.submission_id_col).reset_index(drop=True)

-    # METRICS Calculation Evaluation
-    # _metric = SOME METRIC FUNCTION
-    def _metric(outputs, targets):
-        # input example: public_solution_df[target_cols], public_submission_df[target_cols]
-
-        score = 0.5
-        return score
-
-    target_cols = [col for col in solution_df.columns if col not in [params.submission_id_col, "split"]]
-    public_score = _metric(public_solution_df[target_cols], public_submission_df[target_cols])
-    private_score = _metric(private_solution_df[target_cols], private_submission_df[target_cols])
-

+    print('public_solution_df', public_solution_df)
+    print('private_solution_df', private_solution_df)
+
+    # # METRICS Calculation Evaluation
+    # # _metric = SOME METRIC FUNCTION
+    # def _metric(outputs, targets):
+    #     # input example: public_solution_df[target_cols], public_submission_df[target_cols]
+
+    #     score = 0.5
+    #     return score

-    ## LLM Scoring Evaluation


+    ## LLM Scoring Evaluation
    def _metric(outputs, targets):
-        # input example: public_solution_df[target_cols], public_submission_df[target_cols]
-
-        score = 0.5
+        # inputs: public_solution_df[target_cols], public_submission_df[target_cols]
+        # output: score
+        for row, output in enumerate(outputs):
+            answer = output['pred']
+            label = str(targets.iloc[row]['pred'])
+
+            prompt=f"Give me a score from 1 to 10 (higher is better) judging how similar these two captions are. Caption one: {answer}. Caption two: {label}\nScore:"
+
+            try:
+                response = client.completions.create(
+                    engine="gpt-3.5-turbo-instruct",
+                    prompt=prompt,
+                    temperature=0,
+                    max_tokens=1,
+                )
+
+                score = int(response.choices[0].text.strip())
+
+            except:
+                print("Error: API Calling")
+                return
+
        return score
-
-    submitted_answer = str(submission_df.iloc[0]['pred'])
-    gt = str(solution_df.iloc[0]['pred'])
-
-    prompt=f"Give me a score from 1 to 10 (higher is better) judging how similar these two captions are. Caption one: {submitted_answer}. Caption two: {gt}\nScore:"
-
-    try:
-        response = client.completions.create(
-            engine="gpt-3.5-turbo-instruct",
-            prompt=prompt,
-            temperature=0,
-            max_tokens=1,
-        )
-
-        public_score = int(response.choices[0].text.strip())
-
-    except:
-        print("Error w/ api")

-
+    target_cols = [col for col in solution_df.columns if col not in [params.submission_id_col, "split"]]
+    public_score = _metric(public_solution_df[target_cols], public_submission_df[target_cols])
+    private_score = _metric(private_solution_df[target_cols], private_submission_df[target_cols])

-    private_score = public_score
+    metric_name = "example_metric"

-    metric_dict = {"public_score": {"metric1": public_score},
-                   "private_score": {"metric1": private_score}
+    metric_dict = {"public_score": {metric_name: public_score},
+                   "private_score": {metric_name: private_score}
    }

    return metric_dict
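
For reference, here is a sketch of how the LLM-scoring _metric introduced in this commit could be made runnable end to end. It is an illustration under stated assumptions, not the committed implementation: it assumes an openai>=1.0 client (where completions.create takes model= rather than the legacy engine=), a 'pred' column in both the submission and solution frames, averaging of per-row scores, and a fallback score of 0 when the API call or the integer parse fails.

# Sketch only: a runnable variant of the LLM-scoring metric in this commit.
# Assumptions (not fixed by the commit): openai>=1.0 client, a 'pred' column
# in both frames, per-row scores averaged into one number, fallback of 0 on error.
from openai import OpenAI
import pandas as pd

client = OpenAI()  # reads OPENAI_API_KEY from the environment

def _metric(outputs: pd.DataFrame, targets: pd.DataFrame) -> float:
    # outputs: submission rows, targets: solution rows, aligned by position
    scores = []
    for row in range(len(outputs)):
        answer = str(outputs.iloc[row]['pred'])
        label = str(targets.iloc[row]['pred'])
        prompt = (
            "Give me a score from 1 to 10 (higher is better) judging how "
            f"similar these two captions are. Caption one: {answer}. "
            f"Caption two: {label}\nScore:"
        )
        try:
            response = client.completions.create(
                model="gpt-3.5-turbo-instruct",  # openai>=1.0 uses model=, not engine=
                prompt=prompt,
                temperature=0,
                max_tokens=2,  # assumption: leave room for two-digit scores such as "10"
            )
            scores.append(int(response.choices[0].text.strip()))
        except Exception as exc:
            print(f"Error calling the API or parsing the score: {exc}")
            scores.append(0)  # assumed fallback so one bad row does not abort scoring
    return sum(scores) / len(scores) if scores else 0.0

Iterating by position avoids enumerate(outputs) yielding column labels on a DataFrame, and averaging keeps public_score and private_score as single numbers in metric_dict, whereas the committed loop returns only the last row's score and returns None when the API call fails.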