helloai0 committed
Commit: a86c725
1 Parent(s): f0e7be6

Update metric.py

Files changed (1)
  1. metric.py +48 -3
metric.py CHANGED
```diff
@@ -1,5 +1,10 @@
+import os
 from openai import OpenAI
-client = OpenAI(api_key = '')
+if "OPENAI" in os.environ:
+    pass
+else:
+    print('Doesn\'t find OPENAI')
+client = OpenAI(api_key = os.environ['OPENAI'])
 
 import pandas as pd
 from huggingface_hub import hf_hub_download
```
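The new guard above only prints a message when the `OPENAI` variable is absent and then indexes `os.environ['OPENAI']` unconditionally, so a missing key still ends in a `KeyError`. A minimal sketch of a stricter variant, assuming failing fast is acceptable here (the error message and the `RuntimeError` choice are illustrative, not from the commit):

```python
import os

from openai import OpenAI

# Assumption: raise immediately with a clear message instead of
# printing and then crashing on the os.environ['OPENAI'] lookup.
api_key = os.environ.get("OPENAI")
if api_key is None:
    raise RuntimeError("OPENAI environment variable is not set")

client = OpenAI(api_key=api_key)
```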
```diff
@@ -26,9 +31,47 @@ def compute(params):
     )
     submission_df = pd.read_csv(submission_file)
 
+    public_ids = solution_df[solution_df.split == "public"][params.submission_id_col].values
+    private_ids = solution_df[solution_df.split == "private"][params.submission_id_col].values
+
+    public_solution_df = solution_df[solution_df[params.submission_id_col].isin(public_ids)]
+    public_submission_df = submission_df[submission_df[params.submission_id_col].isin(public_ids)]
+
+    private_solution_df = solution_df[solution_df[params.submission_id_col].isin(private_ids)]
+    private_submission_df = submission_df[submission_df[params.submission_id_col].isin(private_ids)]
+
+    public_solution_df = public_solution_df.sort_values(params.submission_id_col).reset_index(drop=True)
+    public_submission_df = public_submission_df.sort_values(params.submission_id_col).reset_index(drop=True)
+
+    private_solution_df = private_solution_df.sort_values(params.submission_id_col).reset_index(drop=True)
+    private_submission_df = private_submission_df.sort_values(params.submission_id_col).reset_index(drop=True)
+
+    # METRICS Calculation Evaluation
+    # _metric = SOME METRIC FUNCTION
+    def _metric(outputs, targets):
+        # input example: public_solution_df[target_cols], public_submission_df[target_cols]
+
+        score = 0.5
+        return score
+
+    target_cols = [col for col in solution_df.columns if col not in [params.submission_id_col, "split"]]
+    public_score = _metric(public_solution_df[target_cols], public_submission_df[target_cols])
+    private_score = _metric(private_solution_df[target_cols], private_submission_df[target_cols])
+
+
+
+    ## LLM Scoring Evaluation
+
+
+    def _metric(outputs, targets):
+        # input example: public_solution_df[target_cols], public_submission_df[target_cols]
+
+        score = 0.5
+        return score
+
     submitted_answer = str(submission_df.iloc[0]['pred'])
     gt = str(solution_df.iloc[0]['pred'])
-
+
     prompt=f"Give me a score from 1 to 10 (higher is better) judging how similar these two captions are. Caption one: {submitted_answer}. Caption two: {gt}\nScore:"
 
     try:
```
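Both `_metric` definitions in this hunk are placeholders that return a constant `0.5`, and the second definition shadows the first before the LLM scoring step. As a hedged illustration only (the commit does not specify the real metric), a row-wise exact-match accuracy over the target columns could look like this:

```python
import pandas as pd

def _metric(outputs: pd.DataFrame, targets: pd.DataFrame) -> float:
    # Illustrative assumption: a row counts as correct only when every
    # target column matches exactly; the mean over rows is the score.
    matches = (outputs.values == targets.values).all(axis=1)
    return float(matches.mean())
```

Because both frames are sorted by `params.submission_id_col` and re-indexed just above, a positional comparison like this lines solution rows up with submission rows.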
```diff
@@ -44,8 +87,10 @@ def compute(params):
     except:
         print("Error w/ api")
 
+
+
     private_score = public_score
-
+
     metric_dict = {"public_score": {"metric1": public_score},
                    "private_score": {"metric1": private_score}
                   }
```
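The body of the `try:` block (new lines 78-86) is elided from this diff, so the actual API call is not visible. One plausible shape, assuming the Chat Completions API is used; the model name and the score parsing are assumptions, not part of the commit:

```python
# Assumption: the elided try-block body sends the prompt built above
# to the Chat Completions API; the model choice is hypothetical.
response = client.chat.completions.create(
    model="gpt-3.5-turbo",  # hypothetical model name
    messages=[{"role": "user", "content": prompt}],
)
reply = response.choices[0].message.content.strip()
public_score = float(reply.split()[0])  # assumes the reply starts with the 1-10 score
```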
 
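One last hedged note on the final hunk: the bare `except:` hides the failure reason. A narrower handler that logs the exception, sketched under the assumption that falling back to the placeholder `_metric` score is the intended behavior (`llm_similarity_score` is a hypothetical stand-in for the elided API call):

```python
def llm_similarity_score(prompt: str) -> float:
    # Hypothetical stand-in for the elided OpenAI call (new lines 78-86).
    raise NotImplementedError("wire up the API call here")

prompt = "Give me a score from 1 to 10 ..."  # built as in the diff above
public_score = 0.5  # placeholder value from _metric, as in the commit
try:
    public_score = llm_similarity_score(prompt)
except Exception as exc:  # narrower than a bare `except:`, and logs why
    print(f"Error w/ api: {exc}")

private_score = public_score  # private mirrors public, as in the commit
```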