Update my_model/results/evaluation.py
my_model/results/evaluation.py  CHANGED  (+74 -17)
@@ -5,19 +5,27 @@ from nltk.stem import PorterStemmer
 from ast import literal_eval
 from typing import Union, List
 import streamlit as st
+from my_model.config import evaluation_config as config
 
 class KBVQAEvaluator:
     def __init__(self):
         """
         Initialize the VQA Processor with the dataset and configuration settings.
         """
-        self.data_path =
-        self.use_fuzzy =
+        self.data_path = config.EVALUATION_DATA_PATH
+        self.use_fuzzy = config.USE_FUZZY
         self.stemmer = PorterStemmer()
         self.scores_df = pd.read_excel(self.data_path, sheet_name="Scores")
         self.df = pd.read_excel(self.data_path, sheet_name="Main Data")
         self.vqa_scores = {}
         self.exact_match_scores = {}
+        self.fuzzy_threshold = config.FUZZY_SCORE
+        self.openai_api_key = config.OPENAI_API_KEY
+        self.model_names = config.MODEL_NAMES
+        self.model_configurations = config.MODEL_CONFIGURATIONS  # ['caption+detic', 'caption+yolov5', 'only_caption', 'only_detic', 'only_yolov5']
+        self.gpt4_seed = config.GPT4_SEED
+        self.gpt4_max_tokens = config.GPT4_MAX_TOKENS
+        self.gpt4_temperature = config.GPT4_TEMPERATURE
 
     def stem_answers(self, answers: Union[str, List[str]]) -> Union[str, List[str]]:
         """
@@ -34,7 +42,7 @@
         Calculate VQA score based on the number of matching answers, with optional fuzzy matching.
         """
         if self.use_fuzzy:
-            fuzzy_matches = sum(fuzz.partial_ratio(model_answer, gt) >=
+            fuzzy_matches = sum(fuzz.partial_ratio(model_answer, gt) >= self.fuzzy_threshold for gt in ground_truths)
             return min(fuzzy_matches / 3, 1)
         else:
             count = Counter(ground_truths)
@@ -45,20 +53,18 @@
         Calculate Exact Match score, with optional fuzzy matching.
         """
         if self.use_fuzzy:
-            return int(any(fuzz.partial_ratio(model_answer, gt) >=
+            return int(any(fuzz.partial_ratio(model_answer, gt) >= self.fuzzy_threshold for gt in ground_truths))
         else:
             return int(model_answer in ground_truths)
 
-    def
+    def syntactic_evaluation(self):
         """
         Process the DataFrame: stem answers, calculate scores, and store results.
         """
         self.df['raw_answers_stemmed'] = self.df['raw_answers'].apply(literal_eval).apply(self.stem_answers)
-        model_configurations = ['caption+detic', 'caption+yolov5', 'only_caption', 'only_detic', 'only_yolov5']
-        model_names = ['13b', '7b']
 
-        for name in model_names:
-            for config in model_configurations:
+        for name in self.model_names:
+            for config in self.model_configurations:
                 full_config = f'{name}_{config}'
                 self.df[f'{full_config}_stemmed'] = self.df[full_config].apply(self.stem_answers)
 
@@ -67,8 +73,43 @@
 
                 self.vqa_scores[full_config] = round(self.df[f'vqa_score_{full_config}'].mean()*100, 2)
                 self.exact_match_scores[full_config] = round(self.df[f'exact_match_score_{full_config}'].mean()*100, 2)
+
+    def create_GPT4_messages_template(self, question, ground_truths, model_answer):
+        """
+        Create a message list for the GPT-4 API call based on the question, ground truths, and model answer.
+        """
+        system_message = {
+            "role": "system",
+            "content": """You are an AI trained to evaluate the equivalence of AI-generated answers to a set of ground truth answers for a given question. Upon reviewing a model's answer, determine if it matches the ground truths. Use the following rating system: 1 if you find that the model answer matches more than 25% of the ground truth answers, 2 if you find that the model answer matches only less than 25% of the ground truth answers, and 3 if the model answer is incorrect. Respond in the format below for easy parsing:
+            Rating: {1/2/3}
+            """
+        }
+
+        user_message = {
+            "role": "user",
+            "content": f"Question : {question}\nGround Truth: {ground_truths}\nModel's Response: {model_answer}"
+        }
+
+        return [system_message, user_message]
+
+
+    def semantic_evaluation(self):
+        """
+        Perform semantic evaluation using GPT-4 for each model configuration.
+        """
+        openai.api_key = self.openai_api_key
+        model_configurations_for_semantic_evaluation = self.model_configurations[:2]  # considering only main model configs ['caption+detic', 'caption+yolov5'] without ablation, due to the cost involved.
+        for name in self.model_names:
+            for config in model_configurations_for_semantic_evaluation:
+                # Iterate over rows and send requests
+                for index, row in self.df.iterrows():
+                    messages = self.create_GPT4_messages_template(row['question'], row['raw_answers'][1:-1], row[name+'_'+config])
+                    response = openai.ChatCompletion.create(model="gpt-4", messages=messages, max_tokens=self.gpt4_max_tokens, temperature=self.gpt4_temperature, seed=self.gpt4_seed)
+                    evaluation = response["choices"][0]["message"]["content"]
+                    rating = int(evaluation.split('\n')[0].split(":")[1].strip())
+                    self.df.at[index, f'gpt4_rating_{config}'] = rating
 
-    def save_results(self):
+    def save_results(self, save_filename):
         # Create a DataFrame for the scores
         scores_data = {
             'Model Configuration': list(self.vqa_scores.keys()),
@@ -78,12 +119,28 @@
         scores_df = pd.DataFrame(scores_data)
 
         # Saving the scores DataFrame to an Excel file
-        with pd.ExcelWriter('
+        with pd.ExcelWriter(save_filename + '.xlsx', engine='openpyxl', mode='w') as writer:
+            self.df.to_excel(writer, sheet_name='Main Data', index=False)
             scores_df.to_excel(writer, sheet_name='Scores', index=False)
 
-
-
-
-
-
-
+def run_evaluation(save=False, save_filename="results"):
+    """
+    Run the full evaluation process using KBVQAEvaluator and save the results to an Excel file.
+    """
+    # Instantiate the evaluator
+    evaluator = KBVQAEvaluator()
+
+    # Run syntactic evaluation
+    evaluator.syntactic_evaluation()
+
+    # Optionally, run semantic evaluation if required (can be cost-intensive)
+    evaluator.semantic_evaluation()
+
+    if save:
+        # Save results
+        evaluator.save_results(save_filename)
+
+# Call run_evaluation() to execute the evaluation process
+if __name__ == "__main__":
+    #run_evaluation(save=True, save_filename="results")
+    pass
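For reference, the VQA score computed in the diff follows the usual soft-accuracy convention: an answer counts fully once it matches at least three of the ground-truth answers, i.e. min(matches / 3, 1). The standalone sketch below is illustrative only (the helper names and example data are not part of the commit) and shows that arithmetic on stemmed answers, without the optional fuzzy branch:

# Illustrative sketch: mirrors the min(matches / 3, 1) scoring idea on stemmed answers.
from collections import Counter
from typing import List

from nltk.stem import PorterStemmer

stemmer = PorterStemmer()

def stem_phrase(text: str) -> str:
    # Lower-case and stem each token so "running" and "run" compare equal.
    return " ".join(stemmer.stem(token) for token in text.lower().split())

def vqa_soft_score(model_answer: str, ground_truths: List[str]) -> float:
    stemmed_truths = [stem_phrase(gt) for gt in ground_truths]
    matches = Counter(stemmed_truths)[stem_phrase(model_answer)]
    # Three or more matching annotator answers saturate the score at 1.0.
    return min(matches / 3, 1)

print(vqa_soft_score("running", ["running"] * 4 + ["jogging"] * 6))  # 1.0
print(vqa_soft_score("jogging", ["running"] * 9 + ["jogging"]))      # ~0.33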
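The system prompt added in this commit asks GPT-4 to reply in the fixed format "Rating: {1/2/3}", and semantic_evaluation recovers the integer by splitting the first response line on ':'. A hypothetical, slightly more defensive parser for that same format (the function name is made up and not part of the repository) could look like:

import re

def parse_rating(evaluation_text: str) -> int:
    # Accepts responses shaped like "Rating: 2", optionally followed by extra text.
    match = re.search(r"Rating:\s*([123])", evaluation_text)
    if match is None:
        raise ValueError(f"Unexpected evaluation format: {evaluation_text!r}")
    return int(match.group(1))

print(parse_rating("Rating: 2"))  # 2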
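The new run_evaluation helper ties the steps together, and the commented-out call in the __main__ guard hints at the intended entry point. A hypothetical usage sketch, assuming my_model/results/evaluation.py is importable as a package module; the GPT-4 pass is skipped here because it issues paid API calls:

from my_model.results.evaluation import KBVQAEvaluator

evaluator = KBVQAEvaluator()
evaluator.syntactic_evaluation()      # stemming / fuzzy scoring only
evaluator.save_results("results")     # writes results.xlsx with "Main Data" and "Scores" sheets
print(evaluator.vqa_scores)           # e.g. {'13b_caption+detic': ..., '7b_only_caption': ..., ...}

Calling run_evaluation(save=True, save_filename="results") instead would also run the GPT-4 semantic pass before saving.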