import os
import argparse
import json
import ast
import traceback
from tqdm import tqdm
from multiprocessing.pool import Pool

from openai import AzureOpenAI
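
# Example invocation (the script filename and paths below are illustrative):
#   python evaluate_consistency.py \
#       --pred-path predictions.jsonl \
#       --output-dir ./consistency_outputs \
#       --output-json ./consistency_results.json \
#       --num-tasks 4 \
#       --api-key $AZURE_OPENAI_KEY \
#       --api-endpoint $AZURE_OPENAI_ENDPOINT \
#       --api-deployname $AZURE_OPENAI_DEPLOYNAME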


def init():
    # Build an Azure OpenAI client from credentials supplied via environment variables.
    client = AzureOpenAI(
        azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
        api_key=os.getenv("AZURE_OPENAI_KEY"),
        api_version="2024-02-15-preview",
    )
    return client


def interaction(client, message_text):
    # Send a single chat-completion request to the configured Azure OpenAI deployment.
    completion = client.chat.completions.create(
        model=os.getenv("AZURE_OPENAI_DEPLOYNAME"),
        messages=message_text,
        temperature=0.7,
        max_tokens=800,
        top_p=0.95,
        frequency_penalty=0,
        presence_penalty=0,
        stop=None,
    )
    return completion
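
# `message_text` follows the standard chat-completions message format, e.g. (illustrative):
#   [
#       {"role": "system", "content": "You are an intelligent chatbot ..."},
#       {"role": "user", "content": "Please evaluate the following ..."},
#   ]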


def annotate(prediction_set, caption_files, output_dir, args):
    """
    Evaluates question-answer pairs with the Azure OpenAI model and
    writes a consistency score for each sample to `output_dir`.
    """
    # Each worker process builds its own client; a module-level client would not
    # survive the "spawn" start method used by multiprocessing on some platforms.
    client = init()

    for file in tqdm(caption_files):
        key = file[:-5]  # strip the ".json" extension
        qa_set = prediction_set[key]
        question1 = qa_set['q1']
        question2 = qa_set['q2']
        answer = qa_set['a']
        pred1 = qa_set['p1']
        pred2 = qa_set['p2']
        try:
            message = [
                {
                    "role": "system",
                    "content":
                        "You are an intelligent chatbot designed for evaluating the consistency of generative outputs for similar video-based question-answer pairs. "
                        "You will be given two very similar questions, a common answer to both questions, and predicted answers for the two questions. "
                        "Your task is to compare the predicted answers for the two very similar questions, with a common correct answer, and determine if they are consistent. Here's how you can accomplish the task:"
                        "------"
                        "##INSTRUCTIONS: "
                        "- Focus on the consistency between the two predicted answers and the correct answer. Both predicted answers should correspond to the correct answer and to each other, and should not contain any contradictions or significant differences in the conveyed information.\n"
                        "- Both predicted answers must be consistent with each other and the correct answer, in terms of the information they provide about the video content.\n"
                        "- Consider synonyms or paraphrases as valid matches, but only if they maintain the consistency in the conveyed information.\n"
                        "- Evaluate the consistency of the two predicted answers compared to the correct answer."
                },
                {
                    "role": "user",
                    "content":
                        "Please evaluate the following video-based question-answer pair:\n\n"
                        f"Question 1: {question1}\n"
                        f"Question 2: {question2}\n"
                        f"Correct Answer: {answer}\n"
                        f"Predicted Answer to Question 1: {pred1}\n"
                        f"Predicted Answer to Question 2: {pred2}\n\n"
                        "Provide your evaluation only as a consistency score where the consistency score is an integer value between 0 and 5, with 5 indicating the highest level of consistency. "
                        "Please generate the response in the form of a Python dictionary string with the key 'score', where its value is the consistency score as an INTEGER, not a STRING. "
                        "DO NOT PROVIDE ANY OTHER OUTPUT TEXT OR EXPLANATION. Only provide the Python dictionary string. "
                        "For example, your response should look like this: {'score': 4}."
                }
            ]

            completion = interaction(client, message)

            # The model is instructed to return a Python-dict literal such as {'score': 4}.
            response_message = completion.choices[0].message.content
            response_dict = ast.literal_eval(response_message)
            result_qa_pair = [response_dict, qa_set]

            # Save one annotation file per sample so interrupted runs can resume.
            with open(f"{output_dir}/{key}.json", "w") as f:
                json.dump(result_qa_pair, f)

        except Exception as e:
            print(f"Error processing file '{key}': {e}")


def main(args):
    # Read the predictions; each line is parsed as a Python/JSON dict literal.
    with open(args.pred_path, 'r') as f:
        pred_contents = [ast.literal_eval(line) for line in f]

    # Disambiguate repeated video names by appending a running index
    # (the first occurrence gets the suffix "_0").
    video_id_counts = {}
    new_pred_contents = []

    for sample in pred_contents:
        video_id = sample['video_name']
        if video_id in video_id_counts:
            video_id_counts[video_id] += 1
        else:
            video_id_counts[video_id] = 0

        new_sample = sample
        new_sample['video_name'] = f"{video_id}_{video_id_counts[video_id]}"
        new_pred_contents.append(new_sample)

    id_list = [x['video_name'] for x in new_pred_contents]
    caption_files = [f"{video_id}.json" for video_id in id_list]

    output_dir = args.output_dir
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # Build a lookup from video name to its question-answer set.
    prediction_set = {}
    for sample in new_pred_contents:
        video_id = sample['video_name']
        question1 = sample['Q1']
        question2 = sample['Q2']
        answer = sample['A']
        pred1 = sample['P1']
        pred2 = sample['P2']
        qa_set = {"q1": question1, "q2": question2, "a": answer, "p1": pred1, "p2": pred2}
        prediction_set[video_id] = qa_set

    num_tasks = args.num_tasks

    # Keep annotating until every sample has a corresponding output file,
    # so interrupted or failed runs are retried automatically.
    while True:
        try:
            completed_files = os.listdir(output_dir)
            print(f"completed_files: {len(completed_files)}")

            incomplete_files = [f for f in caption_files if f not in completed_files]
            print(f"incomplete_files: {len(incomplete_files)}")

            if len(incomplete_files) == 0:
                break
            if len(incomplete_files) <= num_tasks:
                num_tasks = 1

            # Split the remaining files into chunks and annotate them in parallel.
            part_len = len(incomplete_files) // num_tasks
            all_parts = [incomplete_files[i:i + part_len] for i in range(0, len(incomplete_files), part_len)]
            task_args = [(prediction_set, part, args.output_dir, args) for part in all_parts]

            with Pool() as pool:
                pool.starmap(annotate, task_args)

        except Exception as e:
            print(f"Error: {e}")

    # Combine the per-sample annotation files into a single JSON file.
    combined_contents = {}
    json_path = args.output_json

    for file_name in os.listdir(output_dir):
        if file_name.endswith(".json"):
            file_path = os.path.join(output_dir, file_name)
            with open(file_path, "r") as json_file:
                content = json.load(json_file)
                combined_contents[file_name[:-5]] = content

    with open(json_path, "w") as json_file:
        json.dump(combined_contents, json_file)
    print("All evaluation completed!")

    # Compute the average consistency score across all samples.
    score_sum = 0
    count = 0
    for key, result in combined_contents.items():
        count += 1
        score_match = result[0]['score']
        score = int(score_match)
        score_sum += score
    average_score = score_sum / count

    print("Average score for consistency:", average_score)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="question-answer-generation-using-gpt-3")
    parser.add_argument("--pred-path", required=True, help="The path to the file containing predictions.")
    parser.add_argument("--output-dir", required=True, help="The directory in which to save per-sample annotation JSON files.")
    parser.add_argument("--output-json", required=True, help="The path to save the final combined annotation JSON file.")
    parser.add_argument("--num-tasks", required=True, type=int, help="Number of splits.")
    parser.add_argument("--api-key", required=True, type=str, help="Azure OpenAI API key.")
    parser.add_argument("--api-endpoint", required=True, type=str, help="Azure OpenAI API endpoint.")
    parser.add_argument("--api-deployname", required=True, type=str, help="Azure OpenAI deployment name.")
    args = parser.parse_args()

    # Expose the credentials through environment variables so that worker
    # processes can build their own clients via init().
    os.environ["AZURE_OPENAI_KEY"] = args.api_key
    os.environ["AZURE_OPENAI_ENDPOINT"] = args.api_endpoint
    os.environ["AZURE_OPENAI_DEPLOYNAME"] = args.api_deployname

    main(args)