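"""Convert a JSONL result file from a LLaVA evaluation run into the JSON
answer file expected for an EvalAI submission.

Each result line is expected to hold a JSON object with "question_id" and
"text" fields; the annotation file supplies the matching "image" names.
"""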
import argparse
import json
import os

from llava.eval.m4c_evaluator import EvalAIAnswerProcessor

def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--annotation-file", type=str, required=True)
    parser.add_argument("--result-file", type=str, required=True)
    parser.add_argument("--result-upload-file", type=str, required=True)
    return parser.parse_args()

if __name__ == "__main__":
    args = parse_args()

    os.makedirs(os.path.dirname(args.result_upload_file), exist_ok=True)

    # Load predictions, counting malformed JSONL lines instead of crashing.
    results = []
    error_line = 0
    for line in open(args.result_file):
        try:
            results.append(json.loads(line))
        except json.JSONDecodeError:
            error_line += 1

    # Index predictions by question id for direct lookup per annotation.
    results = {x["question_id"]: x["text"] for x in results}

    test_split = [json.loads(line) for line in open(args.annotation_file)]
    split_ids = set(x["question_id"] for x in test_split)

    print(f"total results: {len(results)}, total split: {len(test_split)}, error_line: {error_line}")

    # Normalize each predicted answer with the EvalAI processor and pair it
    # with the image it belongs to. Every annotation must have a prediction.
    all_answers = []
    answer_processor = EvalAIAnswerProcessor()
    for x in test_split:
        assert x["question_id"] in results, x
        all_answers.append({
            "image": x["image"],
            "answer": answer_processor(results[x["question_id"]]),
        })

    with open(args.result_upload_file, "w") as f:
        json.dump(all_answers, f)
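
# Example invocation (the script and file names below are illustrative):
#   python convert_for_submission.py \
#       --annotation-file llava_test.jsonl \
#       --result-file answers.jsonl \
#       --result-upload-file upload/answers_upload.json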