import argparse
import json
import logging
import os
import time

import openai  # NOTE: openai.ChatCompletion is only available in the pre-1.0 openai Python SDK
import ray
import shortuuid
import tqdm

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

MAX_API_RETRY = 5
REQ_TIME_GAP = 10

@ray.remote(num_cpus=4)
def get_eval(sys_prompt, user_prompt: str, max_tokens: int):
    # Each Ray worker process configures its own logging.
    logging.basicConfig(level=logging.INFO)
    for i in range(MAX_API_RETRY):
        try:
            response = openai.ChatCompletion.create(
                model="gpt-4",
                messages=[
                    {"role": "system", "content": sys_prompt},
                    {
                        "role": "user",
                        "content": user_prompt,
                    },
                ],
                temperature=0.2,  # TODO: figure out which temperature is best for evaluation
                max_tokens=max_tokens,
            )
            content = response["choices"][0]["message"]["content"]
            logger.info(content)
            return content
        except Exception as e:
            logger.error(e)
            time.sleep(5)
    logger.error(f"Failed after {MAX_API_RETRY} retries.")
    return "error"

def parse_score(review):
    try:
        # The first line of the review is expected to hold the two scores.
        score_pair = review.split("\n")[0]
        score_pair = score_pair.replace(",", " ")
        # Split on any whitespace so a pair like "8, 7" does not produce empty tokens.
        sp = score_pair.split()
        if len(sp) == 2:
            return [float(sp[0]), float(sp[1])]
        else:
            raise Exception("Invalid score pair.")
    except Exception as e:
        logger.error(
            f"{e}\nContent: {review}\n" "You must manually fix the score pair."
        )
        return [-1, -1]

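# Illustrative sketch of the expected review format (inferred from the parsing
# logic above, not from a spec): the first line carries the two scores, the
# rest is free-form commentary.
#   parse_score("8 7\nAssistant 1 gave a more detailed answer...")  # -> [8.0, 7.0]
#   parse_score("No scores here")                                   # -> [-1, -1]
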
def gen_prompt(reviewer_jsons, prompt_jsons, cat, ques, ans1, ans2):
    # Default to general category (index=0)
    reviewer_idx = 0
    for idx, reviewer in enumerate(reviewer_jsons):
        if reviewer["category"] == cat:
            reviewer_idx = idx
            break
    prompt_id = reviewer_jsons[reviewer_idx]["prompt_id"]
    prompt_json = prompt_jsons[prompt_id - 1]
    assert prompt_json["prompt_id"] == prompt_id

    sys_prompt = prompt_json["system_prompt"]
    prompt_template = prompt_json["prompt_template"]
    defaults = prompt_json["defaults"]
    prompt = prompt_template.format(
        question=ques, answer_1=ans1, answer_2=ans2, **defaults
    )
    return sys_prompt, prompt, reviewer_idx + 1

def get_json_list(file_path):
    file_path = os.path.expanduser(file_path)
    with open(file_path, "r") as f:
        json_list = []
        for line in f:
            json_list.append(json.loads(line))
        return json_list

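# Input files are JSON Lines (one JSON object per line). The fields assumed
# below are inferred from how the records are used in __main__:
#   question file: {"question_id": ..., "text": ..., "category": ...}
#   answer files:  {"question_id": ..., "answer_id": ..., "text": ...}
#   reviewer file: {"category": ..., "prompt_id": ...}
#   prompt file:   {"prompt_id": ..., "system_prompt": ..., "prompt_template": ..., "defaults": {...}}
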
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="ChatGPT-based QA evaluation.")
    parser.add_argument("-q", "--question-file")
    parser.add_argument("-a", "--answer-file-list", nargs="+", default=[])
    parser.add_argument("-p", "--prompt-file")
    parser.add_argument("-r", "--reviewer-file")
    parser.add_argument("-o", "--output-review-file")
    parser.add_argument(
        "--max-tokens",
        type=int,
        default=1024,
        help="maximum number of tokens produced in the output",
    )
    args = parser.parse_args()

    ray.init()

    question_jsons = get_json_list(args.question_file)
    answer1_jsons = get_json_list(args.answer_file_list[0])
    answer2_jsons = get_json_list(args.answer_file_list[1])
    reviewer_jsons = get_json_list(args.reviewer_file)
    prompt_jsons = get_json_list(args.prompt_file)

    # check if # of questions, answers are the same
    assert len(question_jsons) == len(answer1_jsons) == len(answer2_jsons)

    handles = []
    review_jsons = []
    total_len = len(question_jsons)
    question_idx_list = list(range(total_len))

    for i in question_idx_list:
        assert (
            answer1_jsons[i]["question_id"]
            == question_jsons[i]["question_id"]
            == answer2_jsons[i]["question_id"]
        )

        ques = question_jsons[i]["text"]
        cat = question_jsons[i]["category"]
        ans1 = answer1_jsons[i]["text"]
        ans2 = answer2_jsons[i]["text"]
        sys_prompt, prompt, reviewer_id = gen_prompt(
            reviewer_jsons, prompt_jsons, cat, ques, ans1, ans2
        )
        review_id = shortuuid.uuid()
        review_jsons.append(
            {
                "review_id": review_id,
                "question_id": question_jsons[i]["question_id"],
                "answer1_id": answer1_jsons[i]["answer_id"],
                "answer2_id": answer2_jsons[i]["answer_id"],
                "reviewer_id": reviewer_id,
                "metadata": {},
            }
        )
        # To avoid the rate limit set by OpenAI
        handles.append(get_eval.remote(sys_prompt, prompt, args.max_tokens))
        logger.info(
            f"Waiting for {REQ_TIME_GAP} seconds before sending the next request."
        )
        time.sleep(REQ_TIME_GAP)

    reviews = ray.get(handles)
    with open(f"{args.output_review_file}", "w") as output_review_file:
        for idx, review in enumerate(reviews):
            scores = parse_score(review)
            review_jsons[idx]["text"] = review
            review_jsons[idx]["score"] = scores
            output_review_file.write(json.dumps(review_jsons[idx]) + "\n")
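
# Illustrative invocation (the script and file names below are placeholders,
# not prescribed by this file; any JSONL paths matching the expected fields work):
#   python eval_review.py \
#       -q questions.jsonl \
#       -a answers_model_a.jsonl answers_model_b.jsonl \
#       -p prompts.jsonl \
#       -r reviewers.jsonl \
#       -o reviews.jsonl \
#       --max-tokens 1024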