|
import time |
|
import openai |
|
|
|
|
|
|
|
|
|
|
|
def decoder_for_gpt3(input, max_length, temperature=0, engine="text-davinci-003"):
    """Send a single prompt to the OpenAI API and return the generated text.

    Chat models ("gpt-3.5-turbo") are routed through the ChatCompletion
    endpoint; any other engine goes through the legacy Completion endpoint.

    Args:
        input: Prompt text sent to the model.
        max_length: Maximum number of tokens to generate.
        temperature: Sampling temperature (0 gives near-deterministic output).
        engine: Model identifier understood by the OpenAI API.

    Returns:
        The completion text as a plain string.
    """
    # Crude client-side rate limiting before every API call.
    time.sleep(1)

    if engine == "gpt-3.5-turbo":
        raw = openai.ChatCompletion.create(
            model=engine,
            messages=[{"role": "user", "content": input}],
            max_tokens=max_length,
            temperature=temperature,
            stop=None,
        )
        return raw["choices"][0]["message"]["content"]

    raw = openai.Completion.create(
        model=engine,
        prompt=input,
        max_tokens=max_length,
        stop=None,
        temperature=temperature,
    )
    return raw["choices"][0]["text"]
|
|
|
def decoder_for_gpt3_consistency(input, max_length, temp=0.7, n=5, engine="text-davinci-003"):
    """Sample *n* completions for one prompt (self-consistency decoding).

    Chat models ("gpt-3.5-turbo") use the ChatCompletion endpoint; other
    engines use the legacy Completion endpoint (which additionally requests
    logprobs, as the original code did).

    Args:
        input: Prompt text sent to the model.
        max_length: Maximum number of tokens per sampled completion.
        temp: Sampling temperature (higher = more diverse samples).
        n: Number of independent completions to request.
        engine: Model identifier understood by the OpenAI API.

    Returns:
        A list of n completion strings.
    """
    # Crude client-side rate limiting before every API call.
    time.sleep(1)

    if engine == "gpt-3.5-turbo":
        responses = openai.ChatCompletion.create(
            model=engine,
            messages=[
                {"role": "user", "content": input}
            ],
            max_tokens=max_length,
            temperature=temp,
            top_p=1,
            # BUG FIX: was hardcoded n=5, so calling with n > 5 raised
            # IndexError when collecting results, and n < 5 over-requested.
            n=n,
            stop=["\n"],
        )
        # Iterate the actual choices rather than range(n) so the result
        # always matches what the API returned.
        responses = [choice["message"]["content"] for choice in responses["choices"]]
    else:
        time.sleep(1)
        responses = openai.Completion.create(
            model=engine,
            prompt=input,
            max_tokens=max_length,
            temperature=temp,
            stop=["\n"],
            n=n,  # BUG FIX: was hardcoded 5, ignoring the n parameter.
            logprobs=5,
            top_p=1,
        )
        responses = [choice["text"] for choice in responses["choices"]]

    return responses
|
|
|
def zero_shot(question, engine="gpt-3.5-turbo"):
    """Answer a multiple-choice commonsense question zero-shot via chat API.

    Appends the answer-extraction cue "Among A through E, the answer is"
    to the question and asks a chat model for the answer directly.

    Args:
        question: The multiple-choice question text (options A-E expected).
        engine: Chat model identifier; generalized from the previously
            hardcoded "gpt-3.5-turbo" (default preserves old behavior).

    Returns:
        The model's answer text as a string.
    """
    input = question + " " + "Among A through E, the answer is"
    response = openai.ChatCompletion.create(
        model=engine,
        messages=[
            {"role": "system", "content": "You are a helpful assistant that answer commonsense questions."},
            {"role": "user", "content": input}
        ]
    )
    # Extract just the generated message text from the API envelope.
    response = response["choices"][0]["message"]["content"]
    return response