Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -65,6 +65,24 @@ def gpt3_short(question,vqa_answer,caption):
     llm_ans=vqa_answer[ord(llm_ans)-ord("A")]
     answer=llm_ans
 
+    return answer
+def gpt3_long(question,vqa_answer,caption):
+    vqa_answer,vqa_score=vqa_answer
+    prompt="prompt: This is the caption of a picture: "+caption+". Question: "+question+" VQA model predicts:"+"A: "+vqa_answer[0]+"socre:"+str(vqa_score[0])+\
+        " B: "+vqa_answer[1]+" score:"+str(vqa_score[1])+" C: "+vqa_answer[2]+" score:"+str(vqa_score[2])+\
+        " D: "+vqa_answer[3]+'score:'+str(vqa_score[3])+\
+        "Tell me the right answer with a long sentence."
+
+    # prompt=caption+"\n"+question+"\n"+vqa_answer+"\n Tell me the right answer."
+    response = openai.Completion.create(
+        engine="text-davinci-003",
+        prompt=prompt,
+        max_tokens=30,
+        n=1,
+        stop=None,
+        temperature=0.7,
+    )
+    answer = response.choices[0].text.strip()
     return answer
 def gpt3(question,vqa_answer,caption):
     prompt=caption+"\n"+question+"\n"+vqa_answer+"\n Tell me the right answer."
@@ -94,8 +112,9 @@ def inference_chat(input_image,input_text):
     # out=processor.batch_decode(out, skip_special_tokens=True)
 
     out=vle(input_image,input_text)
-    vqa="\n".join(out[0])
-    gpt3_out=gpt3(input_text,vqa,cap)
+    # vqa="\n".join(out[0])
+    # gpt3_out=gpt3(input_text,vqa,cap)
+    gpt3_out=gpt3_long(input_text,out,cap)
     gpt3_out1=gpt3_short(input_text,out,cap)
     return out[0][0], gpt3_out,gpt3_out1
 title = """# VQA with VLE and LLM"""
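The gpt3_long function added in this commit unpacks the VLE output into candidate answers and scores, folds the top four candidates into an A-D multiple-choice prompt, and asks text-davinci-003 for a sentence-length answer. The following is a minimal readability sketch of that flow, not the committed code verbatim: it assumes vle returns an (answers, scores) pair with at least four candidates, assumes openai.api_key is configured elsewhere in app.py, and tidies the label spacing in the concatenated prompt.

import openai

def gpt3_long_sketch(question, vqa_output, caption):
    # vqa_output is assumed to be the (answers, scores) pair returned by vle().
    answers, scores = vqa_output
    # Fold the top-4 VQA candidates into an A-D multiple-choice listing.
    options = " ".join(
        f"{label}: {ans} score: {score}"
        for label, ans, score in zip("ABCD", answers, scores)
    )
    prompt = (
        f"This is the caption of a picture: {caption}. "
        f"Question: {question} VQA model predicts: {options} "
        "Tell me the right answer with a long sentence."
    )
    # Legacy (pre-1.0) OpenAI Completion API, matching the call in the commit;
    # openai.api_key is assumed to be set elsewhere in app.py.
    response = openai.Completion.create(
        engine="text-davinci-003",
        prompt=prompt,
        max_tokens=30,
        n=1,
        stop=None,
        temperature=0.7,
    )
    return response.choices[0].text.strip()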