import string
import os

import gradio as gr
import requests
import torch
from PIL import Image

from models.VLE import VLEForVQA, VLEProcessor, VLEForVQAPipeline

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print("device:", device)

# VLE model for visual question answering.
model_name = "hfl/vle-base-for-vqa"
model = VLEForVQA.from_pretrained(model_name)
vle_processor = VLEProcessor.from_pretrained(model_name)
vqa_pipeline = VLEForVQAPipeline(model=model, device=device, vle_processor=vle_processor)

# BLIP model for image captioning.
from transformers import BlipProcessor, BlipForConditionalGeneration

cap_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
cap_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
print("cap_model device:", cap_model.device)
cap_model.to(device)
print("cap_model device:", cap_model.device)


def caption(input_image):
    """Generate a caption for the image with BLIP."""
    inputs = cap_processor(input_image, return_tensors="pt").to(device)
    # No generation kwargs: generate() defaults to greedy search with a single sequence.
    out = cap_model.generate(**inputs)
    return "\n".join(cap_processor.batch_decode(out, skip_special_tokens=True))


import openai

openai.api_key = os.getenv('openai_appkey')


def gpt3_short(question, vqa_answer, caption):
    """Ask GPT-3 to pick one of the top-4 VQA answers; returns the chosen answer string."""
    vqa_answer, vqa_score = vqa_answer
    prompt = "This is the caption of a picture: " + caption + ". Question: " + question + \
        " VQA model predicts: " + \
        "A: " + vqa_answer[0] + ", score: " + f"{vqa_score[0]:.2f}" + \
        "; B: " + vqa_answer[1] + ", score: " + f"{vqa_score[1]:.2f}" + \
        "; C: " + vqa_answer[2] + ", score: " + f"{vqa_score[2]:.2f}" + \
        "; D: " + vqa_answer[3] + ", score: " + f"{vqa_score[3]:.2f}" + \
        ". Choose A if A is not in conflict with the description of the picture, " \
        "otherwise A might be incorrect, and choose B, C or D based on the description. " \
        "Answer with A or B or C or D."

    response = openai.Completion.create(
        engine="text-davinci-003",
        prompt=prompt,
        max_tokens=30,
        n=1,
        stop=None,
        temperature=0.7,
    )
    answer = response.choices[0].text.strip()

    # Extract the first A/B/C/D token from the completion; default to A if none is found.
    choice = {"A", "B", "C", "D"}
    llm_ans = answer.replace("\n", " ").replace(":", " ").replace(".", " ").replace(",", " ")
    for cho in llm_ans.split(" "):
        if cho in choice:
            llm_ans = cho
            break
    if llm_ans not in choice:
        llm_ans = "A"
    # Map the chosen letter back to the corresponding VQA answer.
    return vqa_answer[ord(llm_ans) - ord("A")]


def gpt3_long(question, vqa_answer, caption):
    """Ask GPT-3 for a full-sentence answer grounded in the caption and the VQA predictions."""
    vqa_answer, vqa_score = vqa_answer
    prompt = "This is the caption of a picture: " + caption + ". Question: " + question + \
        " VQA model predicts: " + vqa_answer[0] + ", score:" + f"{vqa_score[0]:.2f}" + \
        "; " + vqa_answer[1] + ", score:" + f"{vqa_score[1]:.2f}" + \
        "; " + vqa_answer[2] + ", score:" + f"{vqa_score[2]:.2f}" + \
        "; " + vqa_answer[3] + ", score:" + f"{vqa_score[3]:.2f}" + \
        ". Answer the question with a sentence without mentioning the VQA model and the score."

    response = openai.Completion.create(
        engine="text-davinci-003",
        prompt=prompt,
        max_tokens=50,
        n=1,
        stop=None,
        temperature=0.7,
    )
    return response.choices[0].text.strip()


def gpt3(question, vqa_answer, caption):
    """Minimal prompt: caption, question and raw VQA answers; ask GPT-3 for the right answer."""
    prompt = caption + "\n" + question + "\n" + vqa_answer + "\n Tell me the right answer."
    response = openai.Completion.create(
        engine="text-davinci-003",
        prompt=prompt,
        max_tokens=50,
        n=1,
        stop=None,
        temperature=0.7,
    )
    return response.choices[0].text.strip()


def vle(input_image, input_text):
    """Run the VLE pipeline and return the top-4 answers and their scores."""
    vqa_answers = vqa_pipeline({"image": input_image, "question": input_text}, top_k=4)
    return [vqa['answer'] for vqa in vqa_answers], [vqa['score'] for vqa in vqa_answers]


def inference_chat(input_image, input_text):
    """Full pipeline: caption the image, run VQA, then let GPT-3 phrase the final answer."""
    # Truncate overly long questions (at most 200 characters / 60 words).
    input_text = input_text[:200]
    input_text = " ".join(input_text.split(" ")[:60])

    cap = caption(input_image)
    print("Caption:", cap)

    out = vle(input_image, input_text)
    print("VQA: ", out)

    # gpt3_short(input_text, out, cap) is an alternative that returns only the chosen VQA answer.
    gpt3_out = gpt3_long(input_text, out, cap)
    return out[0][0], gpt3_out


title = """