xxx1 committed on
Commit
f66ceb8
1 Parent(s): eef074b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -10
app.py CHANGED
@@ -5,23 +5,23 @@ import torch
5
  from models.VLE import VLEForVQA, VLEProcessor, VLEForVQAPipeline
6
  from PIL import Image
7
 
8
- # model_name="hfl/vle-base-for-vqa"
9
- # model = VLEForVQA.from_pretrained(model_name)
10
- # vle_processor = VLEProcessor.from_pretrained(model_name)
11
- # vqa_pipeline = VLEForVQAPipeline(model=model, device='cpu', vle_processor=vle_processor)
12
 
13
 
14
  from transformers import BlipForQuestionAnswering, BlipProcessor
15
 
16
- # device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
17
 
18
- # processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-capfilt-large")
19
- # model_vqa = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-capfilt-large").to(device)
20
 
21
  from transformers import BlipProcessor, BlipForConditionalGeneration
22
 
23
- # cap_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
24
- # cap_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
25
 
26
 
27
 
@@ -165,7 +165,7 @@ with gr.Blocks(
165
  '''
166
  with gr.Column():
167
  gr.Markdown(description1)
168
- caption_output = gr.Textbox(lines=0, label="* VQA + LLM (short answer):\n The captioning model generates a caption \n of the image. We feed the caption")
169
  caption_output_v1 = gr.Textbox(lines=0, label="VQA + LLM (short answer)")
170
  gpt3_output_v1 = gr.Textbox(lines=0, label="VQA+LLM (long answer)")
171
 
 
5
  from models.VLE import VLEForVQA, VLEProcessor, VLEForVQAPipeline
6
  from PIL import Image
7
 
8
+ model_name="hfl/vle-base-for-vqa"
9
+ model = VLEForVQA.from_pretrained(model_name)
10
+ vle_processor = VLEProcessor.from_pretrained(model_name)
11
+ vqa_pipeline = VLEForVQAPipeline(model=model, device='cpu', vle_processor=vle_processor)
12
 
13
 
14
  from transformers import BlipForQuestionAnswering, BlipProcessor
15
 
16
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
17
 
18
+ processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-capfilt-large")
19
+ model_vqa = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-capfilt-large").to(device)
20
 
21
  from transformers import BlipProcessor, BlipForConditionalGeneration
22
 
23
+ cap_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
24
+ cap_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
25
 
26
 
27
 
 
165
  '''
166
  with gr.Column():
167
  gr.Markdown(description1)
168
+ caption_output = gr.Textbox(lines=0, label="VQA")
169
  caption_output_v1 = gr.Textbox(lines=0, label="VQA + LLM (short answer)")
170
  gpt3_output_v1 = gr.Textbox(lines=0, label="VQA+LLM (long answer)")
171