codino1315 committed on
Commit
f9f2488
1 Parent(s): 0579b7b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +32 -32
app.py CHANGED
@@ -1,45 +1,45 @@
1
- # from PIL import Image
2
- # import requests
3
- # import gradio as gr
4
 
5
- # from transformers import BlipProcessor, BlipForConditionalGeneration
6
 
7
- # model_id = "Salesforce/blip-image-captioning-base"
8
 
9
- # model = BlipForConditionalGeneration.from_pretrained(model_id)
10
- # processor = BlipProcessor.from_pretrained(model_id)
11
 
12
- # def launch(input):
13
- # image = Image.open(requests.get(input, stream=True).raw).convert('RGB')
14
- # inputs = processor(image, return_tensors="pt")
15
- # out = model.generate(**inputs)
16
- # return processor.decode(out[0], skip_special_tokens=True)
17
 
18
- # iface = gr.Interface(launch, inputs="text", outputs="text")
19
- # iface.launch()
20
 
21
- from PIL import Image
22
- import requests
23
- from transformers import Blip2Processor, Blip2ForConditionalGeneration
24
- import torch
25
 
26
- device = "cuda" if torch.cuda.is_available() else "cpu"
27
 
28
- processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
29
- model = Blip2ForConditionalGeneration.from_pretrained(
30
- "Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16
31
- )
32
- model.to(device)
33
 
34
- def launch(input):
35
 
36
- image = Image.open(requests.get(input, stream=True).raw)
37
 
38
- inputs = processor(images=image, return_tensors="pt").to(device, torch.float16)
39
 
40
- generated_ids = model.generate(**inputs)
41
- generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
42
- return generated_text
43
 
44
- iface = gr.Interface(launch, inputs="text", outputs="text")
45
- iface.launch()
 
1
+ from PIL import Image
2
+ import requests
3
+ import gradio as gr
4
 
5
+ from transformers import BlipProcessor, BlipForConditionalGeneration
6
 
7
+ model_id = "Salesforce/blip-image-captioning-base"
8
 
9
+ model = BlipForConditionalGeneration.from_pretrained(model_id)
10
+ processor = BlipProcessor.from_pretrained(model_id)
11
 
12
+ def launch(input):
13
+ image = Image.open(requests.get(input, stream=True).raw).convert('RGB')
14
+ inputs = processor(image, return_tensors="pt")
15
+ out = model.generate(**inputs)
16
+ return processor.decode(out[0], skip_special_tokens=True)
17
 
18
+ iface = gr.Interface(launch, inputs="text", outputs="text")
19
+ iface.launch()
20
 
21
+ # from PIL import Image
22
+ # import requests
23
+ # from transformers import Blip2Processor, Blip2ForConditionalGeneration
24
+ # import torch
25
 
26
+ # device = "cuda" if torch.cuda.is_available() else "cpu"
27
 
28
+ # processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
29
+ # model = Blip2ForConditionalGeneration.from_pretrained(
30
+ # "Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16
31
+ # )
32
+ # model.to(device)
33
 
34
+ # def launch(input):
35
 
36
+ # image = Image.open(requests.get(input, stream=True).raw)
37
 
38
+ # inputs = processor(images=image, return_tensors="pt").to(device, torch.float16)
39
 
40
+ # generated_ids = model.generate(**inputs)
41
+ # generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
42
+ # return generated_text
43
 
44
+ # iface = gr.Interface(launch, inputs="text", outputs="text")
45
+ # iface.launch()