jaimin committed
Commit aa4d252
1 Parent(s): 18d14fb

Update app.py

Files changed (1)
  1. app.py +36 -20
app.py CHANGED
@@ -1,36 +1,52 @@
 from PIL import Image
 import requests
 import gradio as gr
-from transformers import BlipProcessor, BlipForConditionalGeneration
-
-model = BlipForConditionalGeneration.from_pretrained('jaimin/Imagecap')
-processor = BlipProcessor.from_pretrained('jaimin/Imagecap')
-
-def predict(image, max_length=64, num_beams=4):
-    image = image.convert('RGB')
-    #image = feature_extractor(image, return_tensors="pt").pixel_values.to(device)
-    inputs = processor(image, return_tensors="pt")
-    #clean_text = lambda x: x.replace('<|endoftext|>','').split('\n')[0]
-    caption_ids = model.generate(inputs, max_length=max_length)[0]
-    caption_text = tokenizer.decode(caption_ids)
-    return processor.decode(caption_ids[0], skip_special_tokens=True)
-
-input = gr.inputs.Image(label="Upload your Image", type='pil', optional=True)
-output = gr.outputs.Textbox(label="Captions")
-
-title = "ImageCap"
-
-interface = gr.Interface(
-    fn=predict,
-    inputs=input,
-    outputs=output,
-    title=title,
-)
+from transformers import AutoProcessor, AutoTokenizer, AutoImageProcessor, AutoModelForCausalLM, BlipForConditionalGeneration, VisionEncoderDecoderModel
+import torch
+from label import predict_environment, recursion_change_bn, load_labels, hook_feature, returnCAM, returnTF, load_model
+
+# GIT captioning model
+git_processor = AutoProcessor.from_pretrained("microsoft/git-large-r-textcaps")
+git_model = AutoModelForCausalLM.from_pretrained("microsoft/git-large-r-textcaps")
+
+# BLIP captioning model
+blip_processor = AutoProcessor.from_pretrained("jaimin/Imagecap")
+blip_model = BlipForConditionalGeneration.from_pretrained("jaimin/Imagecap")
+
+device = "cuda" if torch.cuda.is_available() else "cpu"
+git_model.to(device)
+blip_model.to(device)
+
+def generate_caption(processor, model, image, use_float_16=False):
+    inputs = processor(images=image, return_tensors="pt").to(device)
+
+    if use_float_16:
+        inputs = inputs.to(torch.float16)
+
+    generated_ids = model.generate(pixel_values=inputs.pixel_values, max_length=50)
+    generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
+
+    return generated_caption
+
+def generate_captions(image):
+    img = image.convert("RGB")
+
+    caption_git = generate_caption(git_processor, git_model, img)
+    caption_blip = generate_caption(blip_processor, blip_model, img)
+    env, scene = predict_environment(img)
+
+    return env, scene, caption_git, caption_blip
+
+outputs = [gr.outputs.Textbox(label="Environment"),
+           gr.outputs.Textbox(label="Objects detected"),
+           gr.outputs.Textbox(label="Caption generated by GIT"),
+           gr.outputs.Textbox(label="Caption generated by BLIP")]
+
+title = "Image Cap with Scene"
+description = "Image caption with scene"
+
+interface = gr.Interface(fn=generate_captions,
+                         inputs=gr.inputs.Image(type="pil"),
+                         outputs=outputs,
+                         title=title,
+                         description=description,
+                         enable_queue=True)
 interface.launch(debug=True)
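
For reference, a minimal sketch of how the updated generate_captions path could be exercised outside the Gradio UI, assuming app.py's globals are importable, the label module (predict_environment) sits next to app.py, and the microsoft/git-large-r-textcaps and jaimin/Imagecap checkpoints download successfully; the image URL is only an illustrative example, not part of the commit.

    # Sketch, not part of the commit: run the captioning path without launching Gradio.
    from PIL import Image
    import requests

    # Example image URL (illustrative); any local PIL image works the same way.
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    env, scene, caption_git, caption_blip = generate_captions(image)
    print("Environment:", env)
    print("Objects detected:", scene)
    print("GIT caption:", caption_git)
    print("BLIP caption:", caption_blip)

Calling generate_captions directly returns the same four strings the Gradio interface maps onto its four Textbox outputs, which makes it easy to smoke-test the models before deploying the Space.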