funnyPhani committed on
Commit
9e3a587
•
1 Parent(s): a3515c7

Upload 2 files

Files changed (2)
  1. app (1).py +85 -0
  2. requirements (1).txt +6 -0
app (1).py ADDED
@@ -0,0 +1,85 @@
+ import gradio as gr
+ import torch
+ from transformers import AutoProcessor, AutoModelForCausalLM, BlipForConditionalGeneration, Blip2ForConditionalGeneration, InstructBlipForConditionalGeneration
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ # Example images for the demo.
+ torch.hub.download_url_to_file('http://images.cocodataset.org/val2017/000000039769.jpg', 'cats.jpg')
+ torch.hub.download_url_to_file('https://huggingface.co/datasets/nielsr/textcaps-sample/resolve/main/stop_sign.png', 'stop_sign.png')
+ torch.hub.download_url_to_file('https://cdn.openai.com/dall-e-2/demos/text2im/astronaut/horse/photo/0.jpg', 'astronaut.jpg')
+
+ # GIT-large fine-tuned on COCO.
+ git_processor_large_coco = AutoProcessor.from_pretrained("microsoft/git-large-coco")
+ git_model_large_coco = AutoModelForCausalLM.from_pretrained("microsoft/git-large-coco").to(device)
+
+ # BLIP-large.
+ blip_processor_large = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
+ blip_model_large = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large").to(device)
+
+ # BLIP-2 (OPT-6.7b) and InstructBLIP (Vicuna-7b) are loaded in 4-bit to reduce memory use.
+ blip2_processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-6.7b-coco")
+ blip2_model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-6.7b-coco", device_map="auto", load_in_4bit=True, torch_dtype=torch.float16)
+
+ instructblip_processor = AutoProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
+ instructblip_model = InstructBlipForConditionalGeneration.from_pretrained("Salesforce/instructblip-vicuna-7b", device_map="auto", load_in_4bit=True, torch_dtype=torch.float16)
+
+
+ def generate_caption(processor, model, image, tokenizer=None, use_float_16=False):
+     inputs = processor(images=image, return_tensors="pt").to(device)
+
+     if use_float_16:
+         inputs = inputs.to(torch.float16)
+
+     generated_ids = model.generate(pixel_values=inputs.pixel_values, num_beams=3, max_length=20, min_length=5)
+
+     # Decode with the standalone tokenizer if one is supplied, else via the processor.
+     if tokenizer is not None:
+         generated_caption = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
+     else:
+         generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
+
+     return generated_caption
+
+
+ def generate_caption_blip2(processor, model, image, replace_token=False):
+     prompt = "A photo of"
+     inputs = processor(images=image, text=prompt, return_tensors="pt").to(device=model.device, dtype=torch.float16)
+
+     generated_ids = model.generate(**inputs,
+                                    num_beams=5, max_length=50, min_length=1, top_p=0.9,
+                                    repetition_penalty=1.5, length_penalty=1.0, temperature=1)
+     if replace_token:
+         # TODO remove once https://github.com/huggingface/transformers/pull/24492 is merged
+         generated_ids[generated_ids == 0] = 2
+
+     return processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
+
+
+ def generate_captions(image):
+     caption_git_large_coco = generate_caption(git_processor_large_coco, git_model_large_coco, image)
+     caption_blip_large = generate_caption(blip_processor_large, blip_model_large, image)
+     caption_blip2 = generate_caption_blip2(blip2_processor, blip2_model, image).strip()
+     caption_instructblip = generate_caption_blip2(instructblip_processor, instructblip_model, image, replace_token=True)
+
+     return caption_git_large_coco, caption_blip_large, caption_blip2, caption_instructblip
+
+
+ examples = [["cats.jpg"], ["stop_sign.png"], ["astronaut.jpg"]]
+ outputs = [gr.Textbox(label="Caption generated by GIT-large fine-tuned on COCO"),
+            gr.Textbox(label="Caption generated by BLIP-large"),
+            gr.Textbox(label="Caption generated by BLIP-2 OPT 6.7b"),
+            gr.Textbox(label="Caption generated by InstructBLIP Vicuna-7b")]
+
+ title = "Interactive demo: comparing image captioning models"
+ description = "Gradio demo comparing GIT, BLIP, BLIP-2 and InstructBLIP, four state-of-the-art vision-language models. Upload an image and click 'Submit', or click one of the examples to load it. Read more at the links below."
+ article = "<p style='text-align: center'><a href='https://huggingface.co/docs/transformers/main/model_doc/blip' target='_blank'>BLIP docs</a> | <a href='https://huggingface.co/docs/transformers/main/model_doc/git' target='_blank'>GIT docs</a></p>"
+
+ interface = gr.Interface(fn=generate_captions,
+                          inputs=gr.Image(type="pil"),
+                          outputs=outputs,
+                          examples=examples,
+                          title=title,
+                          description=description,
+                          article=article)
+ interface.queue().launch(debug=True, share=True)
requirements (1).txt ADDED
@@ -0,0 +1,6 @@
+ git+https://github.com/huggingface/transformers.git@main
+ torch
+ accelerate
+ bitsandbytes
+ scipy
+ gradio
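
For context, the per-model captioning pattern in app (1).py can be exercised on its own. Below is a minimal standalone sketch using only the BLIP-large checkpoint from the diff; it assumes a local image file exists (here cats.jpg, one of the demo downloads) and skips the Gradio UI entirely:

import torch
from PIL import Image
from transformers import AutoProcessor, BlipForConditionalGeneration

device = "cuda" if torch.cuda.is_available() else "cpu"

# Same checkpoint as blip_model_large in app (1).py.
processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large").to(device)

image = Image.open("cats.jpg").convert("RGB")
inputs = processor(images=image, return_tensors="pt").to(device)

# Same beam-search settings as generate_caption() above.
generated_ids = model.generate(**inputs, num_beams=3, max_length=20, min_length=5)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])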