lotrlol freddyaboulton (HF staff) committed
Commit e119ebb • 0 Parent(s)

Duplicate from gradio-client-demos/comparing-captioning-models


Co-authored-by: Freddy Boulton <freddyaboulton@users.noreply.huggingface.co>

Files changed (4)
  1. .gitattributes +34 -0
  2. README.md +13 -0
  3. app.py +113 -0
  4. requirements.txt +5 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ title: Comparing Captioning Models
+ emoji: 🔥
+ colorFrom: yellow
+ colorTo: pink
+ sdk: gradio
+ sdk_version: 3.15.0
+ app_file: app.py
+ pinned: false
+ duplicated_from: gradio-client-demos/comparing-captioning-models
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,113 @@
+ import gradio as gr
+ from transformers import AutoProcessor, AutoTokenizer, AutoImageProcessor, AutoModelForCausalLM, BlipForConditionalGeneration, Blip2ForConditionalGeneration, VisionEncoderDecoderModel
+ import torch
+ import open_clip
+
+ from huggingface_hub import hf_hub_download
+
+ torch.hub.download_url_to_file('http://images.cocodataset.org/val2017/000000039769.jpg', 'cats.jpg')
+ torch.hub.download_url_to_file('https://huggingface.co/datasets/nielsr/textcaps-sample/resolve/main/stop_sign.png', 'stop_sign.png')
+ torch.hub.download_url_to_file('https://cdn.openai.com/dall-e-2/demos/text2im/astronaut/horse/photo/0.jpg', 'astronaut.jpg')
+
+ # git_processor_base = AutoProcessor.from_pretrained("microsoft/git-base-coco")
+ # git_model_base = AutoModelForCausalLM.from_pretrained("microsoft/git-base-coco")
+
+ git_processor_large_coco = AutoProcessor.from_pretrained("microsoft/git-large-coco")
+ git_model_large_coco = AutoModelForCausalLM.from_pretrained("microsoft/git-large-coco")
+
+ git_processor_large_textcaps = AutoProcessor.from_pretrained("microsoft/git-large-r-textcaps")
+ git_model_large_textcaps = AutoModelForCausalLM.from_pretrained("microsoft/git-large-r-textcaps")
+
+ # blip_processor_base = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
+ # blip_model_base = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
+
+ blip_processor_large = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
+ blip_model_large = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
+
+ # blip2_processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
+ # blip2_model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16)
+
+ blip2_processor_8_bit = AutoProcessor.from_pretrained("Salesforce/blip2-opt-6.7b")
+ blip2_model_8_bit = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-6.7b", device_map="auto", load_in_8bit=True)
+
+ # vitgpt_processor = AutoImageProcessor.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
+ # vitgpt_model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
+ # vitgpt_tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
+
+ coca_model, _, coca_transform = open_clip.create_model_and_transforms(
+     model_name="coca_ViT-L-14",
+     pretrained="mscoco_finetuned_laion2B-s13B-b90k"
+ )
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ # git_model_base.to(device)
+ # blip_model_base.to(device)
+ git_model_large_coco.to(device)
+ git_model_large_textcaps.to(device)
+ blip_model_large.to(device)
+ # vitgpt_model.to(device)
+ coca_model.to(device)
+ # blip2_model.to(device)
+
+ def generate_caption(processor, model, image, tokenizer=None, use_float_16=False):
+     inputs = processor(images=image, return_tensors="pt").to(device)
+
+     if use_float_16:
+         inputs = inputs.to(torch.float16)
+
+     generated_ids = model.generate(pixel_values=inputs.pixel_values, max_length=50)
+
+     if tokenizer is not None:
+         generated_caption = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
+     else:
+         generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
+
+     return generated_caption
+
+
+ def generate_caption_coca(model, transform, image):
+     im = transform(image).unsqueeze(0).to(device)
+     with torch.no_grad(), torch.cuda.amp.autocast():
+         generated = model.generate(im, seq_len=20)
+     return open_clip.decode(generated[0].detach()).split("<end_of_text>")[0].replace("<start_of_text>", "")
+
+
+ def generate_captions(image):
+     # caption_git_base = generate_caption(git_processor_base, git_model_base, image)
+
+     caption_git_large_coco = generate_caption(git_processor_large_coco, git_model_large_coco, image)
+
+     caption_git_large_textcaps = generate_caption(git_processor_large_textcaps, git_model_large_textcaps, image)
+
+     # caption_blip_base = generate_caption(blip_processor_base, blip_model_base, image)
+
+     caption_blip_large = generate_caption(blip_processor_large, blip_model_large, image)
+
+     # caption_vitgpt = generate_caption(vitgpt_processor, vitgpt_model, image, vitgpt_tokenizer)
+
+     caption_coca = generate_caption_coca(coca_model, coca_transform, image)
+
+     # caption_blip2 = generate_caption(blip2_processor, blip2_model, image, use_float_16=True).strip()
+
+     caption_blip2_8_bit = generate_caption(blip2_processor_8_bit, blip2_model_8_bit, image, use_float_16=True).strip()
+
+     return caption_git_large_coco, caption_git_large_textcaps, caption_blip_large, caption_coca, caption_blip2_8_bit
+
+
+ examples = [["cats.jpg"], ["stop_sign.png"], ["astronaut.jpg"]]
+ outputs = [gr.outputs.Textbox(label="Caption generated by GIT-large fine-tuned on COCO"), gr.outputs.Textbox(label="Caption generated by GIT-large fine-tuned on TextCaps"), gr.outputs.Textbox(label="Caption generated by BLIP-large"), gr.outputs.Textbox(label="Caption generated by CoCa"), gr.outputs.Textbox(label="Caption generated by BLIP-2 OPT 6.7b")]
+
+ title = "Interactive demo: comparing image captioning models"
+ description = "Gradio Demo to compare GIT, BLIP, CoCa, and BLIP-2, 4 state-of-the-art vision+language models. To use it, simply upload your image and click 'submit', or click one of the examples to load them. Read more at the links below."
+ article = "<p style='text-align: center'><a href='https://huggingface.co/docs/transformers/main/model_doc/blip' target='_blank'>BLIP docs</a> | <a href='https://huggingface.co/docs/transformers/main/model_doc/git' target='_blank'>GIT docs</a></p>"
+
+ interface = gr.Interface(fn=generate_captions,
+                          inputs=gr.inputs.Image(type="pil"),
+                          outputs=outputs,
+                          examples=examples,
+                          title=title,
+                          description=description,
+                          article=article,
+                          enable_queue=True)
+ interface.launch(debug=True)
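
For orientation, a minimal local usage sketch of `generate_captions` (not part of the commit; it assumes the models above have finished loading and that `cats.jpg` was downloaded by the script):

from PIL import Image

# Hypothetical local check: caption one of the bundled example images with all five models.
image = Image.open("cats.jpg").convert("RGB")
git_coco, git_textcaps, blip_large, coca, blip2 = generate_captions(image)
print(git_coco, git_textcaps, blip_large, coca, blip2, sep="\n")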
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ git+https://github.com/huggingface/transformers.git@main
+ torch
+ open_clip_torch
+ accelerate
+ bitsandbytes
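
These pins line up with app.py: `accelerate` and `bitsandbytes` back the `device_map="auto"` / `load_in_8bit=True` BLIP-2 load, `open_clip_torch` provides the `open_clip` module used for CoCa, and `transformers` is installed from `main`, presumably because the BLIP-2 classes had not yet shipped in a tagged release at the time. A small, hypothetical environment-check sketch (not part of the commit; note the pip package `open_clip_torch` is imported as `open_clip`):

import importlib.util

# Map each requirement to the module name it is imported under in app.py.
for pkg in ("torch", "transformers", "open_clip", "accelerate", "bitsandbytes"):
    status = "ok" if importlib.util.find_spec(pkg) is not None else "missing"
    print(f"{pkg}: {status}")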