import os
import gradio as gr

from haystack.nodes import TransformersImageToText
from haystack.nodes import PromptNode, PromptTemplate
from haystack import Pipeline

description = """
# Captionate ✨ 📸
### Create Instagram captions for your pics! 

`OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5` and `tiiuae/falcon-7b-instruct` perform the best, but try out different models to see how they react to the same prompt.

Built by [Bilge Yucel](https://twitter.com/bilgeycl) using [Haystack](https://github.com/deepset-ai/haystack) 💙
"""

# Image-to-text model that turns the uploaded photo into a short descriptive text
image_to_text = TransformersImageToText(
    model_name_or_path="nlpconnect/vit-gpt2-image-captioning",
    progress_bar=True
)

# Prompt that turns the generated description into an Instagram caption
prompt_template = PromptTemplate(prompt="""
You will receive a descriptive text of a photo.
Try to come up with a nice Instagram caption that has a phrase rhyming with the text. Include emojis in the caption.

Descriptive text: {documents};
Instagram Caption:
""")

# Hugging Face API token, read from the environment (HF_API_KEY) instead of being hardcoded in the source
hf_api_key = os.getenv("HF_API_KEY")

def generate_caption(image_file_path, model_name):
    # Chain the captioning model into the selected LLM: File -> image_to_text -> prompt_node
    captioning_pipeline = Pipeline()
    prompt_node = PromptNode(
        model_name_or_path=model_name,
        api_key=hf_api_key,
        default_prompt_template=prompt_template,
        model_kwargs={"trust_remote_code": True},
    )
    captioning_pipeline.add_node(component=image_to_text, name="image_to_text", inputs=["File"])
    captioning_pipeline.add_node(component=prompt_node, name="prompt_node", inputs=["image_to_text"])
    caption = captioning_pipeline.run(file_paths=[image_file_path])
    return caption["results"][0]

# Gradio UI: upload an image, pick a model, and get the generated caption
with gr.Blocks(theme="monochrome") as demo:
    gr.Markdown(value=description)
    with gr.Row():
        image = gr.Image(type="filepath")
        with gr.Column():
            model_name = gr.Dropdown(
                ["OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5", "tiiuae/falcon-7b-instruct", "tiiuae/falcon-7b",
                 "EleutherAI/gpt-neox-20b", "HuggingFaceH4/starchat-beta", "bigscience/bloom"],
                value="OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
                label="Choose your model!",
            )
            gr.Examples(["./whale.png"], examples_per_page=1, inputs=image, label="Click on any example") 
    submit_btn = gr.Button("✨ Captionate ✨")
    caption = gr.Textbox(label="Caption", show_copy_button=True)
    submit_btn.click(fn=generate_caption, inputs=[image, model_name], outputs=[caption])

if __name__ == "__main__":
    demo.launch()
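
# A minimal sketch of how to run this app locally, assuming Haystack 1.x (the
# `farm-haystack` package) and Gradio are installed, this file is saved as app.py,
# and HF_API_KEY holds a valid Hugging Face token:
#
#   pip install farm-haystack[inference] gradio
#   HF_API_KEY=<your_token> python app.py
#
# Gradio then serves the UI on http://localhost:7860 by default.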