import gradio as gr
from gradio_client import Client

#fusecap_client = Client("https://noamrot-fusecap-image-captioning.hf.space/")
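# Remote client for the hosted Fuyu-8b captioning demo Space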
fuyu_client = Client("https://adept-fuyu-8b-demo.hf.space/")

def get_caption(image_in):
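    # Ask the remote Fuyu-8b demo Space to caption the uploaded image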
    
    fuyu_result = fuyu_client.predict(
        image_in,  # str: input for the 'raw_image' Image component
        False,     # bool: 'Enable detailed captioning' Checkbox component
        fn_index=2
    )
    print(f"IMAGE CAPTION: {fuyu_result}")
    return fuyu_result

import re
import torch
from transformers import pipeline
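# Zephyr-7B-beta drafts the chatbot title, system prompt, and example input from the image caption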

pipe = pipeline("text-generation", model="HuggingFaceH4/zephyr-7b-beta", torch_dtype=torch.bfloat16, device_map="auto")

agent_maker_sys = """
You are an AI whose job it is to help users create their own chatbots, based on the image description the user provides. In particular, you need to respond succinctly in a friendly tone, write a system prompt for an LLM, a catchy title for the chatbot, and a very short example user input. Make sure each part is included.
You'll use the image description to create a chatbot whose personality MUST reflect the information provided by the user.
For example, if a user says, "a picture of a man in a black suit and tie riding a black dragon", first give a friendly response, then add the title, system prompt, and example user input. Immediately STOP after the example input. It should be EXACTLY in this format:
Sure, I'd be happy to help you build a bot! I'm generating a title, system prompt, and an example input. How do they sound? Feel free to give me feedback!
Title: Dragon Trainer
System prompt: As an LLM, your job is to provide guidance and tips on mastering dragons. Use a friendly and informative tone.
Example input: How can I train a dragon to breathe fire?
Here's another example. If a user types, "a picture of a young girl with long brown hair and black glasses sits on a blanket in a park, reading an open book", respond: 
Sure, I'd be happy to help you build a bot! I'm generating a title, system prompt, and an example input. How do they sound? Feel free to give me feedback!
Title: Book Buddy
System prompt: Your job as an LLM is to provide book recommendations based on the preferences of the user. You are a friendly and knowledgeable librarian who loves to read. Be helpful and encouraging, but also make sure your suggestions are age-appropriate for the user in the image. 
Example input: What books would you recommend for a 9-year-old girl who loves animals and adventure?
"""

# Zephyr chat template: the system prompt fills the <|system|> turn; the image caption is appended as the user turn
instruction = f"""
<|system|>
{agent_maker_sys}</s>
<|user|>
"""

def infer(image_in):
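    # Caption the image with Fuyu, then ask Zephyr to suggest a title, system prompt, and example input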
    gr.Info("Getting image caption with Fuyu...")
    user_prompt = get_caption(image_in)
    
    prompt = f"{instruction.strip()}\n{user_prompt}</s>"    
    #print(f"PROMPT: {prompt}")
    
    gr.Info("Building a system prompt according to the image caption...")
    outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
    

    # Strip the echoed <|system|>...<|assistant|> prompt so only the assistant's reply remains
    pattern = r'<\|system\|>(.*?)<\|assistant\|>'
    cleaned_text = re.sub(pattern, '', outputs[0]["generated_text"], flags=re.DOTALL)
    
    print(f"SUGGESTED LLM: {cleaned_text}")
    
    return cleaned_text

title = "LLM Agent from a Picture"
description = "Get an LLM system prompt from a picture so you can use it in <a href='https://huggingface.co/spaces/abidlabs/GPT-Baker'>GPT-Baker</a>."

css = """
#col-container{
    margin: 0 auto;
    max-width: 640px;
    text-align: left;
}
"""

with gr.Blocks(css=css) as demo:
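    # Layout: header column, then a row with the image input and button on the left and the suggested system prompt on the right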
    with gr.Column(elem_id="col-container"):
        gr.HTML(f"""
        <h2 style="text-align: center;">LLM Agent from a Picture</h2>
        <p style="text-align: center;">{description}</p>
        """)
    with gr.Row():
        with gr.Column():
            image_in = gr.Image(
                label = "Image reference",
                type = "filepath"
            )
            submit_btn = gr.Button("Make LLM system from my pic!")
        with gr.Column():
            result = gr.Textbox(
                label = "Suggested System"
            )

    submit_btn.click(
        fn = infer,
        inputs = [
            image_in
        ],
        outputs =[
            result
        ]
    )

demo.queue().launch()