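# app.py for a Gradio chatbot Space that expands user prompts with a chat
# model, then renders them with a selectable diffusion model. (The Space
# name "chat-diffusion-describe" is inferred from the URL used in run().)
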
import os
import random
from huggingface_hub import InferenceClient
import gradio as gr
#from utils import parse_action, parse_file_content, read_python_module_structure
from datetime import datetime
from PIL import Image
import agent
from models import models
import urllib.request
import uuid
import requests
import io
from chat_models import models as c_models


# Preload each diffusion model as a Gradio interface so it can be called
# by dropdown index later on.
loaded_model = []
for model in models:
    loaded_model.append(gr.load(f'models/{model}'))
print(loaded_model)

# Copy the chat model list; entries are likewise selected by index.
chat_model = list(c_models)
print(chat_model)
now = datetime.now()
date_time_str = now.strftime("%Y-%m-%d %H:%M:%S")

#client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
history = []

def gen_from_infer(purpose, history, image, model_drop, chat_drop, choice, seed, im_seed):
    # Generate a prompt with the chat model, then render it through the
    # serverless Inference API instead of the preloaded gr.load models.
    # `image` and `choice` are wired from the UI but unused here.
    history.clear()
    if seed == 0:
        seed = random.randint(1, 1111111111111111)
    if im_seed == 0:
        im_seed = random.randint(1, 1111111111111111)
    out_prompt = generate(purpose, history, chat_drop, seed)
    history.append((purpose, out_prompt))
    # First yield shows the expanded prompt while the image renders.
    yield (history, None)
    infer_model = models[int(model_drop)]
    print(infer_model)
    infer = InferenceClient(infer_model)
    print(infer)

    out_img = infer.text_to_image(
        prompt=out_prompt,
        height=512,
        width=512,
        seed=im_seed,
    )
    yield (history, out_img)
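
# Note: forwarding `seed` to text_to_image assumes the installed
# huggingface_hub version supports it; older releases may not.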

def format_prompt(message, history, seed):
    # Build a Mixtral-style instruction prompt from the chat history.
    # `seed` is accepted for call-site consistency but is not used here.
    prompt = "<s>"
    repeated = False
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
        print(f'MESSAGE :: {message}, USER_PROMPT :: {user_prompt}')
        if user_prompt == message:
            repeated = True
    if repeated:
        # The message was already answered once; restart the prompt so the
        # model is not conditioned on its own earlier reply.
        return "<s>" + f"[INST] {message} [/INST]"
    prompt += f"[INST] {message} [/INST]"
    return prompt
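
# Illustrative shape of the prompt built above, for a one-turn history:
#   "<s>[INST] a red fox [/INST] A photo of a red fox...</s> [INST] a snowy fox [/INST]"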

def run_gpt(in_prompt, history, model_drop, seed):
    # Expand the user's purpose into an image prompt with the selected chat model.
    client = InferenceClient(c_models[int(model_drop)])
    print(f'history :: {history}')
    prompt = format_prompt(in_prompt, history, seed)
    if seed == 0:
        seed = random.randint(1, 1111111111111111)
    print(seed)
    generate_kwargs = dict(
        temperature=1.0,
        max_new_tokens=1048,
        top_p=0.99,
        repetition_penalty=1.0,
        do_sample=True,
        seed=seed,
    )
    content = agent.GENERATE_PROMPT + prompt
    print(content)
    # Stream tokens and accumulate them into the final prompt string.
    stream = client.text_generation(content, **generate_kwargs, stream=True, details=True, return_full_text=False)
    resp = ""
    for response in stream:
        resp += response.token.text
    return resp
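
# A non-streaming variant would be a one-liner (sketch, same API):
#   resp = client.text_generation(content, **generate_kwargs)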
    
def run_idefics(in_prompt, history, model_drop, seed):
    # Describe an image with idefics-9b-instruct. The model is fixed, so
    # `model_drop` is unused; `seed` is always re-randomized here.
    client = InferenceClient("HuggingFaceM4/idefics-9b-instruct")
    print(f'history :: {history}')
    prompt = format_prompt(in_prompt, history, seed)
    seed = random.randint(1, 1111111111111111)
    print(seed)
    generate_kwargs = dict(
        temperature=1.0,
        max_new_tokens=1048,
        top_p=0.99,
        repetition_penalty=1.0,
        do_sample=True,
        seed=seed,
    )
    content = agent.GENERATE_PROMPT + prompt
    print(content)
    stream = client.text_generation(content, **generate_kwargs, stream=True, details=True, return_full_text=False)
    resp = ""
    for response in stream:
        resp += response.token.text
    print(resp)
    return resp


def generate(purpose, history, chat_drop, seed):
    print(history)
    out_prompt = run_gpt(purpose, history, chat_drop, seed)
    return out_prompt

def describe(purpose, history, image, chat_drop, seed):
    print(history)
    # Embed the image URL in markdown so idefics receives it inline.
    purpose = f"{purpose},![]({image})"
    out_prompt = run_idefics(purpose, history, chat_drop, seed)
    return out_prompt
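
# Illustrative value handed to run_idefics by describe():
#   "describe this scene,![](https://example.com/img.png)"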
   
def run(purpose, history, image, model_drop, chat_drop, choice, seed):
    if choice == "Generate":
        out_prompt = generate(purpose, history, chat_drop, seed)
        history.append((purpose, out_prompt))
        # Show the expanded prompt immediately; the image follows.
        yield (history, None)
        model = loaded_model[int(model_drop)]

        out_img = model(out_prompt)
        print(out_img)
        # gr.load returns a file path on the Space; fetch it over HTTP.
        url = f'https://johann22-chat-diffusion-describe.hf.space/file={out_img}'
        print(url)
        uid = uuid.uuid4()
        r = requests.get(url, stream=True)
        if r.status_code == 200:
            out = Image.open(io.BytesIO(r.content))
            yield (history, out)
        else:
            yield ([(purpose, "an error occurred")], None)
    if choice == "Describe":
        out_prompt = describe(purpose, history, image, chat_drop, seed)
        history.append((purpose, out_prompt))
        yield (history, None)
        
################################################
style="""
.top_head{
    background: no-repeat;
    background-image: url(https://huggingface.co/spaces/johann22/chat-diffusion/resolve/main/image.png);
    background-position-y: bottom;
    height: 180px;
    background-position-x: center;
}

.top_h1{
    color: white!important;
    -webkit-text-stroke-width: medium;
}

"""
with gr.Blocks(css=style) as iface:
    gr.HTML("""<div class="top_head"><center><br><h1 class="top_h1">Mixtral Chat Diffusion</h1><br><h3 class="top_h1">This chatbot will generate images</h3></center></div>""")
    #chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
    with gr.Row():
        with gr.Column(scale=1):
            chatbot=gr.Chatbot(show_copy_button=True, layout='panel')
            with gr.Row():
                agent_choice = gr.Radio(choices=["Generate","Describe"],value="Generate")
            msg = gr.Textbox()
            with gr.Accordion("Controls", open=False):
                model_drop=gr.Dropdown(label="Diffusion Models", type="index", choices=list(models), value=models[0])
                chat_model_drop=gr.Dropdown(label="Chatbot Models", type="index", choices=list(c_models), value=c_models[0])
                chat_seed=gr.Slider(label="Prompt Seed", minimum=0,maximum=1000000000000,
                                    value=random.randint(1,1000000000000),step=1,
                                    interactive=True,
                                   info="Set Seed to 0 to randomize the session")
                image_seed=gr.Slider(label="Image Seed", minimum=0,maximum=1000000000000,
                                    value=random.randint(1,1000000000000),step=1,
                                    interactive=True,
                                   info="Set Seed to 0 to randomize the session")
            with gr.Group():
                with gr.Row():
                    submit_b = gr.Button()
                    stop_b = gr.Button("Stop")
                    clear = gr.ClearButton([msg, chatbot])
            test_btn = gr.Button("Test")
        with gr.Column(scale=2):
            sumbox=gr.Image(label="Image")
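
    # Event wiring: Test drives the InferenceClient path (gen_from_infer);
    # Submit and Enter drive the gr.load path (run); Stop cancels the two
    # submit events (but not the Test run).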

    run_test = test_btn.click(gen_from_infer, [msg,chatbot,sumbox,model_drop,chat_model_drop,agent_choice,chat_seed,image_seed],[chatbot,sumbox],concurrency_limit=20)
    
    sub_b = submit_b.click(run, [msg,chatbot,sumbox,model_drop,chat_model_drop,agent_choice,chat_seed],[chatbot,sumbox],concurrency_limit=20)
    sub_e = msg.submit(run, [msg, chatbot,sumbox,model_drop,chat_model_drop,agent_choice,chat_seed], [chatbot,sumbox],concurrency_limit=20)
    stop_b.click(None,None,None, cancels=[sub_b,sub_e])
iface.queue(default_concurrency_limit=None).launch()