import io
import random

import gradio as gr
import requests
from huggingface_hub import InferenceClient
from PIL import Image

import agent
import chat_models
from models import models
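
# Assumed local modules (shipped with this Space, not shown here): `models`
# exposes a list of diffusion model IDs, `chat_models` a list of chat model IDs,
# and `agent` provides GENERATE_PROMPT, a system-prompt string prepended to
# every chat request.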

# Load each diffusion model listed in models.py as a callable Gradio interface.
loaded_model = [gr.load(f"models/{model}") for model in models]
print(loaded_model)

# Chat models are addressed by ID at request time, so a plain copy suffices.
chat_model = list(chat_models.models)
print(chat_model)

def format_prompt(message, history):
    """Build a Mistral-instruct style prompt ([INST] ... [/INST]) from the chat history."""
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
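
# For illustration, a hypothetical two-turn exchange would be formatted as:
#   <s>[INST] a red fox [/INST] A red fox in golden light, 4k</s> [INST] make it snowy [/INST]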

def run_gpt(in_prompt, history, chat_drop):
    """Expand the user's request into a detailed image prompt using the selected chat model."""
    client = InferenceClient(chat_models.models[int(chat_drop)])
    print(f'history :: {history}')
    prompt = format_prompt(in_prompt, history)
    # Fresh random seed per call so repeated requests do not return identical text.
    seed = random.randint(1, 1111111111111111)
    print(seed)
    generate_kwargs = dict(
        temperature=1.0,
        max_new_tokens=1048,
        top_p=0.99,
        repetition_penalty=1.0,
        do_sample=True,
        seed=seed,
    )
    content = agent.GENERATE_PROMPT + prompt
    print(content)
    stream = client.text_generation(content, **generate_kwargs, stream=True, details=True, return_full_text=False)
    resp = ""
    for response in stream:
        resp += response.token.text
    return resp
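
# Hypothetical usage: run_gpt("a castle at dusk", [], 0) streams a completion
# from chat_models.models[0] and returns the concatenated text as the image prompt.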


def run(purpose, history, model_drop, chat_drop):
    """One chat turn: write an image prompt with the chat model, then render it."""
    print(history)
    # Use the chat-model dropdown (chat_drop) here; the diffusion dropdown
    # (model_drop) selects the image model further down.
    out_prompt = run_gpt(purpose, history, chat_drop)
    history.append((purpose, out_prompt))
    # Show the generated prompt immediately; the image follows when ready.
    yield (history, None)
    model = loaded_model[int(model_drop)]
    out_img = model(out_prompt)
    print(out_img)
    url = f'https://johann22-chat-diffusion.hf.space/file={out_img}'
    print(url)
    r = requests.get(url, stream=True)
    if r.status_code == 200:
        out = Image.open(io.BytesIO(r.content))
        yield (history, out)
    else:
        # Keep the history intact and surface the error as a bot message.
        history.append((None, f"An error occurred while fetching the image (HTTP {r.status_code})."))
        yield (history, None)
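
# Optional, not wired in: a minimal sketch hardening the image fetch above with
# a timeout and one retry. The URL scheme matches `run`; the `retries` and
# `timeout` values are assumptions, not part of the original app.
def fetch_image(url, retries=1, timeout=60):
    for attempt in range(retries + 1):
        try:
            r = requests.get(url, stream=True, timeout=timeout)
            if r.status_code == 200:
                return Image.open(io.BytesIO(r.content))
            print(f"fetch attempt {attempt} failed with HTTP {r.status_code}")
        except requests.RequestException as e:
            print(f"fetch attempt {attempt} raised {e}")
    return None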

################################################
style="""
.top_head{
    background: no-repeat;
    background-image: url(https://huggingface.co/spaces/johann22/chat-diffusion/resolve/main/image.png);
    background-position-y: bottom;
    height: 180px;
    background-position-x: center;
}

.top_h1{
    color: white!important;
    -webkit-text-stroke-width: medium;
}

"""
with gr.Blocks(css=style) as iface:
    gr.HTML("""<div class="top_head"><center><br><h1 class="top_h1">Mixtral Chat Diffusion</h1><br><h3 class="top_h1">This chatbot will generate images</h3></center></div>""")
    with gr.Row():
        with gr.Column(scale=1):
            chatbot = gr.Chatbot(show_copy_button=True, layout='panel')
            msg = gr.Textbox()
            model_drop = gr.Dropdown(label="Diffusion Models", type="index", choices=list(models), value=models[0])
            chat_model_drop = gr.Dropdown(label="Chatbot Models", type="index", choices=list(chat_models.models), value=chat_models.models[0])
            with gr.Group():
                with gr.Row():
                    submit_b = gr.Button("Submit")
                    stop_b = gr.Button("Stop")
                    clear = gr.ClearButton([msg, chatbot])
        with gr.Column(scale=2):
            sumbox = gr.Image(label="Image")
    sub_b = submit_b.click(run, [msg, chatbot, model_drop, chat_model_drop], [chatbot, sumbox])
    sub_e = msg.submit(run, [msg, chatbot, model_drop, chat_model_drop], [chatbot, sumbox])
    stop_b.click(None, None, None, cancels=[sub_b, sub_e])
# queue() is required so the generator-based `run` can stream partial results
# and so the Stop button's `cancels` can interrupt in-flight events.
iface.queue().launch()