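"""Mixtral Chat Diffusion: a Gradio Space where Mixtral-8x7B expands each chat
message into a detailed image prompt, which is then rendered by a user-selected
diffusion model."""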
import os
import random
from huggingface_hub import InferenceClient
import gradio as gr
#from utils import parse_action, parse_file_content, read_python_module_structure
from datetime import datetime
from PIL import Image
import agent
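# agent.py is assumed to provide GENERATE_PROMPT, the system instructions
# prepended to every text-generation request below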
from models import models
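# models.py is assumed to export `models`, a list of Hub text-to-image model
# ids, e.g. ["stabilityai/sdxl-turbo", ...]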
import urllib.request
import uuid
import requests
import io
# Pre-load every listed diffusion model as a callable Gradio interface
loaded_model = []
for model_id in models:
    loaded_model.append(gr.load(f'models/{model_id}'))
print(loaded_model)

now = datetime.now()
date_time_str = now.strftime("%Y-%m-%d %H:%M:%S")

# Text-generation client used to expand chat messages into image prompts
client = InferenceClient(
    "mistralai/Mixtral-8x7B-Instruct-v0.1"
)

############################################
# Stand-alone text-to-image model used by the infer() helper below
model = gr.load("models/stabilityai/sdxl-turbo")

# Runtime settings; VERBOSE, MAX_HISTORY, and this module-level history list
# are currently unused in this file
VERBOSE = True
MAX_HISTORY = 10000
#MODEL = "gpt-3.5-turbo"  # "gpt-4"
history = []

def infer(txt):
    return model(txt)

def format_prompt(message, history):
    # Build a Mixtral-instruct prompt: <s>[INST] user [/INST] bot</s> ... [INST] message [/INST]
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
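# For example, with history = [("hi", "hello")] and message = "draw a cat",
# format_prompt returns:
# <s>[INST] hi [/INST] hello</s> [INST] draw a cat [/INST]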



def run_gpt(
    in_prompt,
    history,
):
    """Ask Mixtral to turn the chat history and new message into an image prompt."""
    print(f'history :: {history}')
    prompt = format_prompt(in_prompt, history)
    # Fresh random seed each call so repeated requests give varied prompts
    seed = random.randint(1, 1111111111111111)
    print(seed)
    generate_kwargs = dict(
        temperature=1.0,
        max_new_tokens=1048,
        top_p=0.99,
        repetition_penalty=1.0,
        do_sample=True,
        seed=seed,
    )

    # Prepend the system instructions from agent.py
    content = agent.GENERATE_PROMPT + prompt
    print(content)

    # Stream tokens from the Inference API and accumulate them into one string
    stream = client.text_generation(content, **generate_kwargs, stream=True, details=True, return_full_text=False)
    resp = ""
    for response in stream:
        resp += response.token.text
    return resp


def run(purpose, history, model_drop):
    """Chat handler: generate an image prompt, then render and fetch the image."""
    print(history)
    # First, expand the user's message into a detailed image prompt
    out_prompt = run_gpt(
        purpose,
        history,
    )
    # Show the generated prompt in the chat while the image renders
    yield ("", [(purpose, out_prompt)], None)

    # Render with the diffusion model selected in the dropdown
    model = loaded_model[int(model_drop)]
    out_img = model(out_prompt)
    print(out_img)
    # gr.load'ed image models return a local file path; fetch it back through
    # this Space's /file= route (the URL is hard-coded to this deployment)
    url = f'https://johann22-mixtral-chat-diffusion.hf.space/file={out_img}'
    print(url)
    r = requests.get(url, stream=True)
    if r.status_code == 200:
        out = Image.open(io.BytesIO(r.content))
        # Final update: clear the textbox, keep the prompt in chat, show the image
        yield ("", [(purpose, out_prompt)], out)



################################################

with gr.Blocks() as iface:
    gr.HTML("""<center><h1>Mixtral Chat Diffusion</h1><br><h3>This chatbot will generate images</h3></center>""")
    with gr.Row():
        with gr.Column():
            chatbot = gr.Chatbot()
            msg = gr.Textbox()
            model_drop = gr.Dropdown(label="Diffusion Models", type="index", choices=[m for m in models], value=models[0])
    with gr.Row():
        submit_b = gr.Button("Submit")
        stop_b = gr.Button("Stop")
        clear = gr.ClearButton([msg, chatbot])

    sumbox = gr.Image(label="Image")

    # Wire both the Submit button and Enter-in-textbox to run();
    # the Stop button cancels either in-flight generation
    sub_b = submit_b.click(run, [msg, chatbot, model_drop], [msg, chatbot, sumbox])
    sub_e = msg.submit(run, [msg, chatbot, model_drop], [msg, chatbot, sumbox])
    stop_b.click(None, None, None, cancels=[sub_b, sub_e])
iface.launch()
# Alternative ChatInterface layout, kept for reference (would need an
# `examples` list defined above):
'''
gr.ChatInterface(
    fn=run,
    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
    title="Mixtral 46.7B\nMicro-Agent\nInternet Search <br> development test",
    examples=examples,
    concurrency_limit=20,
).launch(show_api=False)
'''