Update app.py
app.py CHANGED
@@ -16,12 +16,14 @@ models=[
     "mistralai/Mixtral-8x7B-Instruct-v0.1",
     "mistralai/Mixtral-8x7B-Instruct-v0.2",
     ]
-
-
-
-
-
-]
+
+def load_models(inp):
+    print(type(inp))
+    print(inp)
+    print(models[inp])
+    model_state=InferenceClient(models[inp])
+    out_box=(gr.update(label=models[inp]))
+    return out_box,model_state
 
 VERBOSE=False
 
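The new load_models() returns two values, and any handler that lists [chat_b,model_state] as outputs maps them in order: the gr.update patches the Chatbot's label, and the InferenceClient lands in the State. A minimal sketch of that pattern, assuming the dropdown yields an integer index (the diff only shows inp being used to index the models list):

    from huggingface_hub import InferenceClient
    import gradio as gr

    models=[
        "mistralai/Mixtral-8x7B-Instruct-v0.1",
        "mistralai/Mixtral-8x7B-Instruct-v0.2",
        ]

    def load_models(inp):
        # inp: integer index into models (assumed; the diff only shows
        # inp indexing the list)
        client=InferenceClient(models[inp])
        # gr.update(label=...) retitles the existing Chatbot in place;
        # the client object itself is routed to the gr.State output
        return gr.update(label=models[inp]), client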
@@ -46,11 +48,11 @@ def format_prompt(message, history, cust_p):
     prompt+=cust_p.replace("USER_INPUT",message)
     return prompt
 
-def chat_inf(system_prompt,prompt,history,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,cust_p):
+def chat_inf(system_prompt,prompt,history,memory,model_state,seed,temp,tokens,top_p,rep_p,chat_mem,cust_p):
     #token max=8192
-    print(client_choice)
+    print(model_state)
     hist_len=0
-    client=
+    client=model_state
     if not history:
         history = []
         hist_len=0
@@ -112,6 +114,7 @@ def check_rand(inp,val):
     return gr.Slider(label="Seed", minimum=1, maximum=1111111111111111, value=int(val))
 
 with gr.Blocks() as app:
+    model_state=gr.State()
     memory=gr.State()
     gr.HTML("""<center><h1 style='font-size:xx-large;'>Huggingface Hub InferenceClient</h1><br><h3>Chatbot's</h3></center>""")
     chat_b = gr.Chatbot(height=500)
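gr.State holds an arbitrary per-session Python value between events, which is what allows a live InferenceClient to be kept around instead of being rebuilt on every message. A stripped-down, hypothetical illustration of the mechanism:

    import gradio as gr

    with gr.Blocks() as demo:
        counter = gr.State(0)              # any Python object works, e.g. a client
        out = gr.Textbox(label="Clicks")
        btn = gr.Button("Click")

        def bump(n):
            n += 1
            return n, str(n)               # first return value refreshes the State

        btn.click(bump, counter, [counter, out])

    demo.launch()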
@@ -153,12 +156,12 @@ with gr.Blocks() as app:
     chatblock=gr.Dropdown(label="Chatblocks",info="Choose specific blocks of chat",choices=[c for c in range(1,40)],multiselect=True)
 
 
-    client_choice.change(load_models,client_choice,[chat_b])
-    app.load(load_models,client_choice,[chat_b])
+    client_choice.change(load_models,client_choice,[chat_b,model_state])
+    app.load(load_models,client_choice,[chat_b,model_state])
 
     im_go=im_btn.click(get_screenshot,[chat_b,im_height,im_width,chatblock,theme,wait_time],img)
 
-    chat_sub=inp.submit(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,custom_prompt],[chat_b,memory])
+    chat_sub=inp.submit(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,memory,model_state,seed,temp,tokens,top_p,rep_p,chat_mem,custom_prompt],[chat_b,memory])
     go=btn.click(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,custom_prompt],[chat_b,memory])
 
     stop_btn.click(None,None,None,cancels=[go,im_go,chat_sub])
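With this wiring, app.load creates the client on page load and client_choice.change refreshes it whenever the dropdown changes; inp.submit was updated to read model_state, but the unchanged go= line still passes client_choice in that slot, so the button path would hand chat_inf the raw dropdown value rather than the client. A hedged sketch of fully consistent wiring, inside the same gr.Blocks context as above:

    client_choice.change(load_models,client_choice,[chat_b,model_state])
    app.load(load_models,client_choice,[chat_b,model_state])

    chat_sub=inp.submit(check_rand,[rand,seed],seed).then(
        chat_inf,
        [sys_inp,inp,chat_b,memory,model_state,seed,temp,tokens,top_p,rep_p,chat_mem,custom_prompt],
        [chat_b,memory])
    # same inputs on the button path, so both routes receive the client
    go=btn.click(check_rand,[rand,seed],seed).then(
        chat_inf,
        [sys_inp,inp,chat_b,memory,model_state,seed,temp,tokens,top_p,rep_p,chat_mem,custom_prompt],
        [chat_b,memory])

    # cancels= aborts any in-flight generation from either route
    stop_btn.click(None,None,None,cancels=[go,im_go,chat_sub])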