prithivMLmods committed
Commit ceebfed • 1 Parent(s): e445444

Update app.py

Files changed (1)
  1. app.py +44 -150
app.py CHANGED
@@ -1,155 +1,49 @@
-import gradio as gr
-from gradio_client import Client
 from huggingface_hub import InferenceClient
-import random
-ss_client = Client("https://omnibus-html-image-current-tab.hf.space/")
-
-models=[
-    "mistralai/Mistral-7B-Instruct-v0.2",
-    "mistralai/Mixtral-8x7B-Instruct-v0.1"
-]
-clients=[
-    InferenceClient(models[0]),
-    InferenceClient(models[1]),
-]
-
-VERBOSE=False
-
-def load_models(inp):
-    if VERBOSE==True:
-        print(type(inp))
-        print(inp)
-        print(models[inp])
-
-    return gr.update(label=models[inp])
 
-def format_prompt(message, history, cust_p):
-    prompt = ""
-    if history:
-        for user_prompt, bot_response in history:
-            prompt += f"<start_of_turn>user{user_prompt}<end_of_turn>"
-            prompt += f"<start_of_turn>model{bot_response}<end_of_turn>"
-        if VERBOSE==True:
-            print(prompt)
-
-    prompt+=cust_p.replace("USER_INPUT",message)
     return prompt
 
-def chat_inf(system_prompt,prompt,history,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,cust_p):
-    #token max=8192
-    print(client_choice)
-    hist_len=0
-    client=clients[int(client_choice)-1]
-    if not history:
-        history = []
-        hist_len=0
-    if not memory:
-        memory = []
-        mem_len=0
-    if memory:
-        for ea in memory[0-chat_mem:]:
-            hist_len+=len(str(ea))
-    in_len=len(system_prompt+prompt)+hist_len
-
-    if (in_len+tokens) > 8000:
-        history.append((prompt,"Wait, that's too many tokens, please reduce the 'Chat Memory' value, or reduce the 'Max new tokens' value"))
-        yield history,memory
-    else:
-        generate_kwargs = dict(
-            temperature=temp,
-            max_new_tokens=tokens,
-            top_p=top_p,
-            repetition_penalty=rep_p,
-            do_sample=True,
-            seed=seed,
-        )
-        if system_prompt:
-            formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", memory[0-chat_mem:], cust_p)
-        else:
-            formatted_prompt = format_prompt(prompt, memory[0-chat_mem:], cust_p)
-        stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
-        output = ""
-        for response in stream:
-            output += response.token.text
-            yield [(prompt,output)],memory
-        history.append((prompt,output))
-        memory.append((prompt,output))
-        yield history,memory
-
-        if VERBOSE==True:
-            print("\n######### HIST "+str(in_len))
-            print("\n######### TOKENS "+str(tokens))
-
-def get_screenshot(chat: list,height=5000,width=600,chatblock=[],theme="light",wait=3000,header=True):
-    print(chatblock)
-    tog = 0
-    if chatblock:
-        tog = 3
-    result = ss_client.predict(str(chat),height,width,chatblock,header,theme,wait,api_name="/run_script")
-    out = f'https://omnibus-html-image-current-tab.hf.space/file={result[tog]}'
-    print(out)
-    return out
-
-def clear_fn():
-    return None,None,None,None
-rand_val=random.randint(1,1111111111111111)
-
-def check_rand(inp,val):
-    if inp==True:
-        return gr.Slider(label="Seed", minimum=1, maximum=1111111111111111, value=random.randint(1,1111111111111111))
-    else:
-        return gr.Slider(label="Seed", minimum=1, maximum=1111111111111111, value=int(val))
-
-with gr.Blocks(theme="bethecloud/storj_theme") as app:
-    memory=gr.State()
-    gr.HTML("""""")
-    chat_b = gr.Chatbot(height=500)
-    with gr.Group():
-        with gr.Row():
-            with gr.Column(scale=3):
-                inp = gr.Textbox(label="Prompt")
-                sys_inp = gr.Textbox(label="System Prompt (optional)")
-                with gr.Accordion("Prompt Format",open=False):
-                    custom_prompt=gr.Textbox(label="Modify Prompt Format", info="For testing purposes. 'USER_INPUT' is where 'SYSTEM_PROMPT, PROMPT' will be placed", lines=3,value="<start_of_turn>userUSER_INPUT<end_of_turn><start_of_turn>model")
-                with gr.Row():
-                    with gr.Column(scale=2):
-                        btn = gr.Button("Chat")
-                    with gr.Column(scale=1):
-                        with gr.Group():
-                            stop_btn=gr.Button("Stop")
-                            clear_btn=gr.Button("Clear")
-                client_choice=gr.Dropdown(label="Models",type='index',choices=[c for c in models],value=models[0],interactive=True)
-            with gr.Column(scale=1):
-                with gr.Group():
-                    rand = gr.Checkbox(label="Random Seed", value=True)
-                    seed=gr.Slider(label="Seed", minimum=1, maximum=1111111111111111,step=1, value=rand_val)
-                    tokens = gr.Slider(label="Max new tokens",value=1600,minimum=0,maximum=8000,step=64,interactive=True, visible=True,info="The maximum number of tokens")
-                    temp=gr.Slider(label="Temperature",step=0.01, minimum=0.01, maximum=1.0, value=0.49)
-                    top_p=gr.Slider(label="Top-P",step=0.01, minimum=0.01, maximum=1.0, value=0.49)
-                    rep_p=gr.Slider(label="Repetition Penalty",step=0.01, minimum=0.1, maximum=2.0, value=0.99)
-                    chat_mem=gr.Number(label="Chat Memory", info="Number of previous chats to retain",value=4)
-    with gr.Accordion(label="Screenshot",open=False):
-        with gr.Row():
-            with gr.Column(scale=3):
-                im_btn=gr.Button("Screenshot")
-                img=gr.Image(type='filepath')
-            with gr.Column(scale=1):
-                with gr.Row():
-                    im_height=gr.Number(label="Height",value=5000)
-                    im_width=gr.Number(label="Width",value=500)
-                    wait_time=gr.Number(label="Wait Time",value=3000)
-                    theme=gr.Radio(label="Theme", choices=["light","dark"],value="light")
-                    chatblock=gr.Dropdown(label="Chatblocks",info="Choose specific blocks of chat",choices=[c for c in range(1,40)],multiselect=True)
-
-    client_choice.change(load_models,client_choice,[chat_b])
-    app.load(load_models,client_choice,[chat_b])
-
-    im_go=im_btn.click(get_screenshot,[chat_b,im_height,im_width,chatblock,theme,wait_time],img)
-
-    chat_sub=inp.submit(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,custom_prompt],[chat_b,memory])
-    go=btn.click(check_rand,[rand,seed],seed).then(chat_inf,[sys_inp,inp,chat_b,memory,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem,custom_prompt],[chat_b,memory])
-
-    stop_btn.click(None,None,None,cancels=[go,im_go,chat_sub])
-    clear_btn.click(clear_fn,None,[inp,sys_inp,chat_b,memory])
-app.queue(default_concurrency_limit=10).launch()
 from huggingface_hub import InferenceClient
+import gradio as gr
+
+client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
+
+def format_prompt(message, history, system_prompt=None):
+    prompt = "<s>"
+    for user_prompt, bot_response in history:
+        prompt += f"[INST] {user_prompt} [/INST]"
+        prompt += f" {bot_response}</s> "
+    if system_prompt:
+        prompt += f"[SYS] {system_prompt} [/SYS]"
+    prompt += f"[INST] {message} [/INST]"
     return prompt
+
+def generate(
+    prompt, history, system_prompt=None, temperature=0.2, max_new_tokens=1024, top_p=0.95, repetition_penalty=1.0,
+):
+    temperature = float(temperature)
+    if temperature < 1e-2:
+        temperature = 1e-2
+    top_p = float(top_p)
+
+    generate_kwargs = dict(
+        temperature=temperature,
+        max_new_tokens=max_new_tokens,
+        top_p=top_p,
+        repetition_penalty=repetition_penalty,
+        do_sample=True,
+        seed=42,
+    )
+
+    formatted_prompt = format_prompt(prompt, history, system_prompt)
+
+    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
+    output = ""
+
+    for response in stream:
+        output += response.token.text
+        yield output
+    return output
+
+demo = gr.ChatInterface(
+    fn=generate,
+    title="",
+    theme="bethecloud/storj_theme"
+)
+
+demo.queue().launch(show_api=False)
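
For context, here is an illustrative sketch (not part of the commit) of the prompt string the new format_prompt builds. The function body is copied from the diff above; the sample messages are made up:

# Illustrative only: reproduces the new format_prompt from the diff above.
def format_prompt(message, history, system_prompt=None):
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    if system_prompt:
        prompt += f"[SYS] {system_prompt} [/SYS]"
    prompt += f"[INST] {message} [/INST]"
    return prompt

history = [("Hi", "Hello! How can I help?")]
print(format_prompt("Summarize this commit.", history, system_prompt="Be brief"))
# -> <s>[INST] Hi [/INST] Hello! How can I help?</s> [SYS] Be brief [/SYS][INST] Summarize this commit. [/INST]

With this format in place, generate streams the reply through client.text_generation(..., stream=True, details=True), accumulating each response.token.text fragment and yielding the partial output so gr.ChatInterface can render it incrementally.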