BlinkDL committed
Commit c197986
Parent: fcdf9f0

Update app.py

Files changed (1):
  app.py +214 -35
app.py CHANGED
@@ -1,5 +1,5 @@
  import gradio as gr
- import os, gc, torch
+ import os, gc, copy, torch
  from datetime import datetime
  from huggingface_hub import hf_hub_download
  from pynvml import *
@@ -14,6 +14,7 @@ os.environ["RWKV_CUDA_ON"] = '1' # if '1' then use CUDA kernel for seq mode (muc
  from rwkv.model import RWKV
  model_path = hf_hub_download(repo_id="BlinkDL/rwkv-4-raven", filename=f"{title}.pth")
  model = RWKV(model=model_path, strategy='cuda fp16i8 *8 -> cuda fp16')
+ # model = RWKV(model='D:/ChatRWKV/RWKV-4-Raven-7B-v9-Eng99%-Other1%-20230412-ctx8192.pth', strategy='cuda fp16i8 *10 -> cuda fp16')
  from rwkv.utils import PIPELINE, PIPELINE_ARGS
  pipeline = PIPELINE(model, "20B_tokenizer.json")
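The strategy string controls per-layer placement and precision: 'cuda fp16i8 *8 -> cuda fp16' runs the first 8 layers int8-quantized on the GPU and the remaining layers in fp16; the commented-out local-path line quantizes 10 layers to save more VRAM at some quality cost. A minimal sketch of the same loading path, assuming the rwkv pip package and model_path as downloaded above:

from rwkv.model import RWKV
from rwkv.utils import PIPELINE

# Same split as the app: first 8 layers int8 on CUDA, the rest fp16.
model = RWKV(model=model_path, strategy='cuda fp16i8 *8 -> cuda fp16')
pipeline = PIPELINE(model, "20B_tokenizer.json")

# Simplest entry point; the app instead drives sampling by hand below,
# so it can stream tokens and adjust logits at every step.
print(pipeline.generate("Here is a short poem about ravens:\n", token_count=50))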
@@ -57,9 +58,6 @@ def evaluate(
      input = input.strip()
      ctx = generate_prompt(instruction, input)

-     gpu_info = nvmlDeviceGetMemoryInfo(gpu_h)
-     print(f'vram {gpu_info.total} used {gpu_info.used} free {gpu_info.free}')
-
      all_tokens = []
      out_last = 0
      out_str = ''
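This hunk drops the VRAM logging from evaluate's prologue; the next hunk re-adds it after the generation loop, where the reading is more useful. For reference, a self-contained sketch of that pynvml pattern, assuming an NVIDIA GPU (app.py calls nvmlInit() and sets gpu_h near the top):

from pynvml import nvmlInit, nvmlDeviceGetHandleByIndex, nvmlDeviceGetMemoryInfo

nvmlInit()
gpu_h = nvmlDeviceGetHandleByIndex(0)
gpu_info = nvmlDeviceGetMemoryInfo(gpu_h)
print(f'vram {gpu_info.total} used {gpu_info.used} free {gpu_info.free}')  # values in bytes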
@@ -84,41 +82,222 @@ def evaluate(
              out_str += tmp
              yield out_str.strip()
              out_last = i + 1
+
+     gpu_info = nvmlDeviceGetMemoryInfo(gpu_h)
+     print(f'vram {gpu_info.total} used {gpu_info.used} free {gpu_info.free}')
+
      gc.collect()
      torch.cuda.empty_cache()
      yield out_str.strip()

  examples = [
-     ["Tell me about ravens.", "", 150, 1.0, 0.5, 0.4, 0.4],
-     ["Write a python function to mine 1 BTC, with details and comments.", "", 150, 1.0, 0.5, 0.2, 0.2],
-     ["Write a song about ravens.", "", 150, 1.0, 0.5, 0.4, 0.4],
-     ["Explain the following metaphor: Life is like cats.", "", 150, 1.0, 0.5, 0.4, 0.4],
-     ["Write a story using the following information", "A man named Alex chops a tree down", 150, 1.0, 0.5, 0.4, 0.4],
-     ["Generate a list of adjectives that describe a person as brave.", "", 150, 1.0, 0.5, 0.4, 0.4],
-     ["You have $100, and your goal is to turn that into as much money as possible with AI and Machine Learning. Please respond with detailed plan.", "", 150, 1.0, 0.5, 0.4, 0.4],
+     ["Tell me about ravens.", "", 150, 1.2, 0.5, 0.3, 0.3],
+     ["Write a python function to mine 1 BTC, with details and comments.", "", 150, 1.2, 0.5, 0.3, 0.3],
+     ["Write a song about ravens.", "", 150, 1.2, 0.5, 0.3, 0.3],
+     ["Explain the following metaphor: Life is like cats.", "", 150, 1.2, 0.5, 0.3, 0.3],
+     ["Write a story using the following information", "A man named Alex chops a tree down", 150, 1.2, 0.5, 0.3, 0.3],
+     ["Generate a list of adjectives that describe a person as brave.", "", 150, 1.2, 0.5, 0.3, 0.3],
+     ["You have $100, and your goal is to turn that into as much money as possible with AI and Machine Learning. Please respond with detailed plan.", "", 150, 1.2, 0.5, 0.3, 0.3],
  ]

- g = gr.Interface(
-     fn=evaluate,
-     inputs=[
-         gr.components.Textbox(lines=2, label="Instruction", value="Tell me about ravens."),
-         gr.components.Textbox(lines=2, label="Input", placeholder="none"),
-         gr.components.Slider(minimum=10, maximum=200, step=10, value=150), # token_count
-         gr.components.Slider(minimum=0.2, maximum=2.0, step=0.1, value=1.0), # temperature
-         gr.components.Slider(minimum=0, maximum=1, step=0.05, value=0.5), # top_p
-         gr.components.Slider(0.0, 1.0, step=0.1, value=0.4), # presencePenalty
-         gr.components.Slider(0.0, 1.0, step=0.1, value=0.4), # countPenalty
-     ],
-     outputs=[
-         gr.inputs.Textbox(
-             lines=5,
-             label="Output",
-         )
-     ],
-     title=f"🐦Raven - {title}",
-     description="Raven is [RWKV 7B](https://github.com/BlinkDL/ChatRWKV) 100% RNN [RWKV-LM](https://github.com/BlinkDL/RWKV-LM) finetuned to follow instructions. *** Please try examples first (bottom of page) *** (edit them to use your question). Demo limited to ctxlen 1024. It is finetuned on [Stanford Alpaca](https://github.com/tatsu-lab/stanford_alpaca), codealpaca and more. For best results, *** keep you prompt short and clear ***.",
-     examples=examples,
-     cache_examples=False,
- )
- g.queue(concurrency_count=1, max_size=10)
- g.launch(share=False)
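The removed gr.Interface is replaced below by a two-tab gr.Blocks layout. The swap is mechanical, since an Interface is essentially a fixed Blocks arrangement of the same components; a minimal sketch of the equivalence, with placeholder names rather than the app's:

import gradio as gr

def shout(text):  # stand-in for evaluate()
    return text.upper()

with gr.Blocks() as demo:
    with gr.Tab("Instruct mode"):
        inp = gr.Textbox(lines=2, label="Instruction")
        out = gr.Textbox(lines=5, label="Output")
        gr.Button("Submit", variant="primary").click(shout, [inp], [out])

demo.launch()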
+ ##########################################################################
+
+ chat_intro = '''The following is a coherent verbose detailed conversation between <|user|> and an AI girl named <|bot|>.
+
+ <|user|>: Hi <|bot|>, Would you like to chat with me for a while?
+
+ <|bot|>: Hi <|user|>. Sure. What would you like to talk about? I'm listening.
+ '''
+
+ def user(message, chatbot):
+     chatbot = chatbot or []
+     print(f"User: {message}")
+     return "", chatbot + [[message, None]]
+
+ def alternative(chatbot, history):
+     if not chatbot or not history:
+         return chatbot, history
+
+     chatbot[-1][1] = None
+     history[0] = copy.deepcopy(history[1])
+
+     return chatbot, history
+
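The history value added here packs [state, state_pre, tokens]: the current RWKV state, a snapshot taken before the last exchange (what the [Alternative] button rewinds to), and the token log. This works because RWKV is an RNN whose forward pass is explicitly stateful. A minimal sketch of the pattern, reusing the model and pipeline loaded above:

import copy

# Pay for the shared prefix once, then branch from a snapshot.
out, state = model.forward(pipeline.encode("Bob: Hi Alice.\n\nAlice:"), None)
snapshot = copy.deepcopy(state)  # plays the role of state_pre_1 below

out_a, state_a = model.forward(pipeline.encode(" Hello!"), copy.deepcopy(snapshot))
out_b, state_b = model.forward(pipeline.encode(" Go away."), copy.deepcopy(snapshot))
# Two alternative continuations; neither re-encodes the prefix.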
+ def chat(
+     prompt,
+     user,
+     bot,
+     chatbot,
+     history,
+     temperature=1.0,
+     top_p=0.8,
+     presence_penalty=0.1,
+     count_penalty=0.1,
+ ):
+     args = PIPELINE_ARGS(temperature=max(0.2, float(temperature)), top_p=float(top_p),
+                          alpha_frequency=float(count_penalty),
+                          alpha_presence=float(presence_penalty),
+                          token_ban=[], # ban the generation of some tokens
+                          token_stop=[]) # stop generation whenever you see any token here
+
+     if not chatbot:
+         return chatbot, history
+
+     message = chatbot[-1][0]
+     message = message.strip().replace('\r\n','\n').replace('\n\n','\n')
+     ctx = f"{user}: {message}\n\n{bot}:"
+
+     if not history:
+         prompt = prompt.replace("<|user|>", user.strip())
+         prompt = prompt.replace("<|bot|>", bot.strip())
+         prompt = prompt.strip()
+         prompt = f"\n{prompt}\n\n"
+
+         out, state = model.forward(pipeline.encode(prompt), None)
+         history = [state, None, []] # [state, state_pre, tokens]
+         print("History reloaded.")
+
+     [state, _, all_tokens] = history
+     state_pre_0 = copy.deepcopy(state)
+
+     out, state = model.forward(pipeline.encode(ctx)[-ctx_limit:], state)
+     state_pre_1 = copy.deepcopy(state) # For recovery
+
+     print("Bot:", end='')
+
+     begin = len(all_tokens)
+     out_last = begin
+     out_str: str = ''
+     occurrence = {}
+     for i in range(300):
+         if i <= 0:
+             nl_bias = -float('inf')
+         elif i <= 30:
+             nl_bias = (i - 30) * 0.1
+         elif i <= 130:
+             nl_bias = 0
+         else:
+             nl_bias = (i - 130) * 0.25
+         out[187] += nl_bias
+         for n in occurrence:
+             out[n] -= (args.alpha_presence + occurrence[n] * args.alpha_frequency)
+
+         token = pipeline.sample_logits(out, temperature=args.temperature, top_p=args.top_p)
+         next_tokens = [token]
+         if token == 0:
+             next_tokens = pipeline.encode('\n\n')
+         all_tokens += next_tokens
+
+         if token not in occurrence:
+             occurrence[token] = 1
+         else:
+             occurrence[token] += 1
+
+         out, state = model.forward(next_tokens, state)
+
+         tmp = pipeline.decode(all_tokens[out_last:])
+         if '\ufffd' not in tmp:
+             print(tmp, end='', flush=True)
+             out_last = begin + i + 1
+             out_str += tmp
+
+             chatbot[-1][1] = out_str.strip()
+             history = [state, all_tokens]
+             yield chatbot, history
+
+         out_str = pipeline.decode(all_tokens[begin:])
+         out_str = out_str.replace("\r\n", '\n').replace('\\n', '\n')
+
+         if '\n\n' in out_str:
+             break
+
+         # State recovery
+         if f'{user}:' in out_str or f'{bot}:' in out_str:
+             idx_user = out_str.find(f'{user}:')
+             idx_user = len(out_str) if idx_user == -1 else idx_user
+             idx_bot = out_str.find(f'{bot}:')
+             idx_bot = len(out_str) if idx_bot == -1 else idx_bot
+             idx = min(idx_user, idx_bot)
+
+             if idx < len(out_str):
+                 out_str = f" {out_str[:idx].strip()}\n\n"
+                 tokens = pipeline.encode(out_str)
+
+                 all_tokens = all_tokens[:begin] + tokens
+                 out, state = model.forward(tokens, state_pre_1)
+             break
+
+     gpu_info = nvmlDeviceGetMemoryInfo(gpu_h)
+     print(f'vram {gpu_info.total} used {gpu_info.used} free {gpu_info.free}')
+
+     gc.collect()
+     torch.cuda.empty_cache()
+
+     chatbot[-1][1] = out_str.strip()
+     history = [state, state_pre_0, all_tokens]
+     yield chatbot, history
+
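Three pieces of the generation loop above, factored out in plain Python (no model needed): the schedule on token 187 ('\n' in the 20B tokenizer) that first forbids, then permits, then increasingly forces an end of reply; the presence/frequency penalty keyed on the occurrence dict; and the cut applied when the model starts speaking for either participant. A sketch mirroring the logic above:

def nl_bias(i: int) -> float:
    # No newline at step 0, a negative ramp toward 0 through step 30,
    # neutral to step 130, then a growing push toward '\n\n'.
    if i <= 0:
        return -float('inf')
    if i <= 30:
        return (i - 30) * 0.1
    if i <= 130:
        return 0.0
    return (i - 130) * 0.25

def penalize(logits, occurrence, alpha_presence=0.1, alpha_frequency=0.1):
    # Each already-seen token is docked a flat presence penalty plus a
    # count-proportional frequency penalty before sampling.
    for n, count in occurrence.items():
        logits[n] -= alpha_presence + count * alpha_frequency
    return logits

def truncate_reply(out_str: str, user: str, bot: str) -> str:
    # Cut at the first place the model starts a new speaker turn; the app
    # then replays only the kept text from the saved state (state_pre_1).
    idx_user = out_str.find(f'{user}:')
    idx_user = len(out_str) if idx_user == -1 else idx_user
    idx_bot = out_str.find(f'{bot}:')
    idx_bot = len(out_str) if idx_bot == -1 else idx_bot
    idx = min(idx_user, idx_bot)
    return out_str if idx == len(out_str) else f" {out_str[:idx].strip()}\n\n"

assert nl_bias(10) == -2.0 and nl_bias(50) == 0.0 and nl_bias(150) == 5.0
print(penalize([0.0, 0.0, 0.0], {1: 2}))  # -> [0.0, -0.3, 0.0]
print(repr(truncate_reply("I agree. Bob: really?", "Bob", "Alice")))  # ' I agree.\n\n'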
+ ##########################################################################
+
+ with gr.Blocks(title=title) as demo:
+     gr.HTML(f"<div style=\"text-align: center;\">\n<h1>🐦Raven - {title}</h1>\n</div>")
+     with gr.Tab("Instruct mode"):
+         gr.Markdown(f"Raven is [RWKV 7B](https://github.com/BlinkDL/ChatRWKV) 100% RNN [RWKV-LM](https://github.com/BlinkDL/RWKV-LM) finetuned to follow instructions. *** Please try examples first (bottom of page) *** (edit them to use your question). Demo limited to ctxlen {ctx_limit}. Finetuned on alpaca, gpt4all, codealpaca and more. For best results, *** keep your prompt short and clear ***. <b>UPDATE: now with Chat (see above, as a tab)</b>.")
+         with gr.Row():
+             with gr.Column():
+                 instruction = gr.Textbox(lines=2, label="Instruction", value="Tell me about ravens.")
+                 input = gr.Textbox(lines=2, label="Input", placeholder="none")
+                 token_count = gr.Slider(10, 200, label="Max Tokens", step=10, value=150)
+                 temperature = gr.Slider(0.2, 2.0, label="Temperature", step=0.1, value=1.2)
+                 top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.5)
+                 presence_penalty = gr.Slider(0.0, 1.0, label="Presence Penalty", step=0.1, value=0.3)
+                 count_penalty = gr.Slider(0.0, 1.0, label="Count Penalty", step=0.1, value=0.3)
+             with gr.Column():
+                 with gr.Row():
+                     submit = gr.Button("Submit", variant="primary")
+                     clear = gr.Button("Clear", variant="secondary")
+                 output = gr.Textbox(label="Output", lines=5)
+         data = gr.Dataset(components=[instruction, input, token_count, temperature, top_p, presence_penalty, count_penalty], samples=examples, label="Example Instructions", headers=["Instruction", "Input", "Max Tokens", "Temperature", "Top P", "Presence Penalty", "Count Penalty"])
+         submit.click(evaluate, [instruction, input, token_count, temperature, top_p, presence_penalty, count_penalty], [output])
+         clear.click(lambda: None, [], [output])
+         data.click(lambda x: x, [data], [instruction, input, token_count, temperature, top_p, presence_penalty, count_penalty])
+
+     with gr.Tab("Chat (Experimental - Might be buggy - use ChatRWKV for reference)"):
+         gr.Markdown(f'''<b>*** The length of response is restricted in this demo. Use ChatRWKV for longer generations. ***</b> Saying "go on" or "continue" can sometimes continue the response. If you'd like to edit the scenario, make sure to follow the exact same format: empty lines between (and only between) different speakers. Changes only take effect after you press [Clear]. <b>The default "Bob" & "Alice" names work the best.</b>''', label="Description")
+         with gr.Row():
+             with gr.Column():
+                 chatbot = gr.Chatbot()
+                 state = gr.State()
+                 message = gr.Textbox(label="Message", value="Write me a python code to land on moon.")
+                 with gr.Row():
+                     send = gr.Button("Send", variant="primary")
+                     alt = gr.Button("Alternative", variant="secondary")
+                     clear = gr.Button("Clear", variant="secondary")
+             with gr.Column():
+                 with gr.Row():
+                     user_name = gr.Textbox(lines=1, max_lines=1, label="User Name", value="Bob")
+                     bot_name = gr.Textbox(lines=1, max_lines=1, label="Bot Name", value="Alice")
+                 prompt = gr.Textbox(lines=10, max_lines=50, label="Scenario", value=chat_intro)
+                 temperature = gr.Slider(0.2, 2.0, label="Temperature", step=0.1, value=1.2)
+                 top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.5)
+                 presence_penalty = gr.Slider(0.0, 1.0, label="Presence Penalty", step=0.1, value=0.3)
+                 count_penalty = gr.Slider(0.0, 1.0, label="Count Penalty", step=0.1, value=0.3)
+         chat_inputs = [
+             prompt,
+             user_name,
+             bot_name,
+             chatbot,
+             state,
+             temperature,
+             top_p,
+             presence_penalty,
+             count_penalty
+         ]
+         chat_outputs = [chatbot, state]
+         message.submit(user, [message, chatbot], [message, chatbot], queue=False).then(chat, chat_inputs, chat_outputs)
+         send.click(user, [message, chatbot], [message, chatbot], queue=False).then(chat, chat_inputs, chat_outputs)
+         alt.click(alternative, [chatbot, state], [chatbot, state], queue=False).then(chat, chat_inputs, chat_outputs)
+         clear.click(lambda: ([], None, ""), [], [chatbot, state, message], queue=False)
+
+ demo.queue(concurrency_count=1, max_size=10)
+ demo.launch(share=False)
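The wiring at the end relies on gradio's event chaining: the lightweight user/alternative handlers run with queue=False so the UI updates instantly, and .then() hands off to the chat generator, whose yields stream into the Chatbot. A minimal sketch of the same pattern with a stand-in bot, assuming a gradio 3.x version that has .then() and generator streaming:

import gradio as gr

def add_user_msg(message, history):
    # Echo the user's message into the chat immediately.
    return "", (history or []) + [[message, None]]

def fake_bot(history):
    # Stand-in for chat(): stream a reply character by character via yield.
    reply = "echo: " + history[-1][0]
    for i in range(1, len(reply) + 1):
        history[-1][1] = reply[:i]
        yield history

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox(label="Message")
    msg.submit(add_user_msg, [msg, chatbot], [msg, chatbot], queue=False).then(fake_bot, chatbot, chatbot)

demo.queue()
demo.launch()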