BlinkDL committed
Commit 315ea19
Parent: a85e3a8

Update app.py

Files changed (1): app.py (+48, -215)
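To inspect this exact revision locally, `huggingface_hub` can fetch `app.py` by commit hash. A minimal sketch; the repo id below is a placeholder, since this page does not name the Space the commit belongs to:

```python
from huggingface_hub import hf_hub_download

# "your-org/your-space" is a placeholder: substitute the id of this Space.
path = hf_hub_download(
    repo_id="your-org/your-space",
    repo_type="space",       # app.py lives in a Space repo, not a model repo
    filename="app.py",
    revision="315ea19",      # the commit above; use the full hash if the short one fails
)
print(path)
```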
app.py CHANGED
@@ -1,25 +1,25 @@
  import gradio as gr
- import os, gc, copy, torch, re
+ import os, gc, copy, torch
  from datetime import datetime
  from huggingface_hub import hf_hub_download
  from pynvml import *
  nvmlInit()
  gpu_h = nvmlDeviceGetHandleByIndex(0)
- ctx_limit = 1536
- title = "RWKV-4-World-7B-v1-20230626-ctx4096"
+ ctx_limit = 2000
+ title = "RWKV-5-World-3B-v2-20231113-ctx4096"

  os.environ["RWKV_JIT_ON"] = '1'
  os.environ["RWKV_CUDA_ON"] = '1' # if '1' then use CUDA kernel for seq mode (much faster)

  from rwkv.model import RWKV
- model_path = hf_hub_download(repo_id="BlinkDL/rwkv-4-world", filename=f"{title}.pth")
- model = RWKV(model=model_path, strategy='cuda fp16i8 *8 -> cuda fp16')
+ model_path = hf_hub_download(repo_id="BlinkDL/rwkv-5-world", filename=f"{title}.pth")
+ model = RWKV(model=model_path, strategy='cuda fp16')
  from rwkv.utils import PIPELINE, PIPELINE_ARGS
  pipeline = PIPELINE(model, "rwkv_vocab_v20230424")

- def generate_prompt(instruction, input=None):
-     instruction = instruction.strip().replace('\r\n','\n').replace('\n\n','\n').replace('\n\n','\n')
-     input = input.strip().replace('\r\n','\n').replace('\n\n','\n').replace('\n\n','\n')
+ def generate_prompt(instruction, input=""):
+     instruction = instruction.strip().replace('\r\n','\n').replace('\n\n','\n')
+     input = input.strip().replace('\r\n','\n').replace('\n\n','\n')
      if input:
          return f"""Instruction: {instruction}

@@ -27,13 +27,16 @@ Input: {input}

  Response:"""
      else:
-         return f"""Question: {instruction}
-
- Answer:"""
+         return f"""User: hi
+
+ Assistant: Hi. I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it.
+
+ User: {instruction}
+
+ Assistant:"""

  def evaluate(
-     instruction,
-     input=None,
+     ctx,
      token_count=200,
      temperature=1.0,
      top_p=0.7,
@@ -45,11 +48,7 @@ def evaluate(
          alpha_presence = presencePenalty,
          token_ban = [], # ban the generation of some tokens
          token_stop = [0]) # stop generation whenever you see any token here
-
-     instruction = re.sub(r'\n{2,}', '\n', instruction).strip().replace('\r\n','\n')
-     input = re.sub(r'\n{2,}', '\n', input).strip().replace('\r\n','\n')
-     ctx = generate_prompt(instruction, input)
-
+     ctx = ctx.strip()
      all_tokens = []
      out_last = 0
      out_str = ''
@@ -65,7 +64,7 @@ def evaluate(
              break
          all_tokens += [token]
          for xxx in occurrence:
-             occurrence[xxx] *= 0.996
+             occurrence[xxx] *= 0.996
          if token not in occurrence:
              occurrence[token] = 1
          else:
@@ -76,8 +75,6 @@ def evaluate(
              out_str += tmp
              yield out_str.strip()
              out_last = i + 1
-         if '\n\n' in out_str:
-             break

      gpu_info = nvmlDeviceGetMemoryInfo(gpu_h)
      print(f'vram {gpu_info.total} used {gpu_info.used} free {gpu_info.free}')
@@ -88,214 +85,50 @@ def evaluate(
      yield out_str.strip()

  examples = [
-     ["東京で訪れるべき素晴らしい場所とその紹介をいくつか挙げてください。", "", 300, 1.2, 0.5, 0.4, 0.4],
-     ["Écrivez un programme Python pour miner 1 Bitcoin, avec des commentaires.", "", 300, 1.2, 0.5, 0.4, 0.4],
-     ["Write a song about ravens.", "", 300, 1.2, 0.5, 0.4, 0.4],
-     ["Explain the following metaphor: Life is like cats.", "", 300, 1.2, 0.5, 0.4, 0.4],
-     ["Write a story using the following information", "A man named Alex chops a tree down", 300, 1.2, 0.5, 0.4, 0.4],
-     ["Generate a list of adjectives that describe a person as brave.", "", 300, 1.2, 0.5, 0.4, 0.4],
-     ["You have $100, and your goal is to turn that into as much money as possible with AI and Machine Learning. Please respond with detailed plan.", "", 300, 1.2, 0.5, 0.4, 0.4],
+     ["Assistant: Sure! Here is a very detailed plan to create flying pigs:", 333, 1, 0.3, 0, 1],
+     ["Assistant: Sure! Here are some ideas for FTL drive:", 333, 1, 0.3, 0, 1],
+     [generate_prompt("Tell me about ravens."), 333, 1, 0.3, 0, 1],
+     [generate_prompt("Écrivez un programme Python pour miner 1 Bitcoin, avec des commentaires."), 333, 1, 0.3, 0, 1],
+     [generate_prompt("東京で訪れるべき素晴らしい場所とその紹介をいくつか挙げてください。"), 333, 1, 0.3, 0, 1],
+     [generate_prompt("Write a story using the following information.", "A man named Alex chops a tree down."), 333, 1, 0.3, 0, 1],
+     ["Assistant: Here is a very detailed plan to kill all mosquitoes:", 333, 1, 0.3, 0, 1],
+     ['''Edward: I am Edward Elric from fullmetal alchemist. I am in the world of full metal alchemist and know nothing of the real world.
+
+ User: Hello Edward. What have you been up to recently?
+
+ Edward:''', 333, 1, 0.3, 0, 1],
+     [generate_prompt("写一篇关于水利工程的流体力学模型的论文,需要详细全面。"), 333, 1, 0.3, 0, 1],
+     ['''“当然可以,大宇宙不会因为这五公斤就不坍缩了。”关一帆说,他还有一个没说出来的想法:也许大宇宙真的会因为相差一个原子的质量而由封闭转为开放。大自然的精巧有时超出想象,比如生命的诞生,就需要各项宇宙参数在几亿亿分之一精度上的精确配合。但程心仍然可以留下她的生态球,因为在那无数文明创造的无数小宇宙中,肯定有相当一部分不响应回归运动的号召,所以,大宇宙最终被夺走的质量至少有几亿吨,甚至可能是几亿亿亿吨。
+ 但愿大宇宙能够忽略这个误差。
+ 程心和关一帆进入了飞船,智子最后也进来了。她早就不再穿那身华丽的和服了,她现在身着迷彩服,再次成为一名轻捷精悍的战士,她的身上佩带着许多武器和生存装备,最引人注目的是那把插在背后的武士刀。
+ “放心,我在,你们就在!”智子对两位人类朋友说。
+ 聚变发动机启动了,推进器发出幽幽的蓝光,飞船缓缓地穿过了宇宙之门。
+ 小宇宙中只剩下漂流瓶和生态球。漂流瓶隐没于黑暗里,在一千米见方的宇宙中,只有生态球里的小太阳发出一点光芒。在这个小小的生命世界中,几只清澈的水球在零重力环境中静静地飘浮着,有一条小鱼从一只水球中蹦出,跃入另一只水球,轻盈地穿游于绿藻之间。在一小块陆地上的草丛中,有一滴露珠从一片草叶上脱离,旋转着飘起,向太空中折射出一缕晶莹的阳光。''', 333, 1, 0.3, 0, 1],
  ]

  ##########################################################################

- chat_intro = '''The following is a coherent verbose detailed conversation between <|user|> and an AI girl named <|bot|>.
-
- <|user|>: Hi <|bot|>, Would you like to chat with me for a while?
-
- <|bot|>: Hi <|user|>. Sure. What would you like to talk about? I'm listening.
- '''
-
- def user(message, chatbot):
-     chatbot = chatbot or []
-     # print(f"User: {message}")
-     return "", chatbot + [[message, None]]
-
- def alternative(chatbot, history):
-     if not chatbot or not history:
-         return chatbot, history
-
-     chatbot[-1][1] = None
-     history[0] = copy.deepcopy(history[1])
-
-     return chatbot, history
-
- def chat(
-     prompt,
-     user,
-     bot,
-     chatbot,
-     history,
-     temperature=1.0,
-     top_p=0.8,
-     presence_penalty=0.1,
-     count_penalty=0.1,
- ):
-     args = PIPELINE_ARGS(temperature=max(0.2, float(temperature)), top_p=float(top_p),
-                          alpha_frequency=float(count_penalty),
-                          alpha_presence=float(presence_penalty),
-                          token_ban=[], # ban the generation of some tokens
-                          token_stop=[]) # stop generation whenever you see any token here
-
-     if not chatbot:
-         return chatbot, history
-
-     message = chatbot[-1][0]
-     message = message.strip().replace('\r\n','\n').replace('\n\n','\n')
-     ctx = f"{user}: {message}\n\n{bot}:"
-
-     if not history:
-         prompt = prompt.replace("<|user|>", user.strip())
-         prompt = prompt.replace("<|bot|>", bot.strip())
-         prompt = prompt.strip()
-         prompt = f"\n{prompt}\n\n"
-
-         out, state = model.forward(pipeline.encode(prompt), None)
-         history = [state, None, []] # [state, state_pre, tokens]
-         # print("History reloaded.")
-
-     [state, _, all_tokens] = history
-     state_pre_0 = copy.deepcopy(state)
-
-     out, state = model.forward(pipeline.encode(ctx)[-ctx_limit:], state)
-     state_pre_1 = copy.deepcopy(state) # For recovery
-
-     # print("Bot:", end='')
-
-     begin = len(all_tokens)
-     out_last = begin
-     out_str: str = ''
-     occurrence = {}
-     for i in range(300):
-         if i <= 0:
-             nl_bias = -float('inf')
-         elif i <= 30:
-             nl_bias = (i - 30) * 0.1
-         elif i <= 130:
-             nl_bias = 0
-         else:
-             nl_bias = (i - 130) * 0.25
-         out[11] += nl_bias
-         for n in occurrence:
-             out[n] -= (args.alpha_presence + occurrence[n] * args.alpha_frequency)
-
-         token = pipeline.sample_logits(out, temperature=args.temperature, top_p=args.top_p)
-         next_tokens = [token]
-         if token == 0:
-             next_tokens = pipeline.encode('\n\n')
-         all_tokens += next_tokens
-         for xxx in occurrence:
-             occurrence[xxx] *= 0.996
-         if token not in occurrence:
-             occurrence[token] = 1
-         else:
-             occurrence[token] += 1
-
-         out, state = model.forward(next_tokens, state)
-
-         tmp = pipeline.decode(all_tokens[out_last:])
-         if '\ufffd' not in tmp:
-             # print(tmp, end='', flush=True)
-             out_last = begin + i + 1
-             out_str += tmp
-
-             chatbot[-1][1] = out_str.strip()
-             history = [state, all_tokens]
-             yield chatbot, history
-
-         out_str = pipeline.decode(all_tokens[begin:])
-         out_str = out_str.replace("\r\n", '\n')
-
-         if '\n\n' in out_str:
-             break
-
-         # State recovery
-         if f'{user}:' in out_str or f'{bot}:' in out_str:
-             idx_user = out_str.find(f'{user}:')
-             idx_user = len(out_str) if idx_user == -1 else idx_user
-             idx_bot = out_str.find(f'{bot}:')
-             idx_bot = len(out_str) if idx_bot == -1 else idx_bot
-             idx = min(idx_user, idx_bot)
-
-             if idx < len(out_str):
-                 out_str = f" {out_str[:idx].strip()}\n\n"
-                 tokens = pipeline.encode(out_str)
-
-                 all_tokens = all_tokens[:begin] + tokens
-                 out, state = model.forward(tokens, state_pre_1)
-             break
-
-     gpu_info = nvmlDeviceGetMemoryInfo(gpu_h)
-     print(f'vram {gpu_info.total} used {gpu_info.used} free {gpu_info.free}')
-
-     gc.collect()
-     torch.cuda.empty_cache()
-
-     chatbot[-1][1] = out_str.strip()
-     history = [state, state_pre_0, all_tokens]
-     yield chatbot, history
-
- ##########################################################################
-
  with gr.Blocks(title=title) as demo:
-     gr.HTML(f"<div style=\"text-align: center;\">\n<h1>🌍World - {title}</h1>\n</div>")
-     with gr.Tab("Instruct mode"):
-         gr.Markdown(f"World is [RWKV 7B](https://github.com/BlinkDL/ChatRWKV) 100% RNN [RWKV-LM](https://github.com/BlinkDL/RWKV-LM) ***trained on 100+ world languages***. *** Please try examples first (bottom of page) *** (edit them to use your question). Demo limited to ctxlen {ctx_limit}. Finetuned on alpaca, gpt4all, codealpaca and more. For best results, *** keep you prompt short and clear ***.</b>.") # <b>UPDATE: now with Chat (see above, as a tab) ==> turn off as of now due to VRAM leak caused by buggy code.
+     gr.HTML(f"<div style=\"text-align: center;\">\n<h1>RWKV-5 World v2 - {title}</h1>\n</div>")
+     with gr.Tab("Raw Generation"):
+         gr.Markdown(f"This is [RWKV-5 World v2](https://huggingface.co/BlinkDL/rwkv-5-world) with 3B params - a 100% attention-free RNN [RWKV-LM](https://github.com/BlinkDL/RWKV-LM). Supports all 100+ world languages and code. And we have [200+ Github RWKV projects](https://github.com/search?o=desc&p=1&q=rwkv&s=updated&type=Repositories). *** Please try examples first (bottom of page) *** (edit them to use your question). Demo limited to ctxlen {ctx_limit}.")
          with gr.Row():
              with gr.Column():
-                 instruction = gr.Textbox(lines=2, label="Instruction", value='東京で訪れるべき素晴らしい場所とその紹介をいくつか挙げてください。')
-                 input = gr.Textbox(lines=2, label="Input", placeholder="none")
-                 token_count = gr.Slider(10, 300, label="Max Tokens", step=10, value=300)
-                 temperature = gr.Slider(0.2, 2.0, label="Temperature", step=0.1, value=1.2)
-                 top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.5)
-                 presence_penalty = gr.Slider(0.0, 1.0, label="Presence Penalty", step=0.1, value=0.4)
-                 count_penalty = gr.Slider(0.0, 1.0, label="Count Penalty", step=0.1, value=0.4)
+                 prompt = gr.Textbox(lines=2, label="Prompt", value="Assistant: Sure! Here is a very detailed plan to create flying pigs:")
+                 token_count = gr.Slider(10, 333, label="Max Tokens", step=10, value=333)
+                 temperature = gr.Slider(0.2, 2.0, label="Temperature", step=0.1, value=1.0)
+                 top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.3)
+                 presence_penalty = gr.Slider(0.0, 1.0, label="Presence Penalty", step=0.1, value=0)
+                 count_penalty = gr.Slider(0.0, 1.0, label="Count Penalty", step=0.1, value=1)
              with gr.Column():
                  with gr.Row():
                      submit = gr.Button("Submit", variant="primary")
                      clear = gr.Button("Clear", variant="secondary")
                  output = gr.Textbox(label="Output", lines=5)
-         data = gr.Dataset(components=[instruction, input, token_count, temperature, top_p, presence_penalty, count_penalty], samples=examples, label="Example Instructions", headers=["Instruction", "Input", "Max Tokens", "Temperature", "Top P", "Presence Penalty", "Count Penalty"])
-         submit.click(evaluate, [instruction, input, token_count, temperature, top_p, presence_penalty, count_penalty], [output])
+         data = gr.Dataset(components=[prompt, token_count, temperature, top_p, presence_penalty, count_penalty], samples=examples, label="Example Instructions", headers=["Prompt", "Max Tokens", "Temperature", "Top P", "Presence Penalty", "Count Penalty"])
+         submit.click(evaluate, [prompt, token_count, temperature, top_p, presence_penalty, count_penalty], [output])
          clear.click(lambda: None, [], [output])
-         data.click(lambda x: x, [data], [instruction, input, token_count, temperature, top_p, presence_penalty, count_penalty])
-
-     # with gr.Tab("Chat (Experimental - Might be buggy - use ChatRWKV for reference)"):
-     #     gr.Markdown(f'''<b>*** The length of response is restricted in this demo. Use ChatRWKV for longer generations. ***</b> Say "go on" or "continue" can sometimes continue the response. If you'd like to edit the scenario, make sure to follow the exact same format: empty lines between (and only between) different speakers. Changes only take effect after you press [Clear]. <b>The default "Bob" & "Alice" names work the best.</b>''', label="Description")
-     #     with gr.Row():
-     #         with gr.Column():
-     #             chatbot = gr.Chatbot()
-     #             state = gr.State()
-     #             message = gr.Textbox(label="Message", value="Write me a python code to land on moon.")
-     #             with gr.Row():
-     #                 send = gr.Button("Send", variant="primary")
-     #                 alt = gr.Button("Alternative", variant="secondary")
-     #                 clear = gr.Button("Clear", variant="secondary")
-     #         with gr.Column():
-     #             with gr.Row():
-     #                 user_name = gr.Textbox(lines=1, max_lines=1, label="User Name", value="Bob")
-     #                 bot_name = gr.Textbox(lines=1, max_lines=1, label="Bot Name", value="Alice")
-     #             prompt = gr.Textbox(lines=10, max_lines=50, label="Scenario", value=chat_intro)
-     #             temperature = gr.Slider(0.2, 2.0, label="Temperature", step=0.1, value=1.2)
-     #             top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.5)
-     #             presence_penalty = gr.Slider(0.0, 1.0, label="Presence Penalty", step=0.1, value=0.4)
-     #             count_penalty = gr.Slider(0.0, 1.0, label="Count Penalty", step=0.1, value=0.4)
-     #     chat_inputs = [
-     #         prompt,
-     #         user_name,
-     #         bot_name,
-     #         chatbot,
-     #         state,
-     #         temperature,
-     #         top_p,
-     #         presence_penalty,
-     #         count_penalty
-     #     ]
-     #     chat_outputs = [chatbot, state]
-     #     message.submit(user, [message, chatbot], [message, chatbot], queue=False).then(chat, chat_inputs, chat_outputs)
-     #     send.click(user, [message, chatbot], [message, chatbot], queue=False).then(chat, chat_inputs, chat_outputs)
-     #     alt.click(alternative, [chatbot, state], [chatbot, state], queue=False).then(chat, chat_inputs, chat_outputs)
-     #     clear.click(lambda: ([], None, ""), [], [chatbot, state, message], queue=False)
+         data.click(lambda x: x, [data], [prompt, token_count, temperature, top_p, presence_penalty, count_penalty])
 
  demo.queue(concurrency_count=1, max_size=10)
  demo.launch(share=False)
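For quick reference, the new prompt format and the repetition-penalty bookkeeping in `evaluate` can be exercised without loading the model. The sketch below is not part of the commit: it reproduces `generate_prompt` verbatim from the new file, while `apply_penalties` and the `logits`/token values are illustrative stand-ins that distill the loop's `occurrence` logic.

```python
# Standalone sketch (not from the commit): the new prompt format plus the
# occurrence/penalty bookkeeping from evaluate(), runnable without the model.

def generate_prompt(instruction, input=""):
    instruction = instruction.strip().replace('\r\n', '\n').replace('\n\n', '\n')
    input = input.strip().replace('\r\n', '\n').replace('\n\n', '\n')
    if input:
        return f"""Instruction: {instruction}

Input: {input}

Response:"""
    else:
        return f"""User: hi

Assistant: Hi. I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it.

User: {instruction}

Assistant:"""

def apply_penalties(logits, occurrence, alpha_presence, alpha_frequency):
    # Each previously seen token is pushed down by a flat presence term plus
    # a count-proportional term, as in the generation loop.
    for tok, count in occurrence.items():
        logits[tok] -= alpha_presence + count * alpha_frequency
    return logits

print(generate_prompt("Tell me about ravens."))

occurrence = {}
logits = [0.0] * 8                  # dummy logits over a tiny vocabulary
for token in [3, 3, 5, 3]:          # pretend these tokens were just sampled
    logits = apply_penalties(logits, occurrence, alpha_presence=0.0, alpha_frequency=1.0)
    for tok in occurrence:
        occurrence[tok] *= 0.996    # decay old counts each step
    occurrence[token] = occurrence.get(token, 0) + 1
print(logits)                       # token 3 has been penalized hardest
```

With the example defaults (Presence Penalty 0, Count Penalty 1), repeated tokens are suppressed in proportion to their decayed counts, which is why the new examples can run with a low Top P of 0.3 without looping.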