0xMRTT BlinkDL committed on
Commit 3f37afb · 0 Parent(s)

Duplicate from BlinkDL/Raven-RWKV-7B


Co-authored-by: BlinkDL <BlinkDL@users.noreply.huggingface.co>

Files changed (5)
  1. .gitattributes +34 -0
  2. 20B_tokenizer.json +0 -0
  3. README.md +14 -0
  4. app.py +305 -0
  5. requirements.txt +7 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
20B_tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
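Although the tokenizer diff is not rendered, app.py loads this file via PIPELINE(model, "20B_tokenizer.json"). As a minimal sketch (not part of the commit), assuming the file is a standard Hugging Face tokenizers JSON, it can also be inspected directly with the tokenizers package from requirements.txt:

from tokenizers import Tokenizer  # standalone check, hypothetical usage outside app.py

tok = Tokenizer.from_file("20B_tokenizer.json")
ids = tok.encode("Tell me about ravens.").ids   # token ids fed to the model
print(ids)
print(tok.decode(ids))                          # round-trip back to text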
 
README.md ADDED
@@ -0,0 +1,14 @@
+ ---
+ title: Raven RWKV 7B
+ emoji: 🚀
+ colorFrom: blue
+ colorTo: green
+ sdk: gradio
+ sdk_version: 3.23.0
+ app_file: app.py
+ pinned: false
+ license: apache-2.0
+ duplicated_from: BlinkDL/Raven-RWKV-7B
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,305 @@
+ import gradio as gr
+ import os, gc, copy, torch
+ from datetime import datetime
+ from huggingface_hub import hf_hub_download
+ from pynvml import *
+ nvmlInit()
+ gpu_h = nvmlDeviceGetHandleByIndex(0)
+ ctx_limit = 1024
+ title = "RWKV-4-Raven-7B-v11x-Eng99%-Other1%-20230429-ctx8192"
+
+ os.environ["RWKV_JIT_ON"] = '1'
+ os.environ["RWKV_CUDA_ON"] = '1' # if '1' then use CUDA kernel for seq mode (much faster)
+
+ from rwkv.model import RWKV
+ model_path = hf_hub_download(repo_id="BlinkDL/rwkv-4-raven", filename=f"{title}.pth")
+ model = RWKV(model=model_path, strategy='cuda fp16i8 *8 -> cuda fp16')
+ # model = RWKV(model='D:/ChatRWKV/RWKV-4-Raven-7B-v9-Eng99%-Other1%-20230412-ctx8192.pth', strategy='cuda fp16i8 *10 -> cuda fp16')
+ from rwkv.utils import PIPELINE, PIPELINE_ARGS
+ pipeline = PIPELINE(model, "20B_tokenizer.json")
+
+ def generate_prompt(instruction, input=None):
+     instruction = instruction.strip().replace('\r\n','\n').replace('\n\n','\n')
+     input = input.strip().replace('\r\n','\n').replace('\n\n','\n')
+     if input:
+         return f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
+
+ # Instruction:
+ {instruction}
+
+ # Input:
+ {input}
+
+ # Response:
+ """
+     else:
+         return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.
+
+ # Instruction:
+ {instruction}
+
+ # Response:
+ """
+
+ def evaluate(
+     instruction,
+     input=None,
+     token_count=200,
+     temperature=1.0,
+     top_p=0.7,
+     presencePenalty = 0.1,
+     countPenalty = 0.1,
+ ):
+     args = PIPELINE_ARGS(temperature = max(0.2, float(temperature)), top_p = float(top_p),
+                          alpha_frequency = countPenalty,
+                          alpha_presence = presencePenalty,
+                          token_ban = [], # ban the generation of some tokens
+                          token_stop = [0]) # stop generation whenever you see any token here
+
+     instruction = instruction.strip().replace('\r\n','\n').replace('\n\n','\n')
+     input = input.strip().replace('\r\n','\n').replace('\n\n','\n')
+     ctx = generate_prompt(instruction, input)
+
+     all_tokens = []
+     out_last = 0
+     out_str = ''
+     occurrence = {}
+     state = None
+     for i in range(int(token_count)):
+         out, state = model.forward(pipeline.encode(ctx)[-ctx_limit:] if i == 0 else [token], state)
+         for n in occurrence:
+             out[n] -= (args.alpha_presence + occurrence[n] * args.alpha_frequency)
+
+         token = pipeline.sample_logits(out, temperature=args.temperature, top_p=args.top_p)
+         if token in args.token_stop:
+             break
+         all_tokens += [token]
+         if token not in occurrence:
+             occurrence[token] = 1
+         else:
+             occurrence[token] += 1
+
+         tmp = pipeline.decode(all_tokens[out_last:])
+         if '\ufffd' not in tmp:
+             out_str += tmp
+             yield out_str.strip()
+             out_last = i + 1
+
+     gpu_info = nvmlDeviceGetMemoryInfo(gpu_h)
+     print(f'vram {gpu_info.total} used {gpu_info.used} free {gpu_info.free}')
+
+     gc.collect()
+     torch.cuda.empty_cache()
+     yield out_str.strip()
+
+ examples = [
+     ["Tell me about ravens.", "", 150, 1.2, 0.5, 0.4, 0.4],
+     ["Write a python function to mine 1 BTC, with details and comments.", "", 150, 1.2, 0.5, 0.4, 0.4],
+     ["Write a song about ravens.", "", 150, 1.2, 0.5, 0.4, 0.4],
+     ["Explain the following metaphor: Life is like cats.", "", 150, 1.2, 0.5, 0.4, 0.4],
+     ["Write a story using the following information", "A man named Alex chops a tree down", 150, 1.2, 0.5, 0.4, 0.4],
+     ["Generate a list of adjectives that describe a person as brave.", "", 150, 1.2, 0.5, 0.4, 0.4],
+     ["You have $100, and your goal is to turn that into as much money as possible with AI and Machine Learning. Please respond with detailed plan.", "", 150, 1.2, 0.5, 0.4, 0.4],
+ ]
+
+ ##########################################################################
+
+ chat_intro = '''The following is a coherent verbose detailed conversation between <|user|> and an AI girl named <|bot|>.
+
+ <|user|>: Hi <|bot|>, Would you like to chat with me for a while?
+
+ <|bot|>: Hi <|user|>. Sure. What would you like to talk about? I'm listening.
+ '''
+
+ def user(message, chatbot):
+     chatbot = chatbot or []
+     # print(f"User: {message}")
+     return "", chatbot + [[message, None]]
+
+ def alternative(chatbot, history):
+     if not chatbot or not history:
+         return chatbot, history
+
+     chatbot[-1][1] = None
+     history[0] = copy.deepcopy(history[1])
+
+     return chatbot, history
+
+ def chat(
+     prompt,
+     user,
+     bot,
+     chatbot,
+     history,
+     temperature=1.0,
+     top_p=0.8,
+     presence_penalty=0.1,
+     count_penalty=0.1,
+ ):
+     args = PIPELINE_ARGS(temperature=max(0.2, float(temperature)), top_p=float(top_p),
+                          alpha_frequency=float(count_penalty),
+                          alpha_presence=float(presence_penalty),
+                          token_ban=[], # ban the generation of some tokens
+                          token_stop=[]) # stop generation whenever you see any token here
+
+     if not chatbot:
+         return chatbot, history
+
+     message = chatbot[-1][0]
+     message = message.strip().replace('\r\n','\n').replace('\n\n','\n')
+     ctx = f"{user}: {message}\n\n{bot}:"
+
+     if not history:
+         prompt = prompt.replace("<|user|>", user.strip())
+         prompt = prompt.replace("<|bot|>", bot.strip())
+         prompt = prompt.strip()
+         prompt = f"\n{prompt}\n\n"
+
+         out, state = model.forward(pipeline.encode(prompt), None)
+         history = [state, None, []] # [state, state_pre, tokens]
+         # print("History reloaded.")
+
+     [state, _, all_tokens] = history
+     state_pre_0 = copy.deepcopy(state)
+
+     out, state = model.forward(pipeline.encode(ctx)[-ctx_limit:], state)
+     state_pre_1 = copy.deepcopy(state) # For recovery
+
+     # print("Bot:", end='')
+
+     begin = len(all_tokens)
+     out_last = begin
+     out_str: str = ''
+     occurrence = {}
+     for i in range(300):
+         if i <= 0:
+             nl_bias = -float('inf')
+         elif i <= 30:
+             nl_bias = (i - 30) * 0.1
+         elif i <= 130:
+             nl_bias = 0
+         else:
+             nl_bias = (i - 130) * 0.25
+         out[187] += nl_bias
+         for n in occurrence:
+             out[n] -= (args.alpha_presence + occurrence[n] * args.alpha_frequency)
+
+         token = pipeline.sample_logits(out, temperature=args.temperature, top_p=args.top_p)
+         next_tokens = [token]
+         if token == 0:
+             next_tokens = pipeline.encode('\n\n')
+         all_tokens += next_tokens
+
+         if token not in occurrence:
+             occurrence[token] = 1
+         else:
+             occurrence[token] += 1
+
+         out, state = model.forward(next_tokens, state)
+
+         tmp = pipeline.decode(all_tokens[out_last:])
+         if '\ufffd' not in tmp:
+             # print(tmp, end='', flush=True)
+             out_last = begin + i + 1
+             out_str += tmp
+
+             chatbot[-1][1] = out_str.strip()
+             history = [state, all_tokens]
+             yield chatbot, history
+
+         out_str = pipeline.decode(all_tokens[begin:])
+         out_str = out_str.replace("\r\n", '\n').replace('\\n', '\n')
+
+         if '\n\n' in out_str:
+             break
+
+         # State recovery
+         if f'{user}:' in out_str or f'{bot}:' in out_str:
+             idx_user = out_str.find(f'{user}:')
+             idx_user = len(out_str) if idx_user == -1 else idx_user
+             idx_bot = out_str.find(f'{bot}:')
+             idx_bot = len(out_str) if idx_bot == -1 else idx_bot
+             idx = min(idx_user, idx_bot)
+
+             if idx < len(out_str):
+                 out_str = f" {out_str[:idx].strip()}\n\n"
+                 tokens = pipeline.encode(out_str)
+
+                 all_tokens = all_tokens[:begin] + tokens
+                 out, state = model.forward(tokens, state_pre_1)
+                 break
+
+     gpu_info = nvmlDeviceGetMemoryInfo(gpu_h)
+     print(f'vram {gpu_info.total} used {gpu_info.used} free {gpu_info.free}')
+
+     gc.collect()
+     torch.cuda.empty_cache()
+
+     chatbot[-1][1] = out_str.strip()
+     history = [state, state_pre_0, all_tokens]
+     yield chatbot, history
+
+ ##########################################################################
+
+ with gr.Blocks(title=title) as demo:
+     gr.HTML(f"<div style=\"text-align: center;\">\n<h1>🐦Raven - {title}</h1>\n</div>")
+     with gr.Tab("Instruct mode"):
+         gr.Markdown(f"Raven is [RWKV 7B](https://github.com/BlinkDL/ChatRWKV), a 100% RNN [RWKV-LM](https://github.com/BlinkDL/RWKV-LM), finetuned to follow instructions. *** Please try examples first (bottom of page) *** (edit them to use your question). Demo limited to ctxlen {ctx_limit}. Finetuned on alpaca, gpt4all, codealpaca and more. For best results, *** keep your prompt short and clear ***. <b>UPDATE: now with Chat (see above, as a tab)</b>.")
+         with gr.Row():
+             with gr.Column():
+                 instruction = gr.Textbox(lines=2, label="Instruction", value="Tell me about ravens.")
+                 input = gr.Textbox(lines=2, label="Input", placeholder="none")
+                 token_count = gr.Slider(10, 200, label="Max Tokens", step=10, value=150)
+                 temperature = gr.Slider(0.2, 2.0, label="Temperature", step=0.1, value=1.2)
+                 top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.5)
+                 presence_penalty = gr.Slider(0.0, 1.0, label="Presence Penalty", step=0.1, value=0.4)
+                 count_penalty = gr.Slider(0.0, 1.0, label="Count Penalty", step=0.1, value=0.4)
+             with gr.Column():
+                 with gr.Row():
+                     submit = gr.Button("Submit", variant="primary")
+                     clear = gr.Button("Clear", variant="secondary")
+                 output = gr.Textbox(label="Output", lines=5)
+         data = gr.Dataset(components=[instruction, input, token_count, temperature, top_p, presence_penalty, count_penalty], samples=examples, label="Example Instructions", headers=["Instruction", "Input", "Max Tokens", "Temperature", "Top P", "Presence Penalty", "Count Penalty"])
+         submit.click(evaluate, [instruction, input, token_count, temperature, top_p, presence_penalty, count_penalty], [output])
+         clear.click(lambda: None, [], [output])
+         data.click(lambda x: x, [data], [instruction, input, token_count, temperature, top_p, presence_penalty, count_penalty])
+
+     with gr.Tab("Chat (Experimental - Might be buggy - use ChatRWKV for reference)"):
+         gr.Markdown(f'''<b>*** The length of response is restricted in this demo. Use ChatRWKV for longer generations. ***</b> Saying "go on" or "continue" can sometimes continue the response. If you'd like to edit the scenario, make sure to follow the exact same format: empty lines between (and only between) different speakers. Changes only take effect after you press [Clear]. <b>The default "Bob" & "Alice" names work the best.</b>''', label="Description")
+         with gr.Row():
+             with gr.Column():
+                 chatbot = gr.Chatbot()
+                 state = gr.State()
+                 message = gr.Textbox(label="Message", value="Write me a python code to land on moon.")
+                 with gr.Row():
+                     send = gr.Button("Send", variant="primary")
+                     alt = gr.Button("Alternative", variant="secondary")
+                     clear = gr.Button("Clear", variant="secondary")
+             with gr.Column():
+                 with gr.Row():
+                     user_name = gr.Textbox(lines=1, max_lines=1, label="User Name", value="Bob")
+                     bot_name = gr.Textbox(lines=1, max_lines=1, label="Bot Name", value="Alice")
+                 prompt = gr.Textbox(lines=10, max_lines=50, label="Scenario", value=chat_intro)
+                 temperature = gr.Slider(0.2, 2.0, label="Temperature", step=0.1, value=1.2)
+                 top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.5)
+                 presence_penalty = gr.Slider(0.0, 1.0, label="Presence Penalty", step=0.1, value=0.4)
+                 count_penalty = gr.Slider(0.0, 1.0, label="Count Penalty", step=0.1, value=0.4)
+         chat_inputs = [
+             prompt,
+             user_name,
+             bot_name,
+             chatbot,
+             state,
+             temperature,
+             top_p,
+             presence_penalty,
+             count_penalty
+         ]
+         chat_outputs = [chatbot, state]
+         message.submit(user, [message, chatbot], [message, chatbot], queue=False).then(chat, chat_inputs, chat_outputs)
+         send.click(user, [message, chatbot], [message, chatbot], queue=False).then(chat, chat_inputs, chat_outputs)
+         alt.click(alternative, [chatbot, state], [chatbot, state], queue=False).then(chat, chat_inputs, chat_outputs)
+         clear.click(lambda: ([], None, ""), [], [chatbot, state, message], queue=False)
+
+ demo.queue(concurrency_count=1, max_size=10)
+ demo.launch(share=False)
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ torch
+ ninja
+ tokenizers
+ rwkv==0.6.2
+ pynvml
+ huggingface_hub
+ gradio>=3.17.1
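
For reference, a minimal sketch of driving the same model outside the Gradio UI, mirroring the sampling loop in evaluate() from app.py. It assumes a CUDA GPU, the packages above installed, and 20B_tokenizer.json in the working directory; the prompt string and the 150-token budget are only illustrative.

import os
os.environ["RWKV_JIT_ON"] = '1'
os.environ["RWKV_CUDA_ON"] = '1'  # set before importing rwkv.model, as app.py does

from huggingface_hub import hf_hub_download
from rwkv.model import RWKV
from rwkv.utils import PIPELINE, PIPELINE_ARGS

title = "RWKV-4-Raven-7B-v11x-Eng99%-Other1%-20230429-ctx8192"
model_path = hf_hub_download(repo_id="BlinkDL/rwkv-4-raven", filename=f"{title}.pth")
model = RWKV(model=model_path, strategy='cuda fp16i8 *8 -> cuda fp16')
pipeline = PIPELINE(model, "20B_tokenizer.json")

args = PIPELINE_ARGS(temperature=1.2, top_p=0.5,
                     alpha_frequency=0.4, alpha_presence=0.4,
                     token_ban=[], token_stop=[0])

# Example prompt in the same format generate_prompt() builds for instruction-only input
ctx = ("Below is an instruction that describes a task. "
      "Write a response that appropriately completes the request.\n\n"
      "# Instruction:\nTell me about ravens.\n\n# Response:\n")

state, occurrence, out_tokens = None, {}, []
out, state = model.forward(pipeline.encode(ctx)[-1024:], state)  # feed the prompt (ctx_limit = 1024)
for _ in range(150):                                             # generate up to 150 tokens
    for n in occurrence:                                         # same presence/frequency penalty as app.py
        out[n] -= args.alpha_presence + occurrence[n] * args.alpha_frequency
    token = pipeline.sample_logits(out, temperature=args.temperature, top_p=args.top_p)
    if token in args.token_stop:                                 # token 0 ends the generation
        break
    occurrence[token] = occurrence.get(token, 0) + 1
    out_tokens.append(token)
    out, state = model.forward([token], state)                   # advance the RNN state by one token
print(pipeline.decode(out_tokens))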