decula commited on
Commit
5ef608f
·
1 Parent(s): 838bce8

add app.py

Browse files
Files changed (1) hide show
  1. app.py +168 -0
app.py ADDED
@@ -0,0 +1,168 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Gradio demo for the RWKV-5 "World" 1.5B model: probes for a GPU via NVML,
# picks a CPU/CUDA strategy accordingly, then downloads and loads the model.
import gradio as gr
import os, gc, copy, torch  # NOTE(review): `copy` appears unused in this file — confirm before removing
from datetime import datetime
from huggingface_hub import hf_hub_download
# NOTE(review): wildcard import brings nvmlInit/nvmlDeviceGetCount/NVMLError etc.
# into scope; `evaluate` below also relies on these names.
from pynvml import *

# Flag to check if GPU is present
HAS_GPU = False

# Model title and context size limit
ctx_limit = 2000  # max number of prompt tokens fed to the model (see evaluate)
title = "RWKV-5-World-1B5-v2-20231025-ctx4096"
model_file = "rwkv-5-h-world-1b5"

# Get the GPU count; on machines without NVIDIA drivers NVML init fails and
# we fall through with HAS_GPU = False (CPU mode).
try:
    nvmlInit()
    GPU_COUNT = nvmlDeviceGetCount()
    if GPU_COUNT > 0:
        HAS_GPU = True
        gpu_h = nvmlDeviceGetHandleByIndex(0)  # handle for VRAM stats in evaluate()
except NVMLError as error:
    print(error)


os.environ["RWKV_JIT_ON"] = '1'

# Model strat to use
MODEL_STRAT="cpu bf16"
os.environ["RWKV_CUDA_ON"] = '0' # if '1' then use CUDA kernel for seq mode (much faster)

# Switch to GPU mode
if HAS_GPU == True :
    os.environ["RWKV_CUDA_ON"] = '1'
    MODEL_STRAT = "cuda bf16"

# Load the model accordingly.
# The rwkv imports happen AFTER the env vars above are set — the library reads
# RWKV_JIT_ON / RWKV_CUDA_ON at import time, so this ordering is load-bearing.
from rwkv.model import RWKV
model_path = hf_hub_download(repo_id="a686d380/rwkv-5-h-world", filename=f"{model_file}.pth")
model = RWKV(model=model_path, strategy=MODEL_STRAT)
from rwkv.utils import PIPELINE, PIPELINE_ARGS
pipeline = PIPELINE(model, "rwkv_vocab_v20230424")
44
# Prompt generation
def generate_prompt(instruction, input=""):
    """Build the text prompt fed to the model.

    With a non-empty *input*, returns an Instruction/Input/Response
    template; otherwise returns a chat-style User/Assistant template
    seeded with a fixed greeting exchange. Both arguments are stripped,
    CRLF-normalized, and have double newlines collapsed once.
    """
    def _normalize(text):
        return text.strip().replace('\r\n', '\n').replace('\n\n', '\n')

    instruction = _normalize(instruction)
    input = _normalize(input)

    if not input:
        return (
            "User: hi\n"
            "\n"
            "Assistant: Hi. I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it.\n"
            "\n"
            f"User: {instruction}\n"
            "\n"
            "Assistant:"
        )

    return f"Instruction: {instruction}\n\nInput: {input}\n\nResponse:"
63
# Evaluation logic
def evaluate(
    ctx,
    token_count=200,
    temperature=1.0,
    top_p=0.7,
    presencePenalty = 0.1,
    countPenalty = 0.1,
):
    """Stream text generated by the RWKV model for the prompt *ctx*.

    Generator: yields the accumulated output string (progressively longer)
    each time the pending tokens decode to valid UTF-8, and once more at
    the end. Generation stops after *token_count* tokens or at the first
    stop token (id 0). Temperature is clamped to >= 0.2 to avoid
    degenerate sampling; the prompt is truncated to the last `ctx_limit`
    tokens.
    """
    print(ctx)
    args = PIPELINE_ARGS(temperature = max(0.2, float(temperature)), top_p = float(top_p),
                         alpha_frequency = countPenalty,
                         alpha_presence = presencePenalty,
                         token_ban = [], # ban the generation of some tokens
                         token_stop = [0]) # stop generation whenever you see any token here
    ctx = ctx.strip()
    all_tokens = []
    out_last = 0
    out_str = ''
    occurrence = {}
    state = None
    # FIX: pre-bind `out` (and `token`) so the `del out` cleanup below cannot
    # raise NameError when token_count <= 0 and the loop body never runs.
    out = None
    token = None
    for i in range(int(token_count)):
        # First step feeds the (truncated) prompt; later steps feed only the
        # last sampled token, carrying the RNN state forward.
        out, state = model.forward(pipeline.encode(ctx)[-ctx_limit:] if i == 0 else [token], state)
        # Apply presence/frequency penalties to tokens already generated.
        for n in occurrence:
            out[n] -= (args.alpha_presence + occurrence[n] * args.alpha_frequency)

        token = pipeline.sample_logits(out, temperature=args.temperature, top_p=args.top_p)
        if token in args.token_stop:
            break
        all_tokens += [token]
        # Decay old counts so the repetition penalty fades over time.
        for xxx in occurrence:
            occurrence[xxx] *= 0.996
        if token not in occurrence:
            occurrence[token] = 1
        else:
            occurrence[token] += 1

        # Only emit once the pending bytes decode cleanly (no U+FFFD
        # replacement char from a partial multi-byte sequence).
        tmp = pipeline.decode(all_tokens[out_last:])
        if '\ufffd' not in tmp:
            out_str += tmp
            yield out_str.strip()
            out_last = i + 1

    if HAS_GPU:
        gpu_info = nvmlDeviceGetMemoryInfo(gpu_h)
        print(f'vram {gpu_info.total} used {gpu_info.used} free {gpu_info.free}')

    del out
    del state
    gc.collect()

    if HAS_GPU:
        torch.cuda.empty_cache()

    yield out_str.strip()
119
# Examples and gradio blocks
examples = [
    ["Assistant: Sure! Here is a very detailed plan to create flying pigs:", 333, 1, 0.3, 0, 1],
    ["Assistant: Sure! Here are some ideas for FTL drive:", 333, 1, 0.3, 0, 1],
    [generate_prompt("Tell me about ravens."), 333, 1, 0.3, 0, 1],
    [generate_prompt("Écrivez un programme Python pour miner 1 Bitcoin, avec des commentaires."), 333, 1, 0.3, 0, 1],
    [generate_prompt("東京で訪れるべき素晴らしい場所とその紹介をいくつか挙げてください。"), 333, 1, 0.3, 0, 1],
    [generate_prompt("Write a story using the following information.", "A man named Alex chops a tree down."), 333, 1, 0.3, 0, 1],
    ["Assistant: Here is a very detailed plan to kill all mosquitoes:", 333, 1, 0.3, 0, 1],
    ['''Edward: I am Edward Elric from fullmetal alchemist. I am in the world of full metal alchemist and know nothing of the real world.

User: Hello Edward. What have you been up to recently?

Edward:''', 333, 1, 0.3, 0, 1],
    [generate_prompt("写一篇关于水利工程的流体力学模型的论文,需要详细全面。"), 333, 1, 0.3, 0, 1],
    ['''“当然可以,大宇宙不会因为这五公斤就不坍缩了。”关一帆说,他还有一个没说出来的想法:也许大宇宙真的会因为相差一个原子的质量而由封闭转为开放。大自然的精巧有时超出想象,比如生命的诞生,就需要各项宇宙参数在几亿亿分之一精度上的精确配合。但程心仍然可以留下她的生态球,因为在那无数文明创造的无数小宇宙中,肯定有相当一部分不响应回归运动的号召,所以,大宇宙最终被夺走的质量至少有几亿吨,甚至可能是几亿亿亿吨。
但愿大宇宙能够忽略这个误差。
程心和关一帆进入了飞船,智子最后也进来了。她早就不再穿那身华丽的和服了,她现在身着迷彩服,再次成为一名轻捷精悍的战士,她的身上佩带着许多武器和生存装备,最引人注目的是那把插在背后的武士刀。
“放心,我在,你们就在!”智子对两位人类朋友说。
聚变发动机启动了,推进器发出幽幽的蓝光,飞船缓缓地穿过了宇宙之门。
小宇宙中只剩下漂流瓶和生态球。漂流瓶隐没于黑暗里,在一千米见方的宇宙中,只有生态球里的小太阳发出一点光芒。在这个小小的生命世界中,几只清澈的水球在零重力环境中静静地飘浮着,有一条小鱼从一只水球中蹦出,跃入另一只水球,轻盈地穿游于绿藻之间。在一小块陆地上的草丛中,有一滴露珠从一片草叶上脱离,旋转着飘起,向太空中折射出一缕晶莹的阳光。''', 333, 1, 0.3, 0, 1],
]

##########################################################################

# Gradio blocks: prompt + sampling controls on the left, streamed output on
# the right, and a clickable example table below.
with gr.Blocks(title=title) as demo:
    gr.HTML(f"<div style=\"text-align: center;\">\n<h1>RWKV-5 World v2 - {title}</h1>\n</div>")
    with gr.Tab("Raw Generation"):
        gr.Markdown(f"This is RWKV-5 World v2 with 1.5B params - a 100% attention-free RNN [RWKV-LM](https://github.com/BlinkDL/RWKV-LM). Supports all 100+ world languages and code. And we have [200+ Github RWKV projects](https://github.com/search?o=desc&p=1&q=rwkv&s=updated&type=Repositories). *** Please try examples first (bottom of page) *** (edit them to use your question). Demo limited to ctxlen {ctx_limit}.")
        with gr.Row():
            with gr.Column():
                prompt = gr.Textbox(lines=2, label="Prompt", value="")
                token_count = gr.Slider(10, 300, label="Max Tokens", step=10, value=100)
                temperature = gr.Slider(0.2, 2.0, label="Temperature", step=0.1, value=1.0)
                top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.3)
                presence_penalty = gr.Slider(0.0, 1.0, label="Presence Penalty", step=0.1, value=1)
                count_penalty = gr.Slider(0.0, 1.0, label="Count Penalty", step=0.1, value=1)
            with gr.Column():
                with gr.Row():
                    submit = gr.Button("Submit", variant="primary")
                    clear = gr.Button("Clear", variant="secondary")
                output = gr.Textbox(label="Output", lines=5)
        # FIX: pass samples=examples — previously the `examples` list was built
        # but never wired into the Dataset, leaving the example table empty.
        data = gr.Dataset(components=[prompt, token_count, temperature, top_p, presence_penalty, count_penalty], samples=examples, label="Example Instructions", headers=["Prompt", "Max Tokens", "Temperature", "Top P", "Presence Penalty", "Count Penalty"])
        submit.click(evaluate, [prompt, token_count, temperature, top_p, presence_penalty, count_penalty], [output])
        clear.click(lambda: None, [], [output])
        # Clicking an example row copies its six values back into the inputs.
        data.click(lambda x: x, [data], [prompt, token_count, temperature, top_p, presence_penalty, count_penalty])

# Gradio launch
demo.launch(share=False)