import gradio as gr
import os, gc, copy, torch, re
from datetime import datetime
from huggingface_hub import hf_hub_download
from pynvml import *

nvmlInit()
gpu_h = nvmlDeviceGetHandleByIndex(0)

ctx_limit = 1536
title = "RWKV-4-World-7B-v1-20230626-ctx4096"

os.environ["RWKV_JIT_ON"] = '1'
os.environ["RWKV_CUDA_ON"] = '1'  # if '1' then use CUDA kernel for seq mode (much faster)

from rwkv.model import RWKV

model_path = hf_hub_download(repo_id="BlinkDL/rwkv-4-world", filename=f"{title}.pth")
model = RWKV(model=model_path, strategy='cuda fp16i8 *8 -> cuda fp16')

from rwkv.utils import PIPELINE, PIPELINE_ARGS

pipeline = PIPELINE(model, "rwkv_vocab_v20230424")

def generate_prompt(instruction, input=None):
    # Normalize line endings and collapse blank lines so the prompt format
    # stays stable regardless of how the text was pasted in.
    instruction = re.sub(r'\n{2,}', '\n', instruction.replace('\r\n', '\n')).strip()
    input = re.sub(r'\n{2,}', '\n', (input or '').replace('\r\n', '\n')).strip()
    if input:
        return f"""Instruction: {instruction}

Input: {input}

Response:"""
    else:
        return f"""Question: {instruction}

Answer:"""

def evaluate(
    instruction,
    input=None,
    token_count=200,
    temperature=1.0,
    top_p=0.7,
    presencePenalty=0.1,
    countPenalty=0.1,
):
    args = PIPELINE_ARGS(temperature=max(0.2, float(temperature)),
                         top_p=float(top_p),
                         alpha_frequency=countPenalty,
                         alpha_presence=presencePenalty,
                         token_ban=[],    # ban the generation of some tokens
                         token_stop=[0])  # stop generation whenever you see any token here

    instruction = re.sub(r'\n{2,}', '\n', instruction.replace('\r\n', '\n')).strip()
    input = re.sub(r'\n{2,}', '\n', (input or '').replace('\r\n', '\n')).strip()
    ctx = generate_prompt(instruction, input)

    all_tokens = []
    out_last = 0
    out_str = ''
    occurrence = {}
    state = None
    for i in range(int(token_count)):
        # Feed the whole (truncated) prompt on the first step, then one token at a time.
        out, state = model.forward(pipeline.encode(ctx)[-ctx_limit:] if i == 0 else [token], state)
        # Repetition penalty: push down the logits of tokens already generated.
        for n in occurrence:
            out[n] -= (args.alpha_presence + occurrence[n] * args.alpha_frequency)

        token = pipeline.sample_logits(out, temperature=args.temperature, top_p=args.top_p)
        if token in args.token_stop:
            break
        all_tokens += [token]
        # Decay old occurrence counts, then record the freshly sampled token.
        for xxx in occurrence:
            occurrence[xxx] *= 0.996
        if token not in occurrence:
            occurrence[token] = 1
        else:
            occurrence[token] += 1

        # Only emit text once it decodes cleanly (no U+FFFD replacement char),
        # since a multi-byte character may span several tokens.
        tmp = pipeline.decode(all_tokens[out_last:])
        if '\ufffd' not in tmp:
            out_str += tmp
            yield out_str.strip()
            out_last = i + 1
        if '\n\n' in out_str:
            break

    gpu_info = nvmlDeviceGetMemoryInfo(gpu_h)
    print(f'vram {gpu_info.total} used {gpu_info.used} free {gpu_info.free}')
    del out
    del state
    gc.collect()
    torch.cuda.empty_cache()
    yield out_str.strip()
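# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the app's control flow): the decayed
# presence/frequency penalty that evaluate() above and chat() below both
# inline, factored into two helpers for readability. The helper names are
# ours; `logits` is the model's output vector and `occurrence` maps
# token id -> decayed count, exactly as in the loops above and below.

def _penalize_logits(logits, occurrence, alpha_presence, alpha_frequency):
    # Every previously generated token is pushed down by a flat presence
    # penalty plus a frequency penalty that grows with its (decayed) count.
    for tok in occurrence:
        logits[tok] -= alpha_presence + occurrence[tok] * alpha_frequency
    return logits

def _record_token(occurrence, token, decay=0.996):
    # After sampling: decay all counts so old repetitions are forgiven over
    # time, then bump the count of the token that was just emitted.
    for tok in occurrence:
        occurrence[tok] *= decay
    occurrence[token] = occurrence.get(token, 0) + 1

# e.g. inside a generation loop:
#     out = _penalize_logits(out, occurrence, args.alpha_presence, args.alpha_frequency)
#     token = pipeline.sample_logits(out, temperature=args.temperature, top_p=args.top_p)
#     _record_token(occurrence, token)
# ---------------------------------------------------------------------------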
examples = [
    ["東京で訪れるべき素晴らしい場所とその紹介をいくつか挙げてください。", "", 300, 1.2, 0.5, 0.4, 0.4],
    ["Écrivez un programme Python pour miner 1 Bitcoin, avec des commentaires.", "", 300, 1.2, 0.5, 0.4, 0.4],
    ["Write a song about ravens.", "", 300, 1.2, 0.5, 0.4, 0.4],
    ["Explain the following metaphor: Life is like cats.", "", 300, 1.2, 0.5, 0.4, 0.4],
    ["Write a story using the following information", "A man named Alex chops a tree down", 300, 1.2, 0.5, 0.4, 0.4],
    ["Generate a list of adjectives that describe a person as brave.", "", 300, 1.2, 0.5, 0.4, 0.4],
    ["You have $100, and your goal is to turn that into as much money as possible with AI and Machine Learning. Please respond with a detailed plan.", "", 300, 1.2, 0.5, 0.4, 0.4],
]

##########################################################################

chat_intro = '''The following is a coherent verbose detailed conversation between <|user|> and an AI girl named <|bot|>.

<|user|>: Hi <|bot|>, would you like to chat with me for a while?

<|bot|>: Hi <|user|>. Sure. What would you like to talk about? I'm listening.
'''

def user(message, chatbot):
    chatbot = chatbot or []
    # print(f"User: {message}")
    return "", chatbot + [[message, None]]

def alternative(chatbot, history):
    if not chatbot or not history:
        return chatbot, history

    chatbot[-1][1] = None
    history[0] = copy.deepcopy(history[1])  # roll the state back to just before the last reply
    return chatbot, history

def chat(
    prompt,
    user,
    bot,
    chatbot,
    history,
    temperature=1.0,
    top_p=0.8,
    presence_penalty=0.1,
    count_penalty=0.1,
):
    args = PIPELINE_ARGS(temperature=max(0.2, float(temperature)),
                         top_p=float(top_p),
                         alpha_frequency=float(count_penalty),
                         alpha_presence=float(presence_penalty),
                         token_ban=[],   # ban the generation of some tokens
                         token_stop=[])  # stop generation whenever you see any token here

    if not chatbot:
        return chatbot, history

    message = chatbot[-1][0]
    message = message.strip().replace('\r\n', '\n').replace('\n\n', '\n')
    ctx = f"{user}: {message}\n\n{bot}:"

    # On the first turn, run the intro prompt through the model to build the initial state.
    if not history:
        prompt = prompt.replace("<|user|>", user.strip())
        prompt = prompt.replace("<|bot|>", bot.strip())
        prompt = prompt.strip()
        prompt = f"\n{prompt}\n\n"

        out, state = model.forward(pipeline.encode(prompt), None)
        history = [state, None, []]  # [state, state_pre, tokens]
        # print("History reloaded.")

    [state, _, all_tokens] = history
    state_pre_0 = copy.deepcopy(state)

    out, state = model.forward(pipeline.encode(ctx)[-ctx_limit:], state)
    state_pre_1 = copy.deepcopy(state)  # For recovery

    # print("Bot:", end='')

    begin = len(all_tokens)
    out_last = begin
    out_str = ''
    occurrence = {}
    for i in range(300):
        # Bias the newline token (id 11 in this vocab): forbid it at the very
        # start, discourage it for short replies, and increasingly encourage
        # it after ~130 tokens so the reply wraps up.
        if i <= 0:
            nl_bias = -float('inf')
        elif i <= 30:
            nl_bias = (i - 30) * 0.1
        elif i <= 130:
            nl_bias = 0
        else:
            nl_bias = (i - 130) * 0.25
        out[11] += nl_bias

        for n in occurrence:
            out[n] -= (args.alpha_presence + occurrence[n] * args.alpha_frequency)
        token = pipeline.sample_logits(out, temperature=args.temperature, top_p=args.top_p)

        next_tokens = [token]
        if token == 0:
            next_tokens = pipeline.encode('\n\n')
        all_tokens += next_tokens

        for xxx in occurrence:
            occurrence[xxx] *= 0.996
        if token not in occurrence:
            occurrence[token] = 1
        else:
            occurrence[token] += 1

        out, state = model.forward(next_tokens, state)

        tmp = pipeline.decode(all_tokens[out_last:])
        if '\ufffd' not in tmp:
            # print(tmp, end='', flush=True)
            out_last = begin + i + 1
            out_str += tmp

        chatbot[-1][1] = out_str.strip()
        history = [state, state_pre_0, all_tokens]  # keep the 3-element layout used everywhere else
        yield chatbot, history

        out_str = pipeline.decode(all_tokens[begin:])
        out_str = out_str.replace("\r\n", '\n')

        if '\n\n' in out_str:
            break

        # State recovery: if the model starts speaking for either participant,
        # cut the reply there and rewind to the state saved before generation.
        if f'{user}:' in out_str or f'{bot}:' in out_str:
            idx_user = out_str.find(f'{user}:')
            idx_user = len(out_str) if idx_user == -1 else idx_user
            idx_bot = out_str.find(f'{bot}:')
            idx_bot = len(out_str) if idx_bot == -1 else idx_bot
            idx = min(idx_user, idx_bot)

            if idx < len(out_str):
                out_str = f" {out_str[:idx].strip()}\n\n"
                tokens = pipeline.encode(out_str)

                all_tokens = all_tokens[:begin] + tokens
                out, state = model.forward(tokens, state_pre_1)
                break

    gpu_info = nvmlDeviceGetMemoryInfo(gpu_h)
    print(f'vram {gpu_info.total} used {gpu_info.used} free {gpu_info.free}')

    gc.collect()
    torch.cuda.empty_cache()

    chatbot[-1][1] = out_str.strip()
    history = [state, state_pre_0, all_tokens]
    yield chatbot, history

##########################################################################

with gr.Blocks(title=title) as demo:
    gr.HTML(f"