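# Gradio demo app: streaming instruction-following chat with an RWKV-4 "Raven" model on a single GPU.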
import gradio as gr
import os, gc, torch
from datetime import datetime
from huggingface_hub import hf_hub_download
from pynvml import *
nvmlInit()
gpu_h = nvmlDeviceGetHandleByIndex(0)
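# Maximum number of prompt tokens fed to the model; longer prompts are truncated from the left.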
ctx_limit = 1024
os.environ["RWKV_JIT_ON"] = '1'
os.environ["RWKV_CUDA_ON"] = '1' # if '1' then use CUDA kernel for seq mode (much faster)
from rwkv.model import RWKV
# NOTE: `title` was undefined in the original; set it to the filename (without the
# .pth extension) of the desired checkpoint in the BlinkDL/rwkv-4-raven repo.
title = "RWKV-4-Raven-7B-v12-Eng98%-Other2%-20230521-ctx8192"  # placeholder checkpoint name
model_path = hf_hub_download(repo_id="BlinkDL/rwkv-4-raven", filename=f"{title}.pth")
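# Strategy string: the first 8 layers run on the GPU quantized to int8 (fp16i8) to save VRAM,
# and the remaining layers run in fp16 (see the rwkv package docs for the strategy syntax).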
model = RWKV(model=model_path, strategy='cuda fp16i8 *8 -> cuda fp16')
from rwkv.utils import PIPELINE, PIPELINE_ARGS
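# "20B_tokenizer.json" is the GPT-NeoX-20B tokenizer file and must be present next to this script.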
pipeline = PIPELINE(model, "20B_tokenizer.json")
def generate_prompt(instruction, input=None):
    if input:
        return f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.

# Instruction:
{instruction}

# Input:
{input}

# Response:
"""
    else:
        return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.

# Instruction:
{instruction}

# Response:
"""
def evaluate(
    instruction,
    # input=None,
    # token_count=200,
    # temperature=1.0,
    # top_p=0.7,
    # presencePenalty = 0.1,
    # countPenalty = 0.1,
):
    args = PIPELINE_ARGS(temperature=1.0, top_p=0.5,
                         alpha_frequency=0.4,  # penalty that grows with each repetition of a token
                         alpha_presence=0.4,   # flat penalty once a token has appeared at all
                         token_ban=[],         # ban the generation of some tokens
                         token_stop=[0])       # stop generation whenever you see any token here
    instruction = instruction.strip()
    input = None  # the optional "Input" field is disabled in this UI; generate_prompt handles None
    ctx = generate_prompt(instruction, input)
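    # Log GPU memory, then stream the completion: each newly decoded chunk is yielded
    # so the Gradio textbox updates live while tokens are generated.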
    gpu_info = nvmlDeviceGetMemoryInfo(gpu_h)
    print(f'vram {gpu_info.total} used {gpu_info.used} free {gpu_info.free}')
    all_tokens = []
    out_last = 0
    out_str = ''
    occurrence = {}
    state = None
    for i in range(150):  # generate at most 150 tokens
        # On the first step feed the (left-truncated) prompt; afterwards feed only the last sampled token.
        out, state = model.forward(pipeline.encode(ctx)[-ctx_limit:] if i == 0 else [token], state)
        # Penalize tokens that have already appeared (presence + frequency penalties).
        for n in occurrence:
            out[n] -= (args.alpha_presence + occurrence[n] * args.alpha_frequency)
        token = pipeline.sample_logits(out, temperature=args.temperature, top_p=args.top_p)
        if token in args.token_stop:
            break
        all_tokens += [token]
        occurrence[token] = occurrence.get(token, 0) + 1
        tmp = pipeline.decode(all_tokens[out_last:])
        if '\ufffd' not in tmp:  # yield only once the bytes decode to complete UTF-8 characters
            out_str += tmp
            yield out_str.strip()
            out_last = i + 1
    gc.collect()
    torch.cuda.empty_cache()
    yield out_str.strip()
g = gr.Interface(
    fn=evaluate,
    inputs=[
        gr.components.Textbox(lines=2, label="Instruction", value="Tell me about ravens."),
        # gr.components.Textbox(lines=2, label="Input", placeholder="none"),
        # gr.components.Slider(minimum=10, maximum=200, step=10, value=150), # token_count
        # gr.components.Slider(minimum=0.2, maximum=2.0, step=0.1, value=1.0), # temperature
        # gr.components.Slider(minimum=0, maximum=1, step=0.05, value=0.5), # top_p
        # gr.components.Slider(0.0, 1.0, step=0.1, value=0.4), # presencePenalty
        # gr.components.Slider(0.0, 1.0, step=0.1, value=0.4), # countPenalty
    ],
    outputs=[
        gr.components.Textbox(  # gr.inputs.Textbox is deprecated and was wrong for an output
            lines=5,
            label="Output",
        )
    ],
    title="🥳💬💕 - TalktoAI: chat anytime, anywhere, about anything!",
    description="🤖 - Humane AI for the benefit of everyone! AI for good, a brighter civilization! TalktoAI - Enable the future!",
    article="Powered by the RWKV Language Model",
)
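# Serve one generation at a time; up to 10 additional requests wait in the queue.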
g.queue(concurrency_count=1, max_size=10)
g.launch(share=False)