import argparse
import os
from typing import Iterator

import gradio as gr

# from dotenv import load_dotenv
from llama2_wrapper import LLAMA2_WRAPPER
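
# Command-line flags: model path, system prompt, and the upper bound on new tokens.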
parser = argparse.ArgumentParser()

DEFAULT_SYSTEM_PROMPT = "You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."

parser.add_argument('--model_path', type=str, required=False,
                    default='76437bc4e8bea417641aaa076508098a7158e664c1cecfabfa41df497a27f98c',
                    help='Path to the model weights.')
parser.add_argument('--system_prompt', type=str, required=False, default=DEFAULT_SYSTEM_PROMPT,
                    help='System prompt for the chat. Defaults to DEFAULT_SYSTEM_PROMPT.')
parser.add_argument('--max_max_new_tokens', type=int, default=2048, metavar='NUMBER',
                    help='maximum new tokens (default: 2048)')
FLAGS = parser.parse_args()
DEFAULT_SYSTEM_PROMPT = FLAGS.system_prompt
MAX_MAX_NEW_TOKENS = FLAGS.max_max_new_tokens
DEFAULT_MAX_NEW_TOKENS = 1024
MAX_INPUT_TOKEN_LENGTH = 4000

MODEL_PATH = FLAGS.model_path
assert MODEL_PATH is not None, f"MODEL_PATH is required, got: {MODEL_PATH}"

LOAD_IN_8BIT = False
LOAD_IN_4BIT = True
LLAMA_CPP = True
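
# Select the inference backend: LLAMA_CPP=True runs the GGML model on CPU via
# llama.cpp; otherwise fall back to torch/transformers and check for a GPU.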
if LLAMA_CPP:
    print("Running on CPU with llama.cpp.")
else:
    import torch

    if torch.cuda.is_available():
        print("Running on GPU with torch transformers.")
    else:
        print("CUDA not found.")
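
# Build the wrapper config and load the tokenizer and model once at startup.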
config = {
    "model_name": MODEL_PATH,
    "load_in_8bit": LOAD_IN_8BIT,
    "load_in_4bit": LOAD_IN_4BIT,
    "llama_cpp": LLAMA_CPP,
    "MAX_INPUT_TOKEN_LENGTH": MAX_INPUT_TOKEN_LENGTH,
}
llama2_wrapper = LLAMA2_WRAPPER(config)
llama2_wrapper.init_tokenizer()
llama2_wrapper.init_model()

DESCRIPTION = """
# Llama2-Chinese-7b-webui

This is a web inference UI for [Llama2-Chinese-2-7b](https://github.com/FlagAlpha/Llama2-Chinese).

- Supported model: [Llama-2-GGML](https://huggingface.co/FlagAlpha/Llama2-Chinese-7b-Chat-GGML)
- Supported backends:
  - CPU (at least 6 GB RAM), Mac/AMD
"""


# Gradio callback helpers that shuttle the user's message between the textbox,
# the hidden saved_input state, and the chatbot history.
def clear_and_save_textbox(message: str) -> tuple[str, str]:
    return "", message


def display_input(
    message: str, history: list[tuple[str, str]]
) -> list[tuple[str, str]]:
    history.append((message, ""))
    return history


def delete_prev_fn(
    history: list[tuple[str, str]],
) -> tuple[list[tuple[str, str]], str]:
    try:
        message, _ = history.pop()
    except IndexError:
        message = ""
    return history, message or ""
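

# Streaming generation: yields successive snapshots of the chat history so the
# Chatbot component updates as tokens arrive from llama2_wrapper.run().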
def generate(
    message: str,
    history_with_input: list[tuple[str, str]],
    system_prompt: str,
    max_new_tokens: int,
    temperature: float,
    top_p: float,
    top_k: int,
) -> Iterator[list[tuple[str, str]]]:
    if max_new_tokens > MAX_MAX_NEW_TOKENS:
        raise ValueError(f"max_new_tokens must not exceed {MAX_MAX_NEW_TOKENS}")
    history = history_with_input[:-1]
    generator = llama2_wrapper.run(
        message, history, system_prompt, max_new_tokens, temperature, top_p, top_k
    )
    try:
        first_response = next(generator)
        yield history + [(message, first_response)]
    except StopIteration:
        yield history + [(message, "")]
    for response in generator:
        yield history + [(message, response)]


def process_example(message: str) -> tuple[str, list[tuple[str, str]]]:
    generator = generate(message, [], DEFAULT_SYSTEM_PROMPT, 1024, 1, 0.95, 50)
    for x in generator:
        pass
    return "", x
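

# Raise a visible gr.Error if message + history + system prompt would overflow
# the model's context window (MAX_INPUT_TOKEN_LENGTH tokens).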
def check_input_token_length(
    message: str, chat_history: list[tuple[str, str]], system_prompt: str
) -> None:
    input_token_length = llama2_wrapper.get_input_token_length(
        message, chat_history, system_prompt
    )
    if input_token_length > MAX_INPUT_TOKEN_LENGTH:
        raise gr.Error(
            f"The accumulated input is too long ({input_token_length} > {MAX_INPUT_TOKEN_LENGTH}). Clear your chat history and try again."
        )
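

# Build the Gradio UI: chat area, retry/undo/clear controls, and an accordion
# with the sampling options.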
with gr.Blocks(css="style.css") as demo:
    gr.Markdown(DESCRIPTION)

    with gr.Group():
        chatbot = gr.Chatbot(label="Chatbot")
        with gr.Row():
            textbox = gr.Textbox(
                container=False,
                show_label=False,
                placeholder="Type a message...",
                scale=10,
            )
            submit_button = gr.Button(
                "Submit", variant="primary", scale=1, min_width=0
            )
    with gr.Row():
        retry_button = gr.Button("🔄 Retry", variant="secondary")
        undo_button = gr.Button("↩️ Undo", variant="secondary")
        clear_button = gr.Button("🗑️ Clear", variant="secondary")

    saved_input = gr.State()

    with gr.Accordion(label="Advanced options", open=False):
        system_prompt = gr.Textbox(
            label="System prompt", value=DEFAULT_SYSTEM_PROMPT, lines=6
        )
        max_new_tokens = gr.Slider(
            label="Max new tokens",
            minimum=1,
            maximum=MAX_MAX_NEW_TOKENS,
            step=1,
            value=DEFAULT_MAX_NEW_TOKENS,
        )
        temperature = gr.Slider(
            label="Temperature",
            minimum=0.1,
            maximum=4.0,
            step=0.1,
            value=1.0,
        )
        top_p = gr.Slider(
            label="Top-p (nucleus sampling)",
            minimum=0.05,
            maximum=1.0,
            step=0.05,
            value=0.95,
        )
        top_k = gr.Slider(
            label="Top-k",
            minimum=1,
            maximum=1000,
            step=1,
            value=50,
        )

    gr.Examples(
        examples=[
            "Hello there! How are you doing?",
            "Can you explain briefly to me what is the Python programming language?",
        ],
        inputs=textbox,
        outputs=[textbox, chatbot],
        fn=process_example,
        cache_examples=True,
    )
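
    # Wire events: submitting the textbox clears it, echoes the message into the
    # chat, checks the prompt length, then streams the model's reply.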
    textbox.submit(
        fn=clear_and_save_textbox,
        inputs=textbox,
        outputs=[textbox, saved_input],
        api_name=False,
        queue=False,
    ).then(
        fn=display_input,
        inputs=[saved_input, chatbot],
        outputs=chatbot,
        api_name=False,
        queue=False,
    ).then(
        fn=check_input_token_length,
        inputs=[saved_input, chatbot, system_prompt],
        api_name=False,
        queue=False,
    ).success(
        fn=generate,
        inputs=[
            saved_input,
            chatbot,
            system_prompt,
            max_new_tokens,
            temperature,
            top_p,
            top_k,
        ],
        outputs=chatbot,
        api_name=False,
    )
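
    # The Submit button runs the same preprocessing and generation chain as
    # submitting the textbox.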
    button_event_preprocess = (
        submit_button.click(
            fn=clear_and_save_textbox,
            inputs=textbox,
            outputs=[textbox, saved_input],
            api_name=False,
            queue=False,
        )
        .then(
            fn=display_input,
            inputs=[saved_input, chatbot],
            outputs=chatbot,
            api_name=False,
            queue=False,
        )
        .then(
            fn=check_input_token_length,
            inputs=[saved_input, chatbot, system_prompt],
            api_name=False,
            queue=False,
        )
        .success(
            fn=generate,
            inputs=[
                saved_input,
                chatbot,
                system_prompt,
                max_new_tokens,
                temperature,
                top_p,
                top_k,
            ],
            outputs=chatbot,
            api_name=False,
        )
    )
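
    # Retry: drop the last exchange, re-display the same message, and regenerate.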
    retry_button.click(
        fn=delete_prev_fn,
        inputs=chatbot,
        outputs=[chatbot, saved_input],
        api_name=False,
        queue=False,
    ).then(
        fn=display_input,
        inputs=[saved_input, chatbot],
        outputs=chatbot,
        api_name=False,
        queue=False,
    ).then(
        fn=generate,
        inputs=[
            saved_input,
            chatbot,
            system_prompt,
            max_new_tokens,
            temperature,
            top_p,
            top_k,
        ],
        outputs=chatbot,
        api_name=False,
    )
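
    # Undo: remove the last exchange and put the message back into the textbox.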
    undo_button.click(
        fn=delete_prev_fn,
        inputs=chatbot,
        outputs=[chatbot, saved_input],
        api_name=False,
        queue=False,
    ).then(
        fn=lambda x: x,
        inputs=[saved_input],
        outputs=textbox,
        api_name=False,
        queue=False,
    )
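
    # Clear: wipe the chat history and the saved input.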
    clear_button.click(
        fn=lambda: ([], ""),
        outputs=[chatbot, saved_input],
        queue=False,
        api_name=False,
    )
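
# Enable the request queue and serve on all interfaces at port 8090.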
demo.queue(max_size=20).launch(server_name="0.0.0.0", server_port=8090)