import os

import torch
import gradio as gr
from transformers import AutoModelForCausalLM

from tokenization_yi import YiTokenizer

# Load the model and tokenizer
model_name = "01-ai/Yi-34B-200K"
model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)

tokenizer_dir = "Tonic1/YiTonic"
vocab_file = os.path.join(tokenizer_dir, "tokenizer.model")
tokenizer_json = os.path.join(tokenizer_dir, "tokenizer.json")
tokenizer_config = os.path.join(tokenizer_dir, "tokenizer_config.json")
tokenizer = YiTokenizer(vocab_file=vocab_file)
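# Note: a 34B-parameter model in full precision needs far more memory than most
# single GPUs have. A common approach (an assumption here, not part of the
# original app; requires the `accelerate` package) is half precision with
# automatic device placement:
#   model = AutoModelForCausalLM.from_pretrained(
#       model_name, torch_dtype=torch.bfloat16, device_map="auto", trust_remote_code=True
#   )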
def run(message, chat_history, system_prompt, max_new_tokens=1024, temperature=0.3, top_p=0.9, top_k=50):
    prompt = get_prompt(message, chat_history, system_prompt)
    # Encode the prompt and move it to the model's device
    input_ids = tokenizer.encode(prompt, return_tensors='pt').to(model.device)
    # Generate a response; do_sample=True is required for temperature/top_p/top_k
    # to have any effect (otherwise generation is greedy and they are ignored)
    response_ids = model.generate(
        input_ids,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=temperature,  # Controls randomness. Lower values make text more deterministic.
        top_p=top_p,              # Nucleus sampling: higher values allow more diversity.
        top_k=top_k,              # Top-k sampling: limits the number of top tokens considered.
        pad_token_id=tokenizer.eos_token_id,
    )
    # Decode only the newly generated tokens, skipping the prompt
    response = tokenizer.decode(response_ids[0, input_ids.shape[-1]:], skip_special_tokens=True)
    # Yield once so generate() below can consume this as a generator
    # (no token-by-token streaming; the full response arrives in one chunk)
    yield response
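# Example of calling run() directly, outside the Gradio app (hypothetical usage;
# DEFAULT_SYSTEM_PROMPT is defined below):
#   response = next(run("What is nucleus sampling?", [], DEFAULT_SYSTEM_PROMPT))
#   print(response)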
def get_prompt(message, chat_history, system_prompt):
    texts = [f"<s>[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\n"]
    do_strip = False
    for user_input, response in chat_history:
        user_input = user_input.strip() if do_strip else user_input
        do_strip = True
        texts.append(f"{user_input} [/INST] {response.strip()} </s><s>[INST] ")
    message = message.strip() if do_strip else message
    texts.append(f"{message} [/INST]")
    return ''.join(texts)
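# get_prompt() renders the Llama-2 chat format. For a fresh chat it produces:
#   <s>[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\n{message} [/INST]
# Note this is the Llama-2 template, not Yi's own chat template; whether
# Yi-34B-200K follows it reliably is an assumption of this app.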
DEFAULT_SYSTEM_PROMPT = """
You are Yi, an AI assistant. You are moderately polite and give only true information.
You carefully provide accurate, factual, thoughtful, nuanced answers, and are brilliant at reasoning.
If you think there might not be a correct answer, you say so. Since you are autoregressive,
each token you produce is another opportunity to use computation, so you always spend a few sentences explaining background context,
assumptions, and step-by-step thinking BEFORE you try to answer a question.
"""
MAX_MAX_NEW_TOKENS = 200000
DEFAULT_MAX_NEW_TOKENS = 100000
MAX_INPUT_TOKEN_LENGTH = 100000
DESCRIPTION = "# [Yi-34B-200K](https://huggingface.co/01-ai/Yi-34B-200K)"
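# These limits are sized against the model's 200K-token context window:
# MAX_INPUT_TOKEN_LENGTH bounds the accumulated prompt, MAX_MAX_NEW_TOKENS
# bounds how much the model may generate in one request.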
def clear_and_save_textbox(message):
    # Clear the textbox and stash the message in saved_input
    return '', message

def display_input(message, history=None):
    # Append the user turn with an empty response placeholder
    history = history or []
    history.append((message, ''))
    return history

def delete_prev_fn(history=None):
    # Remove the last (message, response) pair and recover the message
    history = history or []
    try:
        message, _ = history.pop()
    except IndexError:
        message = ''
    return history, message or ''
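# Example: delete_prev_fn([('Hi', 'Hello!')]) returns ([], 'Hi'), restoring the
# last user message so the Retry/Undo buttons can reuse it.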
def generate(message, history_with_input, system_prompt, max_new_tokens, temperature, top_p, top_k):
    if max_new_tokens > MAX_MAX_NEW_TOKENS:
        raise ValueError(f'max_new_tokens must not exceed {MAX_MAX_NEW_TOKENS}')
    history = history_with_input[:-1]
    generator = run(message, history, system_prompt, max_new_tokens, temperature, top_p, top_k)
    try:
        first_response = next(generator)
        yield history + [(message, first_response)]
    except StopIteration:
        yield history + [(message, '')]
    for response in generator:
        yield history + [(message, response)]
def process_example(message):
    generator = generate(message, [], DEFAULT_SYSTEM_PROMPT, 1024, 1, 0.95, 50)
    # Exhaust the generator; the last yielded value is the final chat history
    for x in generator:
        pass
    return '', x
def check_input_token_length(message, chat_history, system_prompt):
    # Rough length check: counts characters in the message plus chat turns,
    # not actual tokens
    input_token_length = len(message) + len(chat_history)
    if input_token_length > MAX_INPUT_TOKEN_LENGTH:
        raise gr.Error(f"The accumulated input is too long ({input_token_length} > {MAX_INPUT_TOKEN_LENGTH}). Clear your chat history and try again.")
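# The character/turn count above is only a cheap proxy for token length. A
# token-accurate (but slower) check could encode the full rendered prompt:
#   input_token_length = len(tokenizer.encode(get_prompt(message, chat_history, system_prompt)))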
with gr.Blocks(theme='ParityError/Anime') as demo:
    gr.Markdown(DESCRIPTION)
    with gr.Group():
        chatbot = gr.Chatbot(label='Yi-34B-200K')
        with gr.Row():
            textbox = gr.Textbox(
                container=False,
                show_label=False,
                placeholder='Hi, Yi',
                scale=10,
            )
            submit_button = gr.Button('Submit', variant='primary', scale=1, min_width=0)
    with gr.Row():
        retry_button = gr.Button('Retry', variant='secondary')
        undo_button = gr.Button('Undo', variant='secondary')
        clear_button = gr.Button('Clear', variant='secondary')
    saved_input = gr.State()
    with gr.Accordion(label='Advanced options', open=False):
        system_prompt = gr.Textbox(label='System prompt', value=DEFAULT_SYSTEM_PROMPT, lines=5, interactive=False)
        max_new_tokens = gr.Slider(label='Max New Tokens', minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS)
        temperature = gr.Slider(label='Temperature', minimum=0.1, maximum=4.0, step=0.1, value=0.1)
        top_p = gr.Slider(label='Top-P (nucleus sampling)', minimum=0.05, maximum=1.0, step=0.05, value=0.9)
        top_k = gr.Slider(label='Top-K', minimum=1, maximum=1000, step=1, value=10)
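    # Event chain for both Enter and the Submit button: save and clear the
    # textbox, echo the user turn into the chatbot, validate input length, then
    # generate. .success() only fires if check_input_token_length did not raise.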
    textbox.submit(
        fn=clear_and_save_textbox,
        inputs=textbox,
        outputs=[textbox, saved_input],
        api_name=False,
        queue=False,
    ).then(
        fn=display_input,
        inputs=[saved_input, chatbot],
        outputs=chatbot,
        api_name=False,
        queue=False,
    ).then(
        fn=check_input_token_length,
        inputs=[saved_input, chatbot, system_prompt],
        api_name=False,
        queue=False,
    ).success(
        fn=generate,
        inputs=[
            saved_input,
            chatbot,
            system_prompt,
            max_new_tokens,
            temperature,
            top_p,
            top_k,
        ],
        outputs=chatbot,
        api_name=False,
    )
    button_event_preprocess = submit_button.click(
        fn=clear_and_save_textbox,
        inputs=textbox,
        outputs=[textbox, saved_input],
        api_name=False,
        queue=False,
    ).then(
        fn=display_input,
        inputs=[saved_input, chatbot],
        outputs=chatbot,
        api_name=False,
        queue=False,
    ).then(
        fn=check_input_token_length,
        inputs=[saved_input, chatbot, system_prompt],
        api_name=False,
        queue=False,
    ).success(
        fn=generate,
        inputs=[
            saved_input,
            chatbot,
            system_prompt,
            max_new_tokens,
            temperature,
            top_p,
            top_k,
        ],
        outputs=chatbot,
        api_name=False,
    )
    retry_button.click(
        fn=delete_prev_fn,
        inputs=chatbot,
        outputs=[chatbot, saved_input],
        api_name=False,
        queue=False,
    ).then(
        fn=display_input,
        inputs=[saved_input, chatbot],
        outputs=chatbot,
        api_name=False,
        queue=False,
    ).then(
        fn=generate,
        inputs=[
            saved_input,
            chatbot,
            system_prompt,
            max_new_tokens,
            temperature,
            top_p,
            top_k,
        ],
        outputs=chatbot,
        api_name=False,
    )
    undo_button.click(
        fn=delete_prev_fn,
        inputs=chatbot,
        outputs=[chatbot, saved_input],
        api_name=False,
        queue=False,
    ).then(
        fn=lambda x: x,
        inputs=[saved_input],
        outputs=textbox,
        api_name=False,
        queue=False,
    )
    clear_button.click(
        fn=lambda: ([], ''),
        outputs=[chatbot, saved_input],
        queue=False,
        api_name=False,
    )

demo.queue(max_size=32).launch(show_api=False)
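# Launch notes: queue(max_size=32) caps the number of requests waiting in the
# queue, and show_api=False hides Gradio's auto-generated API docs page.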