import gradio as gr
import torch
import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer
from threading import Thread
# Loading the tokenizer and model from Hugging Face's model hub.
if torch.cuda.is_available():
    tokenizer = AutoTokenizer.from_pretrained("0x7194633/fialka-13B-v4")
    model = AutoModelForCausalLM.from_pretrained("0x7194633/fialka-13B-v4", load_in_8bit=True, device_map="auto")
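# Note: load_in_8bit relies on the bitsandbytes package (newer transformers versions
# prefer quantization_config=BitsAndBytesConfig(load_in_8bit=True)); the tokenizer and
# model are only defined when CUDA is available, since this Space assumes a GPU runtime.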
# Defining a custom stopping criteria class for the model's text generation.
class StopOnTokens(StoppingCriteria):
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        stop_ids = [2]  # IDs of tokens where the generation should stop.
        for stop_id in stop_ids:
            if input_ids[0][-1] == stop_id:  # Checking if the last generated token is a stop token.
                return True
        return False
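# Token ID 2 is assumed to be the </s> end-of-sequence token in this model's tokenizer,
# matching the </s> separators used in the prompt format below.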
# Function to generate model predictions.
@spaces.GPU(duration=420)
def predict(message, history):
    history_transformer_format = history + [[message, ""]]
    stop = StopOnTokens()
    # Formatting the input for the model.
    # System prompt (Russian): "You are Fialka - the smartest neural assistant, created by 0x7o."
    messages = "<|system|>\nТы Фиалка - самый умный нейронный помощник, созданный 0x7o.</s>\n"
    messages += "</s>".join(["</s>".join(["\n<|user|>" + item[0], "\n<|assistant|>" + item[1]])
                             for item in history_transformer_format])
    model_inputs = tokenizer([messages], return_tensors="pt").to("cuda")
    streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        model_inputs,
        streamer=streamer,
        max_new_tokens=1024,
        do_sample=True,
        top_p=0.95,
        top_k=50,
        temperature=0.2,
        repetition_penalty=1.2,
        num_beams=1,
        stopping_criteria=StoppingCriteriaList([stop])
    )
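    # model.generate blocks until generation finishes, so it runs in a background
    # thread while the streamer yields tokens to the interface as they arrive.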
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()  # Starting the generation in a separate thread.
    partial_message = ""
    for new_token in streamer:
        partial_message += new_token
        if '</s>' in partial_message:  # Breaking the loop if the stop token is generated.
            break
        yield partial_message
# Setting up the Gradio chat interface.
gr.ChatInterface(
    predict,
    title="Fialka 13B v4",
    # Description (Russian): "Attention! All responses are generated and may contain inaccurate information."
    description="Внимание! Все ответы сгенерированы и могут содержать неточную информацию.",
    # Example prompts (Russian): "How do I cook fish?", "Who is the president of the USA?"
    examples=['Как приготовить рыбу?', 'Кто президент США?']
).launch()  # Launching the web interface.