import gradio as gr
import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

model_id = "doubledsbv/Llama-3-Kafka-8B-v0.3"
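
# Load the weights once at startup in bfloat16, which roughly halves memory
# use compared to float32.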
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_id)
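
# Wrap model and tokenizer in a text-generation pipeline on the CUDA device.
# return_full_text=True keeps the prompt in the output; chat_function strips
# it off again below.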
generate_text = pipeline(
    task="text-generation",
    model=model,
    tokenizer=tokenizer,
    return_full_text=True,
    device="cuda",
)
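
# @spaces.GPU allocates GPU hardware for the duration of each call when the
# app runs on a Hugging Face ZeroGPU Space.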
@spaces.GPU
def chat_function(message, history, system_prompt, max_new_tokens, temperature):
    # history is supplied by gr.ChatInterface but not used here, so each turn
    # is answered without earlier conversation context.
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": message},
    ]
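    # Render the conversation into Llama-3's chat template as a plain string;
    # add_generation_prompt appends the assistant header so the model replies.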
    prompt = generate_text.tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
    )
    # Llama-3 ends assistant turns with <|eot_id|> in addition to the regular
    # EOS token, so both must terminate generation.
    terminators = [
        generate_text.tokenizer.eos_token_id,
        generate_text.tokenizer.convert_tokens_to_ids("<|eot_id|>"),
    ]
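    # Generate with nucleus sampling; max_new_tokens and temperature come from
    # the UI controls below.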
    outputs = generate_text(
        prompt,
        max_new_tokens=max_new_tokens,
        eos_token_id=terminators,
        do_sample=True,
        temperature=temperature,
        top_p=0.9,
    )
    # return_full_text=True keeps the prompt in the output, so slice it off
    # and return only the newly generated reply.
    return outputs[0]["generated_text"][len(prompt):]
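
# Wire chat_function into a chat UI; entries in additional_inputs are passed
# to chat_function as extra arguments after (message, history).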
gr.ChatInterface(
    chat_function,
    chatbot=gr.Chatbot(height=400),
    textbox=gr.Textbox(placeholder="Enter message here", container=False, scale=7),
    title="Llama-3-Kafka-8B-v0.3",
    description="German-focused fine-tune of Llama-3-8B.",
    additional_inputs=[
        # Default system prompt (German): "You are a friendly AI assistant".
        gr.Textbox("Du bist ein freundlicher KI-Assistent", label="System Prompt"),
        gr.Slider(512, 8192, label="Max New Tokens"),
        # Lower bound of 0.1 because do_sample=True requires a strictly
        # positive temperature (an unset slider defaults to its minimum).
        gr.Slider(0.1, 1.0, label="Temperature"),
    ],
).launch()