import gradio as gr
import torch
from transformers import pipeline


def generate_text(prompt):
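    """Generate an assistant reply for a single user prompt."""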
    # Build a chat-style conversation and render it with the model's chat template.
    messages = [
        {"role": "system", "content": "You are a code assistant"},
        {"role": "user", "content": prompt},
    ]
    formatted_prompt = pipe.tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
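    # For this model the rendered prompt roughly follows a Zephyr-style layout
    # (an assumption worth checking against the tokenizer's chat template), e.g.:
    #   <|system|>
    #   You are a code assistant</s>
    #   <|user|>
    #   ...prompt...</s>
    #   <|assistant|>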
    outputs = pipe(
        formatted_prompt, max_new_tokens=256, do_sample=True,
        temperature=0.7, top_k=50, top_p=0.95,
    )
    # The pipeline output contains the prompt followed by the completion, so slice
    # off the prompt and return only the newly generated reply.
    generated_text = outputs[0]["generated_text"][len(formatted_prompt):]
    return generated_text

# Load the TinyLlama chat model once at startup; device_map="auto" places it on a
# GPU when one is available, and bfloat16 keeps the memory footprint small.
pipe = pipeline(
    "text-generation",
    model="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
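
# Optional smoke test outside the UI (hypothetical prompt, uncomment to try):
# print(generate_text("Write a Python function that reverses a string."))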

# Minimal text-in / text-out UI; live=True re-runs generation as the user types.
iface = gr.Interface(
    fn=generate_text,
    inputs="text",
    outputs="text",
    live=True,
    title="Chatbot Assistant",
)
iface.launch()
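# launch() serves the app locally (by default at http://127.0.0.1:7860); pass
# share=True if you want Gradio to create a temporary public link.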