from functools import lru_cache

import gradio as gr
from transformers import pipeline
@lru_cache(maxsize=1)
def _get_pipeline():
    """Build the text-generation pipeline once and cache it for reuse.

    The original code re-created the pipeline (and therefore re-loaded the
    full model from disk/hub) on every chat turn, which is prohibitively
    slow; caching makes the load happen on first use only.
    """
    model_id = "surbhi47/lora-llama3"
    # Tokenizer comes from the same hub repo as the model weights.
    return pipeline("text-generation", model=model_id, tokenizer=model_id)


def chat_with_model(prompt):
    """Generate a text completion for *prompt* with the LoRA-Llama3 model.

    Parameters
    ----------
    prompt : str
        User input forwarded to the text-generation pipeline.

    Returns
    -------
    str
        The generated text. NOTE: per the transformers text-generation
        pipeline contract, this includes the prompt itself as a prefix.
    """
    model_pipeline = _get_pipeline()
    # max_length=50 caps TOTAL tokens (prompt + completion); long prompts
    # leave little room for output — consider max_new_tokens instead.
    response = model_pipeline(prompt, max_length=50)
    return response[0]['generated_text']
# Gradio UI: a single text box in, generated text out.
interface = gr.Interface(
    fn=chat_with_model,
    inputs="text",
    outputs="text",
    title="Chat with Lora-Llama3",
)

# Launch the server only when executed as a script, so importing this
# module (e.g. from tests or other tooling) has no side effects.
if __name__ == "__main__":
    interface.launch()