# NOTE: Hugging Face file-viewer chrome captured along with the source
# (not part of app.py): uploaded by Satyam-Singh — "Update app.py" —
# commit e89e37f (verified) — 707 Bytes.
import gradio as gr
from transformers import pipeline
# Load the Meta-Llama-3.1-8B-Instruct-GGUF model
# NOTE(review): this repo hosts GGUF-quantized weights; transformers'
# pipeline() generally needs a gguf_file=... argument (plus the `gguf`
# package) to load a GGUF repo — confirm this loads as written, or point
# model_name at a standard safetensors checkpoint instead.
model_name = "lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF"
model = pipeline("text-generation", model=model_name, device=-1) # -1 for CPU
# Define the Gradio interface
def generate_text(prompt):
    """Run the text-generation pipeline on *prompt* and return the generated text.

    The pipeline returns a list of dicts (one per returned sequence); only the
    first sequence's "generated_text" field is used.
    """
    results = model(prompt)
    return results[0]["generated_text"]
# Build the Gradio UI: a single prompt box in, generated text out.
prompt_box = gr.Textbox(label="Prompt")
output_box = gr.Textbox(label="Generated Text")
iface = gr.Interface(
    fn=generate_text,
    inputs=prompt_box,
    outputs=output_box,
    title="Meta-Llama-3.1-8B-Instruct-GGUF Text Generation",
    description="Enter a prompt to generate text using the Meta-Llama-3.1-8B-Instruct-GGUF model.",
)
# Start the web server for the demo.
iface.launch()