gtmio / app.py
import gradio as gr
from transformers import pipeline
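# Assumed Space setup (not part of the original file): dependencies such as
# transformers and torch are expected to be declared in a requirements.txt
# next to this app.py so the Space can install them.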
# Load the llama2 LLM model
model = pipeline("text-generation", model="llamalanguage/llama2", tokenizer="llamalanguage/llama2")
# Define the chat function that uses the LLM model
def chat_interface(input_text):
    # Generate a completion and return the generated text as a single string
    # (returning a list of words would render as a Python list in the Textbox)
    response = model(input_text, max_length=100, return_full_text=True)[0]["generated_text"]
    return response
# Create the Gradio interface
iface = gr.Interface(
    fn=chat_interface,
    inputs=gr.Textbox(lines=2, label="Input Text"),    # gr.inputs/gr.outputs are removed in current Gradio
    outputs=gr.Textbox(label="Output Text"),
    title="Chat Interface",
    description="Enter text and get a response using the LLM model",
    live=True,  # Enable live updates
)
# Launch the Gradio interface (share=True requests a public link when run locally;
# Hugging Face Spaces serves the app directly and ignores this flag)
iface.launch(share=True)
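# A minimal sketch of querying the running app from a separate script with
# gradio_client. Assumptions not stated in the original: the app is reachable
# at the local URL that launch() prints, and "/predict" is the default
# api_name for a gr.Interface endpoint.
#
#   from gradio_client import Client
#   client = Client("http://127.0.0.1:7860")
#   print(client.predict("Hello, how are you?", api_name="/predict"))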