# Hugging Face Space: Gradio story-generation app
# (removed scraped page-status text "Spaces: Sleeping" that was not part of the source)
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# Load the model and tokenizer once at import time.
# NOTE(review): this is a 7B-parameter model — expect a long download/load
# on first run and substantial RAM/VRAM use.
model_name = "Reverb/Mistral-7B-LoreWeaver"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Initialize the text-generation pipeline around the loaded model/tokenizer
generator = pipeline('text-generation', model=model, tokenizer=tokenizer)
def generate_story(prompt):
    """Generate a narrative continuation for *prompt* with the loaded pipeline.

    Args:
        prompt: Seed text the story should continue from.

    Returns:
        The generated text for the single returned sequence. Per the
        transformers pipeline contract, ``max_length=200`` caps the total
        length (prompt tokens included) and the prompt is echoed at the
        start of ``generated_text``.
    """
    # num_return_sequences=1 -> responses is a one-element list of dicts
    responses = generator(prompt, max_length=200, num_return_sequences=1)
    return responses[0]['generated_text']
# Define the Gradio interface: a multi-line prompt box in, generated story out.
iface = gr.Interface(
    fn=generate_story,
    inputs=gr.Textbox(lines=5, placeholder="Enter your prompt here..."),
    outputs=gr.Textbox(label="Generated Story"),
    title="Mistral-7B-LoreWeaver Story Generator",
    description="Enter a prompt to generate a narrative text using the Mistral-7B-LoreWeaver model.",
)

# Launch the interface (blocks and serves the app; default host/port).
iface.launch()