# Hugging Face Space: Mistral-7B-LoreWeaver story generator (app.py)
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
# Fetch the LoreWeaver model weights and matching tokenizer from the Hub.
model_name = "Reverb/Mistral-7B-LoreWeaver"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Wrap the loaded model and tokenizer in a ready-to-call generation pipeline.
generator = pipeline(
    task="text-generation",
    model=model,
    tokenizer=tokenizer,
)
def generate_story(prompt):
    """Generate a narrative continuation for *prompt*.

    Args:
        prompt: Free-form seed text for the story.

    Returns:
        The generated text (prompt followed by the model's continuation),
        taken from the first (and only) returned sequence.
    """
    # max_new_tokens bounds only the continuation. The original
    # max_length=200 counted the prompt tokens as well, so a long prompt
    # left little or no room for the model to generate anything new.
    responses = generator(prompt, max_new_tokens=200, num_return_sequences=1)
    return responses[0]['generated_text']
# Define the Gradio interface: a multi-line prompt box in, generated story out.
iface = gr.Interface(
    fn=generate_story,
    inputs=gr.Textbox(lines=5, placeholder="Enter your prompt here..."),
    outputs=gr.Textbox(label="Generated Story"),
    title="Mistral-7B-LoreWeaver Story Generator",
    description="Enter a prompt to generate a narrative text using the Mistral-7B-LoreWeaver model."
)

# Launch the web app. (The stray " |" that trailed this line was scrape
# residue and a SyntaxError; it has been removed.)
iface.launch()