Files changed (1) hide show
  1. test01.py +27 -0
test01.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# Checkpoint identifier for the story-generation model.
model_name = "Reverb/Mistral-7B-LoreWeaver"

# Load the tokenizer and weights once at startup so every request reuses them.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Shared text-generation pipeline used by the Gradio handler below.
generator = pipeline('text-generation', model=model, tokenizer=tokenizer)
def generate_story(prompt):
    """Generate a narrative continuation of *prompt* with the LoreWeaver model.

    Args:
        prompt: Free-form seed text entered by the user.

    Returns:
        The generated text; HF text-generation pipelines echo the prompt
        at the start of ``generated_text`` by default.
    """
    # Use max_new_tokens so the cap applies only to *generated* tokens.
    # The previous max_length=200 counted the prompt as well, so a long
    # prompt left little or no room for new text (and could raise an error).
    responses = generator(prompt, max_new_tokens=200, num_return_sequences=1)
    return responses[0]['generated_text']
# Assemble the web UI: one multi-line prompt box in, one story box out.
prompt_box = gr.Textbox(lines=5, placeholder="Enter your prompt here...")
story_box = gr.Textbox(label="Generated Story")

iface = gr.Interface(
    fn=generate_story,
    inputs=prompt_box,
    outputs=story_box,
    title="Mistral-7B-LoreWeaver Story Generator",
    description="Enter a prompt to generate a narrative text using the Mistral-7B-LoreWeaver model.",
)

# Start the local Gradio server.
iface.launch()