DHEIVER committed on
Commit
0622b2f
1 Parent(s): c217cbb

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -0
app.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from transformers import AutoTokenizer, AutoModelForCausalLM
3
+ import torch
4
+
5
+ # Load the tokenizer and model
6
+ tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
7
+ model = AutoModelForCausalLM.from_pretrained("bigscience/bloom-560m")
8
+
9
+ # Define a function to generate text
10
+ def generate_text(prompt):
11
+ input_ids = tokenizer.encode(prompt, return_tensors="pt")
12
+ output = model.generate(input_ids, max_length=100, num_return_sequences=1)
13
+ generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
14
+ return generated_text
15
+
16
+ # Create a Gradio interface
17
+ interface = gr.Interface(
18
+ fn=generate_text,
19
+ inputs=gr.Textbox("text", label="Enter your prompt here:"),
20
+ outputs=gr.Textbox("text", label="Generated Text:")
21
+ )
22
+
23
+ # Launch the interface
24
+ interface.launch()