wastella committed
Commit
afd794c
1 Parent(s): 2925008
Files changed (3)
  1. app.py +17 -0
  2. flagged/log.csv +2 -0
  3. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,17 @@
+ import gradio as gr
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
+ tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
+
+ def generate(text):
+     input_ids = tokenizer(text, return_tensors="pt").input_ids
+     gen_tokens = model.generate(input_ids, do_sample=True, temperature=0.9, max_length=100)
+     return tokenizer.batch_decode(gen_tokens)[0]
+
+ gradio_ui = gr.Interface(fn=generate, title="Use GPT-J:", description="Put your text into the box below, and have the open-source GPT-J model generate a continuation (up to 100 tokens including your prompt)!", inputs=gr.inputs.Textbox(label="Put your text here!"), outputs=gr.outputs.Textbox(label="Your text:"))
+ gradio_ui.launch()
+
+
+
+
+
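As committed, app.py loads the full-precision GPT-J checkpoint, which needs on the order of 24 GB of memory. A minimal sketch of a lower-memory variant (not part of this commit) that pulls the float16 branch of the same checkpoint and runs generation on a GPU, assuming CUDA is available:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hypothetical lower-memory setup: fp16 weights branch instead of the default fp32 weights.
model = AutoModelForCausalLM.from_pretrained(
    "EleutherAI/gpt-j-6B",
    revision="float16",
    torch_dtype=torch.float16,
).to("cuda")
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")

def generate(text):
    # Move inputs to the same device as the model before generating.
    input_ids = tokenizer(text, return_tensors="pt").input_ids.to("cuda")
    gen_tokens = model.generate(input_ids, do_sample=True, temperature=0.9, max_length=100)
    return tokenizer.batch_decode(gen_tokens)[0]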
flagged/log.csv ADDED
@@ -0,0 +1,2 @@
+ name,output,flag,username,timestamp
+ Hello,Hello Hello!!,,,2023-03-22 16:51:10.476201
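The flagging log that Gradio writes is plain CSV with the header shown above; a small sketch (not part of this commit, assuming the file lives at flagged/log.csv next to the app) for inspecting flagged examples:

import csv

# Columns follow the header row: name, output, flag, username, timestamp.
with open("flagged/log.csv", newline="") as f:
    for row in csv.DictReader(f):
        print(f"{row['timestamp']}: {row['name']!r} -> {row['output']!r}")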
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ gradio
+ transformers
+ torch