gofilipa committed on
Commit beda32e · 1 Parent(s): ff82137

updating with bedtime story code

Files changed (1)
  1. app.py +13 -6
app.py CHANGED
@@ -1,13 +1,20 @@
 import gradio as gr
+import torch
+from transformers import AutoTokenizer, AutoModelForCausalLM, TextDataset, DataCollatorForLanguageModeling, Trainer, TrainingArguments, pipeline
+from accelerate import Accelerator
+accelerator = Accelerator(cpu=True)
 
-def greet(name):
-    return "Hello " + name + "!!"
+# def greet(name):
+#     return "Hello " + name + "!!"
+
+tokenizer = accelerator.prepare(AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-125m"))
+model = accelerator.prepare(AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-125m"))
 
 def plex(input_text):
-    mnputs = tokenizer(input_text, return_tensors='pt')
-    prediction = model.generate(mnputs['input_ids'], min_length=20, max_length=150, num_return_sequences=1)
-    lines = tokenizer.decode(prediction[0]).splitlines()
-    return lines[0]
+    mnputs = tokenizer(input_text, return_tensors='pt')
+    prediction = model.generate(mnputs['input_ids'], min_length=20, max_length=150, num_return_sequences=1)
+    lines = tokenizer.decode(prediction[0]).splitlines()
+    return lines[0]
 
 iface=gr.Interface(
     fn=plex,
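
For context, a hedged sketch of how the updated app.py might be exercised end to end. The hunk above truncates the gr.Interface(...) call, so the inputs/outputs arguments and the launch() call below are illustrative assumptions, not part of the committed file; the model name, generation parameters, and the plex function are taken directly from the diff.

import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from accelerate import Accelerator

accelerator = Accelerator(cpu=True)

# Load the GPT-Neo 125M checkpoint named in the commit; prepare() simply
# returns the tokenizer unchanged since it is not a model or dataloader.
tokenizer = accelerator.prepare(AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-125m"))
model = accelerator.prepare(AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-125m"))

def plex(input_text):
    # Tokenize the prompt and generate a 20-150 token continuation.
    mnputs = tokenizer(input_text, return_tensors="pt")
    prediction = model.generate(mnputs["input_ids"], min_length=20, max_length=150, num_return_sequences=1)
    # Keep only the first line of the decoded output.
    lines = tokenizer.decode(prediction[0]).splitlines()
    return lines[0]

# Assumed wiring: one text box in, one text box out (not shown in the diff).
iface = gr.Interface(fn=plex, inputs="text", outputs="text")

if __name__ == "__main__":
    iface.launch()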