dzmltzack committed on
Commit
91f35a7
1 Parent(s): a6b5c53

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -14
app.py CHANGED
@@ -1,28 +1,30 @@
1
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the tokenizer and model once, at module import time.
tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-large")

# device_map="auto" lets accelerate place/offload the weights; the local
# "cache" and "offload" folders hold downloaded and offloaded tensors.
model = AutoModelForSeq2SeqLM.from_pretrained(
    "google/flan-t5-large",
    device_map="auto",
    cache_dir="cache",
    offload_folder="offload",
)
6
 
7
def generate(input_text):
    """Generate a Flan-T5 response for *input_text* (max 70 tokens).

    Returns the decoded generation with special tokens stripped.
    """
    # tokenizer(...) returns a BatchEncoding (dict-like); model.generate
    # expects the input_ids tensor itself — the original passed the whole
    # dict positionally, which fails at generation time.
    input_ids = tokenizer(input_text, return_tensors="pt").input_ids
    output = model.generate(input_ids, max_length=70)
    return tokenizer.decode(output[0], skip_special_tokens=True)
11
 
12
- #@title GUI
13
- import gradio as gr
14
 
15
- title = "Flan T5 :)"
 
16
 
17
def inference(text):
    """Gradio callback — forward the prompt straight to generate()."""
    result = generate(text)
    return result
 
19
 
20
# Build and launch the Gradio UI: one input textbox, one output textbox.
io = gr.Interface(
    inference,
    gr.Textbox(lines=3),
    outputs=[gr.Textbox(lines=3, label="Flan T5")],
    title=title,
)
io.launch(share=False, debug=False)
 
1
import os

import gradio as gr
import numpy as np
import torch
from transformers import pipeline

# Text2text pipeline backed by google/flan-t5-large. device_map="auto"
# (via accelerate) places the weights on whatever devices are available.
# NOTE: the original had a duplicate `import torch`, removed here; `os`
# and `np` are kept even though unused in this view (rest of file unseen).
pipe_flan = pipeline("text2text-generation", model="google/flan-t5-large", model_kwargs={"device_map": "auto"})
 
 
 
10
 
11
# Gradio UI metadata.
examples = []  # no preloaded example prompts
title = "Flan T5 and Vanilla T5"
description = ""
# NOTE(review): the title mentions "Vanilla T5" but only a Flan-T5 output
# is wired into the interface — confirm the title is intentional.
15
 
16
def inference(text):
    """Run the Flan-T5 pipeline on *text*; return the generation as a
    one-element list (one entry per output component)."""
    generated = pipe_flan(text, max_length=100)
    return [generated[0]["generated_text"]]
19
 
20
# Build and launch the Gradio UI for the pipeline-backed inference fn.
flan_output = gr.Textbox(lines=3, label="Flan T5")
io = gr.Interface(
    inference,
    gr.Textbox(lines=3),
    outputs=[flan_output],
    title=title,
    description=description,
    examples=examples,
)
io.launch()