Ali-C137 committed on
Commit e47ad50
1 Parent(s): a912e42

Update app.py

Files changed (1)
  1. app.py +2 -5
app.py CHANGED
@@ -3,10 +3,7 @@
 from transformers import AutoModelForCausalLM, AutoTokenizer, AutoTokenizer, AutoModelForSeq2SeqLM, set_seed, pipeline
 import gradio as gr
 
-### need more GPU power to call better models !!!!!!
-
-#model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0pp", use_cache=True) # 11B param
-#tokenizer = AutoTokenizer.from_pretrained("bigscience/T0pp")
+### need more GPU power to call T0pp
 
 model = AutoModelForCausalLM.from_pretrained('EleutherAI/gpt-neo-1.3B', use_cache=True)
 tokenizer = AutoTokenizer.from_pretrained('EleutherAI/gpt-neo-1.3B')
@@ -20,7 +17,7 @@ def generate(Name, Position, Organization, max_length=500, top_k=1, temperature=
   return tokenizer.decode(sample[0], truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"])
 
 title = "Motivation Letter Generator"
-article = "Impress your employer"
+article = "For now this still a toy demo and no good results will came out. PS: if you have enough resources try using stronger models !"
 
 gr = gr.Interface(fn = generate, inputs=["text", "text", "text"], outputs="text", title=title, article=article)
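
For context, a minimal sketch of how the full app.py might fit together after this commit, assuming a straightforward prompt built from the three Gradio text inputs and a final launch() call; the body of generate() and the prompt wording are not shown in the diff and are hypothetical. (The committed file also assigns the Interface back to the name gr, which shadows the gradio module; the sketch uses demo instead.)

# Sketch only: lines not visible in the diff (prompt text, generate() body,
# demo.launch()) are assumptions, not the committed code.
from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr

### need more GPU power to call T0pp, so fall back to GPT-Neo 1.3B
model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-1.3B", use_cache=True)
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-1.3B")

def generate(Name, Position, Organization, max_length=500, top_k=1, temperature=0.9):
    # Hypothetical prompt assembled from the three Gradio text inputs
    prompt = (f"Write a motivation letter from {Name} applying for the position of "
              f"{Position} at {Organization}.\n\n")
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids
    sample = model.generate(input_ids, do_sample=True, max_length=max_length,
                            top_k=top_k, temperature=temperature)
    return tokenizer.decode(sample[0], skip_special_tokens=True)

title = "Motivation Letter Generator"
article = ("For now this still a toy demo and no good results will came out. "
           "PS: if you have enough resources try using stronger models !")

demo = gr.Interface(fn=generate, inputs=["text", "text", "text"],
                    outputs="text", title=title, article=article)
demo.launch()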