beeguy committed
Commit bb224bf
1 Parent(s): 1acc2f3

using model directly

Files changed (1): app.py +10 -4
app.py CHANGED
@@ -1,12 +1,18 @@
 import torch
-from transformers import pipeline
+from transformers import AutoTokenizer, AutoModelForCausalLM
 import gradio as gr
 
-pipe = pipeline("text-generation", model="MTSAIR/multi_verse_model")
+# Load model directly
+tokenizer = AutoTokenizer.from_pretrained("MTSAIR/multi_verse_model")
+model = AutoModelForCausalLM.from_pretrained("MTSAIR/multi_verse_model")
 
 def greet(name):
-    res = pipe(name, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
-    return res[0]['generated_text']
+    # Aim: same result as res = pipe(name, do_sample=True, temperature=0.7, top_k=50, top_p=0.95),
+    # but using the tokenizer and model directly.
+    input_ids = tokenizer.encode(name, return_tensors='pt')
+    res = model.generate(input_ids, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
+    generated = tokenizer.decode(res[0], skip_special_tokens=True)
+    return generated
 
 iface = gr.Interface(fn=greet, inputs="text", outputs="text")
 iface.launch()
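
Editor's note: the removed pipeline call bundled three steps that the new code performs explicitly: tokenize the prompt, sample from the model, and decode the output ids. The sketch below (not part of the commit) makes that mapping explicit; the helper name generate_like_pipe, the max_new_tokens=50 cap, and the pad_token_id fallback are illustrative assumptions, since the committed code leaves the length limit to the model's default generation config.

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("MTSAIR/multi_verse_model")
model = AutoModelForCausalLM.from_pretrained("MTSAIR/multi_verse_model")

def generate_like_pipe(prompt: str) -> str:
    # Hypothetical helper, not in the commit: mirrors
    # pipe(prompt, do_sample=True, temperature=0.7, top_k=50, top_p=0.95).
    inputs = tokenizer(prompt, return_tensors="pt")  # returns input_ids and attention_mask
    with torch.no_grad():  # inference only; no gradients needed
        output_ids = model.generate(
            **inputs,
            do_sample=True,
            temperature=0.7,
            top_k=50,
            top_p=0.95,
            max_new_tokens=50,  # assumed cap; the commit relies on the model's default
            pad_token_id=tokenizer.eos_token_id,  # assumed fallback for models with no pad token
        )
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)

print(generate_like_pipe("Hello"))

Passing the full tokenizer output (rather than only input_ids, as the commit does) also forwards the attention mask, which avoids a transformers warning and matches what the pipeline passed to generate.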