Benjamin Gonzalez committed
Commit b461977
1 Parent(s): 5c66c5b
Files changed (1)
  1. app.py +21 -3
app.py CHANGED
@@ -3,7 +3,13 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
 import gradio as gr
 
 tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2", trust_remote_code=True)
-model = AutoModelForCausalLM.from_pretrained("microsoft/phi-2", torch_dtype=torch.float32, device_map="cpu", trust_remote_code=True)
+model = AutoModelForCausalLM.from_pretrained(
+    "microsoft/phi-2",
+    torch_dtype=torch.float32,
+    device_map="cpu",
+    trust_remote_code=True,
+)
+
 
 def generate(prompt, length):
     inputs = tokenizer(prompt, return_tensors="pt", return_attention_mask=False)
@@ -12,7 +18,19 @@ def generate(prompt, length):
     outputs = model.generate(**inputs, max_length=length)
     return tokenizer.batch_decode(outputs)[0]
 
-demo = gr.Interface(fn=generate, inputs=["text", gr.Number(value=50, label="max length",maximum=200)], outputs="text")
+
+demo = gr.Interface(
+    fn=generate,
+    inputs=[
+        gr.Text(
+            label="prompt",
+            value="Write a detailed analogy between mathematics and a lighthouse.",
+        ),
+        gr.Number(value=50, label="max length", maximum=200),
+    ],
+    outputs="text",
+)
+
 
 if __name__ == "__main__":
-    demo.launch(show_api=False)
+    demo.launch(show_api=False)
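
For reference, app.py after this commit should read roughly as follows. This is a reconstruction from the hunks above, not the verbatim file: the two opening import lines are inferred (the first hunk header names the transformers import as its enclosing context, and import torch is assumed because the code references torch.float32), and two file lines between the hunks (old 10-11 / new 16-17) are hidden by the diff and left as a placeholder comment.

# app.py after commit b461977 (reconstructed; see caveats above)
import torch  # assumed: needed for torch.float32, sits outside both hunks
from transformers import AutoTokenizer, AutoModelForCausalLM  # context line of the first hunk header
import gradio as gr

# Load the phi-2 tokenizer and model on CPU in float32.
tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    "microsoft/phi-2",
    torch_dtype=torch.float32,
    device_map="cpu",
    trust_remote_code=True,
)


def generate(prompt, length):
    # Tokenize the prompt; return_attention_mask=False skips building an attention mask.
    inputs = tokenizer(prompt, return_tensors="pt", return_attention_mask=False)
    # (new-file lines 16-17 fall between the two hunks and are not visible in the diff)
    outputs = model.generate(**inputs, max_length=length)
    return tokenizer.batch_decode(outputs)[0]


demo = gr.Interface(
    fn=generate,
    inputs=[
        gr.Text(
            label="prompt",
            value="Write a detailed analogy between mathematics and a lighthouse.",
        ),
        gr.Number(value=50, label="max length", maximum=200),
    ],
    outputs="text",
)


if __name__ == "__main__":
    demo.launch(show_api=False)

The user-facing change is that the bare "text" input becomes a labeled gr.Text prefilled with a default prompt; show_api=False simply hides the API docs link in the Gradio footer.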