vernieuwe committed on
Commit
d45e861
1 Parent(s): aecc6f0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -11
app.py CHANGED
@@ -3,19 +3,19 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
3
  checkpoint_name="ArmelR/starcoder-gradio-v0"
4
  model = AutoModelForCausalLM.from_pretrained(checkpoint_name)
5
  tokenizer = AutoTokenizer.from_pretrained(checkpoint_name)
 
 
 
6
 
7
- prompt = "Create a gradio application that help to convert temperature in celcius into temperature in Fahrenheit"
8
- inputs = tokenizer(f"Question: {prompt}\n\nAnswer: ", return_tensors="pt")
 
 
 
 
9
 
10
- outputs = model.generate(
11
- inputs["input_ids"],
12
- temperature=0.2,
13
- top_p=0.95,
14
- max_new_tokens=200
15
- )
16
-
17
- input_len=len(inputs["input_ids"])
18
- print(tokenizer.decode(outputs[0][input_len:]))
19
 
20
  output_text = gr.outputs.Textbox()
21
  gr.Interface(generate_text,"textbox",output_text,title="Text Generation machine ",description="Ask any question. Note: It can take 20-60 seconds to generate output based on your internet connection.").launch()
 
3
# Fine-tuned StarCoder checkpoint specialized for Gradio-oriented code generation.
checkpoint_name = "ArmelR/starcoder-gradio-v0"

# Load model and tokenizer once at import time; both are shared by generate_text.
model = AutoModelForCausalLM.from_pretrained(checkpoint_name)
tokenizer = AutoTokenizer.from_pretrained(checkpoint_name)
6
def generate_text(inp):
    """Generate a model completion for the user's question.

    Parameters
    ----------
    inp : str
        The question typed into the Gradio input textbox.

    Returns
    -------
    str
        The decoded completion with the prompt tokens stripped.
    """
    # Bug fix: the original ignored `inp` and always sent a hard-coded
    # prompt, so whatever the user typed had no effect on the output.
    inputs = tokenizer(f"Question: {inp}\n\nAnswer: ", return_tensors="pt")

    outputs = model.generate(
        inputs["input_ids"],
        temperature=0.2,
        top_p=0.95,
        max_new_tokens=200,
    )

    # Bug fix: len(inputs["input_ids"]) is the batch size (always 1 here),
    # not the prompt length; use the sequence dimension so the echoed
    # prompt is actually removed from the decoded text.
    input_len = inputs["input_ids"].shape[1]

    # Bug fix: the original printed to stdout and returned None, leaving
    # the Gradio output box empty; return the text so the UI can show it.
    return tokenizer.decode(outputs[0][input_len:])
 
 
 
 
 
 
 
19
 
20
# Wire the generator into a minimal Gradio UI: one textbox in, one textbox out.
output_text = gr.outputs.Textbox()
gr.Interface(
    generate_text,
    "textbox",
    output_text,
    title="Text Generation machine ",
    description="Ask any question. Note: It can take 20-60 seconds to generate output based on your internet connection.",
).launch()