injilashah committed (verified)
Commit 6b85a72 · 1 Parent(s): dc34cfb

Update app.py

Files changed (1)
  1. app.py +6 -8
app.py CHANGED
@@ -9,6 +9,7 @@ g_model = AutoModelForCausalLM.from_pretrained("google/gemma-2-2b")
 
 def Sentence_Commpletion(model_name, input_text):
 
+
   if model_name == "Bloom":
     tokenizer, model = b_tokenizer, b_model
   elif model_name == "Gemma":
@@ -16,10 +17,10 @@ def Sentence_Commpletion(model_name, input_text):
 
 
 
-  inputs = tokenizer(input_text, return_tensors="pt")
-  outputs = model.generate(inputs.input_ids, max_length=50, num_return_sequences=1)
+  inputs = tokenizer(input_text, return_tensors="pt")
+  outputs = model.generate(inputs.input_ids, max_length=50, num_return_sequences=1)
 
-  return tokenizer.decode(outputs[0], skip_special_tokens=True)
+  return tokenizer.decode(outputs[0], skip_special_tokens=True)
 
 
 interface = gr.Interface(
@@ -27,10 +28,7 @@ fn=Sentence_Commpletion,
 inputs=[gr.Radio(["Bloom", "Gemma"], label="Choose model"),
 
 gr.Textbox(placeholder="Enter sentece"),],
-outputs="text",
-title="Bloom vs Gemma Sentence completion",
-
-)
-
+outputs="text",
+title="Bloom vs Gemma Sentence completion",)
 
 interface.launch()
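
For orientation, here is a minimal sketch of how the whole app.py plausibly reads after this commit. Only the function body, the google/gemma-2-2b loading line, and the identifiers b_tokenizer, b_model, and g_model appear in the hunks above; the imports, the Bloom checkpoint (bigscience/bloom-560m below), and g_tokenizer are assumptions, and identifiers such as Sentence_Commpletion are kept exactly as they appear in the repo.

# Minimal sketch of app.py after this commit (assumptions noted; not the repo's exact file).
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# Assumed Bloom checkpoint; the diff only shows the Gemma loading line.
b_tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
b_model = AutoModelForCausalLM.from_pretrained("bigscience/bloom-560m")
g_tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b")
g_model = AutoModelForCausalLM.from_pretrained("google/gemma-2-2b")


def Sentence_Commpletion(model_name, input_text):
    # Pick the tokenizer/model pair matching the radio-button choice.
    if model_name == "Bloom":
        tokenizer, model = b_tokenizer, b_model
    elif model_name == "Gemma":
        tokenizer, model = g_tokenizer, g_model

    # Tokenize the prompt and generate up to 50 tokens of continuation.
    inputs = tokenizer(input_text, return_tensors="pt")
    outputs = model.generate(inputs.input_ids, max_length=50, num_return_sequences=1)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)


interface = gr.Interface(
    fn=Sentence_Commpletion,
    inputs=[gr.Radio(["Bloom", "Gemma"], label="Choose model"),
            gr.Textbox(placeholder="Enter sentece")],
    outputs="text",
    title="Bloom vs Gemma Sentence completion",
)

interface.launch()

Running the script launches the Gradio UI locally; note that google/gemma-2-2b is a gated checkpoint, so the runtime needs Hugging Face credentials that have accepted the Gemma license.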