ethanlshen committed
Commit b01dd78
1 Parent(s): 5824320

Update app.py

Files changed (1): app.py +3 -1
app.py CHANGED
@@ -66,6 +66,7 @@ def decode(tokenizer, encoding):
 @spaces.GPU
 def update_options(input, num_tokens):
     tokenized_prompts = tokenizer.encode([input], True, False)
+    print("Processed prompt")
     alive_gens, _ = model.sup_generate(prompt_tokens=tokenized_prompts,
                                        smoothing="geom",
                                        max_gen_len=num_tokens,
@@ -78,6 +79,7 @@ def update_options(input, num_tokens):
                                        ngrams=ngrams,
                                        get_time=False,
                                        penalty=200)
+    print("Generated")
     gens = alive_gens[0].reshape(n_drafts, -1)
     return decode(tokenizer, gens[0]), decode(tokenizer, gens[1]), decode(tokenizer, gens[2])
 
@@ -105,4 +107,4 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
         return curr + txt
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch(share=True)
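
For context, a minimal, self-contained sketch of how the three drafts returned by update_options can be wired to output textboxes and launched with share=True. The component names and the stubbed update_options here are assumptions for illustration, not the actual app.py layout; the real function runs the GPU-backed sup_generate call shown in the diff.

import gradio as gr

def update_options(prompt, num_tokens):
    # Stub standing in for the real GPU-backed update_options in app.py,
    # which decodes and returns one string per draft.
    return f"draft 1 for: {prompt}", "draft 2", "draft 3"

with gr.Blocks(theme=gr.themes.Soft()) as demo:
    prompt = gr.Textbox(label="Prompt")
    num_tokens = gr.Slider(1, 256, value=32, step=1, label="Max new tokens")
    drafts = [gr.Textbox(label=f"Draft {i + 1}") for i in range(3)]
    # One output textbox per returned draft
    gr.Button("Generate").click(update_options, inputs=[prompt, num_tokens], outputs=drafts)

if __name__ == "__main__":
    # share=True requests a temporary public gradio.live URL in addition to
    # the local server; that is the behavior change introduced by this commit.
    demo.launch(share=True)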