keshan committed on
Commit
615ba9a
1 Parent(s): 8f192a0

waiting msg while generating

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
# app.py — post-commit state of the visible fragment (file lines 9-25),
# reconstructed from the diff view above.
#
# Streamlit demo that generates Sinhala text with the
# flax-community/Sinhala-gpt2 model.
#
# NOTE(review): app.py lines 1-8 (the streamlit/transformers imports and the
# `seed` text input) are outside this view — the names `st`, `seed`,
# `AutoModelForCausalLM`, `AutoTokenizer`, and `pipeline` resolve there.
# NOTE(review): the diff rendering flattened indentation; the placement of
# `st.success(...)` (inside vs. after the loading spinner) is reconstructed —
# confirm against the original file.

# User-tunable generation parameters (min, max, default).
seq_num = st.number_input('Number of sentences to generate ', 1, 20, 5)
max_len = st.number_input('Length of the sentence ', 5, 300, 100)

go = st.button('Generate')

# Model/tokenizer load happens on every script rerun; the spinner keeps the
# UI responsive-looking while the (slow) download/initialisation runs.
with st.spinner('Waiting for the model to load.....'):
    model = AutoModelForCausalLM.from_pretrained('flax-community/Sinhala-gpt2')
    tokenizer = AutoTokenizer.from_pretrained('flax-community/Sinhala-gpt2')
st.success('Model loaded!!')

if go:
    try:
        # This commit's change: show a busy indicator while generating,
        # instead of leaving the page silent during the pipeline call.
        with st.spinner('Generating...'):
            generator = pipeline('text-generation', model=model, tokenizer=tokenizer)
            seqs = generator(seed, max_length=max_len, num_return_sequences=seq_num)
        st.write(seqs)
    except Exception as e:
        st.exception(f'Exception: {e}')