MohamedRashad committed
Commit 8e1016b · 1 Parent(s): 4d6899e

chore: Update TashkeelModelEO and TashkeelModelED loading in app.py

Files changed (1): app.py +4 -5
app.py CHANGED
@@ -24,16 +24,13 @@ def infer_shakkala(input_text):
 tokenizer = TashkeelTokenizer()
 eo_ckpt_path = Path(__file__).parent / 'models/best_eo_mlm_ns_epoch_193.pt'
 
-device = 'cuda' if torch.cuda.is_available() else 'cpu'
-print('device:', device)
-
 max_seq_len = 1024
 print('Creating Model...')
 eo_model = TashkeelModelEO(tokenizer, max_seq_len=max_seq_len, n_layers=6, learnable_pos_emb=False)
 ed_model = TashkeelModelED(tokenizer, max_seq_len=max_seq_len, n_layers=3, learnable_pos_emb=False)
 
-eo_model.load_state_dict(torch.load(eo_ckpt_path, map_location=device)).eval().to(device)
-ed_model.load_state_dict(torch.load(eo_ckpt_path, map_location=device)).eval().to(device)
+eo_model.load_state_dict(torch.load(eo_ckpt_path)).eval()
+ed_model.load_state_dict(torch.load(eo_ckpt_path)).eval()
 
 @spaces.GPU()
 def infer_catt(input_text, choose_model):
@@ -41,8 +38,10 @@ def infer_catt(input_text, choose_model):
     batch_size = 16
     verbose = True
     if choose_model == 'Encoder-Only':
+        eo_model.to("cuda")
         output_text = eo_model.do_tashkeel_batch([input_text], batch_size, verbose)
     else:
+        ed_model.to("cuda")
         output_text = ed_model.do_tashkeel_batch([input_text], batch_size, verbose)
 
     return output_text[0]
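
For context, this change follows the usual Hugging Face ZeroGPU pattern: build the model and load its weights on CPU at import time, then move it to CUDA only inside the @spaces.GPU()-decorated function, where a GPU is actually attached. Below is a minimal sketch of that pattern, not the repository's code: MyModel, the checkpoint path, and the infer function are hypothetical. Note that in standard PyTorch, load_state_dict returns a report of missing/unexpected keys rather than the module, so .eval() is normally called on the model itself instead of being chained as in the diff above.

import torch
import spaces  # Hugging Face Spaces ZeroGPU decorator

model = MyModel()  # hypothetical model class, constructed on CPU
state = torch.load('checkpoint.pt', map_location='cpu')  # hypothetical checkpoint path
model.load_state_dict(state)  # returns a key-matching report, not the module
model.eval()  # so eval() goes on the model itself

@spaces.GPU()
def infer(text):
    model.to('cuda')  # the GPU only exists inside the decorated function
    return run_inference(model, text)  # hypothetical inference helper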