MohamedRashad committed (verified)
Commit 760166f · 1 Parent(s): 6f55e41

Update app.py

Files changed (1): app.py +1 -3
app.py CHANGED

@@ -25,7 +25,7 @@ tokenizer = TashkeelTokenizer()
 eo_ckpt_path = Path(__file__).parent / 'models/best_eo_mlm_ns_epoch_193.pt'
 ed_ckpt_path = Path(__file__).parent / 'models/best_ed_mlm_ns_epoch_178.pt'
 
-device = 'cpu'
+device = 'cuda' if torch.cuda.is_available() else 'cpu'
 max_seq_len = 1024
 print('Creating Model...')
 eo_model = TashkeelModelEO(tokenizer, max_seq_len=max_seq_len, n_layers=6, learnable_pos_emb=False)
@@ -42,10 +42,8 @@ def infer_catt(input_text, choose_model):
     batch_size = 16
     verbose = True
     if choose_model == 'Encoder-Only':
-        eo_model.to("cuda")
         output_text = eo_model.do_tashkeel_batch([input_text], batch_size, verbose)
     else:
-        ed_model.to("cuda")
         output_text = ed_model.do_tashkeel_batch([input_text], batch_size, verbose)
 
     return output_text[0]
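
For context, the net effect of this change is that app.py now selects the device once at startup based on availability, instead of hard-coding 'cpu' and then forcing both models onto CUDA inside infer_catt, which would fail on CPU-only hardware. Below is a minimal, self-contained sketch of that device-selection pattern using a hypothetical stand-in module, not the actual TashkeelModelEO/TashkeelModelED code from app.py:

import torch
import torch.nn as nn

# Hypothetical stand-in model, only to illustrate the pattern this commit adopts.
model = nn.Linear(8, 8)

# Pick the device once, at startup, based on what is actually available ...
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = model.to(device)

# ... and keep inference inputs on the same device, rather than calling
# .to("cuda") unconditionally, which raises an error when no GPU is present.
x = torch.randn(2, 8, device=device)
with torch.no_grad():
    y = model(x)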