consciousAI committed
Commit b5a8eeb
1 Parent(s): e165485

Update app.py

Files changed (1)
  1. app.py +5 -5
app.py CHANGED
@@ -16,19 +16,19 @@ M5 = "consciousAI/question-generation-auto-hints-t5-v1-base-s-q-c"
 device = ['cuda' if torch.cuda.is_available() else 'cpu'][0]
 
 _m0 = AutoModelForSeq2SeqLM.from_pretrained(M0).to(device)
-_tk0 = AutoTokenizer.from_pretrained(M0)
+_tk0 = AutoTokenizer.from_pretrained(M0, cache_dir="./cache")
 
 _m1 = AutoModelForSeq2SeqLM.from_pretrained(M1).to(device)
-_tk1 = AutoTokenizer.from_pretrained(M1)
+_tk1 = AutoTokenizer.from_pretrained(M1, cache_dir="./cache")
 
 _m2 = AutoModelForSeq2SeqLM.from_pretrained(M2).to(device)
-_tk2 = AutoTokenizer.from_pretrained(M2)
+_tk2 = AutoTokenizer.from_pretrained(M2, cache_dir="./cache")
 
 _m4 = AutoModelForSeq2SeqLM.from_pretrained(M4).to(device)
-_tk4 = AutoTokenizer.from_pretrained(M4)
+_tk4 = AutoTokenizer.from_pretrained(M4, cache_dir="./cache")
 
 _m5 = AutoModelForSeq2SeqLM.from_pretrained(M5).to(device)
-_tk5 = AutoTokenizer.from_pretrained(M5)
+_tk5 = AutoTokenizer.from_pretrained(M5, cache_dir="./cache")
 
 def _formatQs(questions):
     _finalQs = ""
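
For context, the +5/-5 change passes cache_dir to each AutoTokenizer.from_pretrained call so the tokenizer files are stored in a local ./cache directory rather than the default Hugging Face cache location. Below is a minimal sketch of how one of these model/tokenizer pairs is loaded after the commit, assuming the same torch and transformers imports as app.py; the helper name load_pair is hypothetical and not part of the commit.

# Minimal sketch, not part of the commit: load one checkpoint the way app.py
# does after this change (model as before, tokenizer cached under ./cache).
# The helper name load_pair is hypothetical.
import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

device = "cuda" if torch.cuda.is_available() else "cpu"

def load_pair(checkpoint):
    # Model weights go to the selected device; tokenizer files are downloaded
    # into the local ./cache directory via the cache_dir argument.
    model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint).to(device)
    tokenizer = AutoTokenizer.from_pretrained(checkpoint, cache_dir="./cache")
    return model, tokenizer

# Example with the checkpoint visible in the hunk header (M5):
_m5, _tk5 = load_pair("consciousAI/question-generation-auto-hints-t5-v1-base-s-q-c")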