ToletiSri committed on
Commit
18f4fb0
1 Parent(s): c7b08c4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -19,7 +19,7 @@ encode = lambda s: [stoi[c] for c in s] # encoder: take a string, output a list
19
  decode = lambda l: ''.join([itos[i] for i in l]) # decoder: take a list of integers, output a string
20
 
21
  model = GPTLanguageModel(vocab_size)
22
- model.load_state_dict(torch.load('saved_model.pth'))
23
  m = model.to(cfg.device)
24
 
25
  def inference(input_context, count):
@@ -27,7 +27,7 @@ def inference(input_context, count):
27
  count = int(count)
28
  context = torch.tensor(encoded_text, dtype=torch.long, device=cfg.device)
29
 
30
- print('--------------------context = ',context)
31
  out_text = decode(m.generate(context, max_new_tokens=count)[0].tolist())
32
  return out_text
33
 
 
19
  decode = lambda l: ''.join([itos[i] for i in l]) # decoder: take a list of integers, output a string
20
 
21
  model = GPTLanguageModel(vocab_size)
22
+ model.load_state_dict(torch.load('saved_model.pth', map_location=cfg.device))
23
  m = model.to(cfg.device)
24
 
25
  def inference(input_context, count):
 
27
  count = int(count)
28
  context = torch.tensor(encoded_text, dtype=torch.long, device=cfg.device)
29
 
30
+ #print('--------------------context = ',context)
31
  out_text = decode(m.generate(context, max_new_tokens=count)[0].tolist())
32
  return out_text
33