mjlee committed
Commit b59964c
Parent(s): 8966d80
Files changed (1):
  1. app.py +5 -5
app.py CHANGED
@@ -19,10 +19,10 @@ base_model = base_model
 tokenizer = AutoTokenizer.from_pretrained(base_model)
 
 sen_model = Classifier(base_model, num_labels=2, device='cpu', tokenizer=tokenizer)
-sen_model.load_state_dict(torch.load(sen_model_file))
+sen_model.load_state_dict(torch.load(sen_model_file, map_location=torch.device('cpu')))
 
 entity_model = Classifier(base_model, num_labels=2, device='cpu', tokenizer=tokenizer)
-entity_model.load_state_dict(torch.load(entity_model_file))
+entity_model.load_state_dict(torch.load(entity_model_file, map_location=torch.device('cpu')))
 
 
 def infer(test_sentence):
@@ -45,15 +45,15 @@ def infer(test_sentence):
 
     tokenized_data = tokenizer(form_, pair_, padding='max_length', max_length=512, truncation=True)
 
-    input_ids = torch.tensor([tokenized_data['input_ids']]).to(device)
-    attention_mask = torch.tensor([tokenized_data['attention_mask']]).to(device)
+    input_ids = torch.tensor([tokenized_data['input_ids']])
+    attention_mask = torch.tensor([tokenized_data['attention_mask']])
 
     first_sep = tokenized_data['input_ids'].index(2)
     last_sep = tokenized_data['input_ids'][first_sep+2:].index(2) + (first_sep + 2)
     mask = [0] * len(tokenized_data['input_ids'])
     for i in range(first_sep + 2, last_sep):
         mask[i] = 1
-    mask = torch.tensor([mask]).to(device)
+    mask = torch.tensor([mask])
 
     with torch.no_grad():
         outputs = entity_model(input_ids, attention_mask, mask)
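
Note: the two changes go together. The checkpoints appear to have been saved on a CUDA machine, so torch.load(path) with no map_location would try to restore the tensors onto a GPU and fail on a CPU-only host; passing map_location=torch.device('cpu') remaps them at load time. With the models pinned to CPU, the .to(device) transfers on the input tensors become unnecessary and are dropped. A minimal runnable sketch of the same pattern (TinyClassifier and tiny.pt are placeholders for illustration, not names from app.py):

import torch
import torch.nn as nn

# Stand-in for the Classifier in app.py (hypothetical, for illustration only).
class TinyClassifier(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(4, 2)

    def forward(self, x):
        return self.linear(x)

model = TinyClassifier()
torch.save(model.state_dict(), "tiny.pt")  # in practice, saved earlier, possibly on a GPU box

# map_location remaps any CUDA-saved tensors onto the CPU, so the load
# succeeds on hosts without a GPU.
state = torch.load("tiny.pt", map_location=torch.device("cpu"))
model.load_state_dict(state)
model.eval()

# The model lives on CPU, and torch.tensor(...) builds CPU tensors by
# default, so no .to(device) transfer is needed before the forward pass.
with torch.no_grad():
    out = model(torch.randn(1, 4))
print(out.shape)  # torch.Size([1, 2])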