anakin87 committed
Commit a0ccfc1
1 Parent(s): 4f20235

torch inference_mode instead of no_grad and other optimization

Files changed (1)
  1. app_utils/entailment_checker.py +2 -2
app_utils/entailment_checker.py CHANGED
@@ -96,14 +96,14 @@ class EntailmentChecker(BaseComponent):
         pass
 
     def get_entailment(self, premise, hypotesis):
-        with torch.no_grad():
+        with torch.inference_mode():
             inputs = self.tokenizer(
                 f"{premise}{self.tokenizer.sep_token}{hypotesis}", return_tensors="pt"
             ).to(self.devices[0])
             out = self.model(**inputs)
             logits = out.logits
             probs = (
-                torch.nn.functional.softmax(logits, dim=-1)[0, :].cpu().detach().numpy()
+                torch.nn.functional.softmax(logits, dim=-1)[0, :].detach().cpu().numpy()
             )
             entailment_dict = {k.lower(): v for k, v in zip(self.labels, probs)}
             return entailment_dict
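
Context for the change (not part of the commit): torch.inference_mode(), available since PyTorch 1.9, is a stricter variant of torch.no_grad(). Besides disabling gradient tracking, it also skips autograd's version-counter and view-tracking bookkeeping, so pure inference runs slightly faster; the trade-off is that tensors created under it can never be reused in a later autograd graph. A minimal sketch of the difference, using a toy linear layer as a stand-in for the repo's entailment model (the model and inputs below are illustrative assumptions, not code from this commit):

import torch
from torch import nn

model = nn.Linear(4, 3)   # toy stand-in for the transformer model
x = torch.randn(1, 4)

# no_grad: gradients are not tracked, but outputs keep autograd
# bookkeeping (version counters) and may feed a later graph.
with torch.no_grad():
    out_no_grad = model(x)

# inference_mode: additionally drops that bookkeeping, so it is
# faster for pure inference; outputs are permanently excluded
# from autograd.
with torch.inference_mode():
    out_inference = model(x)

print(out_no_grad.requires_grad, out_inference.requires_grad)  # False False

The second hunk reorders .cpu().detach() to .detach().cpu(): detaching before the device transfer keeps the host copy out of autograd's view entirely, which is the conventional ordering (inside inference_mode() the detach() is effectively redundant, since outputs already have requires_grad=False).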