KoichiYasuoka committed on
Commit
1d0c47a
1 Parent(s): fb432f9

GPU support

Browse files
Files changed (1) hide show
  1. ud.py +2 -2
ud.py CHANGED
@@ -21,14 +21,14 @@ class UniversalDependenciesPipeline(TokenClassificationPipeline):
21
  s,e=m[i]
22
  m.insert(i+1,(s+len(j)-2,e))
23
  m[i]=(s,s+len(j)-2)
24
- r["offset_mapping"]=torch.tensor([m])
25
  r["sentence"]=sentence
26
  return r
27
def _forward(self, model_inputs):
    """Score every token position with one batched masked-LM pass.

    For each real token position ``i`` (the first and last ids —
    presumably [CLS]/[SEP] special tokens, TODO confirm — are skipped),
    a batch row is built where position ``i`` is replaced by the mask
    token and the original token id is appended at the end of the
    sequence.  All rows are scored in a single forward pass.

    Parameters
    ----------
    model_inputs : dict
        Must contain ``"input_ids"`` indexable as ``[0]`` (a single
        sequence batch).

    Returns
    -------
    dict
        ``"logits"`` trimmed to ``[:, 1:-2, :]`` (drops the first column
        and the last two: the trailing special token plus the appended
        original id), merged with every entry of ``model_inputs``.
    """
    import torch
    v = model_inputs["input_ids"][0].tolist()
    with torch.no_grad():
        # BUGFIX: build the batch tensor on the model's device; without
        # device=self.device this raises a device-mismatch RuntimeError
        # whenever the pipeline runs on a GPU.
        e = self.model(
            input_ids=torch.tensor(
                [v[0:i] + [self.tokenizer.mask_token_id] + v[i + 1:] + [j]
                 for i, j in enumerate(v[1:-1], 1)],
                device=self.device,
            )
        )
    return {"logits": e.logits[:, 1:-2, :], **model_inputs}
33
  def postprocess(self,model_outputs,**kwargs):
34
  import numpy
 
21
  s,e=m[i]
22
  m.insert(i+1,(s+len(j)-2,e))
23
  m[i]=(s,s+len(j)-2)
24
+ r["offset_mapping"]=torch.tensor([m],device=self.device)
25
  r["sentence"]=sentence
26
  return r
27
def _forward(self, model_inputs):
    """Run one batched masked-LM forward pass over a single sentence.

    Each batch row is the input sequence with one interior position
    replaced by the mask token and that position's original token id
    appended at the end.  Returns the model logits trimmed to the
    interior columns, merged with all of ``model_inputs``.
    """
    import torch
    ids = model_inputs["input_ids"][0].tolist()
    mask_id = self.tokenizer.mask_token_id
    # Build one masked variant per interior position (skip first/last id).
    batch = []
    for pos, tok in enumerate(ids[1:-1], start=1):
        batch.append(ids[:pos] + [mask_id] + ids[pos + 1:] + [tok])
    with torch.no_grad():
        out = self.model(input_ids=torch.tensor(batch, device=self.device))
    # Drop the first column and the last two (trailing special token
    # plus the appended original id).
    trimmed = out.logits[:, 1:-2, :]
    return {"logits": trimmed, **model_inputs}
33
  def postprocess(self,model_outputs,**kwargs):
34
  import numpy