vamsibanda committed
Commit c2c33f1
1 Parent(s): 4a94086

Update README.md

Files changed (1)
  1. README.md +53 -54
README.md CHANGED
@@ -1,74 +1,73 @@
  ---
  license: apache-2.0
  ---

- ```
- ##How to download the model?
- !sudo apt-get install git-lfs
- !git lfs install
-
- # Then
- !git clone https://huggingface.co/vamsibanda/sbert-onnx-all-roberta-large-v1
  ```
-
- ## How to generate embeddings?
  ```
- from onnxruntime import InferenceSession
  import torch
  from transformers.modeling_outputs import BaseModelOutput
- from transformers import RobertaTokenizerFast
  import torch.nn.functional as F
- from sentence_transformers.models import Transformer, Pooling, Dense
-
- class RobertaEncoder(torch.nn.Module):
-     def __init__(self, encoder_sess):
-         super().__init__()
-         self.encoder = encoder_sess

-     def forward(
-         self,
-         input_ids,
-         attention_mask,
-         inputs_embeds=None,
-         head_mask=None,
-         output_attentions=None,
-         output_hidden_states=None,
-         return_dict=None,
-     ):
-         encoder_hidden_state = torch.from_numpy(
-             self.encoder.run(
-                 None,
-                 {
-                     "input_ids": input_ids.cpu().numpy(),
-                     "attention_mask": attention_mask.cpu().numpy(),
-                 },
-             )[0]
-         )
-         return BaseModelOutput(encoder_hidden_state)

  def mean_pooling(model_output, attention_mask):
      token_embeddings = model_output[0] #First element of model_output contains all token embeddings
      input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
      return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)

- def sbert_onnx_encode(sentence_input):
-     token = roberta_tokenizer(sentence_input, return_tensors='pt')
-     encoder_outputs = encoder_layer(input_ids=token['input_ids'], attention_mask=token['attention_mask'])
-     sbert_embeddings = mean_pooling(encoder_outputs, token['attention_mask'])
-     sbert_embeddings = F.normalize(sbert_embeddings, p=2, dim=1)
-     return sbert_embeddings.tolist()[0]
-
- roberta_tokenizer = RobertaTokenizerFast.from_pretrained('sbert-onnx-all-roberta-large-v1')
- encoder_sess = InferenceSession('sbert-onnx-all-roberta-large-v1/sbert-roberta-large-quant.onnx')
- encoder_layer = RobertaEncoder(encoder_sess)
- pooling_layer = Pooling.load('./sbert-onnx-all-roberta-large-v1/1_Pooling/')

- m1 = sbert_onnx_encode('That is a happy person')
- m2 = sbert.encode('That is a happy person').tolist()
- print(util.cos_sim(m1,m2))
- ##tensor([[0.9925]])
  ```
 
  ---
+ pipeline_tag: sentence-similarity
+ language: en
  license: apache-2.0
+ tags:
+ - sentence-transformers
+ - feature-extraction
+ - sentence-similarity
+ - transformers
+ - onnx
  ---
 
+ # sbert-onnx-all-roberta-large-v1
 
+ This is the ONNX model of sentence-transformers/all-roberta-large-v1 [https://seb.sbert.net]. Currently, Hugging Face does not support downloading ONNX files together with their external format files. I have created a workaround that uses sbert and optimum together to generate embeddings.
+
  ```
+ pip install onnx
+ pip install onnxruntime==1.10.0
+ pip install "transformers>4.6.1"
+ pip install sentencepiece
+ pip install sentence-transformers
+ pip install optimum
+ pip install torch==1.9.0
  ```
+
+ Then you can use the model like this:
+
+ ```python
+ import os
+ from torch.hub import _get_torch_home
+ from sentence_transformers.util import snapshot_download
+ from transformers import AutoTokenizer, pipeline
+ from optimum.onnxruntime import ORTModelForSeq2SeqLM, ORTModelForFeatureExtraction
+ from sentence_transformers.models import Transformer, Pooling, Dense
  import torch
  from transformers.modeling_outputs import BaseModelOutput
+ from transformers import T5TokenizerFast
  import torch.nn.functional as F

+ model_name = 'vamsibanda/sbert-onnx-all-roberta-large-v1'
+ cache_folder = './'
+ model_path = os.path.join(cache_folder, model_name.replace("/", "_"))

  def mean_pooling(model_output, attention_mask):
      token_embeddings = model_output[0] #First element of model_output contains all token embeddings
      input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
      return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)

+ def download_onnx_model(model_name, cache_folder, model_path, force_download=False):
+     if force_download and os.path.exists(model_path):
+         os.remove(model_path)
+     snapshot_download(model_name,
+                       cache_dir=cache_folder,
+                       library_name='sentence-transformers',
+                       ignore_files=['flax_model.msgpack', 'rust_model.ot', 'tf_model.h5'],
+                       )
+     return
+
+ _ = download_onnx_model(model_name, cache_folder, model_path)
+ tokenizer = AutoTokenizer.from_pretrained(model_path)
+ model = ORTModelForFeatureExtraction.from_pretrained(model_path, force_download=False)
+ pooling_layer = Pooling.load(f"{model_path}/1_Pooling")

+ token = tokenizer('That is a happy person', return_tensors='pt')
+ embeddings = model(input_ids=token['input_ids'], attention_mask=token['attention_mask'])
+ sbert_embeddings = mean_pooling(embeddings, token['attention_mask'])
+ sbert_embeddings = F.normalize(sbert_embeddings, p=2, dim=1)
+ sbert_embeddings.tolist()[0]
  ```
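
As a quick sanity check, the ONNX embeddings can be compared against the original sentence-transformers/all-roberta-large-v1 PyTorch model, as the previous version of this card did. The snippet below is a minimal sketch and not part of the committed README: it assumes the original checkpoint can be downloaded and reuses `sbert_embeddings` from the code above.

```python
# Minimal sanity-check sketch (assumes the original PyTorch checkpoint
# sentence-transformers/all-roberta-large-v1 is available; not part of the commit).
from sentence_transformers import SentenceTransformer, util

sbert = SentenceTransformer('sentence-transformers/all-roberta-large-v1')

onnx_embedding = sbert_embeddings.tolist()[0]                      # ONNX pipeline output from the snippet above
torch_embedding = sbert.encode('That is a happy person').tolist()  # reference PyTorch embedding

# The previous version of this card reported roughly tensor([[0.9925]]) for this comparison.
print(util.cos_sim(onnx_embedding, torch_embedding))
```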