---
language: en
license: apache-2.0
tags:
- sentence-transformers
- sentence-similarity
- feature-extraction
- transformers
- onnx
---

# sbert-onnx-gtr-t5-xl

This is the ONNX version of [sentence-transformers/gtr-t5-xl](https://huggingface.co/sentence-transformers/gtr-t5-xl), the model from [Large Dual Encoders Are Generalizable Retrievers](https://arxiv.org/abs/2112.07899). Currently, Hugging Face does not support downloading ONNX models whose weights are stored in the external-data format, so I have created a workaround that uses sentence-transformers and Optimum together to generate embeddings.

Install the required packages first:

```
pip install onnx
pip install onnxruntime==1.10.0
pip install "transformers>4.6.1"
pip install sentencepiece
pip install sentence-transformers
pip install optimum
pip install torch==1.9.0
```

Then you can use the model like this:

```python

import os
import shutil

import torch
import torch.nn.functional as F
from optimum.onnxruntime import ORTModelForFeatureExtraction
from sentence_transformers.models import Dense, Pooling
from sentence_transformers.util import snapshot_download
from transformers import AutoTokenizer

model_name = 'vamsibanda/sbert-onnx-gtr-t5-xl'
cache_folder = './'
model_path = os.path.join(cache_folder, model_name.replace("/", "_"))

def download_onnx_model(model_name, cache_folder, model_path, force_download=False):
    # Fetch the ONNX snapshot once; re-download only when forced.
    if force_download and os.path.exists(model_path):
        shutil.rmtree(model_path)
    elif os.path.exists(model_path):
        return
    snapshot_download(model_name,
                      cache_dir=cache_folder,
                      library_name='sentence-transformers')
      
def mean_pooling(model_output, attention_mask):
    token_embeddings = model_output[0]  # first element of model_output contains all token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)

def generate_embedding(text):
    # Tokenize, run the ONNX encoder, mean-pool, apply the Dense
    # projection, and L2-normalize, mirroring the SBERT pipeline.
    token = tokenizer(text, return_tensors='pt')
    embedding = model(input_ids=token['input_ids'], attention_mask=token['attention_mask'])
    embedding = mean_pooling(embedding, token['attention_mask'])
    embedding = dense_layer.forward({'sentence_embedding': embedding})
    embedding = F.normalize(embedding['sentence_embedding'], p=2, dim=1)
    return embedding.tolist()[0]


_ = download_onnx_model(model_name, cache_folder, model_path)
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = ORTModelForFeatureExtraction.from_pretrained(model_path, force_download=False)
pooling_layer = Pooling.load(f"{model_path}/1_Pooling")  # loaded for reference; mean_pooling() applies the same mean pooling
dense_layer = Dense.load(f"{model_path}/2_Dense")

generate_embedding('That is a happy person')
```
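
As a quick sanity check, you can compare embeddings with cosine similarity: a paraphrase should score much higher than an unrelated sentence. This is a minimal sketch, assuming the setup above has already run (so `generate_embedding` is defined) and a recent sentence-transformers release (for `util.cos_sim`); the example sentences are illustrative only.

```python
import torch
from sentence_transformers import util

# Hypothetical sanity check: a paraphrase should be more similar
# to the query than an unrelated sentence.
emb_a = torch.tensor(generate_embedding('That is a happy person'))
emb_b = torch.tensor(generate_embedding('That is a very happy person'))
emb_c = torch.tensor(generate_embedding('The stock market closed lower today'))

print(util.cos_sim(emb_a, emb_b))  # expected: high similarity
print(util.cos_sim(emb_a, emb_c))  # expected: noticeably lower
```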