ai-forever committed
Commit • 5a50ff0 • Parent(s): 0e6d158
Update README.md
README.md
CHANGED
@@ -27,8 +27,8 @@ def mean_pooling(model_output, attention_mask):
 sentences = ['Привет! Как твои дела?',
              'А правда, что 42 твое любимое число?']
 #Load AutoModel from huggingface model repository
-tokenizer = AutoTokenizer.from_pretrained("
-model = AutoModel.from_pretrained("
+tokenizer = AutoTokenizer.from_pretrained("ai-forever/sbert_large_mt_nlu_ru")
+model = AutoModel.from_pretrained("ai-forever/sbert_large_mt_nlu_ru")
 #Tokenize sentences
 encoded_input = tokenizer(sentences, padding=True, truncation=True, max_length=24, return_tensors='pt')
 #Compute token embeddings
@@ -40,5 +40,5 @@ sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
 
 # Authors
 + [SberDevices](https://sberdevices.ru/) Team.
-+ Aleksandr Abramov: [Github](https://github.com/Ab1992ao), [Kaggle Competitions Master](https://www.kaggle.com/andrilko);
++ Aleksandr Abramov: [HF profile](https://huggingface.co/Andrilko), [Github](https://github.com/Ab1992ao), [Kaggle Competitions Master](https://www.kaggle.com/andrilko);
 + Denis Antykhov: [Github](https://github.com/gaphex);
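For reference, the snippet these hunks touch computes sentence embeddings by mean-pooling token embeddings over the attention mask. Below is a minimal runnable sketch using the repository id introduced by this commit; the body of mean_pooling and the torch.no_grad() forward pass are assumptions based on the standard mean-pooling recipe, since the hunks only show the function's signature and its call site.

```python
import torch
from transformers import AutoTokenizer, AutoModel

# Assumed mean-pooling helper: only its signature and call appear in the diff above;
# this body follows the standard attention-mask-weighted average of token embeddings.
def mean_pooling(model_output, attention_mask):
    token_embeddings = model_output[0]  # all token embeddings from the last hidden state
    mask = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * mask, 1) / torch.clamp(mask.sum(1), min=1e-9)

sentences = ['Привет! Как твои дела?',
             'А правда, что 42 твое любимое число?']

# Load the tokenizer and model with the updated repository id from this commit
tokenizer = AutoTokenizer.from_pretrained("ai-forever/sbert_large_mt_nlu_ru")
model = AutoModel.from_pretrained("ai-forever/sbert_large_mt_nlu_ru")

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, max_length=24, return_tensors='pt')

# Compute token embeddings without tracking gradients
with torch.no_grad():
    model_output = model(**encoded_input)

# Pool token embeddings into one fixed-size vector per sentence
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
```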