Commit f5fa686
Parent(s): 842bfa3

Test.

Files changed:
- README.md (+13 -10)
- pytorch_model.bin (+1 -1)
README.md CHANGED
@@ -1,19 +1,22 @@
 ---
 tags:
 - sentence-transformers
+- feature-extraction
 ---
 
 # TODO: Name of Model
 
 TODO: Description
 
-##
+## Model Description
 TODO: Add relevant content
 
 (0) Base Transformer Type: RobertaModel
+
 (1) Pooling mean
 
-
+
+## Usage (Sentence-Transformers)
 
 Using this model becomes more convenient when you have [sentence-transformers](https://github.com/UKPLab/sentence-transformers) installed:
 
@@ -31,7 +34,7 @@ print(embeddings)
 ```
 
 
-##
+## Usage (HuggingFace Transformers)
 
 ```
 from transformers import AutoTokenizer, AutoModel
@@ -46,17 +49,17 @@ def max_pooling(model_output, attention_mask):
     max_over_time = torch.max(token_embeddings, 1)[0]
     return max_over_time
 
-#Sentences we want sentence embeddings for
+# Sentences we want sentence embeddings for
 sentences = ['This is an example sentence']
 
-#Load model from HuggingFace Hub
+# Load model from HuggingFace Hub
 tokenizer = AutoTokenizer.from_pretrained(TODO)
 model = AutoModel.from_pretrained(TODO)
 
-#Tokenize sentences
+# Tokenize sentences
 encoded_input = tokenizer(sentences, padding=True, truncation=True, max_length=128, return_tensors='pt')
 
-#Compute token embeddings
+# Compute token embeddings
 with torch.no_grad():
     model_output = model(**encoded_input)
 
@@ -69,8 +72,8 @@ print(sentence_embeddings)
 
 
 
-##
+## TODO: Training Procedure
 
-##
+## TODO: Evaluation Results
 
-##
+## TODO: Citing & Authors
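For context, the hunks above excerpt the model card's two usage snippets. A minimal runnable sketch of the Sentence-Transformers path (the card's `TODO` model id is swapped for a hypothetical stand-in checkpoint purely so the snippet executes; it is not part of this commit):

```
# Sketch of the card's Sentence-Transformers usage.
# Install first: pip install -U sentence-transformers
# The model id below is a hypothetical stand-in for the card's TODO placeholder.
from sentence_transformers import SentenceTransformer

# Sentences we want sentence embeddings for
sentences = ['This is an example sentence']

model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')  # stand-in id
embeddings = model.encode(sentences)
print(embeddings)
```

And the plain HuggingFace Transformers path, with the masked max-pooling body filled in along the lines the `def max_pooling(model_output, attention_mask)` hunk header implies; again, the checkpoint id is a stand-in, not part of the commit:

```
# Sketch of the card's plain-Transformers usage; max_pooling is reconstructed
# from the fragments visible in the diff, and the model id is a stand-in.
import torch
from transformers import AutoTokenizer, AutoModel

def max_pooling(model_output, attention_mask):
    token_embeddings = model_output[0]  # first element holds all token embeddings
    mask = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    token_embeddings[mask == 0] = -1e9  # mask padding so it never wins the max
    max_over_time = torch.max(token_embeddings, 1)[0]
    return max_over_time

# Sentences we want sentence embeddings for
sentences = ['This is an example sentence']

# Load model from HuggingFace Hub (stand-in id for the card's TODO)
tokenizer = AutoTokenizer.from_pretrained('sentence-transformers/all-MiniLM-L6-v2')
model = AutoModel.from_pretrained('sentence-transformers/all-MiniLM-L6-v2')

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True,
                          max_length=128, return_tensors='pt')

# Compute token embeddings
with torch.no_grad():
    model_output = model(**encoded_input)

# Pool token embeddings into one vector per sentence
sentence_embeddings = max_pooling(model_output, encoded_input['attention_mask'])
print(sentence_embeddings)
```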
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:903d4a81e3b114c98bce7a89ccc817ec1db33102cd8604c4de576534ebbe71e1
 size 328519167
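The pytorch_model.bin entry rewrites only the Git LFS pointer stub: the repository tracks a three-line pointer file, and its `oid` is the SHA-256 of the actual weights blob (the old hash is truncated in the view above). A quick sanity-check sketch against a locally downloaded copy; the file path is hypothetical:

```
# Check a downloaded weights file against the LFS pointer's oid (sha256).
# The path below is hypothetical; point it at your local pytorch_model.bin.
import hashlib

def sha256_of(path, chunk_size=1 << 20):
    h = hashlib.sha256()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            h.update(chunk)
    return h.hexdigest()

expected = '903d4a81e3b114c98bce7a89ccc817ec1db33102cd8604c4de576534ebbe71e1'
print(sha256_of('pytorch_model.bin') == expected)
```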