Mean pooling, not max pooling
#1
by
mustapha
- opened
README.md
CHANGED
@@ -83,7 +83,7 @@ encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tenso
|
|
83 |
with torch.no_grad():
|
84 |
model_output = model(**encoded_input)
|
85 |
|
86 |
-
# Perform pooling. In this case,
|
87 |
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
|
88 |
|
89 |
print("Sentence embeddings:")
|
|
|
83 |
with torch.no_grad():
|
84 |
model_output = model(**encoded_input)
|
85 |
|
86 |
+
# Perform pooling. In this case, mean pooling.
|
87 |
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
|
88 |
|
89 |
print("Sentence embeddings:")
|