smhavens committed on
Commit f937d1c
1 Parent(s): c7978b9

Basic HuggingFace setup

Files changed (1)
  1. main.py +50 -0
main.py CHANGED
@@ -1,2 +1,52 @@
  import spacy
  import math
+ from datasets import load_dataset
+ from sentence_transformers import SentenceTransformer
+ from transformers import AutoTokenizer, AutoModel
+ import torch
+ import torch.nn.functional as F
+
+
+
+ # Mean Pooling - Take attention mask into account for correct averaging
+ def mean_pooling(model_output, attention_mask):
+     token_embeddings = model_output[0]  # First element of model_output contains all token embeddings
+     input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
+     return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
+
+
+ def main():
+     dataset = load_dataset("glue", "cola")
+     dataset = dataset["train"]
+
+     sentences = ["This is an example sentence", "Each sentence is converted"]
+
+     model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
+     embeddings = model.encode(sentences)
+     print(embeddings)
+
+     # Sentences we want sentence embeddings for
+     sentences = ['This is an example sentence', 'Each sentence is converted']
+
+     # Load model from HuggingFace Hub
+     tokenizer = AutoTokenizer.from_pretrained('sentence-transformers/all-MiniLM-L6-v2')
+     model = AutoModel.from_pretrained('sentence-transformers/all-MiniLM-L6-v2')
+
+     # Tokenize sentences
+     encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
+
+     # Compute token embeddings
+     with torch.no_grad():
+         model_output = model(**encoded_input)
+
+     # Perform pooling
+     sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
+
+     # Normalize embeddings
+     sentence_embeddings = F.normalize(sentence_embeddings, p=2, dim=1)
+
+     print("Sentence embeddings:")
+     print(sentence_embeddings)
+
+ if __name__ == "__main__":
+     main()