ppbrown committed on
Commit
7a9b925
1 Parent(s): b9dffb2

Upload calculate-2distance.py

Browse files
Files changed (1) hide show
  1. calculate-2distance.py +101 -0
calculate-2distance.py ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/env python
2
+
3
+ """
4
+ Plan:
5
+ Read in "dictionary" for list of words
6
+ Read in pre-calculated "proper" embedding for each word from
7
+ safetensor file named "embeddings.safetensors"
8
+ Prompt user for two words from the list
9
+ (but may also be off the list, or a phrase)
10
+ Print out Euclidean distance between the two
11
+
12
+ (the point of the dictionary is that it can make loading super fast for known words)
13
+
14
+ """
15
+
16
+
17
+ import sys
18
+ import json
19
+ import torch
20
+ from safetensors import safe_open
21
+
22
+ import numpy
23
+
24
+ from transformers import CLIPProcessor,CLIPModel
25
+
26
# Model source and lazily-populated module state shared by
# init() and standard_embed_calc().
clipsrc="openai/clip-vit-large-patch14"   # HuggingFace model id for CLIP ViT-L/14
processor=None   # CLIPProcessor, set by init() on first embedding calculation
model=None       # CLIPModel, set by init(); NOTE(review): also rebound to the
                 # safetensors file handle further down in the script -- confirm
                 # that collision is intentional

device=torch.device("cuda")   # NOTE(review): assumes a CUDA GPU is available
33
def init():
    """One-time setup: load the CLIP processor and model into module globals.

    Progress is reported on stderr so stdout stays clean for results.
    """
    global processor, model

    print("loading processor from "+clipsrc,file=sys.stderr)
    processor = CLIPProcessor.from_pretrained(clipsrc)
    print("done",file=sys.stderr)

    print("loading model from "+clipsrc,file=sys.stderr)
    loaded = CLIPModel.from_pretrained(clipsrc)
    print("done",file=sys.stderr)

    # Move the freshly-loaded model onto the GPU before publishing it.
    model = loaded.to(device)
+
46
+
47
+
48
embed_file="embeddings.safetensors"

# (A redundant second `device = torch.device("cuda")` assignment that
#  shadowed the identical one above has been removed.)

print("read in words from dictionary now",file=sys.stderr)
with open("dictionary","r") as f:
    tokendict = f.readlines()
wordlist = [token.strip() for token in tokendict]  # Remove trailing newlines
print(len(wordlist),"lines read")

print("read in embeddings now",file=sys.stderr)
# Bug fix: the original bound this handle to the name `model`, clobbering the
# global that init() assigns the CLIP model to. Use a dedicated name instead.
embed_store = safe_open(embed_file,framework="pt",device="cuda")
embs = embed_store.get_tensor("embeddings")
# Bug fix: Tensor.to() is not in-place -- the original discarded its result.
embs = embs.to(device)
print("Shape of loaded embeds =",embs.shape)
+
64
def standard_embed_calc(text):
    """Return the CLIP text embedding (1-D tensor) for `text`.

    Lazily loads the processor/model on first call via init().
    `text` may be a single word or a whole phrase.
    """
    # Idiom fix: identity comparison against None (was `processor == None`).
    if processor is None:
        init()

    inputs = processor(text=text, return_tensors="pt")
    # Robustness fix: rebind the result rather than relying on .to() mutating
    # the BatchEncoding in place.
    inputs = inputs.to(device)
    with torch.no_grad():
        text_features = model.get_text_features(**inputs)
    embedding = text_features[0]
    return embedding
+
75
+
76
def print_distance(emb1, emb2):
    """Print the Euclidean (L2) distance between two embedding tensors."""
    difference = emb1 - emb2
    distance = torch.norm(difference)
    print("DISTANCE:", distance)
+
80
+
81
# return embed of target word.
# pull from dictionary, or do full calc
def find_word(targetword):
    """Return the embedding tensor for `targetword`.

    Fast path: index into the preloaded `embs` cache via `wordlist`.
    Fallback: compute a fresh embedding with standard_embed_calc().
    """
    try:
        # Only the index lookup can raise ValueError; keep the try body minimal.
        targetindex = wordlist.index(targetword)
    except ValueError:
        print(targetword, "not found in cache")
        print("Now doing lookup with full calc embed")
        return standard_embed_calc(targetword)
    # Bug fix: the original had an unreachable bare `return` statement
    # immediately after returning the cached embedding; it has been removed.
    return embs[targetindex]
+
95
+
96
# Interactive driver: repeatedly prompt for two words (or phrases) and
# report the Euclidean distance between their embeddings.
while True:
    first_text = input("Input a word1(or phrase) now:")
    second_text = input("Input word2 now:")
    print_distance(find_word(first_text), find_word(second_text))