ppbrown committed
Commit 2edceda
1 Parent(s): 5a1142f

Upload 2 files

Files changed (2):
  1. calculate-distances.py +6 -4
  2. calculate-distancesXL.py +102 -0
calculate-distances.py CHANGED
@@ -21,6 +21,12 @@ clipsrc="openai/clip-vit-large-patch14"
 processor=None
 model=None
 
+if len(sys.argv) == 2:
+    embed_file=sys.argv[1]
+else:
+    print("You have to give name of embeddings file")
+    sys.exit(1)
+
 device=torch.device("cuda")
 
 
@@ -39,10 +45,6 @@ def init():
 
 
 
-embed_file="embeddings.safetensors"
-
-device=torch.device("cuda")
-
 print("read in words from dictionary now",file=sys.stderr)
 with open("dictionary","r") as f:
     tokendict = f.readlines()
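
Note: with this change, calculate-distances.py no longer hardcodes "embeddings.safetensors"; the file is passed as the single command-line argument, e.g. `python calculate-distances.py embeddings.safetensors`. The reader side expects a safetensors file holding one tensor named "embeddings", with one row per line of "dictionary", in the same order. A minimal sketch of producing such a file (not part of this commit; using CLIPModel.get_text_features for the cached embeddings is an assumption):

    import torch
    from safetensors.torch import save_file
    from transformers import CLIPProcessor, CLIPModel

    device = torch.device("cuda")
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")
    model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14").to(device)

    with open("dictionary", "r") as f:
        words = [line.strip() for line in f]

    rows = []
    with torch.no_grad():
        for word in words:
            inputs = processor(text=word, return_tensors="pt").to(device)
            # projected text embedding for one word, shape (embed_dim,)
            rows.append(model.get_text_features(**inputs)[0])

    # one row per dictionary word, in dictionary order, under the key "embeddings"
    save_file({"embeddings": torch.stack(rows).cpu().contiguous()}, "embeddings.safetensors")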
calculate-distancesXL.py ADDED
@@ -0,0 +1,102 @@
+#!/bin/env python
+
+"""
+Plan:
+Read in "dictionary" for list of words
+Read in pre-calculated "proper" embedding for each word from safetensor file
+Prompt user for a word from the list
+Generate a tensor array of distance to all the other known words
+Print out the 20 closest ones
+"""
+
+
+import sys
+import torch
+from safetensors import safe_open
+
+from transformers import CLIPProcessor,CLIPModel, CLIPTextModelWithProjection
+
+processor=None
+tmodel2=None
+model_path2=None
+model_config2=None
+
+if len(sys.argv) == 4:
+    model_path2=sys.argv[1]
+    model_config2=sys.argv[2]
+    embed_file=sys.argv[3]
+else:
+    print("You have to give name of textencoder modelfile, config file, and embeddings file")
+    sys.exit(1)
+
+device=torch.device("cuda")
+
+
+def init():
+    global tmodel2,processor
+    # yes, oddly they all use the same tokenizer, basically
+    processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")
+
+    print("loading",model_path2)
+    tmodel2 = CLIPTextModelWithProjection.from_pretrained(model_path2,config=model_config2,local_files_only=True,use_safetensors=True)
+    tmodel2.to(device)
+
+
+
+print("read in words from dictionary now",file=sys.stderr)
+with open("dictionary","r") as f:
+    tokendict = f.readlines()
+wordlist = [token.strip() for token in tokendict] # Remove trailing newlines
+print(len(wordlist),"lines read")
+
+print("read in embeddings now",file=sys.stderr)
+model = safe_open(embed_file,framework="pt",device="cuda")
+embs=model.get_tensor("embeddings")
+embs=embs.to(device)
+print("Shape of loaded embeds =",embs.shape)
+
+def standard_embed_calc(text):
+    global processor,tmodel2
+    inputs = processor(text=text, return_tensors="pt")
+    inputs.to(device)
+
+    with torch.no_grad():
+        outputs = tmodel2(**inputs)
+    embeddings = outputs.text_embeds
+    return embeddings[0]
+
+
+def print_distances(targetemb):
+    targetdistances = torch.cdist(targetemb.unsqueeze(0), embs, p=2)
+
+    print("shape of distances...",targetdistances.shape)
+
+    smallest_distances, smallest_indices = torch.topk(targetdistances[0], 20, largest=False)
+
+    smallest_distances=smallest_distances.tolist()
+    smallest_indices=smallest_indices.tolist()
+    for d,i in zip(smallest_distances,smallest_indices):
+        print(wordlist[i],"(",d,")")
+
+
+
+# Find 20 closest tokens to targetword.
+# Will include the word itself
+def find_closest(targetword):
+    try:
+        targetindex=wordlist.index(targetword)
+        targetemb=embs[targetindex]
+        print_distances(targetemb)
+        return
+    except ValueError:
+        print(targetword,"not found in cache")
+
+
+    print("Now doing with full calc embed")
+    targetemb=standard_embed_calc(targetword)
+    print_distances(targetemb)
+
+
+init()  # load tokenizer and text encoder before the interactive loop
+while True:
+    input_text=input("Input a word now:")
+    find_closest(input_text)
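
The XL variant takes the text encoder weights and config from the command line as well, so it can be pointed at, for example, the second text encoder of a locally downloaded SDXL checkpoint. A hypothetical invocation (all three paths are illustrative, not from this commit):

    python calculate-distancesXL.py \
        text_encoder_2/model.safetensors \
        text_encoder_2/config.json \
        embeddingsXL.safetensors

As in the non-XL script, a word found in "dictionary" uses its cached row from the embeddings file; anything else falls through to a full encode via CLIPTextModelWithProjection before the distances are printed.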