convert to handle dictionary format
Browse files- .gitattributes +1 -0
- README.md +28 -10
- dictionary.fullword +0 -0
- dictionary.huge +0 -0
- embeddings.safetensors.huge +3 -0
- generate-distances.py +61 -19
- generate-embeddings.py +4 -4
- requirements.txt +3 -0
.gitattributes
CHANGED
@@ -54,3 +54,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
54 |
*.jpeg filter=lfs diff=lfs merge=lfs -text
|
55 |
*.webp filter=lfs diff=lfs merge=lfs -text
|
56 |
embeddings.safetensors.fullword filter=lfs diff=lfs merge=lfs -text
|
|
|
|
54 |
*.jpeg filter=lfs diff=lfs merge=lfs -text
|
55 |
*.webp filter=lfs diff=lfs merge=lfs -text
|
56 |
embeddings.safetensors.fullword filter=lfs diff=lfs merge=lfs -text
|
57 |
+
embeddings.safetensors.huge filter=lfs diff=lfs merge=lfs -text
|
README.md
CHANGED
@@ -12,17 +12,40 @@ which allows command-line browsing of words and their neighbours
|
|
12 |
Loads the generated embeddings, calculates a full matrix
|
13 |
of distances between all tokens, and then reads in a word, to show neighbours for.
|
14 |
|
15 |
-
To run this requires the files "embeddings.safetensors" and "
|
16 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
17 |
|
18 |
|
19 |
## generate-embeddings.py
|
20 |
|
21 |
-
Generates the "embeddings.safetensor" file
|
|
|
22 |
|
23 |
-
|
24 |
-
generates a standalone embedding for each word.
|
25 |
-
Shape of the embeddings tensor, is
|
26 |
[number-of-words][768]
|
27 |
|
28 |
Note that yes, it is possible to directly pull a tensor from the CLIP model,
|
@@ -32,11 +55,6 @@ This will NOT GIVE YOU THE RIGHT DISTANCES!
|
|
32 |
Hence why we are calculating and then storing the embedding weights actually
|
33 |
generated by the CLIP process
|
34 |
|
35 |
-
## embeddings.safetensors
|
36 |
-
|
37 |
-
Data file generated by generate-embeddings.py
|
38 |
-
|
39 |
-
|
40 |
|
41 |
## fullword.json
|
42 |
|
|
|
12 |
Loads the generated embeddings, calculates a full matrix
|
13 |
of distances between all tokens, and then reads in a word, to show neighbours for.
|
14 |
|
15 |
+
To run this requires the files "embeddings.safetensors" and "dictionary"
|
16 |
|
17 |
+
You will need to rename or copy appropriate files for this as mentioned below
|
18 |
+
|
19 |
+
### embeddings.safetensors
|
20 |
+
|
21 |
+
You can either copy one of the provided files, or generate your own.
|
22 |
+
See generate-embeddings.py for that.
|
23 |
+
|
24 |
+
Note that you must always use the "dictionary" file that matches your embeddings file
|
25 |
+
|
26 |
+
### dictionary
|
27 |
+
|
28 |
+
Make sure to always use the dictionary file that matches your embeddings file.
|
29 |
+
|
30 |
+
The "dictionary.fullword" file is pulled from fullword.json, which is distilled from "full words"
|
31 |
+
present in the ViT-L/14 CLIP model's provided token dictionary, called "vocab.json".
|
32 |
+
Thus there are only around 30,000 words in it
|
33 |
+
|
34 |
+
If you want to use the provided "embeddings.safetensors.huge" file, you will want to use the matching
|
35 |
+
"dictionary.huge" file, which has over 300,000 words
|
36 |
+
|
37 |
+
This huge file comes from the linux "wamerican-huge" package, which delivers it under
|
38 |
+
/usr/share/dict/american-english-huge
|
39 |
+
|
40 |
+
There also exists an "american-insane" package
|
41 |
|
42 |
|
43 |
## generate-embeddings.py
|
44 |
|
45 |
+
Generates the "embeddings.safetensors" file, based on the "dictionary" file present.
|
46 |
+
Takes a few minutes to run, depending on size of the dictionary
|
47 |
|
48 |
+
The shape of the embeddings tensor, is
|
|
|
|
|
49 |
[number-of-words][768]
|
50 |
|
51 |
Note that yes, it is possible to directly pull a tensor from the CLIP model,
|
|
|
55 |
Hence why we are calculating and then storing the embedding weights actually
|
56 |
generated by the CLIP process
|
57 |
|
|
|
|
|
|
|
|
|
|
|
58 |
|
59 |
## fullword.json
|
60 |
|
dictionary.fullword
ADDED
The diff for this file is too large to render.
See raw diff
|
|
dictionary.huge
ADDED
The diff for this file is too large to render.
See raw diff
|
|
embeddings.safetensors.huge
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:a848df65f451f2d1ae45484f3ad3751e18e8b5b160b107964bdf71a11f96c934
|
3 |
+
size 1070450784
|
generate-distances.py
CHANGED
@@ -14,46 +14,88 @@ import json
|
|
14 |
import torch
|
15 |
from safetensors import safe_open
|
16 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
17 |
embed_file="embeddings.safetensors"
|
18 |
|
19 |
device=torch.device("cuda")
|
20 |
|
21 |
-
print("read in words from
|
22 |
-
with open("
|
23 |
-
tokendict =
|
24 |
-
wordlist =
|
|
|
25 |
|
26 |
print("read in embeddings now",file=sys.stderr)
|
27 |
-
|
28 |
model = safe_open(embed_file,framework="pt",device="cuda")
|
29 |
embs=model.get_tensor("embeddings")
|
30 |
embs.to(device)
|
31 |
print("Shape of loaded embeds =",embs.shape)
|
32 |
|
33 |
-
|
34 |
-
|
35 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
36 |
|
37 |
# Find 10 closest tokens to targetword.
|
38 |
# Will include the word itself
|
39 |
def find_closest(targetword):
|
40 |
try:
|
41 |
targetindex=wordlist.index(targetword)
|
42 |
-
|
43 |
-
|
44 |
return
|
|
|
|
|
45 |
|
46 |
-
#print("index of",targetword,"is",targetindex)
|
47 |
-
targetdistances=distances[targetindex]
|
48 |
|
49 |
-
|
|
|
|
|
50 |
|
51 |
-
smallest_distances=smallest_distances.tolist()
|
52 |
-
smallest_indices=smallest_indices.tolist()
|
53 |
-
for d,i in zip(smallest_distances,smallest_indices):
|
54 |
-
print(wordlist[i],"(",d,")")
|
55 |
-
#print("The smallest distance values are",smallest_distances)
|
56 |
-
#print("The smallest index values are",smallest_indices)
|
57 |
|
58 |
|
59 |
print("Input a word now:")
|
|
|
14 |
import torch
|
15 |
from safetensors import safe_open
|
16 |
|
17 |
+
from transformers import CLIPProcessor,CLIPModel
|
18 |
+
|
19 |
+
clipsrc="openai/clip-vit-large-patch14"
|
20 |
+
processor=None
|
21 |
+
model=None
|
22 |
+
|
23 |
+
device=torch.device("cuda")
|
24 |
+
|
25 |
+
|
26 |
+
def init():
|
27 |
+
global processor
|
28 |
+
global model
|
29 |
+
# Load the processor and model
|
30 |
+
print("loading processor from "+clipsrc,file=sys.stderr)
|
31 |
+
processor = CLIPProcessor.from_pretrained(clipsrc)
|
32 |
+
print("done",file=sys.stderr)
|
33 |
+
print("loading model from "+clipsrc,file=sys.stderr)
|
34 |
+
model = CLIPModel.from_pretrained(clipsrc)
|
35 |
+
print("done",file=sys.stderr)
|
36 |
+
|
37 |
+
model = model.to(device)
|
38 |
+
|
39 |
+
|
40 |
+
|
41 |
embed_file="embeddings.safetensors"
|
42 |
|
43 |
device=torch.device("cuda")
|
44 |
|
45 |
+
print("read in words from dictionary now",file=sys.stderr)
|
46 |
+
with open("dictionary","r") as f:
|
47 |
+
tokendict = f.readlines()
|
48 |
+
wordlist = [token.strip() for token in tokendict] # Remove trailing newlines
|
49 |
+
print(len(wordlist),"lines read")
|
50 |
|
51 |
print("read in embeddings now",file=sys.stderr)
|
|
|
52 |
model = safe_open(embed_file,framework="pt",device="cuda")
|
53 |
embs=model.get_tensor("embeddings")
|
54 |
embs.to(device)
|
55 |
print("Shape of loaded embeds =",embs.shape)
|
56 |
|
57 |
+
def standard_embed_calc(text):
|
58 |
+
if processor == None:
|
59 |
+
init()
|
60 |
+
|
61 |
+
inputs = processor(text=text, return_tensors="pt")
|
62 |
+
inputs.to(device)
|
63 |
+
with torch.no_grad():
|
64 |
+
text_features = model.get_text_features(**inputs)
|
65 |
+
embedding = text_features[0]
|
66 |
+
return embedding
|
67 |
+
|
68 |
+
|
69 |
+
def print_distances(targetemb):
|
70 |
+
targetdistances = torch.cdist( targetemb.unsqueeze(0), embs, p=2)
|
71 |
+
|
72 |
+
print("shape of distances...",targetdistances.shape)
|
73 |
+
|
74 |
+
smallest_distances, smallest_indices = torch.topk(targetdistances[0], 20, largest=False)
|
75 |
+
|
76 |
+
smallest_distances=smallest_distances.tolist()
|
77 |
+
smallest_indices=smallest_indices.tolist()
|
78 |
+
for d,i in zip(smallest_distances,smallest_indices):
|
79 |
+
print(wordlist[i],"(",d,")")
|
80 |
+
|
81 |
+
|
82 |
|
83 |
# Find 10 closest tokens to targetword.
|
84 |
# Will include the word itself
|
85 |
def find_closest(targetword):
|
86 |
try:
|
87 |
targetindex=wordlist.index(targetword)
|
88 |
+
targetemb=embs[targetindex]
|
89 |
+
print_distances(targetemb)
|
90 |
return
|
91 |
+
except ValueError:
|
92 |
+
print(targetword,"not found in cache")
|
93 |
|
|
|
|
|
94 |
|
95 |
+
print("Now doing with full calc embed")
|
96 |
+
targetemb=standard_embed_calc(targetword)
|
97 |
+
print_distances(targetemb)
|
98 |
|
|
|
|
|
|
|
|
|
|
|
|
|
99 |
|
100 |
|
101 |
print("Input a word now:")
|
generate-embeddings.py
CHANGED
@@ -47,14 +47,14 @@ def standard_embed_calc(text):
|
|
47 |
|
48 |
init()
|
49 |
|
50 |
-
|
51 |
-
|
52 |
-
tokendict =
|
53 |
|
54 |
print("generate embeddings for each now",file=sys.stderr)
|
55 |
count=1
|
56 |
all_embeddings = []
|
57 |
-
for word in tokendict
|
58 |
emb = standard_embed_calc(word)
|
59 |
emb=emb.unsqueeze(0) # stupid matrix magic to make the cat work
|
60 |
all_embeddings.append(emb)
|
|
|
47 |
|
48 |
init()
|
49 |
|
50 |
+
with open("dictionary","r") as f:
|
51 |
+
tokendict = f.readlines()
|
52 |
+
tokendict = [token.strip() for token in tokendict] # Remove trailing newlines
|
53 |
|
54 |
print("generate embeddings for each now",file=sys.stderr)
|
55 |
count=1
|
56 |
all_embeddings = []
|
57 |
+
for word in tokendict:
|
58 |
emb = standard_embed_calc(word)
|
59 |
emb=emb.unsqueeze(0) # stupid matrix magic to make the cat work
|
60 |
all_embeddings.append(emb)
|
requirements.txt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
torch
|
2 |
+
safetensors
|
3 |
+
transformers
|