ppbrown commited on
Commit
c6a81ec
1 Parent(s): 7eddabc

Upload subtracted-distances.py

Browse files
Files changed (1) hide show
  1. subtracted-distances.py +90 -0
subtracted-distances.py ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
#!/usr/bin/python3

"""Work in progress.

Plan:
Kinda a "temp" hack (maybe)
Prompt for TWO things. Subtract the second embed from the first.
Then see what is near
"""

import sys
import json
import torch
from safetensors import safe_open

from transformers import CLIPProcessor, CLIPModel

# Hugging Face model id used for both the processor and the model.
clipsrc = "openai/clip-vit-large-patch14"

# Lazily initialised by init(); both stay None until the first embed request.
processor = None
model = None

# NOTE(review): hard-coded CUDA — this script fails on CPU-only machines;
# confirm whether a cuda-if-available fallback is wanted.
device = torch.device("cuda")
25
def init():
    """Populate the module globals `processor` and `model` from `clipsrc`.

    Downloads/loads the CLIP processor and model, then moves the model to
    `device`. Progress messages go to stderr.
    """
    global processor, model

    print("loading processor from " + clipsrc, file=sys.stderr)
    processor = CLIPProcessor.from_pretrained(clipsrc)
    print("done", file=sys.stderr)

    print("loading model from " + clipsrc, file=sys.stderr)
    model = CLIPModel.from_pretrained(clipsrc)
    print("done", file=sys.stderr)

    model = model.to(device)
37
+
38
+
39
+
40
embed_file = "embeddings.safetensors"

device = torch.device("cuda")

# Read one word per line from the plain-text "dictionary" file; the row order
# must match the row order of the embeddings tensor loaded below.
print("reading words from dictionary now", file=sys.stderr)
with open("dictionary", "r") as f:
    tokendict = f.readlines()
wordlist = [token.strip() for token in tokendict]  # Remove trailing newlines
print(len(wordlist), "lines read")

print("reading embeddings now", file=sys.stderr)
# BUG FIX: the original stored this handle in `model`, clobbering the global
# that init()/standard_embed_calc use for the CLIP model. It only worked
# because init() happened to overwrite `model` later. Use a distinct name.
embedding_store = safe_open(embed_file, framework="pt", device="cuda")
embs = embedding_store.get_tensor("embeddings")
# BUG FIX: Tensor.to() is not in-place — the original discarded its result.
embs = embs.to(device)
print("Shape of loaded embeds =", embs.shape)
56
+
57
def standard_embed_calc(text):
    """Return the CLIP text embedding for `text` as a 1-D tensor.

    Lazily calls init() on first use (when the global `processor` is still
    None), so the model download only happens if an embed is requested.
    """
    if processor is None:  # idiom fix: identity comparison, not `== None`
        init()

    inputs = processor(text=text, return_tensors="pt")
    # Robustness fix: assign the result rather than relying on .to() mutating
    # the BatchEncoding in place.
    inputs = inputs.to(device)
    with torch.no_grad():
        text_features = model.get_text_features(**inputs)
    embedding = text_features[0]
    return embedding
67
+
68
+
69
def print_distances(targetemb, k=20):
    """Print the `k` dictionary words whose embeddings are nearest `targetemb`.

    Uses the module globals `embs` (N x D tensor) and `wordlist` (N words,
    row-aligned with `embs`). Distances are Euclidean (p=2). `k` defaults to
    20 to preserve the original behaviour.
    """
    targetdistances = torch.cdist(targetemb.unsqueeze(0), embs, p=2)

    print("shape of distances...", targetdistances.shape)

    # Robustness fix: the original hard-coded 20, which raises if fewer than
    # 20 embeddings are loaded. Clamp to what is available.
    k = min(k, targetdistances.shape[1])
    smallest_distances, smallest_indices = torch.topk(
        targetdistances[0], k, largest=False
    )

    smallest_distances = smallest_distances.tolist()
    smallest_indices = smallest_indices.tolist()
    for d, i in zip(smallest_distances, smallest_indices):
        print(wordlist[i], "(", d, ")")
80
+
81
+
82
+
83
+
84
# Interactive entry point: prompt for two texts, embed each, and report the
# dictionary words nearest to (first embedding - second embedding).
first_text = input("First text? ")
second_text = input("Second text? ")

first_emb = standard_embed_calc(first_text)
second_emb = standard_embed_calc(second_text)

difference = first_emb - second_emb
print_distances(difference)