#!/usr/bin/env python
"""
Plan:
Read in "dictionary" for list of words
Read in pre-calculated "proper" embedding for each word from
safetensor file named "embeddings.safetensors"
Prompt user for two words from the list
(but may also be off the list, or a phrase)
Print out Euclidean distance between the two
(the point of the dictionary is that it can make loading super fast for known words)
"""
import sys

import torch
from safetensors import safe_open
from transformers import CLIPProcessor, CLIPModel

clipsrc = "openai/clip-vit-large-patch14"
processor = None
model = None

device = torch.device("cuda")

def init():
    global processor
    global model
    # Load the processor and model
    print("loading processor from " + clipsrc, file=sys.stderr)
    processor = CLIPProcessor.from_pretrained(clipsrc)
    print("done", file=sys.stderr)
    print("loading model from " + clipsrc, file=sys.stderr)
    model = CLIPModel.from_pretrained(clipsrc)
    print("done", file=sys.stderr)
    model = model.to(device)

embed_file = "embeddings.safetensors"

print("read in words from dictionary now", file=sys.stderr)
with open("dictionary", "r") as f:
    tokendict = f.readlines()
wordlist = [token.strip() for token in tokendict]  # Remove trailing newlines
print(len(wordlist), "lines read")

print("read in embeddings now", file=sys.stderr)
# Use a separate name for the safetensors handle so it does not shadow
# the CLIP model that init() loads lazily.
with safe_open(embed_file, framework="pt", device="cuda") as sfile:
    embs = sfile.get_tensor("embeddings")
embs = embs.to(device)
print("Shape of loaded embeds =", embs.shape)

def standard_embed_calc(text):
    if processor is None:
        init()
    inputs = processor(text=text, return_tensors="pt")
    inputs = inputs.to(device)
    with torch.no_grad():
        text_features = model.get_text_features(**inputs)
    embedding = text_features[0]
    return embedding

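# Euclidean (L2) distance between two embeddings;
# torch.dist(emb1, emb2) would be an equivalent way to compute it.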
def print_distance(emb1, emb2):
    targetdistance = torch.norm(emb1 - emb2)
    print("DISTANCE:", targetdistance)

# Return the embedding of the target word:
# pull it from the dictionary cache, or do the full calculation.
def find_word(targetword):
    try:
        targetindex = wordlist.index(targetword)
        targetemb = embs[targetindex]
        return targetemb
    except ValueError:
        print(targetword, "not found in cache")
        print("Now doing lookup with full calc embed")
        targetemb = standard_embed_calc(targetword)
        return targetemb

while True:
    input_text1 = input("Input a word1 (or phrase) now: ")
    input_text2 = input("Input word2 now: ")
    emb1 = find_word(input_text1)
    emb2 = find_word(input_text2)
    print_distance(emb1, emb2)
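
# Example session (illustrative; actual distance values depend on the model
# and the cached embeddings):
#   Input a word1 (or phrase) now: cat
#   Input word2 now: dog
#   DISTANCE: tensor(..., device='cuda:0')
#
# The loop runs until interrupted (e.g. Ctrl-C).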