#!/bin/env python
""" Plan:
    Read in "dictionary" for list of words
    Read in pre-calculated "proper" embedding for each word from safetensor file
    Prompt user for a word from the list
    Generate a tensor array of distance to all the other known words
    Print out the 20 closest ones
"""

import sys
import torch
from safetensors import safe_open
from transformers import CLIPProcessor, CLIPModel, CLIPTextModelWithProjection

processor = None
tmodel2 = None
model_path2 = None
model_config2 = None

if len(sys.argv) == 4:
    model_path2 = sys.argv[1]
    model_config2 = sys.argv[2]
    embed_file = sys.argv[3]
else:
    print("You have to give name of textencoder modelfile, config file, and embeddings file")
    sys.exit(1)

device = torch.device("cuda")


def init():
    """Load the CLIP processor and the text-encoder model onto the GPU."""
    global tmodel2, processor
    # yes, oddly they all use the same tokenizer, basically
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")
    print("loading", model_path2)
    tmodel2 = CLIPTextModelWithProjection.from_pretrained(
        model_path2, config=model_config2,
        local_files_only=True, use_safetensors=True)
    tmodel2.to(device)


init()

print("read in words from dictionary now", file=sys.stderr)
with open("dictionary", "r") as f:
    tokendict = f.readlines()
wordlist = [token.strip() for token in tokendict]  # Remove trailing newlines
print(len(wordlist), "lines read")

print("read in embeddings now", file=sys.stderr)
model = safe_open(embed_file, framework="pt", device="cuda")
embs = model.get_tensor("embeddings")
embs = embs.to(device)
print("Shape of loaded embeds =", embs.shape)


def standard_embed_calc(text):
    """Run the text encoder to compute a projected embedding for a word
    that is not in the pre-calculated cache."""
    global processor, tmodel2
    inputs = processor(text=text, return_tensors="pt")
    inputs.to(device)
    with torch.no_grad():
        outputs = tmodel2(**inputs)
    embeddings = outputs.text_embeds
    return embeddings[0]


def print_distances(targetemb):
    """Print the 20 words whose cached embeddings are closest (L2) to targetemb."""
    targetdistances = torch.cdist(targetemb.unsqueeze(0), embs, p=2)
    print("shape of distances...", targetdistances.shape)
    smallest_distances, smallest_indices = torch.topk(
        targetdistances[0], 20, largest=False)
    smallest_distances = smallest_distances.tolist()
    smallest_indices = smallest_indices.tolist()
    for d, i in zip(smallest_distances, smallest_indices):
        print(wordlist[i], "(", d, ")")


# Find the 20 closest tokens to targetword.
# Will include the word itself
def find_closest(targetword):
    try:
        targetindex = wordlist.index(targetword)
        targetemb = embs[targetindex]
        print_distances(targetemb)
        return
    except ValueError:
        print(targetword, "not found in cache")

    print("Now doing with full calc embed")
    targetemb = standard_embed_calc(targetword)
    print_distances(targetemb)


while True:
    input_text = input("Input a word now:")
    find_closest(input_text)
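
# Example invocation (a sketch; the file names below are placeholders, not
# part of the original script -- substitute your own text-encoder weights,
# the matching config.json, and the pre-calculated embeddings safetensors file):
#
#   python thisscript.py text_encoder.safetensors config.json embeddings.safetensors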