#!/usr/bin/env python

"""
Purpose: Read in "dictionary" for a list of words/tokens.
Generate "proper" embedding for each token, and store in tensor file.
Generate a tensor array of distance to every other token/embedding.
Save it out to "mtype@mname.safetensors"

Warning: Some models require more VRAM than others.
Some require more RAM than others.
"""

import argparse
import sys

import torch
import open_clip
from safetensors.torch import save_file

"""
REMEMBER!!! You MUST use the same settings when you READ from the output file as well!!
"""

# See "list_models.txt" for the full set of valid combinations
#mtype='ViT-L-14-336'
mtype = 'ViT-L-14'
mname = 'openai'

parser = argparse.ArgumentParser(
    prog='generate-embeddings',
    epilog=f"defaults: mtype={mtype}, mname={mname}",
    description='Read in "dictionary" wordlist and generate calculated embeddings')
parser.add_argument('--mtype', default=mtype)
parser.add_argument('--mname', default=mname)
args = parser.parse_args()
mtype = args.mtype
mname = args.mname

#### Warning: this one requires more than 4GB of VRAM
#mtype='ViT-H-14-quickgelu'
#mname='dfn5b'
# You may also be able to use the syntax
#   hf-hub:hf-internal-testing/tiny-open-clip-model
# for mname

outfile = f"{mtype}@{mname}.safetensors"
print("Will save to:")
print("  ", outfile)

print("Loading", mtype, mname)
cmodel, _, preprocess = open_clip.create_model_and_transforms(
    mtype, pretrained=mname)
tokenizer = open_clip.get_tokenizer(mtype)

# Prefer the GPU when one is present, but fall back to CPU if the model
# does not fit. OOM here is very rare... unless you are trying to load
# the quickgelu sets on a 4GB card, or have two things running at once.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
try:
    cmodel.to(device)
except torch.cuda.OutOfMemoryError as e:
    print(f"FALLING BACK TO CPU!!\n {e}")
    device = torch.device("cpu")
    cmodel.to(device)


def standard_embed_calc(text):
    """Tokenize one word/phrase and return its text embedding."""
    with torch.no_grad():
        ttext = tokenizer(text).to(device)
        text_features = cmodel.encode_text(ttext)
        #print("shape of text is", ttext.shape)
        embedding = text_features[0]
        #print("shape of embedding is", embedding.shape)
        # For ViT-B, expected shape is [512]
        return embedding


with open("dictionary", "r") as f:
    tokendict = f.readlines()
tokendict = [token.strip() for token in tokendict]  # Remove trailing newlines

print("generate embeddings for each now", file=sys.stderr)
count = 0
all_embeddings = []
for word in tokendict:
    emb = standard_embed_calc(word)
    emb = emb.unsqueeze(0)  # add a leading dim so torch.cat can stack them
    all_embeddings.append(emb)
    count += 1
    if (count % 100) == 0:
        print(count)

embs = torch.cat(all_embeddings, dim=0)
print("Shape of result = ", embs.shape)
print("Saving to ", outfile)
save_file({"embeddings": embs}, outfile)

print("calculate distances now")
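# ----------------------------------------------------------------------
# The docstring promises a distance array, but the code above stops
# after saving the raw embeddings. A minimal sketch of one way to do
# that step follows, assuming cosine distance is the metric wanted --
# that choice, and the "distances.safetensors" filename, are assumptions
# for illustration, not something this script defines. Note the full
# NxN matrix can get large for a big dictionary.
#
#   import torch.nn.functional as F
#   normed = F.normalize(embs, dim=1)        # unit-length rows
#   distances = 1.0 - (normed @ normed.T)    # [N, N] cosine distances
#   save_file({"distances": distances}, "distances.safetensors")
# ----------------------------------------------------------------------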
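# ----------------------------------------------------------------------
# Hedged sketch of reading the output back in a separate script, to go
# with the REMEMBER note above: load_file() returns a dict keyed as it
# was saved, and the embeddings only make sense alongside the same
# mtype/mname that produced them (embedding width and space differ per
# model). The filename shown is just this script's default output name.
#
#   from safetensors.torch import load_file
#   data = load_file("ViT-L-14@openai.safetensors")
#   embs = data["embeddings"]    # shape [num_tokens, embed_dim]
# ----------------------------------------------------------------------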