#!/usr/bin/env python
""" Work in progress
Similar to generate-embedding.py, but outputs in the format
that SDXL models expect. I hope.
Also tries to load the SDXL base text encoder specifically.
Requires you populate the two paths mentioned immediately below this comment section.
You can get them from:
https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/tree/main/text_encoder_2
(rename diffusion_pytorch_model.safetensors to text_encoder_xl.safetensors)
Plan:
Take input for a single word or phrase.
Save out calculations, to "generatedXL.safetensors"
Note that you can generate an embedding from two words, or even more
I could also include a "clip_l" key, but..
Meh.
"""
model_path = "text_encoder_xl.safetensors"
model_config = "text_encoder_2_config.json"
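
# Optional download sketch (an assumption, not part of the original
# workflow: requires huggingface_hub, and the filename must match the
# current repo listing at the URL in the docstring). The weights file
# can be fetched the same way, then renamed as described above.
# from huggingface_hub import hf_hub_download
# config_path = hf_hub_download(
#     repo_id="stabilityai/stable-diffusion-xl-base-1.0",
#     filename="text_encoder_2/config.json")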
import torch
from transformers import CLIPProcessor, CLIPTextModelWithProjection
from safetensors.torch import save_file
# 1. Load the pretrained model.
# Note that it doesn't like a leading "/" in the name!!
model = None
processor = None
# Fall back to CPU if CUDA is not available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Load the SDXL text encoder, using the two required pathnames
# defined at the top of this file.
def initXLCLIPmodel():
    global model
    print("loading", model_path)
    model = CLIPTextModelWithProjection.from_pretrained(
        model_path, config=model_config,
        local_files_only=True, use_safetensors=True)
    model.to(device)

# A bit weird, but SDXL seems to still use this tokenizer.
def initCLIPprocessor():
    global processor
    CLIPname = "openai/clip-vit-large-patch14"
    print("getting processor from", CLIPname)
    processor = CLIPProcessor.from_pretrained(CLIPname)
def embed_from_text(text):
    global processor, model
    if processor is None:
        initCLIPprocessor()
        initXLCLIPmodel()
    print("getting tokens")
    inputs = processor(text=text, return_tensors="pt")
    inputs = inputs.to(device)
    print("getting embeddings?")
    with torch.no_grad():  # inference only; no gradients needed
        outputs = model(**inputs)
    print("finalizing")
    embeddings = outputs.text_embeds
    return embeddings
##########################################
word = input("type a phrase to generate an embedding for: ")
embs = embed_from_text(word)
# text_embeds already comes back 2-D, so no extra unsqueeze()
# matrix magic is needed to get the required shape.
print("Shape of result =", embs.shape)
# Note that consuming programs expect 2-D shapes such as
# torch.Size([1, 768])
output = "generatedXL.safetensors"
# If a single word was used, name the output file after it.
if word.isalpha():
    output = f"{word}XL.safetensors"
print(f"Saving to {output}...")
# Move the tensor to CPU for serialization.
save_file({"clip_g": embs.cpu()}, output)
# Technically we are saving a shape of ([1][1280]),
# whereas official SDXL embedding files use
#   (clip_g) shape ([8][1280])
#   (clip_l) shape ([8][768])
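
# ---------------------------------------------------------------
# Verification sketch (an addition, not part of the original plan):
# reload the file we just wrote and confirm the stored key and shape,
# using load_file(), the safetensors counterpart of save_file().
from safetensors.torch import load_file

loaded = load_file(output)
print("Reloaded clip_g shape:", loaded["clip_g"].shape)  # expect torch.Size([1, 1280])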