|
|
|
|
|
""" Work in progress |
|
|
|
Similar to generate-embedding.py, but outputs in the format |
|
that SDXL models expect. I hope. |
|
|
|
Also tries to load the SDXL base text encoder specifically. |
|
Requires you populate the two paths mentioned immediately below this comment section. |
|
|
|
You can get them from: |
|
https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/tree/main/text_encoder_2 |
|
|
|
(rename diffusion_pytorch_model.safetensors to text_encoder_xl.safetensors) |
|
|
|
|
|
Plan: |
|
Take input for a single word or phrase. |
|
Save out calculations, to "generatedXL.safetensors" |
|
|
|
Note that you can generate an embedding from two words, or even more |
|
|
|
I could also include a "clip_l" key, but.. |
|
Meh. |
|
""" |
|
|
|
# Local path to the SDXL text_encoder_2 weights (safetensors format).
# Download from the stabilityai/stable-diffusion-xl-base-1.0 repo and rename
# to this filename (see the module docstring above).
model_path = "text_encoder_xl.safetensors"

# Local path to the matching text_encoder_2 config JSON from the same repo.
model_config = "text_encoder_2_config.json"
|
|
|
import sys |
|
import torch |
|
from transformers import CLIPProcessor, CLIPTextModel, CLIPTextModelWithProjection |
|
from safetensors.torch import save_file |
|
|
|
|
|
|
|
|
|
|
|
# Lazily-initialized singletons, populated on first use by
# initXLCLIPmodel() / initCLIPprocessor().
model = None
processor = None

# Prefer CUDA but fall back to CPU so the script still runs on machines
# without a GPU (the original hard-coded "cuda", which made model.to(device)
# fail on CPU-only hosts).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
|
|
|
|
def initXLCLIPmodel():
    """Load the SDXL second text encoder (CLIP-G with projection head).

    Reads the weights from the local ``model_path`` / ``model_config`` files,
    stores the model in the module-level ``model`` global, and moves it onto
    ``device``. No value is returned.
    """
    global model
    print("loading", model_path)
    # local_files_only: never hit the network; the weights must already be
    # present on disk (see module docstring for where to get them).
    model = CLIPTextModelWithProjection.from_pretrained(
        model_path,
        config=model_config,
        local_files_only=True,
        use_safetensors=True,
    )
    model.to(device)
|
|
|
|
|
def initCLIPprocessor():
    """Fetch the standard CLIP ViT-L/14 processor (tokenizer) from the hub
    and store it in the module-level ``processor`` global."""
    global processor
    repo_id = "openai/clip-vit-large-patch14"
    print("getting processor from", repo_id)
    processor = CLIPProcessor.from_pretrained(repo_id)
|
|
|
def embed_from_text(text):
    """Tokenize *text* and return the SDXL CLIP-G projected text embedding.

    Lazily initializes the processor and model on first call. Returns
    ``outputs.text_embeds`` — presumably shape (batch, projection_dim),
    i.e. (1, 1280) for SDXL's text_encoder_2; TODO confirm against the
    downloaded config.
    """
    global processor, model
    # PEP 8: compare to None with identity, not equality. Check each
    # singleton independently so a partially-initialized state (e.g. a
    # failed model load after the processor succeeded) still recovers,
    # instead of skipping model init because the processor exists.
    if processor is None:
        initCLIPprocessor()
    if model is None:
        initXLCLIPmodel()

    print("getting tokens")
    inputs = processor(text=text, return_tensors="pt")
    inputs.to(device)

    print("getting embeddings?")
    # Inference only: no_grad skips building the autograd graph, which also
    # leaves the returned tensor grad-free for later serialization.
    with torch.no_grad():
        outputs = model(**inputs)
    print("finalizing")
    return outputs.text_embeds
|
|
|
|
|
|
|
|
|
|
|
word = input("type a phrase to generate an embedding for: ")

emb = embed_from_text(word)

# safetensors' save_file needs plain tensors — detach from any autograd graph
# and move to CPU before saving (a CUDA tensor with requires_grad would make
# save_file raise). NOTE(review): detach/cpu is believed required here; if the
# model ran under no_grad it is a harmless no-op.
embs = emb.detach().cpu()

print("Shape of result = ", embs.shape)

# Default output name; overridden below when the phrase is purely alphabetic.
output = "generatedXL.safetensors"

# str.isalpha() is False for "" and for phrases containing spaces/punctuation,
# so those keep the default name. (The original all(...) check was vacuously
# True for an empty string, which produced the odd filename "XL.safetensors".)
if word.isalpha():
    output = f"{word}XL.safetensors"

print(f"Saving to {output}...")
# "clip_g" is the key SDXL embedding loaders expect for the second encoder.
save_file({"clip_g": embs}, output)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|