""" Demo source that explores the difference between embeddings from
stock CLIPModel data, vs one embedded in a full SD model.

Input a single word or prompt, and it will graph each version.
You will usually want to zoom in to actually see the differences.

Required data file: "text_encoder.bin"

Find the "diffusers format" version of the model you are interested in,
and steal from that.
eg: grab
    stablediffusionapi/ghostmix/text_encoder/pytorch_model.bin
and download it, renamed to
    "text_encoder.bin"
"""

import sys
import logging

import torch
from transformers import CLIPProcessor, CLIPModel

# Silence the transformers warnings (eg: about unused vision-tower weights).
logging.disable(logging.WARNING)

# Import PyQt5 before selecting the backend, so a missing Qt install
# fails fast with a clear ImportError.
import PyQt5  # noqa: F401
import matplotlib
matplotlib.use('QT5Agg')

import matplotlib.pyplot as plt

clipsrc = "openai/clip-vit-large-patch14"
overlaymodel = "text_encoder.bin"

processor = None
model = None

# Use the GPU when there is one, but fall back to CPU rather than crash.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def init():
    global processor
    global model

    print("loading processor from " + clipsrc, file=sys.stderr)
    processor = CLIPProcessor.from_pretrained(clipsrc)
    print("done", file=sys.stderr)

    print("loading model from " + clipsrc, file=sys.stderr)
    model = CLIPModel.from_pretrained(clipsrc)
    print("done", file=sys.stderr)

    model = model.to(device)


def load_overlay():
    global model
    print("loading overlay", overlaymodel)
    # map_location keeps the raw load on CPU; the merged model is moved
    # to the target device at the end.
    overlay = torch.load(overlaymodel, map_location="cpu")
    if "state_dict" in overlay:
        print("dereferencing state_dict")
        overlay = overlay["state_dict"]

    print("Attempting to update old from new")
    sd = model.state_dict()
    sd.update(overlay)

    # Newer transformers releases treat position_ids as a non-persistent
    # buffer, and load_state_dict() rejects it if the overlay carried one.
    if "text_model.embeddings.position_ids" in sd:
        print("Removing key text_model.embeddings.position_ids")
        sd.pop("text_model.embeddings.position_ids")

    print("Reloading merged data")
    model.load_state_dict(sd)
    model = model.to(device)
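
# Optional sanity check (a sketch, not part of the original flow): snapshot
# one text-encoder weight before calling load_overlay(), then confirm the
# overlay really replaced it. `w_before` is a name invented here.
#
#   w_before = model.text_model.embeddings.token_embedding.weight.clone()
#   load_overlay()
#   print("token embeddings changed:",
#         not torch.equal(w_before,
#                         model.text_model.embeddings.token_embedding.weight))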


def standard_embed_calc(text):
    inputs = processor(text=text, return_tensors="pt")
    inputs = inputs.to(device)
    with torch.no_grad():
        text_features = model.get_text_features(**inputs)
    # get_text_features() returns shape (batch, 768) for this ViT-L/14
    # model; [0] drops the batch dimension.
    embedding = text_features[0]
    return embedding
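
# Usage sketch: CLIPProcessor also accepts a list of prompts, so several
# embeddings can be computed in one batch (padding=True pads to the
# longest prompt):
#
#   batch = processor(text=["cat", "dog"], return_tensors="pt",
#                     padding=True).to(device)
#   feats = model.get_text_features(**batch)   # shape (2, 768)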


init()

fig, ax = plt.subplots()

text1 = input("First word or prompt: ")

print("generating embeddings for each now")
emb1 = standard_embed_calc(text1)
graph1 = emb1.tolist()
ax.plot(graph1, label=text1[:20])

load_overlay()
emb2 = standard_embed_calc(text1)
graph2 = emb2.tolist()
ax.plot(graph2, label="overlay data")

ax.set_ylabel('Values')
ax.set_title('Graph embedding from standard vs MERGED dict')
ax.legend()

print("Pulling up the graph")
plt.show()