meh
generate-embeddingXL.py  CHANGED  (+40 -29)
@@ -3,7 +3,7 @@
 """ Work in progress
 
 Similar to generate-embedding.py, but outputs in the format
-that SDXL models expect.
+that SDXL models expect.
 
 Also tries to load the SDXL base text encoder specifically.
 Requires you populate the two paths mentioned immediately below this comment section.
@@ -20,12 +20,12 @@ Plan:
 
 Note that you can generate an embedding from two words, or even more
 
-I could also include a "clip_l" key, but..
-Meh.
 """
 
-
-
+model_path1 = "text_encoder.safetensors"
+model_config1 = "text_encoder_config.json"
+model_path2 = "text_encoder_2.safetensors"
+model_config2 = "text_encoder_2_config.json"
 
 import sys
 import torch
@@ -36,17 +36,25 @@ from safetensors.torch import save_file
 # Note that it doesnt like a leading "/" in the name!!
 
 
-
+tmodel1=None
+tmodel2=None
 processor=None
 
 device=torch.device("cuda")
 
+def initCLIPmodel(model_path,model_config):
+    global tmodel1
+    print("loading",model_path)
+    tmodel1 = CLIPTextModel.from_pretrained(model_path,config=model_config,local_files_only=True,use_safetensors=True)
+    tmodel1.to(device)
+
+#
 # Note the default, required 2 pathnames
-def initXLCLIPmodel():
-    global
+def initXLCLIPmodel(model_path,model_config):
+    global tmodel2
     print("loading",model_path)
-
-
+    tmodel2 = CLIPTextModelWithProjection.from_pretrained(model_path,config=model_config,local_files_only=True,use_safetensors=True)
+    tmodel2.to(device)
 
 # a bit wierd, but SDXL seems to still use this tokeninzer
 def initCLIPprocessor():
@@ -56,17 +64,27 @@ def initCLIPprocessor():
     processor = CLIPProcessor.from_pretrained(CLIPname)
 
 def embed_from_text(text):
-    global processor,
+    global processor,tmodel1
     if processor == None:
         initCLIPprocessor()
-
-
+        initCLIPmodel(model_path1,model_config1)
+    inputs = processor(text=text, return_tensors="pt")
+    inputs.to(device)
+
+    print("getting embeddings1")
+    outputs = tmodel1(**inputs)
+    embeddings = outputs.pooler_output
+    return embeddings
+
+def embed_from_text2(text):
+    global processor,tmodel2
+    if tmodel2 == None:
+        initXLCLIPmodel(model_path2,model_config2)
     inputs = processor(text=text, return_tensors="pt")
     inputs.to(device)
 
-    print("getting
-    outputs =
-    print("finalizing")
+    print("getting embeddings2")
+    outputs = tmodel2(**inputs)
     embeddings = outputs.text_embeds
     return embeddings
 
@@ -76,24 +94,17 @@ def embed_from_text(text):
 
 word = input("type a phrase to generate an embedding for: ")
 
-
-
-embs=emb
+emb1 = embed_from_text(word)
+emb2 = embed_from_text2(word)
 
-print("Shape of
-# Note that programs like shapes such as
-# torch.Size([1, 768])
+print("Shape of results = ",emb1.shape,emb2.shape)
 
-output = "
+output = "generated_XL.safetensors"
 # if single word used, then rename output file
 if all(char.isalpha() for char in word):
-    output=f"{word}
+    output=f"{word}_XL.safetensors"
 print(f"Saving to {output}...")
-save_file({"clip_g":
+save_file({"clip_g": emb2,"clip_l":emb1}, output)
 
-# technically we are saving a shape ([1][1280])
-# whereas official XL embeddings files, are
-# (clip_g) shape ([8][1280])
-# (clip_l) shape ([8][768])
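The commit leaves the four model_path*/model_config* values pointing at local file names that the user still has to populate by hand. As a rough sketch of one way to do that (the repo id stabilityai/stable-diffusion-xl-base-1.0, its diffusers-style text_encoder/ and text_encoder_2/ layout, and the renaming scheme are all assumptions, not part of this commit), the files could be pulled from the hub and copied next to the script:

# Hypothetical setup helper (not part of this commit): fetch the two SDXL
# text encoders from the hub and copy them to the local names that
# generate-embeddingXL.py expects. Repo id and file layout are assumptions.
import shutil
from huggingface_hub import hf_hub_download

REPO = "stabilityai/stable-diffusion-xl-base-1.0"   # assumed source repo

wanted = {
    "text_encoder/model.safetensors":   "text_encoder.safetensors",
    "text_encoder/config.json":         "text_encoder_config.json",
    "text_encoder_2/model.safetensors": "text_encoder_2.safetensors",
    "text_encoder_2/config.json":       "text_encoder_2_config.json",
}

for remote_name, local_name in wanted.items():
    cached = hf_hub_download(repo_id=REPO, filename=remote_name)
    shutil.copy(cached, local_name)   # put the file where the script looks for it
    print("copied", remote_name, "->", local_name)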
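As a quick sanity check of what the script now writes, a minimal sketch (the file name is just the default output from the diff above) that reloads the generated file and inspects the two keys it stores:

# Minimal check of the output file: it should contain a "clip_g" tensor
# (text_embeds from the projection model) and a "clip_l" tensor
# (pooler_output from the plain CLIP text model).
from safetensors.torch import load_file

tensors = load_file("generated_XL.safetensors")   # default output name from the script
for key, tensor in tensors.items():
    print(key, tuple(tensor.shape), tensor.dtype)

The comment block removed at the bottom of the diff already flags the mismatch this check exposes: the script stores one pooled vector per key (a [1, 1280] clip_g and a [1, 768] clip_l), whereas official SDXL embedding files carry per-token shapes such as [8, 1280] and [8, 768].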