ppbrown committed
Commit
805d7ed
1 Parent(s): f444652

Upload 2 files

embeddingsXL.unum.eng.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0e634bd6b3a7f2d02fe70ef93133d46eba58f43e8893fdd569ba99a568a3714b
+ size 104074336
generate-dict-embeddingsXL.py ADDED
@@ -0,0 +1,133 @@
+ #!/bin/env python
+
+ """
+ (SDXL counterpart of "cliptextmodel-generate-embeddings.py".
+ Not following that name, because we don't use "cliptextmodel")
+
+ Takes filenames of an SDXL clip-g type text_encoder2 model and its config file.
+ Reads in a wordlist from "dictionary".
+ Generates the official "embedding" tensor for each word.
+ Saves the result set to "{outputfile}".
+
+ Defaults to loading openai/clip-vit-large-patch14 from the huggingface hub,
+ for purposes of the tokenizer, since that's what SDXL does anyway.
+
+ RULES of the loader:
+ 1. The text_encoder2 model file must appear to be either
+    in the current directory or one level down. So, do NOT use
+      badpath1=some/directory/tree/file.here
+      badpath2=/absolutepath
+ 2. Yes, you MUST have a matching config.json file.
+ 3. If you have no safetensors alternative, you can get away with using pytorch_model.bin.
+
+ Sample location for such things that you can download:
+   https://huggingface.co/stablediffusionapi/edge-of-realism/tree/main/text_encoder/
+ If there is a .safetensors AND a .bin file, ignore the .bin file.
+
+ Alternatively, you can convert a single-file model, such as one downloaded from civitai,
+ by using the utility at
+   https://github.com/huggingface/diffusers/blob/main/scripts/convert_original_stable_diffusion_to_diffusers.py
+ Args should look like
+   convert_original_stable_diffusion_to_diffusers.py \
+     --checkpoint_path somemodel.safetensors \
+     --dump_path extractdir --to_safetensors --from_safetensors
+ """
+
+ outputfile="embeddingsXL.temp.safetensors"
+
+ import sys
+ import torch
+ from safetensors.torch import save_file
+ from transformers import CLIPProcessor, CLIPTextModel, CLIPTextModelWithProjection
+
+ processor=None
+
+ tmodel2=None
+ model_path2=None
+ model_config2=None
+
+ if len(sys.argv) == 3:
+     model_path2=sys.argv[1]
+     model_config2=sys.argv[2]
+ else:
+     print("You have to give the names of the model file and the config file")
+     sys.exit(1)
+
+ device=torch.device("cuda")
+
+
+ def initXLCLIPmodel(model_path,model_config):
+     global tmodel2,processor
+     # yes, oddly they all use the same tokenizer, basically
+     processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")
+
+     print("loading",model_path)
+     tmodel2 = CLIPTextModelWithProjection.from_pretrained(model_path,config=model_config,local_files_only=True,use_safetensors=True)
+     tmodel2.to(device)
+
+
+ def embed_from_text2(text):
+     global processor,tmodel2
+     inputs = processor(text=text, return_tensors="pt")
+     inputs.to(device)
+
+     with torch.no_grad():
+         outputs = tmodel2(**inputs)
+         embeddings = outputs.text_embeds
+     return embeddings
+
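+ # Note: outputs.text_embeds from CLIPTextModelWithProjection is the pooled,
+ # projected embedding (shape [batch, projection_dim], which is 1280 for
+ # SDXL's CLIP-G encoder), not the per-token hidden states.
+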
+
+ # "inputs" == the already-tokenized, pre-embedding format
+ def embed_from_inputs(inputs):
+     global processor,tmodel2
+     with torch.no_grad():
+         outputs = tmodel2(**inputs)
+         embedding = outputs.text_embeds
+
+     return embedding
+
+
+ initXLCLIPmodel(model_path2,model_config2)
+ inputs = processor(text="dummy", return_tensors="pt")
+ inputs.to(device)
+
+
+ with open("dictionary","r") as f:
+     tokendict = f.readlines()
+ tokendict = [token.strip() for token in tokendict] # Remove trailing newlines
+
+
+ count=1
+ all_embeddings = []
+
+ for word in tokendict:
+     emb = embed_from_text2(word)
+     emb=emb.unsqueeze(0) # add a leading dim so the final cat stacks to [N, 1, dim]
+     all_embeddings.append(emb)
+     count+=1
+     if (count %100) ==0:
+         print(count)
+
+
+ """
+ for id in range(49405):
+     inputs.input_ids[0][1]=id
+
+     emb=embed_from_inputs(inputs)
+     all_embeddings.append(emb)
+     if (id %100) ==0:
+         print(id)
+ """
+
+ embs = torch.cat(all_embeddings,dim=0)
+ print("Shape of result = ",embs.shape)
+
+ print(f"Saving the calculations to {outputfile}...")
+ save_file({"embeddings": embs}, outputfile)
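
A minimal sketch of reading the generated file back, assuming the same "dictionary" file in the same order (the words themselves are not stored in the .safetensors output, so the word-to-row mapping below is a reconstruction):

# Hypothetical companion snippet, not part of the commit.
from safetensors.torch import load_file

data = load_file("embeddingsXL.temp.safetensors")
embs = data["embeddings"]   # shape [N, 1, 1280], given the unsqueeze in the generator

with open("dictionary", "r") as f:
    words = [w.strip() for w in f.readlines()]

idx = words.index("cat")    # any word known to be in the dictionary
print(words[idx], embs[idx].shape)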