ppbrown committed
Commit
93e438f
1 Parent(s): 9bded46

Upload 2 files

compare-allids-embeds.py ADDED
@@ -0,0 +1,56 @@
+ #!/bin/env python
+
+ """ Work in progress
+ temp utility.
+ Load in two pre-calculated embeddings files.
+ (eg: *.allid.*)
+
+ Go through the full range and calculate distances between each.
+
+ Add up and display
+
+ This covers the full official range of tokenids,
+ 0-49405
+
+ """
+
+
+ import sys
+ import torch
+ from safetensors import safe_open
+
+ file1=sys.argv[1]
+ file2=sys.argv[2]
+
+
+
+ device=torch.device("cuda")
+ print(f"reading {file1} embeddings now",file=sys.stderr)
+ model = safe_open(file1,framework="pt",device="cuda")
+ embs1=model.get_tensor("embeddings")
+ embs1.to(device)
+ print("Shape of loaded embeds =",embs1.shape)
+
+ print(f"reading {file2} embeddings now",file=sys.stderr)
+ model = safe_open(file2,framework="pt",device="cuda")
+ embs2=model.get_tensor("embeddings")
+ embs2.to(device)
+ print("Shape of loaded embeds =",embs2.shape)
+
+ if torch.equal(embs1 , embs2):
+     print("HEY! Both files are identical!")
+     exit(0)
+
+ print(f"calculating distances...")
+
+ # This calculates a full cross matrix of ALL distances to ALL other points
+ # in other tensor
+ ##targetdistances = torch.cdist( embs1,embs2, p=2)
+
+
+ targetdistances = torch.norm(embs2 - embs1, dim=1)
+ print(targetdistances.shape)
+ tl=targetdistances.tolist()
+
+ print(tl[:10])
+
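Note that the docstring above promises to "add up and display" the distances, while the script as committed only prints the first ten entries of the per-token distance vector. A minimal sketch of one way such a summary could look, reusing the targetdistances tensor computed above (not part of the committed file):

    # Sketch only: summarize the per-token L2 distances computed above.
    total = targetdistances.sum().item()    # total drift across all token ids
    mean  = targetdistances.mean().item()   # average per-token distance
    worst = targetdistances.max().item()    # most-changed single token embedding
    print(f"sum={total:.4f}  mean={mean:.4f}  max={worst:.4f}")
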
generate-allid-embeddings.py CHANGED
@@ -1,11 +1,31 @@
  #!/bin/env python
  
- """ Work in progress
- Plan:
- Similar to generate-embeddings.py
- However, instead of reading from a dictionary, just generate by pure
- numeric tokenID
- Save it out
+ """
+ Take a CLIPTextModel-compatible text encoder.
+ Go through the official range of token IDs (0-49405).
+ Generate the official "embedding" tensor for each one.
+ Save the result set to "cliptextmodel.temp.allids.safetensors"
+
+ Defaults to loading openai/clip-vit-large-patch14 from huggingface hub.
+ However, it can take an optional pair of arguments: a .safetensors model, and a config file.
+ RULES of the loader:
+ 1. the model file must appear to be either in the current directory or one down. So,
+    badpath1=some/directory/tree/file.here
+    badpath2=/absolutepath
+ 2. yes, you MUST have a matching config.json file
+ 3. if you have no alternative, you can get away with using pytorch_model.bin
+
+ Sample location for such things that you can download:
+ https://huggingface.co/stablediffusionapi/edge-of-realism/tree/main/text_encoder/
+ If there is a .safetensors AND a .bin file, ignore the .bin file
+
+ You can also convert a single-file model, such as one downloaded from civitai,
+ by using the utility at
+ https://github.com/huggingface/diffusers/blob/main/scripts/convert_original_stable_diffusion_to_diffusers.py
+ Args should look like
+ convert_original_stable_diffusion_to_diffusers.py --checkpoint_file somemodel.safetensors \
+     --dump_path extractdir --to_safetensors --from_safetensors
+
  """
  
  
@@ -13,11 +33,17 @@ import sys
  import json
  import torch
  from safetensors.torch import save_file
- from transformers import CLIPProcessor,CLIPModel
+ from transformers import CLIPProcessor,CLIPModel,CLIPTextModel
  
  clipsrc="openai/clip-vit-large-patch14"
  processor=None
  model=None
+ encfile=None
+ configfile=None
+
+ if len(sys.argv) == 3:
+     encfile=sys.argv[1]
+     configfile=sys.argv[2]
  
  device=torch.device("cuda")
  
@@ -25,22 +51,42 @@ device=torch.device("cuda")
  def init():
      global processor
      global model
+     global encfile
+     global configfile
+
      # Load the processor and model
      print("loading processor from "+clipsrc,file=sys.stderr)
      processor = CLIPProcessor.from_pretrained(clipsrc)
      print("done",file=sys.stderr)
-     print("loading model from "+clipsrc,file=sys.stderr)
-     model = CLIPModel.from_pretrained(clipsrc)
+
+     # originally done this way, but it's not the right one to use
+     #print("loading model from "+clipsrc,file=sys.stderr)
+     #model = CLIPModel.from_pretrained(clipsrc)
+     #print("done",file=sys.stderr)
+     if encfile != None:
+         print("loading model from "+encfile,file=sys.stderr)
+         model = CLIPTextModel.from_pretrained(
+             encfile,config=configfile,local_files_only=True,use_safetensors=True
+         )
+     else:
+         print("loading model from "+clipsrc,file=sys.stderr)
+         model = CLIPTextModel.from_pretrained(clipsrc)
+
      print("done",file=sys.stderr)
  
      model = model.to(device)
  
  
-
+ # "inputs" == magic pre-embedding format
  def embed_from_inputs(inputs):
      with torch.no_grad():
-         text_features = model.get_text_features(**inputs)
-         embedding = text_features[0]
+         # This way is for CLIPModel
+         #text_features = model.get_text_features(**inputs)
+         #embedding = text_features[0]
+
+         outputs = model(**inputs)
+         embeddings = outputs.pooler_output
+         embedding = embeddings
  
      return embedding
  
@@ -55,14 +101,15 @@ for id in range(49405):
      inputs.input_ids[0][1]=id
  
      emb=embed_from_inputs(inputs)
-     emb=emb.unsqueeze(0) # stupid matrix magic to make the cat work
      all_embeddings.append(emb)
      if (id %100) ==0:
          print(id)
  
  embs = torch.cat(all_embeddings,dim=0)
  print("Shape of result = ",embs.shape)
- print("Saving all the things...")
- save_file({"embeddings": embs}, "embeddings.safetensors")
+
+ outputfile="cliptextmodel.temp.allids.safetensors"
+ print(f"Saving the calculations to {outputfile}...")
+ save_file({"embeddings": embs}, outputfile)
  
  
115