ppbrown committed on
Commit
195f981
1 Parent(s): 537a599

Util to find top token in each dimension

Files changed (1)
  1. generate-allid-toptokens.py +55 -0
generate-allid-toptokens.py ADDED
@@ -0,0 +1,55 @@
+ #!/usr/bin/env python
+
+ """
+ CONCEPT:
+ Load in a precalculated embeddings file of all the token ids (0-49405)
+ (see "generate-allid-embeddings[XL].py")
+
+ For each dimension, calculate which token id has the highest value.
+ Print out the list, keyed by dimension.
+
+ In theory, this should auto-adjust whether the embeddings file
+ is SD or SDXL (clip_l or clip_g).
+ """
+
+ import sys
+ import json
+ import torch
+ from safetensors import safe_open
+
+ file1 = sys.argv[1]  # safetensors file containing an "embeddings" tensor
+ file2 = sys.argv[2]  # JSON file mapping token string -> token id
+
+ print(f"reading in json from {file2} now", file=sys.stderr)
+ with open(file2, "r") as file:
+     json_data = json.load(file)
+
+ # Invert the vocab so token strings can be looked up by token id
+ token_names = {v: k for k, v in json_data.items()}
+
+ # print(token_names)
+
+ device = torch.device("cuda")
+ print(f"reading {file1} embeddings now", file=sys.stderr)
+ model = safe_open(file1, framework="pt", device="cuda")
+ embs1 = model.get_tensor("embeddings")
+ embs1 = embs1.to(device)
+ print("Shape of loaded embeds =", embs1.shape)
+
+ print("finding top token per dimension...", file=sys.stderr)
+
+ # argmax over the token axis: for each embedding dimension,
+ # find the id of the token with the highest value in that dimension
+ indices = torch.argmax(embs1, dim=0)
+
+ print("Shape of results =", indices.shape, file=sys.stderr)
+
+ indices = indices.tolist()
+
+ for counter, token_num in enumerate(indices):
+     # print("num:", token_num)
+     print(counter, token_names.get(token_num))
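
For reference, here is a minimal, self-contained sketch of the core operation the script performs. The tensor below is a toy example; its shape and values are made up for illustration and are not part of the commit:

    import torch

    # Toy "vocab" of 4 tokens with 3-dimensional embeddings,
    # one row per token id.
    embs = torch.tensor([
        [0.1, 0.9, 0.2],  # token id 0
        [0.8, 0.1, 0.3],  # token id 1
        [0.2, 0.4, 0.7],  # token id 2
        [0.5, 0.3, 0.1],  # token id 3
    ])

    # argmax over dim=0 (the token axis) gives, for each embedding
    # dimension, the id of the token with the largest value there.
    top_ids = torch.argmax(embs, dim=0)
    print(top_ids.tolist())  # [1, 0, 2]

Assuming the embeddings file comes from the companion generate-allid-embeddings[XL].py script and the JSON file is the tokenizer's token-to-id vocab, the expected invocation would be along the lines of: python generate-allid-toptokens.py embeddings.safetensors vocab.json (both filenames here are placeholders).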