File size: 1,501 Bytes
3523378
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
#!/usr/bin/env python
# BUG FIX: was "#!/bin/env python" -- env lives in /usr/bin on most systems.

""" Work in progress
Plan:
    Take a pre-calculated embeddings file.
    Calculate an average distance-from-origin across ALL IDs, and graph that.
    Typically, you would use
   "embeddings.allids.safetensors"
   This covers the full official range of tokenids, 0-49405
   But, you could use a partial file

"""


# Default file name, kept for reference; always overridden by the CLI argument below.
#embed_file="embeddings.allids.safetensors"
embed_file="cliptextmodel.embeddings.allids.safetensors"


import sys

# Require exactly one argument: the path to an embeddings .safetensors file.
if len(sys.argv) != 2:
    print("ERROR: Expect an embeddings.safetensors file as argument")
    sys.exit(1)
embed_file = sys.argv[1]

import torch
from safetensors import safe_open

import PyQt5
import matplotlib
# Select the Qt5 backend before pyplot is imported.
matplotlib.use('QT5Agg')

import matplotlib.pyplot as plt

# Fall back to CPU so the script still runs on machines without CUDA.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"reading {embed_file} embeddings now", file=sys.stderr)
model = safe_open(embed_file, framework="pt", device=device.type)
embs = model.get_tensor("embeddings")
# BUG FIX: Tensor.to() is not in-place; the original discarded its result.
embs = embs.to(device)
print("Shape of loaded embeds =", embs.shape)

def embed_from_tokenid(num: int) -> torch.Tensor:
    """Return the embedding vector for token id *num* from the loaded table."""
    return embs[num]



fig, ax = plt.subplots()


# Which per-dimension statistic to reduce to; renamed from `type`, which
# shadowed the builtin of the same name. Swap the commented lines below
# to graph variance instead of mean.
#stat_name = "variance"
stat_name = "mean"
print(f"calculating {stat_name}...")

# Reduce across the token axis (dim=0): one value per embedding dimension.
#emb1 = torch.var(embs, dim=0)

emb1 = torch.mean(embs, dim=0)

print("shape of emb1:", emb1.shape)

graph1 = emb1.tolist()
ax.plot(graph1, label=f"{stat_name} of each all embedding")


# Add labels, title, and legend
#ax.set_xlabel('Index')
ax.set_ylabel('Values')
ax.set_title(f'Graph of {embed_file}')
ax.legend()

# Display the graph (blocks until the window is closed)
print("Pulling up the graph")
plt.show()