ppbrown committed
Commit 2416d2e
Parent: b10d562

Upload 3 files

embeddings.allids.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c0c899126f5f2ab05d0714a5b07461c087c4f43450317ae07b2bcb1fd237512b
+ size 151772248
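
Since embeddings.allids.safetensors is tracked with Git LFS, only the pointer above lives in the repo. After pulling the actual file, a quick way to sanity-check it is to open it with safetensors. A minimal sketch (the file name and the "embeddings" key are taken from generate-id-embeddings.py below; the expected shape follows from the 151772248-byte size and CLIP ViT-L/14's 768-dim text embeddings):

from safetensors import safe_open

# Sanity-check sketch, not part of the commit itself.
with safe_open("embeddings.allids.safetensors", framework="pt", device="cpu") as f:
    embs = f.get_tensor("embeddings")

print(embs.shape)  # expected: torch.Size([49405, 768])
print(embs.dtype)  # expected: torch.float32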
generate-id-embeddings.py ADDED
@@ -0,0 +1,68 @@
+ #!/usr/bin/python3
+
+ """ Work in progress
+ Plan:
+ Similar to generate-embeddings.py.
+ However, instead of reading from a dictionary, just generate an embedding
+ for each pure numeric token ID, then save the whole set out.
+ """
+
+ import sys
+ import torch
+ from safetensors.torch import save_file
+ from transformers import CLIPProcessor, CLIPModel
+
+ clipsrc = "openai/clip-vit-large-patch14"
+ processor = None
+ model = None
+
+ device = torch.device("cuda")
+
+
+ def init():
+     global processor
+     global model
+     # Load the processor and model
+     print("loading processor from " + clipsrc, file=sys.stderr)
+     processor = CLIPProcessor.from_pretrained(clipsrc)
+     print("done", file=sys.stderr)
+     print("loading model from " + clipsrc, file=sys.stderr)
+     model = CLIPModel.from_pretrained(clipsrc)
+     print("done", file=sys.stderr)
+
+     model = model.to(device)
+
+
+ def embed_from_inputs(inputs):
+     with torch.no_grad():
+         text_features = model.get_text_features(**inputs)
+         embedding = text_features[0]
+
+     return embedding
+
+
+ init()
+ inputs = processor(text="dummy", return_tensors="pt")
+ inputs.to(device)
+
+ all_embeddings = []
+
+ for token_id in range(49405):
+     # Overwrite the single content token of "dummy" with the raw numeric ID
+     inputs.input_ids[0][1] = token_id
+
+     emb = embed_from_inputs(inputs)
+     emb = emb.unsqueeze(0)  # add a batch dim so torch.cat can stack the rows
+     all_embeddings.append(emb)
+     if (token_id % 100) == 0:
+         print(token_id)
+
+ embs = torch.cat(all_embeddings, dim=0)
+ print("Shape of result =", embs.shape)
+ print("Saving all the things...")
+ save_file({"embeddings": embs}, "embeddings.safetensors")
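
A note on the trick in the loop above: processor(text="dummy", return_tensors="pt") encodes the prompt as <|startoftext|>, one content token, <|endoftext|>, so index 1 of input_ids is the single slot that gets overwritten with each raw token ID. A minimal standalone sketch to confirm that layout (assuming "dummy" really does map to a single BPE token, as the script relies on):

from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")
inputs = processor(text="dummy", return_tensors="pt")
# Expect shape (1, 3): <|startoftext|> (49406), the "dummy" token, <|endoftext|> (49407)
print(inputs.input_ids)
print(inputs.input_ids.shape)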
graph-byid.py ADDED
@@ -0,0 +1,68 @@
+ #!/usr/bin/env python
+
+ """ Work in progress
+ Plan:
+ Unlike the other versions of graph-*.py, this one
+ requires "embeddings.allids.safetensors"
+ (created by generate-id-embeddings.py).
+ That file covers the full official range of token IDs,
+ 0-49405.
+
+ The script asks for 1-2 numeric token IDs,
+ pulls the embeddings matching those IDs, and
+ graphs them.
+ """
+
+ import sys
+ import torch
+ from safetensors import safe_open
+
+ import PyQt5
+ import matplotlib
+ matplotlib.use('QT5Agg')  # Set the backend to Qt5Agg
+
+ import matplotlib.pyplot as plt
+
+ embed_file = "embeddings.allids.safetensors"
+ device = torch.device("cuda")
+ print("read in embeddings now", file=sys.stderr)
+ model = safe_open(embed_file, framework="pt", device="cuda")
+ embs = model.get_tensor("embeddings")
+ embs = embs.to(device)
+ print("Shape of loaded embeds =", embs.shape)
+
+
+ def embed_from_tokenid(num: int):
+     embed = embs[num]
+     return embed
+
+
+ fig, ax = plt.subplots()
+
+ text1 = input("First tokenid: ")
+ text2 = input("Second tokenid (or leave blank): ")
+
+ emb1 = embed_from_tokenid(int(text1))
+ print("shape of emb1:", emb1.shape)
+
+ graph1 = emb1.tolist()
+ ax.plot(graph1, label=text1[:20])
+
+ if len(text2) > 0:
+     emb2 = embed_from_tokenid(int(text2))
+     graph2 = emb2.tolist()
+     ax.plot(graph2, label=text2[:20])
+
+ # Add labels, title, and legend
+ # ax.set_xlabel('Index')
+ ax.set_ylabel('Values')
+ ax.set_title('Comparative Graph of Two Embeddings')
+ ax.legend()
+
+ # Display the graph
+ print("Pulling up the graph")
+ plt.show()
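
Since graph-byid.py expects raw numeric token IDs at its prompts, a small helper (not part of this commit; it simply reuses the same openai/clip-vit-large-patch14 tokenizer) can translate a word into the ID to type in:

from transformers import CLIPTokenizer

tok = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
ids = tok("cat", add_special_tokens=False).input_ids
print(ids)  # a one-element list when the word is a single BPE token; that number is the ID to enter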