#!/usr/bin/env python
""" Work in progress
Plan:
Unlike the other versions of graph-*.py, this one
requires "embeddings.allids.safetensors"
(created by generate-id-embeddings.py)
This covers the full official range of tokenids,
0-49405
It will then ask for 1-2 numeric token IDs.
It will pull the embedding matching those ids, and
graph them
"""
import sys
import json
import torch
from safetensors import safe_open
import PyQt5
import matplotlib
matplotlib.use('QT5Agg') # Set the backend to Qt5Agg (hence the PyQt5 import above)
import matplotlib.pyplot as plt
embed_file = "embeddings.allids.safetensors"
# Fall back to CPU when CUDA is unavailable
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("read in embeddings now",file=sys.stderr)
model = safe_open(embed_file,framework="pt",device="cuda")
embs = model.get_tensor("embeddings")
embs = embs.to(device)  # .to() is not in-place; reassign the result
print("Shape of loaded embeds =", embs.shape)
def embed_from_tokenid(num: int):
    embed = embs[num]
    return embed
fig, ax = plt.subplots()
text1 = input("First tokenid: ")
text2 = input("Second tokenid (or leave blank): ")
emb1 = embed_from_tokenid(int(text1))
print("shape of emb1:", emb1.shape)
graph1 = emb1.tolist()
ax.plot(graph1, label=text1[:20])
if len(text2) > 0:
    emb2 = embed_from_tokenid(int(text2))
    graph2 = emb2.tolist()
    ax.plot(graph2, label=text2[:20])
# Add labels, title, and legend
#ax.set_xlabel('Index')
ax.set_ylabel('Values')
ax.set_title('Comparative Graph of Two Embeddings')
ax.legend()
# Display the graph
print("Pulling up the graph")
plt.show()
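# Example session (the token ids here are illustrative; any ids in
# the 0-49405 range work):
#   First tokenid: 320
#   Second tokenid (or leave blank): 1125
#   -> a window opens plotting both embedding vectors,
#      value vs. embedding-dimension index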