""" Work in progress |
|
Plan: |
|
Unlike the other versions of graph-*.py, this one |
|
requires "embeddings.allids.safetensors" |
|
(created by generate-id-embeddings.py) |
|
This covers the full official range of tokenids, |
|
0-49405 |
|
|
|
It will then ask for 1-2 numeric token IDs. |
|
It will pull the embedding matching those ids, and |
|
graph them |
|
""" |
|
|
|
|
|
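
# For reference, a minimal sketch of how the input file could be produced
# (hypothetical; the real generate-id-embeddings.py may differ, but the
# tensor key "embeddings" matches what this script reads back below):
#
#   from safetensors.torch import save_file
#   # all_embs: a [num_token_ids, hidden_dim] tensor, one row per token ID
#   save_file({"embeddings": all_embs}, "embeddings.allids.safetensors")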

import sys

import torch
from safetensors import safe_open

import PyQt5  # imported so the script fails fast if the Qt5 backend is unavailable
import matplotlib
matplotlib.use('QT5Agg')

import matplotlib.pyplot as plt

embed_file = "embeddings.allids.safetensors"

device = torch.device("cuda")

print("read in embeddings now", file=sys.stderr)
model = safe_open(embed_file, framework="pt", device="cuda")
embs = model.get_tensor("embeddings")
embs = embs.to(device)  # .to() is not in-place; the result must be assigned back
print("Shape of loaded embeds =", embs.shape)
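
# Note: this script assumes a CUDA GPU. A CPU fallback (an untested sketch)
# would be:
#   device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#   model = safe_open(embed_file, framework="pt", device=str(device))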


def embed_from_tokenid(num: int):
    # Token ID doubles as the row index into the embedding matrix.
    embed = embs[num]
    return embed
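
# For example, embed_from_tokenid(320) returns a 1-D tensor whose length is
# the text encoder's hidden dimension (768 for CLIP ViT-L; the exact size
# depends on the model that generated the embeddings file).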

fig, ax = plt.subplots()

text1 = input("First tokenid: ")
text2 = input("Second tokenid (or leave blank): ")

emb1 = embed_from_tokenid(int(text1))
print("shape of emb1:", emb1.shape)

graph1 = emb1.tolist()  # tolist() copies the tensor to a plain Python list for plotting
ax.plot(graph1, label=text1[:20])

if len(text2) > 0:
    emb2 = embed_from_tokenid(int(text2))
    graph2 = emb2.tolist()
    ax.plot(graph2, label=text2[:20])

ax.set_ylabel('Values')
ax.set_title('Comparative Graph of Two Embeddings')
ax.legend()

print("Pulling up the graph")
plt.show()
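
# Example session (a sketch; the token IDs are arbitrary valid IDs, and the
# printed shapes depend on the model behind the embeddings file):
#   $ python this_script.py
#   read in embeddings now
#   Shape of loaded embeds = torch.Size([49406, 768])
#   First tokenid: 320
#   Second tokenid (or leave blank): 9999
#   shape of emb1: torch.Size([768])
#   Pulling up the graph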