# tokenspace / graph-byclip.py
# (Hugging Face file-page header retained from the web scrape, commented out
#  so the script parses: uploaded by ppbrown, commit baf29fc verified,
#  "raw / history / blame", 1.92 kB)
#!/bin/env python
""" Work in progress
Plan:
Modded version of graph-embeddings.py
Just to see if using different CLIP module changes values significantly
(It does not)
This code requires
pip install git+https://github.com/openai/CLIP.git
"""
import sys
import json
import torch
import clip
import PyQt5
import matplotlib
matplotlib.use('QT5Agg') # Set the backend to Qt5Agg (requires PyQt5)
import matplotlib.pyplot as plt
# Available models:
# 'RN50', 'RN101', 'RN50x4', 'RN50x16', 'RN50x64', 'ViT-B/32', 'ViT-B/16', 'ViT-L/14', 'ViT-L/14@336px'
# Pick which CLIP variant to compare against graph-embeddings.py output.
#CLIPname= "ViT-L/14"
CLIPname= "ViT-B/16"
#CLIPname= "ViT-L/14@336px"
# NOTE(review): hard-requires a CUDA GPU; no CPU fallback is attempted.
device=torch.device("cuda")
print("loading CLIP model")
# clip.load returns (model, preprocess); downloads weights on first use.
model, processor = clip.load(CLIPname,device=device)
# .cuda() is redundant with device above but harmless; eval() disables dropout etc.
model.cuda().eval()
print("done")
def embed_from_tokenid(num):
    """Return the CLIP text embedding produced by a single raw token id.

    Hack: tokenize a throwaway word, then overwrite the first content
    slot of the token buffer (index 1 — presumably right after the
    start-of-text marker; confirm against clip.tokenize) with *num*.
    """
    # A bit sleazy, but, eh.
    toks = clip.tokenize("dummy").to(device)
    toks[0][1] = num
    with torch.no_grad():
        result = model.encode_text(toks)
    return result
def embed_from_text(text):
    """Return the CLIP text embedding for *text*.

    Special case: a leading '#' means the rest of the string is a raw
    token id, which is delegated to embed_from_tokenid().
    """
    # startswith() instead of text[0] == "#": the original raised
    # IndexError when the user entered an empty string.
    if text.startswith("#"):
        print("Converting string to number")
        return embed_from_tokenid(int(text[1:]))
    tokens = clip.tokenize(text).to(device)
    print("Tokens for", text, "=", tokens)
    with torch.no_grad():
        embed = model.encode_text(tokens)
    return embed
# Interactive driver: prompt for one or two texts, embed each, and plot
# the embedding vectors on a shared axis for visual comparison.
fig, ax = plt.subplots()

text1 = input("First word or prompt: ")
text2 = input("Second prompt(or leave blank): ")

print("generating embeddings for each now")
emb1 = embed_from_text(text1)
print("shape of emb1:", emb1.shape)
ax.plot(emb1[0].tolist(), label=text1[:20])

# Second curve only when the user actually entered something.
if text2:
    emb2 = embed_from_text(text2)
    ax.plot(emb2[0].tolist(), label=text2[:20])

# Labels, title, and legend
#ax.set_xlabel('Index')
ax.set_ylabel('Values')
ax.set_title('Comparative Graph of Two Embeddings')
ax.legend()

# Display the graph
print("Pulling up the graph")
plt.show()