""" Work in progress |
|
Plan: |
|
Modded version of graph-embeddings.py |
|
Just to see if using different CLIP module changes values significantly |
|
(It does not) |
|
This code requires |
|
pip install git+https://github.com/openai/CLIP.git |
|
""" |

import sys
import json
import torch
import clip

import PyQt5  # imported so matplotlib's Qt5 backend has its bindings available
import matplotlib
matplotlib.use('QT5Agg')

import matplotlib.pyplot as plt


CLIPname = "ViT-B/16"

# Use the GPU when available, otherwise fall back to CPU so the script still runs.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

print("loading CLIP model")
model, preprocess = clip.load(CLIPname, device=device)  # preprocess (image transform) is unused here
model.eval()
print("done")


def embed_from_tokenid(num):
    """Embed a prompt whose single content token is overwritten with token id num."""
    # Tokenize a placeholder prompt, then replace its first content token
    # (position 1, just after the start-of-text token) with the requested id.
    tokens = clip.tokenize("dummy").to(device)
    tokens[0][1] = num

    with torch.no_grad():
        embed = model.encode_text(tokens)
    return embed
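
# Example (hypothetical token id): embed_from_tokenid(320) returns the embedding
# of a prompt whose only content token is id 320.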


def embed_from_text(text):
    """Embed a prompt; a leading '#' means the rest of the string is a raw token id."""
    if text[0] == "#":
        print("Converting string to number")
        return embed_from_tokenid(int(text[1:]))

    tokens = clip.tokenize(text).to(device)
    print("Tokens for", text, "=", tokens)

    with torch.no_grad():
        embed = model.encode_text(tokens)
    return embed
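
# Usage: embed_from_text("a photo of a cat") returns a [1, 512] tensor for
# ViT-B/16, while embed_from_text("#320") embeds the raw token id 320 instead.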


fig, ax = plt.subplots()

text1 = input("First word or prompt: ")
text2 = input("Second prompt (or leave blank): ")

print("generating embeddings for each now")
emb1 = embed_from_text(text1)
print("shape of emb1:", emb1.shape)

# Plot each embedding as a curve over its dimension indices; legend labels are
# truncated to the first 20 characters of the prompt.
graph1 = emb1[0].tolist()
ax.plot(graph1, label=text1[:20])

if len(text2) > 0:
    emb2 = embed_from_text(text2)
    graph2 = emb2[0].tolist()
    ax.plot(graph2, label=text2[:20])

ax.set_ylabel('Values')
ax.set_title('Comparative Graph of Two Embeddings')
ax.legend()

print("Pulling up the graph")
plt.show()