#!/usr/bin/env python

"""
Work in progress

Plan:
Modded version of graph-embeddings.py
Just to see if using a different CLIP module changes values significantly
(It does not)

This requires
    pip install git+https://github.com/openai/CLIP.git
"""

import sys
import json
import torch
import clip
import PyQt5
import matplotlib

matplotlib.use('QT5Agg')  # Set the backend to Qt5Agg (needs PyQt5)

import matplotlib.pyplot as plt

CLIPname = "ViT-L/14"

device = torch.device("cuda")
print("loading CLIP model")
# clip.load returns the model (already on `device`) and an image
# preprocessing function, not a text processor
model, preprocess = clip.load(CLIPname, device=device)
model.eval()
print("done")


def embed_from_text(text):
    tokens = clip.tokenize(text).to(device)
    with torch.no_grad():
        embed = model.encode_text(tokens)
    return embed


# Leftover from the transformers-based graph-embeddings.py: it expects a
# HuggingFace processor and model.get_text_features(), neither of which
# exists on the openai/CLIP objects loaded above. Unused in this script.
# Expects a SINGLE WORD ONLY.
def standard_embed_calc(text):
    inputs = processor(text=text, return_tensors="pt")
    inputs.to(device)
    with torch.no_grad():
        text_features = model.get_text_features(**inputs)
    embedding = text_features[0]
    return embedding


fig, ax = plt.subplots()

text1 = input("First word or prompt: ")
text2 = input("Second prompt (or leave blank): ")

print("generating embeddings for each now")
emb1 = embed_from_text(text1)
print("shape of emb1:", emb1.shape)
graph1 = emb1[0].tolist()
ax.plot(graph1, label=text1[:20])

if len(text2) > 0:
    emb2 = embed_from_text(text2)
    graph2 = emb2[0].tolist()
    ax.plot(graph2, label=text2[:20])

# Add labels, title, and legend
#ax.set_xlabel('Index')
ax.set_ylabel('Values')
ax.set_title('Comparative Graph of Two Embeddings')
ax.legend()

# Display the graph
print("Pulling up the graph")
plt.show()
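
# Optional: quantify how close the two prompts' embeddings are, to go with
# the visual comparison above. A minimal sketch using torch's built-in
# cosine similarity; since plt.show() blocks, this prints after the plot
# window is closed.
if len(text2) > 0:
    similarity = torch.nn.functional.cosine_similarity(emb1, emb2).item()
    print(f"cosine similarity between the two embeddings: {similarity:.4f}")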