#!/usr/bin/env python

"""
 (Similar to graph-embeddings, but for SDXL)

 This program requires two files as arguments: 
   A text encoder model (SDXL style), and matching config.json

 You can get the fancy SDXL "vit-bigg" based text encoding model and config, from
 https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/tree/main/text_encoder_2
 Take the config.json and one of the .safetensors files

 The sd1.5 encoding model resides at
 https://huggingface.co/runwayml/stable-diffusion-v1-5/tree/main/text_encoder

 Once it has read those files in, it asks for 1-2 text prompts, and then graphs them.
 (and pops up a prog to display the output)

"""
import sys
import torch
from transformers import CLIPProcessor, CLIPTextModel

if len(sys.argv) < 3:
    print("Error: requires a text encoder model file and its config.json as arguments")
    sys.exit(1)

# 1. Load the pretrained text encoder model
# Note that it doesn't like a leading "/" in the name!!
#
model_path = sys.argv[1]
model_config = sys.argv[2]
print("loading", model_path)
model = CLIPTextModel.from_pretrained(
        model_path, config=model_config, local_files_only=True, use_safetensors=True)


# This is the tokenizer for SD1.x and SDXL
CLIPname = "openai/clip-vit-large-patch14"
print("getting processor", CLIPname)
processor = CLIPProcessor.from_pretrained(CLIPname)

def embed_from_text(text):
    print("getting tokens for", text)
    inputs = processor(text=text, return_tensors="pt")
    # no_grad: we only want the embeddings, not a gradient graph
    with torch.no_grad():
        outputs = model(**inputs)
    embeddings = outputs.pooler_output
    return embeddings
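
# Note: the width of the pooled embedding depends on which model you loaded.
# As a rough guide (not verified against every checkpoint), the SD1.5 ViT-L/14
# text encoder gives 768-wide vectors, while the SDXL "vit-bigg" text_encoder_2
# gives 1280-wide vectors; the shape printout further down will confirm.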


import PyQt5   # imported up front so we fail early if the Qt bindings are missing
import matplotlib
matplotlib.use('QT5Agg')  # Set the backend to Qt5Agg

import matplotlib.pyplot as plt


fig, ax = plt.subplots()


text1 = input("First prompt: ")
text2 = input("Second prompt (or leave blank): ")


emb1 = embed_from_text(text1)
print("shape of emb1:", emb1.shape)

graph1 = emb1[0].tolist()
ax.plot(graph1, label=text1[:20])

if len(text2) > 0:
    emb2 = embed_from_text(text2)
    graph2 = emb2[0].tolist()
    ax.plot(graph2, label=text2[:20])

# Add labels, title, and legend
#ax.set_xlabel('Index')
ax.set_ylabel('Values')
ax.set_title(f"Graph of Embeddings in {model_path}")
ax.legend()

# Display the graph
print("Pulling up the graph")
plt.show()