ppbrown committed on
Commit
aea5e02
1 Parent(s): 14b75c0

Upload 2 files

Files changed (2):
  1. README.md +11 -3
  2. graph-textmodels.py +113 -0
README.md CHANGED
@@ -9,14 +9,22 @@ Primary tools are:
 * "graph-embeddings.py": plots graph of full values of two embeddings
 
 
-## calculate-distances.py
+## (clipmodel,cliptextmodel)-calculate-distances.py
 
 Loads the generated embeddings, reads in a word, calculates "distance" to every
 embedding, and then shows the closest "neighbours".
 
-To run this requires the files "embeddings.safetensors" and "dictionary"
+To run this requires the files "embeddings.safetensors" and "dictionary",
+in matching format.
 
-You will need to rename or copy appropriate files for this as mentioned below
+You will need to rename or copy appropriate files for this as mentioned below.
+
+Note that SD models use cliptextmodel, NOT clipmodel.
+
+## graph-textmodels.py
+
+Shows the difference between the same word, embedded by CLIPTextModel
+vs CLIPModel.
 
 ## graph-embeddings.py
 
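The calculate-distances script itself is not part of this commit, but the neighbour lookup the README describes is straightforward to sketch. A minimal, hypothetical version might look like the following, assuming "embeddings.safetensors" holds a single (N, 768) tensor under the key "embeddings" and "dictionary" is a plain-text file with one word per line in matching order (the key name and file layout are assumptions, not taken from this repo):

# Hypothetical sketch of the neighbour lookup described above.
# Assumed layout: "embeddings.safetensors" stores an (N, 768) tensor under
# the key "embeddings"; "dictionary" has one word per line, in the same order.
import torch
from safetensors.torch import load_file

embeddings = load_file("embeddings.safetensors")["embeddings"]  # (N, 768)
words = open("dictionary").read().splitlines()                  # N words

def nearest_neighbours(target_word, k=10):
    target = embeddings[words.index(target_word)]
    # Euclidean distance from the target embedding to every embedding
    distances = torch.norm(embeddings - target, dim=1)
    best = torch.argsort(distances)[:k + 1]  # first hit is the word itself
    return [(words[i], distances[i].item()) for i in best]

for word, dist in nearest_neighbours("cat"):
    print(f"{word}: {dist:.4f}")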
graph-textmodels.py ADDED
@@ -0,0 +1,113 @@
#!/usr/bin/env python

"""
Input a single word, and it will graph it,
as embedded by CLIPModel vs CLIPTextModel.

It will then print out the "distance" between the two,
and then show you a coordinate graph.

You will usually want to zoom in to actually see the differences.
"""

import sys
import torch
from transformers import CLIPProcessor, CLIPModel, CLIPTextModel

import logging
# Silence the noisy warnings printed while the CLIP models load
logging.disable(logging.WARNING)

import PyQt5
import matplotlib
matplotlib.use('QT5Agg')  # Set the backend to Qt5Agg

import matplotlib.pyplot as plt


clipsrc = "openai/clip-vit-large-patch14"

# Not used by this script
overlaymodel = "text_encoder.bin"
overlaymodel2 = "text_encoder2.bin"

processor = None
clipmodel = None
cliptextmodel = None

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

print("loading processor from " + clipsrc, file=sys.stderr)
processor = CLIPProcessor.from_pretrained(clipsrc)
print("done", file=sys.stderr)


def clipmodel_one_time(text):
    global clipmodel
    if clipmodel is None:
        print("loading CLIPModel from " + clipsrc, file=sys.stderr)
        clipmodel = CLIPModel.from_pretrained(clipsrc)
        clipmodel = clipmodel.to(device)
        print("done", file=sys.stderr)

    inputs = processor(text=text, return_tensors="pt")
    inputs = inputs.to(device)
    with torch.no_grad():
        # Pooled text output passed through CLIP's text_projection layer
        text_features = clipmodel.get_text_features(**inputs)
    return text_features
    # shape = (1, 768)


def cliptextmodel_one_time(text):
    global cliptextmodel
    if cliptextmodel is None:
        print("loading CLIPTextModel from " + clipsrc, file=sys.stderr)
        cliptextmodel = CLIPTextModel.from_pretrained(clipsrc)
        cliptextmodel = cliptextmodel.to(device)
        print("done", file=sys.stderr)
    inputs = processor(text=text, return_tensors="pt")
    inputs = inputs.to(device)
    with torch.no_grad():
        outputs = cliptextmodel(**inputs)
    # Raw pooled hidden state, with no projection applied
    embeddings = outputs.pooler_output
    return embeddings
    # shape is (1, 768)


def print_distance(emb1, emb2):
    # Euclidean (L2) distance between the two embedding vectors
    targetdistance = torch.norm(emb1 - emb2)
    print("DISTANCE:", targetdistance)


def prompt_for_word():
    fig, ax = plt.subplots()

    text1 = input("Word or prompt: ")
    if text1 == "q":
        sys.exit(0)

    print("generating embeddings for each now")

    emb1 = clipmodel_one_time(text1)[0]
    graph1 = emb1.tolist()
    ax.plot(graph1, label="clipmodel")

    emb2 = cliptextmodel_one_time(text1)[0]
    graph2 = emb2.tolist()
    ax.plot(graph2, label="cliptextmodel")

    print_distance(emb1, emb2)

    # Add labels, title, and legend
    # ax.set_xlabel('Index')
    ax.set_ylabel('Values')
    ax.set_title('Graph embedding from std libs')
    ax.legend()

    # Display the graph
    print("Pulling up the graph. To calculate more distances, close graph")
    plt.show()
    # Don't know why plt.show only works once!


while True:
    prompt_for_word()
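A note on why the two curves differ at all: in the transformers implementation of CLIP (worth confirming against your installed version), CLIPModel.get_text_features() is the same pooled output that CLIPTextModel returns, passed through one extra text_projection linear layer. A standalone sketch to check that relationship:

# Sketch: verify that CLIPModel's text features are CLIPTextModel's
# pooler_output passed through CLIPModel.text_projection.
# (Relies on transformers' CLIP internals; confirm against your version.)
import torch
from transformers import CLIPProcessor, CLIPModel, CLIPTextModel

clipsrc = "openai/clip-vit-large-patch14"
processor = CLIPProcessor.from_pretrained(clipsrc)
clipmodel = CLIPModel.from_pretrained(clipsrc)
cliptextmodel = CLIPTextModel.from_pretrained(clipsrc)

inputs = processor(text="cat", return_tensors="pt")
with torch.no_grad():
    features = clipmodel.get_text_features(**inputs)
    pooled = cliptextmodel(**inputs).pooler_output
    projected = clipmodel.text_projection(pooled)

print(torch.allclose(features, projected, atol=1e-5))  # expected: True

If that prints True, the per-coordinate differences the graph shows come entirely from that projection layer.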