ppbrown committed
Commit 7eddabc
1 Parent(s): 4291c21

Upload 3 files

Files changed (3)
  1. README.md +9 -2
  2. graph-embeddings.py +79 -0
  3. requirements.txt +2 -0
README.md CHANGED
@@ -3,8 +3,10 @@
  This directory contains utilities for the purpose of browsing the
  "token space" of CLIP ViT-L/14

- Primary tool is "generate-distances.py",
- which allows command-line browsing of words and their neighbours
+ Primary tools are:
+
+ * "generate-distances.py": allows command-line browsing of words and their neighbours
+ * "graph-embeddings.py": plots a graph of the full values of two embeddings


  ## generate-distances.py
@@ -16,6 +18,11 @@ To run this requires the files "embeddings.safetensors" and "dictionary"

  You will need to rename or copy appropriate files for this as mentioned below

+ ## graph-embeddings.py
+
+ Run the script. It will ask you for two text strings.
+ Once you enter both, it will plot the graph and display it for you.
+
  ### embeddings.safetensors

  You can either copy one of the provided files, or generate your own.
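
As described in the README addition above, graph-embeddings.py is interactive. A sample session might look like the following; "cat" and "dog" are just example inputs, and the model-loading progress messages (which the script writes to stderr) are omitted:

    $ python3 graph-embeddings.py
    First word or prompt? cat
    Second word or prompt? dog
    generating embeddings for each now
    Pulling up the graph

A Qt window with the comparative plot then opens.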
graph-embeddings.py ADDED
@@ -0,0 +1,79 @@
+ #!/usr/bin/python3
+
+ """ Work in progress
+ Plan:
+ Generate two embeddings, from text prompts.
+ Create comparative graph of their values
+ """
+
+
+ import sys
+ import json
+ import torch
+ from transformers import CLIPProcessor,CLIPModel
+
+ import PyQt5
+ import matplotlib
+ matplotlib.use('QT5Agg')  # Set the backend to Qt5Agg (requires PyQt5)
+
+ import matplotlib.pyplot as plt
+
+
+ clipsrc="openai/clip-vit-large-patch14"
+ processor=None
+ model=None
+
+ device=torch.device("cuda")
+
+
+ def init():
+     global processor
+     global model
+     # Load the processor and model
+     print("loading processor from "+clipsrc,file=sys.stderr)
+     processor = CLIPProcessor.from_pretrained(clipsrc)
+     print("done",file=sys.stderr)
+     print("loading model from "+clipsrc,file=sys.stderr)
+     model = CLIPModel.from_pretrained(clipsrc)
+     print("done",file=sys.stderr)
+
+     model = model.to(device)
+
+ # Calculate the CLIP text embedding for a single word or prompt
+ def standard_embed_calc(text):
+     inputs = processor(text=text, return_tensors="pt")
+     inputs.to(device)
+     with torch.no_grad():
+         text_features = model.get_text_features(**inputs)
+         embedding = text_features[0]
+     return embedding
+
+
+ init()
+
+ text1 = input("First word or prompt? ")
+ text2 = input("Second word or prompt? ")
+
+
+ print("generating embeddings for each now")
+ emb1 = standard_embed_calc(text1)
+ emb2 = standard_embed_calc(text2)
+
+ graph1=emb1.tolist()
+ graph2=emb2.tolist()
+
+ fig, ax = plt.subplots()
+
+ # Plot the two embeddings on the same axes, labelled with the entered text
+ ax.plot(graph1, label=text1[:20])
+ ax.plot(graph2, label=text2[:20])
+
+ # Add labels, title, and legend
+ #ax.set_xlabel('Index')
+ ax.set_ylabel('Values')
+ ax.set_title('Comparative Graph of Two Embeddings')
+ ax.legend()
+
+ # Display the graph
+ print("Pulling up the graph")
+ plt.show()
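
The script as committed assumes a CUDA device and a working PyQt5 display. As a rough sketch (not part of this commit), the same plot could be produced on a CPU-only or headless machine by falling back to the CPU and saving the figure to a file; "cat"/"dog" and the output filename are just example values:

    #!/usr/bin/python3
    # Hypothetical headless variant: CPU fallback + file output instead of plt.show()
    import torch
    import matplotlib
    matplotlib.use('Agg')        # non-interactive backend, no PyQt5 required
    import matplotlib.pyplot as plt
    from transformers import CLIPProcessor, CLIPModel

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    clipsrc = "openai/clip-vit-large-patch14"
    processor = CLIPProcessor.from_pretrained(clipsrc)
    model = CLIPModel.from_pretrained(clipsrc).to(device)

    def embed(text):
        # Same calculation as standard_embed_calc() in the script above
        inputs = processor(text=text, return_tensors="pt").to(device)
        with torch.no_grad():
            return model.get_text_features(**inputs)[0]

    emb1 = embed("cat")          # example prompt
    emb2 = embed("dog")          # example prompt

    fig, ax = plt.subplots()
    ax.plot(emb1.cpu().tolist(), label="cat")
    ax.plot(emb2.cpu().tolist(), label="dog")
    ax.set_ylabel('Values')
    ax.set_title('Comparative Graph of Two Embeddings')
    ax.legend()
    fig.savefig("embeddings.png")   # write the plot to a file

The Agg backend avoids the need for a display server, which can be useful when running on a remote GPU machine.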
requirements.txt CHANGED
@@ -1,3 +1,5 @@
  torch
  safetensors
  transformers
+ PyQt5
+ matplotlib
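
With the two new entries, all dependencies can be installed in the usual way, e.g.:

    pip install -r requirements.txt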