ppbrown committed
Commit baf29fc
1 Parent(s): 0ccea15

Upload graph-byclip.py

Files changed (1)
  1. graph-byclip.py +21 -11
graph-byclip.py CHANGED
@@ -5,7 +5,7 @@ Plan:
 Modded version of graph-embeddings.py
 Just to see if using different CLIP module changes values significantly
 (It does not)
-This requires
+This code requires
 pip install git+https://github.com/openai/CLIP.git
 """
 
@@ -21,7 +21,11 @@ matplotlib.use('QT5Agg') # Set the backend to TkAgg
 
 import matplotlib.pyplot as plt
 
-CLIPname= "ViT-L/14"
+# Available models:
+# 'RN50', 'RN101', 'RN50x4', 'RN50x16', 'RN50x64', 'ViT-B/32', 'ViT-B/16', 'ViT-L/14', 'ViT-L/14@336px'
+#CLIPname= "ViT-L/14"
+CLIPname= "ViT-B/16"
+#CLIPname= "ViT-L/14@336px"
 
 device=torch.device("cuda")
 print("loading CLIP model")
@@ -29,22 +33,28 @@ model, processor = clip.load(CLIPname,device=device)
 model.cuda().eval()
 print("done")
 
-def embed_from_text(text):
-    tokens = clip.tokenize(text).to(device)
+def embed_from_tokenid(num):
+    # A bit sleazy, but, eh.
+    tokens = clip.tokenize("dummy").to(device)
+    tokens[0][1]=num
 
     with torch.no_grad():
        embed = model.encode_text(tokens)
     return embed
 
 
-# Expect SINGLE WORD ONLY
-def standard_embed_calc(text):
-    inputs = processor(text=text, return_tensors="pt")
-    inputs.to(device)
+
+def embed_from_text(text):
+    if text[0]=="#":
+        print("Converting string to number")
+        return embed_from_tokenid(int(text[1:]))
+
+    tokens = clip.tokenize(text).to(device)
+    print("Tokens for",text,"=",tokens)
+
     with torch.no_grad():
-        text_features = model.get_text_features(**inputs)
-        embedding = text_features[0]
-    return embedding
+       embed = model.encode_text(tokens)
+    return embed
 
 
 fig, ax = plt.subplots()
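
For reference, the model-name list hardcoded in the new comment block can be queried from the openai/CLIP package itself. A minimal sketch, assuming the pip install line from the docstring has been run; clip.available_models() and clip.load() are the package's actual entry points, and "ViT-B/16" here just mirrors the CLIPname the commit switches to:

# Sketch: ask the CLIP package for its model names instead of
# relying on the hardcoded comment in the diff above.
import torch
import clip

# Returns the downloadable model names, e.g.
# ['RN50', 'RN101', ..., 'ViT-B/16', 'ViT-L/14', 'ViT-L/14@336px']
print(clip.available_models())

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# clip.load() returns the model plus an image-preprocessing transform.
# The script binds that second value to "processor", but it is only
# needed for image inputs, not for text embedding.
model, preprocess = clip.load("ViT-B/16", device=device)
model.eval()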
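The new embed_from_tokenid() works by tokenizing a throwaway prompt and overwriting slot 1 (the first slot after the start-of-text token) with the requested id, and embed_from_text() routes any string starting with "#" through it. A hypothetical round-trip check of that convention follows; it assumes the script above has already run, so model, device, embed_from_text, and embed_from_tokenid are in scope, and the exact match only holds when both the probe word and "dummy" encode to a single BPE token:

# Hypothetical round-trip check for the "#<tokenid>" convention added above.
word = "cat"
tokens = clip.tokenize(word)      # shape [1, 77]: <start>, token id(s), <end>, padding
token_id = int(tokens[0][1])      # slot 1 holds the word's id if it is one token

e_text = embed_from_text(word)              # normal text path
e_id   = embed_from_text(f"#{token_id}")    # "#NNNN" path -> embed_from_tokenid()

# If "dummy" also encodes to a single token, both paths build the same
# 77-slot sequence, so the two embeddings should match exactly.
sim = torch.nn.functional.cosine_similarity(e_text, e_id)
print("cosine similarity:", sim.item())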