reset to ViT-L/14 again
graph-byclip.py  CHANGED  (+3 -3)
@@ -29,8 +29,8 @@ import matplotlib.pyplot as plt
 
 # Available models:
 # 'RN50', 'RN101', 'RN50x4', 'RN50x16', 'RN50x64', 'ViT-B/32', 'ViT-B/16', 'ViT-L/14', 'ViT-L/14@336px'
-
-CLIPname= "ViT-B/16"
+CLIPname= "ViT-L/14"
+#CLIPname= "ViT-B/16"
 #CLIPname= "ViT-L/14@336px"
 
 device=torch.device("cuda")
@@ -66,7 +66,7 @@ def embed_from_text(text):
 fig, ax = plt.subplots()
 
 
-text1 = input("First
+text1 = input("First prompt or #tokenid: ")
 text2 = input("Second prompt(or leave blank): ")
 
 
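For context on where the changed lines sit: the hunk headers reference import matplotlib.pyplot as plt and def embed_from_text(text), so the edited CLIPname presumably feeds a standard clip.load(...) call before the prompts are embedded and plotted. The sketch below shows how those pieces typically fit together; it is an assumption about the surrounding script, not the actual contents of graph-byclip.py (the helper body shown here is illustrative).

import torch
import clip
import matplotlib.pyplot as plt

# Available models:
# 'RN50', 'RN101', 'RN50x4', 'RN50x16', 'RN50x64', 'ViT-B/32', 'ViT-B/16', 'ViT-L/14', 'ViT-L/14@336px'
CLIPname = "ViT-L/14"          # the value this commit resets to
#CLIPname = "ViT-B/16"
#CLIPname = "ViT-L/14@336px"

device = torch.device("cuda")
model, preprocess = clip.load(CLIPname, device=device)  # load the selected CLIP checkpoint

def embed_from_text(text):
    # Assumed helper: tokenize a prompt and return its normalized CLIP text embedding.
    tokens = clip.tokenize([text]).to(device)
    with torch.no_grad():
        emb = model.encode_text(tokens).float()
    return emb / emb.norm(dim=-1, keepdim=True)

fig, ax = plt.subplots()

text1 = input("First prompt or #tokenid: ")
text2 = input("Second prompt(or leave blank): ")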