Update app.py
app.py CHANGED
@@ -69,7 +69,22 @@ pipe = pipe.to(device)
 
 model = CLIPDensePredT(version='ViT-B/16', reduce_dim=64, complex_trans_conv=True)
 model.eval() #.half()
-
+
+
+weightsPATH = './clipseg/weights/rd64-uni.pth'
+
+model1 = torch.load(weightsPATH)
+print ("Torch load : ", model1)
+
+state = {'model': model.state_dict()}
+torch.save(state, weightsPATH)
+
+model.load_state_dict(torch.load(weightsPATH['model'], map_location=torch.device(device)), strict=False) #False
+#model.load_state_dict(torch.load(weightsPATH)['model'])
+# print weights
+for k, v in model.named_parameters():
+    print(k, v)
+
 
 imgRes = 256
 
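Note: as committed, this hunk has two bugs that would stop app.py at startup. `torch.load(weightsPATH['model'], ...)` indexes the path *string* with `'model'`, raising `TypeError: string indices must be integers` before any weights are read; and `torch.save(state, weightsPATH)` overwrites the downloaded checkpoint with the freshly initialized (untrained) model's weights before they are ever loaded. A minimal corrected sketch is below, assuming the standard CLIPSeg setup where `rd64-uni.pth` is a plain state dict loaded with `strict=False`; the `CLIPDensePredT` import path is assumed from the clipseg repo, since app.py's import block is not shown in this hunk:

import torch
from models.clipseg import CLIPDensePredT  # import path assumed from the clipseg repo layout

device = 'cuda' if torch.cuda.is_available() else 'cpu'

model = CLIPDensePredT(version='ViT-B/16', reduce_dim=64, complex_trans_conv=True)
model.eval()  # inference only; no gradient updates needed

weightsPATH = './clipseg/weights/rd64-uni.pth'

# Load the checkpoint once, then pass the resulting dict to load_state_dict.
# Do NOT index weightsPATH (a str) with ['model'], and do not re-save over
# the pretrained file with the untrained model's state_dict.
state_dict = torch.load(weightsPATH, map_location=torch.device(device))
model.load_state_dict(state_dict, strict=False)  # strict=False tolerates missing/extra keys

If the checkpoint were instead saved as a wrapper dict (as the commented-out line suggests), the equivalent load would be `model.load_state_dict(torch.load(weightsPATH, map_location=torch.device(device))['model'], strict=False)`.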