Commit: printing devices
app.py
CHANGED
@@ -231,10 +231,15 @@ def count(image, text, prompts, state, device):
     input_image, _ = transform(image, {"exemplars": torch.tensor([])})
     input_image = input_image.unsqueeze(0).to(device)
     exemplars = get_box_inputs(prompts["points"])
-
+
     input_image_exemplars, exemplars = transform(prompts["image"], {"exemplars": torch.tensor(exemplars)})
     input_image_exemplars = input_image_exemplars.unsqueeze(0).to(device)
     exemplars = [exemplars["exemplars"].to(device)]
+
+    print("model device: " + str(model.device))
+    print("input image device: " + str(input_image.device))
+    print("input image exemplars device: " + str(input_image_exemplars.device))
+    print("exemplars device: " + str(exemplars[0].device))
 
     with torch.no_grad():
         model_output = model(
@@ -316,7 +321,7 @@ def count_main(image, text, prompts, device):
     input_image, _ = transform(image, {"exemplars": torch.tensor([])})
     input_image = input_image.unsqueeze(0).to(device)
     exemplars = get_box_inputs(prompts["points"])
-
+
     input_image_exemplars, exemplars = transform(prompts["image"], {"exemplars": torch.tensor(exemplars)})
     input_image_exemplars = input_image_exemplars.unsqueeze(0).to(device)
     exemplars = [exemplars["exemplars"].to(device)]
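The new lines in count() print the device of the model and of each input tensor right before the torch.no_grad() forward pass, so a CPU/GPU placement mismatch shows up directly in the Space's logs; the count_main() hunk appears to carry only a whitespace-level change. One caveat: model.device only works if the model object exposes a device attribute (as Hugging Face model wrappers do); a bare torch.nn.Module does not, and the portable approach is to read the device of one of its parameters. The sketch below bundles the same checks into a helper; the name report_devices and the call shown after it are illustrative assumptions, not part of app.py.

import torch

def report_devices(model: torch.nn.Module, **tensors: torch.Tensor) -> None:
    # Take the model's device from its first parameter; this works for any
    # nn.Module, with or without a .device attribute.
    model_device = next(model.parameters()).device
    print(f"model device: {model_device}")

    mismatched = []
    for name, tensor in tensors.items():
        print(f"{name} device: {tensor.device}")
        if tensor.device != model_device:
            mismatched.append(name)

    if mismatched:
        print("WARNING: not on the model's device: " + ", ".join(mismatched))

Called at the same point where the prints were added (again, illustrative only):

    report_devices(
        model,
        input_image=input_image,
        input_image_exemplars=input_image_exemplars,
        exemplars=exemplars[0],
    )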