Update app.py
app.py
CHANGED
@@ -315,7 +315,7 @@ def count(image, text, prompts, state, device):
 @spaces.GPU
 def count_main(image, text, prompts, device):
     model.to(device)
-
+
     keywords = "" # do not handle this for now
     # Handle no prompt case.
     if prompts is None:
@@ -327,6 +327,7 @@ def count_main(image, text, prompts, device):
     input_image_exemplars, exemplars = transform(prompts["image"], {"exemplars": torch.tensor(exemplars)})
     input_image_exemplars = input_image_exemplars.unsqueeze(0).to(device)
     exemplars = [exemplars["exemplars"].to(device)]
+    print("image device: " + str(input_image.device))
 
     with torch.no_grad():
         model_output = model(
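For context, the added print is a device-placement check inside the @spaces.GPU handler. Below is a minimal standalone sketch of the same debugging pattern, not the Space's actual code: it assumes model is a torch.nn.Module and guesses at the model call's argument list, which the truncated diff does not show, so treat both as illustrative.

import torch

# Hypothetical sketch of the device-placement check the commit adds:
# move the model and every input tensor to the target device, then log
# each tensor's device before the forward pass so CPU/GPU mismatches
# surface immediately in the Space logs.
def run_counting(model, input_image, input_image_exemplars, exemplars, device):
    model.to(device)
    input_image = input_image.to(device)
    input_image_exemplars = input_image_exemplars.unsqueeze(0).to(device)
    exemplars = [e.to(device) for e in exemplars]

    # Same idea as the added print("image device: ...") line, applied to all inputs.
    for name, t in [("input_image", input_image),
                    ("input_image_exemplars", input_image_exemplars)]:
        print(name + " device: " + str(t.device))

    with torch.no_grad():
        # The real call signature is not visible in the diff; this argument
        # list is an assumption for illustration only.
        model_output = model(input_image, input_image_exemplars, exemplars)
    return model_output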