Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -181,42 +181,26 @@ async def predict_single_dog(image):
    # return dogs


-async def detect_multiple_dogs(image, conf_threshold=0.25, iou_threshold=0.4):
-    try:
-        results = model_yolo(image, conf=conf_threshold, iou=iou_threshold)[0]
-        dogs = []
-        for box in results.boxes:
-            if box.cls == 16:  # COCO dataset class for dog is 16
-                xyxy = box.xyxy[0].tolist()
-                confidence = box.conf.item()
-                cropped_image = image.crop((xyxy[0], xyxy[1], xyxy[2], xyxy[3]))
-                dogs.append((cropped_image, confidence, xyxy))
-
-        # If no dogs are detected, use the whole image
-        if not dogs:
-            logger.info("No dogs detected, using the whole image.")
-            dogs = [(image, 1.0, [0, 0, image.width, image.height])]
-
-        return dogs
-    except Exception as e:
-        logger.error(f"Error in detect_multiple_dogs: {str(e)}")
-        return [(image, 1.0, [0, 0, image.width, image.height])]
-
async def predict_single_dog(image):
-    ...
+    image_tensor = preprocess(image).unsqueeze(0)
+    with torch.no_grad():
+        output = model(image_tensor)
+    probabilities = torch.nn.functional.softmax(output[0], dim=0)
+    top3_prob, top3_catid = torch.topk(probabilities, 3)
+    top3_breeds = [dog_breeds[idx.item()] for idx in top3_catid]
+    top3_probs = [f"{prob.item()*100:.2f}%" for prob in top3_prob]
+    return top3_prob[0].item(), top3_breeds, top3_probs
+
+async def detect_multiple_dogs(image, conf_threshold=0.3, iou_threshold=0.45):
+    results = model_yolo(image, conf=conf_threshold, iou=iou_threshold)[0]
+    dogs = []
+    for box in results.boxes:
+        if box.cls == 16:  # COCO dataset class for dog is 16
+            xyxy = box.xyxy[0].tolist()
+            confidence = box.conf.item()
+            cropped_image = image.crop((xyxy[0], xyxy[1], xyxy[2], xyxy[3]))
+            dogs.append((cropped_image, confidence, xyxy))
+    return dogs


async def process_single_dog(image):
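The rewritten predict_single_dog reads the top three breeds directly from the classifier's softmax output. The standalone sketch below reproduces that top-3 pattern with made-up logits and made-up breed names, so it runs without the app's model, preprocess, or dog_breeds objects:

import torch

# Hypothetical stand-ins for the app's classifier output and label list.
logits = torch.tensor([2.1, 0.3, 5.7, 1.2, 4.4])
breed_names = ["beagle", "border_collie", "golden_retriever", "poodle", "samoyed"]

# Same top-3 extraction as the updated predict_single_dog.
probabilities = torch.nn.functional.softmax(logits, dim=0)
top3_prob, top3_catid = torch.topk(probabilities, 3)
top3_breeds = [breed_names[idx.item()] for idx in top3_catid]
top3_probs = [f"{prob.item() * 100:.2f}%" for prob in top3_prob]

print(top3_breeds)  # highest-probability breed first
print(top3_probs)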
@@ -418,7 +402,7 @@ async def process_single_dog(image):

async def predict(image):
    if image is None:
-        return "Please upload an image to start.", None, gr.update(visible=False), None
+        return "Please upload an image to start.", None, [], gr.update(visible=False), None

    try:
        if isinstance(image, np.ndarray):
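The widened early return presumably matches the number of output components this handler is wired to in the Gradio UI: an event handler must return one value per declared output, and the extra [] looks like a placeholder for a list-valued component such as the breed buttons. A minimal sketch of that constraint, using a hypothetical stub handler:

import gradio as gr

# Sketch only: a handler bound to five outputs must return five values,
# which is why the early return gains the extra [] placeholder.
def predict_stub(image):
    if image is None:
        # message, annotated image, breed choices, back-button visibility, state
        return "Please upload an image to start.", None, [], gr.update(visible=False), None
    return "ok", image, [], gr.update(visible=False), {}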
@@ -426,6 +410,9 @@ async def predict(image):

        dogs = await detect_multiple_dogs(image)

+        if len(dogs) == 0:
+            dogs = [(image, 1.0, [0, 0, image.width, image.height])]
+
        color_list = ['#FF0000', '#00FF00', '#0000FF', '#FFFF00', '#00FFFF', '#FF00FF', '#800080', '#FFA500']
        explanations = []
        buttons = []
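The whole-image fallback that previously lived inside detect_multiple_dogs now happens at the call site in predict. A self-contained sketch of that fallback; the helper name is illustrative, not from the commit:

from PIL import Image

def fallback_to_whole_image(image, dogs):
    # Illustrative helper: if the detector returned no (crop, confidence, box)
    # tuples, treat the full frame as a single detection, as predict now does.
    if len(dogs) == 0:
        dogs = [(image, 1.0, [0, 0, image.width, image.height])]
    return dogs

# Example with an empty detection list on a blank 640x480 image.
blank = Image.new("RGB", (640, 480))
print(fallback_to_whole_image(blank, [])[0][1:])  # (1.0, [0, 0, 640, 480])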
@@ -476,8 +463,8 @@ async def predict(image):
        return final_explanation, annotated_image, gr.update(visible=False), initial_state

    except Exception as e:
-        error_msg = f"An error occurred: {str(e)}
-
+        error_msg = f"An error occurred: {str(e)}"
+        print(error_msg)
        return error_msg, None, gr.update(visible=False), None


@@ -508,8 +495,8 @@ def go_back(state):
        gr.update(visible=False),
        state
    )
+

-# Modify the Gradio interface structure
with gr.Blocks() as iface:
    gr.HTML("<h1 style='text-align: center;'>🐶 Dog Breed Classifier 🐾</h1>")
    gr.HTML("<p style='text-align: center;'>Upload a picture of a dog, and the model will predict its breed, provide detailed information, and include an extra information link!</p>")
@@ -543,7 +530,7 @@ with gr.Blocks() as iface:
        inputs=[initial_state],
        outputs=[output, output_image, breed_buttons, back_button, initial_state]
    )
-
+
    gr.Examples(
        examples=['Border_Collie.jpg', 'Golden_Retriever.jpeg', 'Saint_Bernard.jpeg', 'French_Bulldog.jpeg', 'Samoyed.jpg'],
        inputs=input_image
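For reference, the (cropped_image, confidence, xyxy) tuples returned by the new detect_multiple_dogs can be drawn with plain PIL, cycling through the same color_list used in predict. The sketch below uses a synthetic image and hand-written detections rather than code from the Space:

from PIL import Image, ImageDraw

# Hand-written example detections in the (confidence, [x1, y1, x2, y2]) form
# produced by detect_multiple_dogs; the image is synthetic.
image = Image.new("RGB", (640, 480), "white")
detections = [(0.92, [40, 60, 300, 420]), (0.81, [330, 80, 610, 430])]

color_list = ['#FF0000', '#00FF00', '#0000FF', '#FFFF00', '#00FFFF', '#FF00FF', '#800080', '#FFA500']

draw = ImageDraw.Draw(image)
for i, (confidence, xyxy) in enumerate(detections):
    color = color_list[i % len(color_list)]
    draw.rectangle(xyxy, outline=color, width=3)
    draw.text((xyxy[0], max(0, xyxy[1] - 12)), f"Dog {i + 1}: {confidence:.2f}", fill=color)

image.save("annotated_example.png")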