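"""Gradio Space: detect objects with a custom YOLO model, re-classify each detected
crop with CLIP zero-shot classification, and show the resulting label counts as a
pie chart."""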
import base64
import io
from collections import defaultdict

import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from transformers import pipeline
from ultralytics import YOLO
# YOLO detector (custom weights) and CLIP zero-shot classifier
MODEL_PATH = 'best-1206.pt'
model = YOLO(MODEL_PATH)

checkpoint = "openai/clip-vit-base-patch32"
classifier = pipeline(task="zero-shot-image-classification", model=checkpoint)
def shot(image, labels_text):
    """Classify an image against the given labels with CLIP zero-shot classification."""
    # Accept both numpy arrays (from Gradio) and PIL images (from YOLO crops)
    if not isinstance(image, Image.Image):
        image = Image.fromarray(np.uint8(image))
    image = image.convert('RGB')
    if labels_text:
        labels = [label.strip() for label in labels_text.split(";") if label.strip()]
    else:
        labels = ['PET bottle', 'Metal can', 'Plastic bag', 'Cardboard box']
    results = classifier(image, candidate_labels=labels)
    return {result["label"]: result["score"] for result in results}
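# Example return value from `shot` (scores are illustrative, not measured):
# {'PET bottle': 0.91, 'Metal can': 0.05, 'Plastic bag': 0.03, 'Cardboard box': 0.01}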
def count_and_display_pie_chart(image, labels_text):
    """Detect objects with YOLO, classify each crop with CLIP, and return a pie chart as HTML."""
    image = Image.fromarray(np.uint8(image)).convert('RGB')

    # Run the YOLO detector on the full image
    results = model(image)
    boxes = results[0].boxes.xyxy.tolist()
    classes = results[0].boxes.cls.tolist()
    confidences = results[0].boxes.conf.tolist()

    # Count occurrences of each label assigned by the zero-shot classifier.
    # The YOLO class and confidence are not used directly: CLIP re-classifies each crop.
    label_count = defaultdict(int)
    for box, cls, conf in zip(boxes, classes, confidences):
        x1, y1, x2, y2 = box
        # Crop the bounding box from the image
        cropped_image = image.crop((int(x1), int(y1), int(x2), int(y2)))
        # Classify the crop against the user-provided labels
        result = shot(cropped_image, labels_text)
        # Keep the label with the highest score, if the classifier is confident enough
        max_label = max(result, key=result.get)
        max_score = result[max_label]
        if max_score > 0.5:
            label_count[max_label] += 1

    # Build the pie chart from the label counts
    labels = list(label_count.keys())
    sizes = list(label_count.values())
    fig, ax = plt.subplots()
    ax.pie(sizes, labels=labels, autopct='%1.1f%%', startangle=140)
    ax.axis('equal')  # Equal aspect ratio draws the pie as a circle
    ax.set_title('Detected Objects Pie Chart')

    # Encode the figure as a base64 PNG embedded in an <img> tag for gr.HTML
    buffer = io.BytesIO()
    fig.savefig(buffer, format='png')
    plt.close(fig)
    buffer.seek(0)
    img_str = base64.b64encode(buffer.read()).decode('utf-8')
    return '<img src="data:image/png;base64,{}">'.format(img_str)
demo = gr.Interface(
    count_and_display_pie_chart,
    inputs=[
        gr.Image(type="numpy", label="Upload an image"),
        gr.Textbox(
            label="Labels",
            info="Separated by a semicolon (;)",
            lines=4,
            value="""PET bottle;
Metal can;
Plastic bag;
Cardboard box""",
        ),
    ],
    outputs=[gr.HTML(label="Pie Chart Image")],
    examples=[['can.jpg', None]],
    description="Upload an image to detect objects and display a pie chart of their counts.",
    title="Object Detection and Pie Chart Demo",
)

demo.launch()
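# Assumed Space setup (not shown in this file): the 'best-1206.pt' YOLO weights and the
# 'can.jpg' example image live alongside app.py, and requirements.txt lists
# ultralytics, transformers, gradio, matplotlib, numpy, and pillow.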