"""Gradio app: weapon detection with a YOLO model plus a CS:GO weapon classifier."""

import gradio as gr
import numpy as np
import requests
from io import BytesIO
from PIL import Image
from transformers import pipeline
from ultralyticsplus import YOLO, render_result

# Object detector (custom-trained weights) and a secondary classifier that
# identifies the specific CS:GO weapon for pistol/rifle detections.
model = YOLO('best (1).pt')
model2 = pipeline('image-classification', model='Kaludi/csgo-weapon-classification')

# Detector class names, in the order used by the model.
name = ['grenade', 'knife', 'pistol', 'rifle']


def load_drive_image(share_url):
    """Download an example image from a Google Drive share link and return it as a PIL image."""
    file_id = share_url.split('/')[-2]
    resp = requests.get('https://drive.google.com/uc?id=' + file_id)
    return Image.open(BytesIO(resp.content))


example_urls = [
    "https://drive.google.com/file/d/1bBq0bNmJ5X83tDWCzdzHSYCdg-aUL4xO/view?usp=drive_link",
    "https://drive.google.com/file/d/16Z7QzvZ99fbEPj1sls_jOCJBsC0h_dYZ/view?usp=drive_link",
    "https://drive.google.com/file/d/13mjTMS3eR0AKYSbV-Fpb3fTBno_T42JN/view?usp=drive_link",
    "https://drive.google.com/file/d/1-XpFsa_nz506Ul6grKElVJDu_Jl3KZIF/view?usp=drive_link",
]
im1, im2, im3, im4 = (load_drive_image(url) for url in example_urls)


def response(image, image_size=640, conf_threshold=0.3, iou_threshold=0.6):
    """Run the detector and build a text summary plus the list of detected weapon names."""
    results = model.predict(image, conf=conf_threshold, iou=iou_threshold, imgsz=image_size)
    text = ""
    name_weap = ""
    for r in results:
        conf = np.array(r.boxes.conf)
        cls = np.array(r.boxes.cls).astype(int)
        xywh = np.array(r.boxes.xywh).astype(int)
        for con, cl, xy in zip(conf, cls, xywh):
            conf_pct = round(float(con) * 100, 1)
            # xy[0], xy[1] are the box-centre coordinates in pixels.
            text += f"Detected {name[cl]} with confidence {conf_pct}% at ({xy[0]},{xy[1]})\n"
            if cl in (0, 1):
                # Grenade / knife: report the detector's class name directly.
                name_weap += name[cl] + '\n'
            else:
                # Pistol / rifle: ask the CS:GO weapon classifier for the specific model.
                out = model2(image)
                name_weap += out[0]["label"] + '\n'
    return name_weap, text


def response2(image, image_size=640, conf_threshold=0.3, iou_threshold=0.6):
    """Render the detections and return the annotated image plus both text outputs."""
    results = model.predict(image, conf=conf_threshold, iou=iou_threshold, imgsz=image_size)
    render = render_result(model=model, image=image, result=results[0], rect_th=1, text_th=1)
    # Reuse the same thresholds so the text summary matches the rendered boxes.
    weapon_name, text_detection = response(image, image_size, conf_threshold, iou_threshold)
    return render, text_detection, weapon_name


inputs = [
    gr.Image(type="filepath", label="Input Image"),
    gr.Slider(minimum=320, maximum=1280, value=640, step=32, label="Image Size"),
    gr.Slider(minimum=0.0, maximum=1.0, value=0.3, step=0.05, label="Confidence Threshold"),
    gr.Slider(minimum=0.0, maximum=1.0, value=0.6, step=0.05, label="IOU Threshold"),
]

outputs = [
    gr.Image(type="filepath", label="Output Image"),
    gr.Textbox(label="Result"),
    gr.Textbox(label="Weapon Name"),
]

examples = [
    [im1, 640, 0.3, 0.6],
    [im2, 640, 0.3, 0.6],
    [im3, 640, 0.3, 0.6],
    [im4, 640, 0.15, 0.6],
]

iface = gr.Interface(fn=response2, inputs=inputs, outputs=outputs, examples=examples)
iface.launch()