# Based on https://huggingface.co/spaces/akhaliq/Detic/tree/main Thanks!
import os
os.system("pip install gradio==2.4.6")
import sys
import gradio as gr

os.system('pip install detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu102/torch1.9/index.html')

# clone and install Detic
os.system("git clone https://github.com/facebookresearch/Detic.git --recurse-submodules")
os.chdir("Detic")

# Install detectron2
import torch

# Some basic setup:
# Setup detectron2 logger
import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()

# import some common libraries
import sys
import numpy as np
import os, json, cv2, random

# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog

# Detic libraries
sys.path.insert(0, 'third_party/CenterNet2/projects/CenterNet2/')
from centernet.config import add_centernet_config
from detic.config import add_detic_config
from detic.modeling.utils import reset_cls_test
from detic.modeling.text.text_encoder import build_text_encoder

from PIL import Image

# Build the detector and download our pretrained weights
cfg = get_cfg()
add_centernet_config(cfg)
add_detic_config(cfg)
cfg.MODEL.DEVICE = 'cpu'
cfg.merge_from_file("configs/Detic_LCOCOI21k_CLIP_SwinB_896b32_4x_ft4x_max-size.yaml")
cfg.MODEL.WEIGHTS = 'https://dl.fbaipublicfiles.com/detic/Detic_LCOCOI21k_CLIP_SwinB_896b32_4x_ft4x_max-size.pth'
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # set threshold for this model
cfg.MODEL.ROI_BOX_HEAD.ZEROSHOT_WEIGHT_PATH = 'rand'
cfg.MODEL.ROI_HEADS.ONE_CLASS_PER_PROPOSAL = True  # for better visualization; set to False to keep all classes per proposal
predictor = DefaultPredictor(cfg)

# Set up the model's vocabulary using built-in datasets
BUILDIN_CLASSIFIER = {
    'lvis': 'datasets/metadata/lvis_v1_clip_a+cname.npy',
    'objects365': 'datasets/metadata/o365_clip_a+cnamefix.npy',
    'openimages': 'datasets/metadata/oid_clip_a+cname.npy',
    'coco': 'datasets/metadata/coco_clip_a+cname.npy',
}

BUILDIN_METADATA_PATH = {
    'lvis': 'lvis_v1_val',
    'objects365': 'objects365_v2_val',
    'openimages': 'oid_val_expanded',
    'coco': 'coco_2017_val',
}

text_encoder = build_text_encoder(pretrain=True)
text_encoder.eval()

def get_clip_embeddings(vocabulary, prompt='a '):
    # Embed each class name with the CLIP text encoder; the result is used as classifier weights.
    texts = [prompt + x for x in vocabulary]
    emb = text_encoder(texts).detach().permute(1, 0).contiguous().cpu()
    return emb

def update_test_score_thresh(predictor, test_score_thresh):
    # Propagate the user-chosen score threshold to every stage of the box predictor.
    for box_predictor in predictor.model.roi_heads.box_predictor:
        box_predictor.test_score_thresh = test_score_thresh

def inference(custom_vocabulary, thresh, img):
    # Build a custom vocabulary from the comma-separated text, swap in the matching CLIP
    # classifier, run the detector, and return the visualized detections as a PIL image.
    update_test_score_thresh(predictor, test_score_thresh=thresh)
    metadata = MetadataCatalog.get("__unused")
    metadata.thing_classes = custom_vocabulary.split(',')
    classifier = get_clip_embeddings(metadata.thing_classes)
    num_classes = len(metadata.thing_classes)
    reset_cls_test(predictor.model, classifier, num_classes)
    im = cv2.imread(img)
    outputs = predictor(im)
    v = Visualizer(im[:, :, ::-1], metadata)
    out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
    MetadataCatalog.remove("__unused")
    return Image.fromarray(np.uint8(out.get_image())).convert('RGB')
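# The inference() helper above can also be exercised without the Gradio UI. The lines below are
# an illustrative sketch, left commented out so the script still behaves like the original demo;
# the image path comes from the examples list further down, and 'detections.png' is just a
# hypothetical output filename.
#
# if __name__ == "__main__":
#     result = inference("dog,cat", 0.5, "examples/dogs-and-cats.jpeg")
#     result.save("detections.png")  # inference() returns a PIL.Image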
title = "Detic"
description = ("Gradio demo for Detic: Detecting Twenty-thousand Classes using Image-level Supervision. "
               "To use it, simply upload your image, or click one of the examples to load them. "
               "Read more at the links below.")
article = "Detecting Twenty-thousand Classes using Image-level Supervision | Github Repo"

examples = [
    ['dog,cat', 0.500, 'examples/dogs-and-cats.jpeg'],
    ['a boy jumping in the air', 0.037, 'examples/jump.jpeg'],
]

gr.Interface(
    inference,
    inputs=[
        gr.inputs.Textbox(placeholder="Type a class or text to find", default="dog,cat"),
        gr.inputs.Slider(minimum=0.001, maximum=0.999, step=0.001, default=0.5),
        gr.inputs.Image(type="filepath"),
    ],
    outputs=gr.outputs.Image(type="pil"),
    title=title,
    description=description,
    article=article,
    examples=examples,
    enable_queue=True,
).launch()