import sys
import subprocess

# Install detectron2 from source (no prebuilt wheel on PyPI)
subprocess.check_call([sys.executable, '-m', 'pip', 'install',
                       'git+https://github.com/facebookresearch/detectron2.git'])

import numpy as np
import gradio as gr

from Code import Inference

import detectron2
# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog

import os

# Force CPU-only execution
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

sys.path.append("Repositories/")
from dlclive import DLCLive, Processor


def run_Inference(input_img):
    ### Detectron2: Mask R-CNN predictor for instance segmentation
    cfg = get_cfg()
    # add project-specific config (e.g., TensorMask) here if you're not running a model in detectron2's core library
    cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml"))
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7  # set threshold for this model
    # Find a model from detectron2's model zoo. You can use the https://dl.fbaipublicfiles... url as well
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml")
    cfg.MODEL.DEVICE = 'cpu'
    predictor = DefaultPredictor(cfg)

    ### DLC: DeepLabCut-Live keypoint model
    dlc_proc = Processor()
    dlc_liveObj = DLCLive("./Weights/DLC_DLC_Segmented_resnet_50_iteration-0_shuffle-1/", processor=dlc_proc)

    # Run the full segmentation + keypoint pipeline on the uploaded image
    OutImg = Inference.Inference(input_img, predictor, dlc_liveObj,
                                 ScaleBBox=1, Dilate=5, DLCThreshold=0.3)

    return OutImg


demo = gr.Interface(
    run_Inference,
    gr.Image(),
    "image",
    title="PigeonEverywhere",
    description="Upload a photo of a pigeon, and get 2D keypoint estimation.\n"
                "For more info on how this was done, see: https://arxiv.org/abs/2308.15316",
)
demo.launch()