intelliarts committed on
Commit
5e22471
1 Parent(s): 760650f

Create app.py

Files changed (1)
  1. app.py +67 -0
app.py ADDED
@@ -0,0 +1,67 @@
+ # Install detectron2 at startup if it is not already available in the environment.
+ try:
+     import detectron2
+ except ImportError:
+     import os
+     os.system('pip install git+https://github.com/facebookresearch/detectron2.git')
+
+ import gradio as gr
+ import numpy as np
+ import torch
+
+ import detectron2
+ from detectron2 import model_zoo
+ from detectron2.engine import DefaultPredictor
+ from detectron2.config import get_cfg
+ from detectron2.utils.visualizer import Visualizer, ColorMode
+ from detectron2.data import MetadataCatalog
+
+ # Path to the fine-tuned Mask R-CNN weights shipped with this Space.
+ model_path = 'model_final.pth'
+
+ cfg = get_cfg()
+ cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
+ cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.8  # only keep detections with confidence >= 0.8
+ cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1          # single class: "damage"
+ cfg.MODEL.WEIGHTS = model_path
+
+ # Fall back to CPU inference when no GPU is available.
+ if not torch.cuda.is_available():
+     cfg.MODEL.DEVICE = 'cpu'
+
+ predictor = DefaultPredictor(cfg)
+ my_metadata = MetadataCatalog.get("car_dataset_val")
+ my_metadata.thing_classes = ["damage"]
+
+ def inference(image):
+     # Gradio provides a PIL image; convert it to a NumPy array for the predictor.
+     img = np.array(image)
+     outputs = predictor(img)
+
+     # Overlay the predicted damage instances on the image.
+     v = Visualizer(img[:, :, ::-1],
+                    metadata=my_metadata,
+                    scale=1.2,
+                    instance_mode=ColorMode.SEGMENTATION  # give instances of the same class consistent colors
+                    )
+     out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
+
+     return out.get_image()[:, :, ::-1]
+
+ title = "Detectron2 Car Scratch Detection"
+ description = "This demo provides an interactive playground for our Detectron2 model trained to detect car scratch damage."
+
+ gr.Interface(
+     inference,
+     [gr.Image(type="pil", label="Input")],
+     gr.Image(type="numpy", label="Output"),
+     title=title,
+     description=description,
+     examples=[]).launch()
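
For a quick local check of the inference function outside Gradio, a minimal smoke test could look like the sketch below; the path sample.jpg is a placeholder, not a file shipped with this Space.

# Hypothetical local smoke test: load an image with PIL, run inference,
# and save the visualization to disk instead of serving it through Gradio.
from PIL import Image

sample = Image.open("sample.jpg").convert("RGB")  # placeholder input image
result = inference(sample)                        # RGB uint8 array with predictions drawn
Image.fromarray(result).save("prediction.jpg")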