it@M InnovationLab committed
Commit 8529105
Parent: 0c5cb38

Fix errors in demo.

Files changed (2):
  1. app.py +6 -2
  2. requirements.txt +3 -1
app.py CHANGED
@@ -3,9 +3,12 @@ import yolov5
 import numpy as np
 from PIL import Image, ImageDraw, ImageFilter
 from transformers import OneFormerProcessor, OneFormerForUniversalSegmentation
+import torchvision.transforms
+import torch
 
 person_processor = OneFormerProcessor.from_pretrained("shi-labs/oneformer_cityscapes_swin_large")
 person_model = OneFormerForUniversalSegmentation.from_pretrained("shi-labs/oneformer_cityscapes_swin_large")
+transform = torchvision.transforms.ToPILImage()
 
 lp_model = yolov5.load('keremberke/yolov5m-license-plate')
 lp_model.conf = 0.25 # NMS confidence threshold
@@ -68,5 +71,6 @@ def test_gradio(image):
     anonymized = Image.composite(image, blurred, mask)
     return anonymized
 
-interface = gr.Interface(fn=test_gradio, inputs=gr.Image(type="pil"), outputs="image")
-interface.launch(share=True)
+demo = gr.Interface(fn=test_gradio, inputs=gr.Image(type="pil"), outputs=gr.Image(type="pil"))
+demo.launch()
+#demo.launch(server_name="localhost", server_port=8080)
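The first hunk adds torch plus a module-level torchvision ToPILImage transform, which suggests the person mask is now built from the OneFormer output tensor rather than via numpy. The function body is not part of this diff, so the following is only a minimal sketch of that step: the helper name build_mask and the mask polarity (255 = keep sharp) are assumptions, and Cityscapes train id 11 is "person".

import torch
import torchvision.transforms
from transformers import OneFormerProcessor, OneFormerForUniversalSegmentation

transform = torchvision.transforms.ToPILImage()

def build_mask(image, processor, model):
    # Hypothetical helper: semantic segmentation with OneFormer on a PIL image.
    inputs = processor(images=image, task_inputs=["semantic"], return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # Resize the predicted class map back to the input resolution (PIL size is (W, H)).
    semantic = processor.post_process_semantic_segmentation(
        outputs, target_sizes=[image.size[::-1]]
    )[0]
    # Cityscapes train id 11 is "person": 0 there (gets blurred), 255 elsewhere.
    keep = (semantic != 11).to(torch.uint8) * 255
    # ToPILImage converts the 2-D uint8 tensor into a mode "L" PIL mask.
    return transform(keep)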
 
 
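The second hunk renames the interface, declares the output as a PIL image explicitly, and drops share=True, so the Space no longer requests a public Gradio tunnel; demo.launch() serves locally on Gradio's defaults, and the commented-out variant pins host and port instead. For context, a sketch of how the compositing lines visible in the hunk behave; the file name and blur radius are made up, and the real app presumably also clears the yolov5 license-plate boxes out of the mask with ImageDraw.

from PIL import Image, ImageFilter

image = Image.open("street.jpg").convert("RGB")              # hypothetical input
blurred = image.filter(ImageFilter.GaussianBlur(radius=15))  # radius is a guess
mask = build_mask(image, person_processor, person_model)     # sketch above
# Image.composite takes pixels from its first argument where the mask is 255
# and from its second where it is 0, so persons (mask 0) stay blurred.
anonymized = Image.composite(image, blurred, mask)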
requirements.txt CHANGED
@@ -1,4 +1,6 @@
 yolov5
 transformers
 Pillow
-numpy
+numpy
+torch
+torchvision
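app.py now imports torch and torchvision directly, so both are declared here instead of arriving only as transitive dependencies of yolov5. A quick hypothetical smoke test that everything in the file resolves (Pillow installs under the import name PIL):

import importlib

# Try importing each requirements.txt entry under its Python module name.
for name in ["yolov5", "transformers", "PIL", "numpy", "torch", "torchvision"]:
    importlib.import_module(name)
    print(name, "OK")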