hiraltalsaniya committed on
Commit
59166a9
1 Parent(s): bc7577c

Upload 3 files

Browse files
Files changed (3) hide show
  1. app.py +41 -0
  2. best.pt +3 -0
  3. requirements.txt +38 -0
app.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import torch
import gradio as gr
from huggingface_hub import hf_hub_download
from PIL import Image

# Location of the custom-trained YOLOv7 face-mask weights on the Hugging Face Hub.
REPO_ID = "hiraltalsaniya/YOLOv7_face_mask"
FILENAME = "best.pt"

# Download the weight file from the Space repository (hf_hub_download caches locally).
yolov7_custom_weights = hf_hub_download(repo_id=REPO_ID, filename=FILENAME, repo_type="space")

# Load the custom checkpoint through the YOLOv7 hub entry point.
# force_reload=True avoids a stale hub cache at the cost of re-downloading the repo.
model = torch.hub.load(
    "WongKinYiu/yolov7:main",
    model="custom",
    path_or_model=yolov7_custom_weights,
    force_reload=True,
)


def object_detection(im, size=416):
    """Run mask / no-mask detection on an image and return the rendered result.

    Parameters
    ----------
    im : PIL.Image.Image
        Input image uploaded through the Gradio UI.
    size : int, optional
        Inference resolution (pixels) forwarded to the model. Defaults to 416,
        matching the UI's input shape.

    Returns
    -------
    PIL.Image.Image
        The input image with detection boxes drawn on it.
    """
    # Bug fix: the original accepted `size` but never used it; forward it so the
    # parameter actually controls inference resolution.
    results = model(im, size=size)
    results.render()  # draws boxes in place onto results.imgs
    return Image.fromarray(results.imgs[0])


title = "Yolov7 Custom"

# NOTE(review): gr.inputs / gr.outputs is the legacy Gradio 2.x API; kept as-is
# for compatibility with the Gradio version this Space pins.
image = gr.inputs.Image(shape=(416, 416), image_mode="RGB", source="upload", label="Upload Image", optional=False)
outputs = gr.outputs.Image(type="pil", label="Output Image")

# Description repaired: the original contained unclosed/mangled anchor-tag
# fragments ("style='text-decoration: underline' target='_blank'>Link</a>" with
# no opening <a> tag), which render as garbage HTML.
# NOTE(review): the class labels below say Person/Car while the app title says
# mask / no-mask — confirm which labels the trained model actually emits.
Custom_description = (
    "<center>Custom training performed on Colab</center><br>"
    "<center>Trainable bag-of-freebies sets new state-of-the-art "
    "for real-time object detectors</center><br>"
    "<b>1st</b> class is for Person Detected<br>"
    "<b>2nd</b> class is for Car Detected"
)

# Typo fixes: "MOdel train on our custome dataset".
Footer = "Model trained on our custom dataset"

Top_Title = "<center>Yolov7 🚀 Custom Trained</center> Face with mask and face without mask Detection"

# Bug fix: the original assigned `css` twice (the second silently overwrote the
# first) and never passed it to the Interface; merged into one rule set and
# wired through below.
css = (
    ".output-image, .input-image {height: 50rem !important; width: 100% !important;} "
    ".image-preview {height: auto !important;}"
)

# Removed the unused `examples1` list (Image1..Image6/horses) — it referenced
# files not uploaded in this commit and was never passed to the Interface.
gr.Interface(
    fn=object_detection,
    inputs=image,
    outputs=outputs,
    title=Top_Title,
    description=Custom_description,
    article=Footer,
    css=css,
    examples=[["mask-person-2.jpg"], ["mask-person-2.jpg"]],
).launch()
best.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9ddf943735392f1b54a13c2774267e8f55205aecba367540dce87df21635a7c1
3
+ size 12217893
requirements.txt ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #Yolov7 WongKinYiu Requirements
2
+
3
+ # Usage: pip install -r requirements.txt
4
+
5
+ # Base ----------------------------------------
6
+ matplotlib>=3.2.2
7
+ numpy>=1.18.5
8
+ opencv-python>=4.1.1
9
+ Pillow>=7.1.2
10
+ PyYAML>=5.3.1
11
+ requests>=2.23.0
12
+ scipy>=1.4.1
13
+ torch>=1.7.0,!=1.12.0
14
+ torchvision>=0.8.1,!=0.13.0
15
+ tqdm>=4.41.0
16
+ protobuf<4.21.3
17
+
18
+ # Logging -------------------------------------
19
+ tensorboard>=2.4.1
20
+ # wandb
21
+
22
+ # Plotting ------------------------------------
23
+ pandas>=1.1.4
24
+ seaborn>=0.11.0
25
+
26
+ # Export --------------------------------------
27
+ # coremltools>=4.1 # CoreML export
28
+ # onnx>=1.9.0 # ONNX export
29
+ # onnx-simplifier>=0.3.6 # ONNX simplifier
30
+ # scikit-learn==0.19.2 # CoreML quantization
31
+ # tensorflow>=2.4.1 # TFLite export
32
+ # tensorflowjs>=3.9.0 # TF.js export
33
+ # openvino-dev # OpenVINO export
34
+
35
+ # Extras --------------------------------------
36
+ ipython # interactive notebook
37
+ psutil # system utilization
38
+ thop # FLOPs computation