Atualli kadirnar committed on
Commit
1152ac9
β€’
0 Parent(s):

Duplicate from kadirnar/yolov7

Browse files

Co-authored-by: Kadir Nar <kadirnar@users.noreply.huggingface.co>

Files changed (4) hide show
  1. .gitattributes +34 -0
  2. README.md +16 -0
  3. app.py +64 -0
  4. requirements.txt +2 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tflite filter=lfs diff=lfs merge=lfs -text
29
+ *.tgz filter=lfs diff=lfs merge=lfs -text
30
+ *.wasm filter=lfs diff=lfs merge=lfs -text
31
+ *.xz filter=lfs diff=lfs merge=lfs -text
32
+ *.zip filter=lfs diff=lfs merge=lfs -text
33
+ *.zst filter=lfs diff=lfs merge=lfs -text
34
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Yolov7
3
+ emoji: πŸ“ˆ
4
+ colorFrom: gray
5
+ colorTo: indigo
6
+ sdk: gradio
7
+ sdk_version: 3.14.0
8
+ app_file: app.py
9
+ pinned: false
10
+ license: gpl-3.0
11
+ tags:
12
+ - making-demos
13
+ duplicated_from: kadirnar/yolov7
14
+ ---
15
+
16
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import gradio as gr
import torch
import yolov7


# Fetch sample images at startup so the example rows configured further down
# have local files to reference. torch.hub downloads each URL to the given
# local filename in the working directory.
torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg', 'zidane.jpg')
torch.hub.download_url_to_file('https://raw.githubusercontent.com/obss/sahi/main/tests/data/small-vehicles1.jpeg', 'small-vehicles1.jpeg')
# Cache of loaded detectors keyed by Hub checkpoint id. Loading a checkpoint
# (download + network build) is expensive, so reuse it across inference calls
# instead of reloading on every request.
_MODEL_CACHE = {}


def yolov7_inference(
    image=None,
    model_path: str = "kadirnar/yolov7-tiny-v0.1",
    image_size: int = 640,
    conf_threshold: float = 0.25,
    iou_threshold: float = 0.45,
):
    """Run YOLOv7 object detection on a single image.

    Args:
        image: Input image (a PIL image, as delivered by the Gradio
            ``Image(type="pil")`` component).
        model_path: Hugging Face Hub id of the YOLOv7 checkpoint to load.
            Defaults to the tiny model so a bare call still works (the old
            ``None`` default would have crashed ``yolov7.load``).
        image_size: Inference image size in pixels.
        conf_threshold: Minimum confidence for a detection to be kept.
        iou_threshold: IOU threshold used by non-maximum suppression.

    Returns:
        The input image with detections drawn on it, as a numpy array
        (first element of ``results.render()``).
    """
    model = _MODEL_CACHE.get(model_path)
    if model is None:
        # CPU-only, untraced load of a Hub-hosted checkpoint.
        model = yolov7.load(model_path, device="cpu", hf_model=True, trace=False)
        _MODEL_CACHE[model_path] = model
    # Thresholds are per-call state on the model object, so set them on every
    # invocation (the cached model may carry values from a previous request).
    model.conf = conf_threshold
    model.iou = iou_threshold
    results = model([image], size=image_size)
    return results.render()[0]
# --- Gradio UI wiring -------------------------------------------------------
# Input components, in the same positional order as yolov7_inference's
# parameters: image, model_path, image_size, conf_threshold, iou_threshold.
inputs = [
    gr.inputs.Image(type="pil", label="Input Image"),
    gr.inputs.Dropdown(
        choices=[
            "kadirnar/yolov7-tiny-v0.1",
            "kadirnar/yolov7-v0.1",
        ],
        default="kadirnar/yolov7-tiny-v0.1",
        label="Model",
    ),
    # step=32: YOLO-family models expect input sizes that are multiples of
    # the network stride.
    gr.inputs.Slider(minimum=320, maximum=1280, default=640, step=32, label="Image Size"),
    gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"),
    gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold"),
]

# yolov7_inference returns a rendered numpy array, so the output component is
# declared as "numpy"; the previous type="filepath" told Gradio to expect a
# path string from the function, which it never returns.
outputs = gr.outputs.Image(type="numpy", label="Output Image")
title = "Yolov7: Trainable bag-of-freebies sets new state-of-the-art for real-time object detectors"

# Example rows (image, model, size, conf, iou); the image files are the ones
# downloaded at module import time.
examples = [['small-vehicles1.jpeg', 'kadirnar/yolov7-tiny-v0.1', 640, 0.25, 0.45], ['zidane.jpg', 'kadirnar/yolov7-v0.1', 640, 0.25, 0.45]]
demo_app = gr.Interface(
    fn=yolov7_inference,
    inputs=inputs,
    outputs=outputs,
    title=title,
    examples=examples,
    cache_examples=True,
    theme='huggingface',
)
# Queueing serializes requests so concurrent users don't contend for the
# single CPU model.
demo_app.launch(debug=True, enable_queue=True)
requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ torch
2
+ yolov7detect