hysts HF staff committed on
Commit
aa47213
β€’
1 Parent(s): 7852ca7
Files changed (4) hide show
  1. README.md +1 -29
  2. app.py +34 -35
  3. requirements.txt +4 -4
  4. style.css +3 -0
README.md CHANGED
@@ -4,35 +4,7 @@ emoji: πŸ“Š
4
  colorFrom: gray
5
  colorTo: red
6
  sdk: gradio
7
- sdk_version: 3.19.1
8
  app_file: app.py
9
  pinned: false
10
  ---
11
-
12
- # Configuration
13
-
14
- `title`: _string_
15
- Display title for the Space
16
-
17
- `emoji`: _string_
18
- Space emoji (emoji-only character allowed)
19
-
20
- `colorFrom`: _string_
21
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
22
-
23
- `colorTo`: _string_
24
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
25
-
26
- `sdk`: _string_
27
- Can be either `gradio`, `streamlit`, or `static`
28
-
29
- `sdk_version` : _string_
30
- Only applicable for `streamlit` SDK.
31
- See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
32
-
33
- `app_file`: _string_
34
- Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code).
35
- Path is relative to the root of the repository.
36
-
37
- `pinned`: _boolean_
38
- Whether the Space stays on top of your list.
 
4
  colorFrom: gray
5
  colorTo: red
6
  sdk: gradio
7
+ sdk_version: 3.34.0
8
  app_file: app.py
9
  pinned: false
10
  ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
app.py CHANGED
@@ -21,13 +21,9 @@ from models.yolo import Model
21
  from utils.datasets import letterbox
22
  from utils.general import non_max_suppression, scale_coords
23
 
24
- TITLE = 'zymk9/yolov5_anime'
25
- DESCRIPTION = 'This is an unofficial demo for https://github.com/zymk9/yolov5_anime.'
26
 
27
- HF_TOKEN = os.getenv('HF_TOKEN')
28
- MODEL_REPO = 'hysts/yolov5_anime'
29
- MODEL_FILENAME = 'yolov5x_anime.pth'
30
- CONFIG_FILENAME = 'yolov5x.yaml'
31
 
32
 
33
  def load_sample_image_paths() -> list[pathlib.Path]:
@@ -36,8 +32,7 @@ def load_sample_image_paths() -> list[pathlib.Path]:
36
  dataset_repo = 'hysts/sample-images-TADNE'
37
  path = huggingface_hub.hf_hub_download(dataset_repo,
38
  'images.tar.gz',
39
- repo_type='dataset',
40
- use_auth_token=HF_TOKEN)
41
  with tarfile.open(path) as f:
42
  f.extractall()
43
  return sorted(image_dir.glob('*'))
@@ -46,11 +41,8 @@ def load_sample_image_paths() -> list[pathlib.Path]:
46
  def load_model(device: torch.device) -> torch.nn.Module:
47
  torch.set_grad_enabled(False)
48
  model_path = huggingface_hub.hf_hub_download(MODEL_REPO,
49
- MODEL_FILENAME,
50
- use_auth_token=HF_TOKEN)
51
- config_path = huggingface_hub.hf_hub_download(MODEL_REPO,
52
- CONFIG_FILENAME,
53
- use_auth_token=HF_TOKEN)
54
  state_dict = torch.load(model_path)
55
  model = Model(cfg=config_path)
56
  model.load_state_dict(state_dict)
@@ -98,25 +90,32 @@ examples = [[path.as_posix(), 0.4, 0.5] for path in image_paths]
98
 
99
  device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
100
  model = load_model(device)
101
- func = functools.partial(predict, device=device, model=model)
102
-
103
- gr.Interface(
104
- fn=func,
105
- inputs=[
106
- gr.Image(label='Input', type='pil'),
107
- gr.Slider(label='Score Threshold',
108
- minimum=0,
109
- maximum=1,
110
- step=0.05,
111
- value=0.4),
112
- gr.Slider(label='IoU Threshold',
113
- minimum=0,
114
- maximum=1,
115
- step=0.05,
116
- value=0.5),
117
- ],
118
- outputs=gr.Image(label='Output'),
119
- examples=examples,
120
- title=TITLE,
121
- description=DESCRIPTION,
122
- ).queue().launch(show_api=False)
 
 
 
 
 
 
 
 
21
  from utils.datasets import letterbox
22
  from utils.general import non_max_suppression, scale_coords
23
 
24
+ DESCRIPTION = '# [zymk9/yolov5_anime](https://github.com/zymk9/yolov5_anime)'
 
25
 
26
+ MODEL_REPO = 'public-data/yolov5_anime'
 
 
 
27
 
28
 
29
  def load_sample_image_paths() -> list[pathlib.Path]:
 
32
  dataset_repo = 'hysts/sample-images-TADNE'
33
  path = huggingface_hub.hf_hub_download(dataset_repo,
34
  'images.tar.gz',
35
+ repo_type='dataset')
 
36
  with tarfile.open(path) as f:
37
  f.extractall()
38
  return sorted(image_dir.glob('*'))
 
41
  def load_model(device: torch.device) -> torch.nn.Module:
42
  torch.set_grad_enabled(False)
43
  model_path = huggingface_hub.hf_hub_download(MODEL_REPO,
44
+ 'yolov5x_anime.pth')
45
+ config_path = huggingface_hub.hf_hub_download(MODEL_REPO, 'yolov5x.yaml')
 
 
 
46
  state_dict = torch.load(model_path)
47
  model = Model(cfg=config_path)
48
  model.load_state_dict(state_dict)
 
90
 
91
  device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
92
  model = load_model(device)
93
+ fn = functools.partial(predict, device=device, model=model)
94
+
95
+ with gr.Blocks(css='style.css') as demo:
96
+ gr.Markdown(DESCRIPTION)
97
+ with gr.Row():
98
+ with gr.Column():
99
+ image = gr.Image(label='Input', type='pil')
100
+ score_threshold = gr.Slider(label='Score Threshold',
101
+ minimum=0,
102
+ maximum=1,
103
+ step=0.05,
104
+ value=0.4)
105
+ iou_threshold = gr.Slider(label='IoU Threshold',
106
+ minimum=0,
107
+ maximum=1,
108
+ step=0.05,
109
+ value=0.5)
110
+ run_button = gr.Button('Run')
111
+ with gr.Column():
112
+ result = gr.Image(label='Output')
113
+
114
+ inputs = [image, score_threshold, iou_threshold]
115
+ gr.Examples(examples=examples,
116
+ inputs=inputs,
117
+ outputs=result,
118
+ fn=fn,
119
+ cache_examples=os.getenv('CACHE_EXAMPLES') == '1')
120
+ run_button.click(fn=fn, inputs=inputs, outputs=result, api_name='predict')
121
+ demo.queue(max_size=15).launch()
requirements.txt CHANGED
@@ -1,4 +1,4 @@
1
- opencv-python-headless==4.5.5.62
2
- scipy>=1.7.3
3
- torch>=1.10.1
4
- torchvision>=0.11.2
 
1
+ opencv-python-headless==4.7.0.72
2
+ scipy==1.10.1
3
+ torch==2.0.1
4
+ torchvision==0.15.2
style.css ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ h1 {
2
+ text-align: center;
3
+ }