hysts (HF staff) committed
Commit b8ce03a
1 parent: bbe49e5
Files changed (7)
  1. .pre-commit-config.yaml +59 -35
  2. .style.yapf +0 -5
  3. .vscode/settings.json +23 -11
  4. README.md +1 -1
  5. app.py +31 -44
  6. requirements.txt +1 -2
  7. style.css +8 -0
.pre-commit-config.yaml CHANGED
@@ -1,36 +1,60 @@
 repos:
-  - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.2.0
-    hooks:
-      - id: check-executables-have-shebangs
-      - id: check-json
-      - id: check-merge-conflict
-      - id: check-shebang-scripts-are-executable
-      - id: check-toml
-      - id: check-yaml
-      - id: double-quote-string-fixer
-      - id: end-of-file-fixer
-      - id: mixed-line-ending
-        args: ['--fix=lf']
-      - id: requirements-txt-fixer
-      - id: trailing-whitespace
-  - repo: https://github.com/myint/docformatter
-    rev: v1.4
-    hooks:
-      - id: docformatter
-        args: ['--in-place']
-  - repo: https://github.com/pycqa/isort
-    rev: 5.12.0
-    hooks:
-      - id: isort
-  - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: v0.991
-    hooks:
-      - id: mypy
-        args: ['--ignore-missing-imports']
-        additional_dependencies: ['types-python-slugify']
-  - repo: https://github.com/google/yapf
-    rev: v0.32.0
-    hooks:
-      - id: yapf
-        args: ['--parallel', '--in-place']
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.5.0
+    hooks:
+      - id: check-executables-have-shebangs
+      - id: check-json
+      - id: check-merge-conflict
+      - id: check-shebang-scripts-are-executable
+      - id: check-toml
+      - id: check-yaml
+      - id: end-of-file-fixer
+      - id: mixed-line-ending
+        args: ["--fix=lf"]
+      - id: requirements-txt-fixer
+      - id: trailing-whitespace
+  - repo: https://github.com/myint/docformatter
+    rev: v1.7.5
+    hooks:
+      - id: docformatter
+        args: ["--in-place"]
+  - repo: https://github.com/pycqa/isort
+    rev: 5.13.2
+    hooks:
+      - id: isort
+        args: ["--profile", "black"]
+  - repo: https://github.com/pre-commit/mirrors-mypy
+    rev: v1.8.0
+    hooks:
+      - id: mypy
+        args: ["--ignore-missing-imports"]
+        additional_dependencies:
+          [
+            "types-python-slugify",
+            "types-requests",
+            "types-PyYAML",
+            "types-pytz",
+          ]
+  - repo: https://github.com/psf/black
+    rev: 24.2.0
+    hooks:
+      - id: black
+        language_version: python3.10
+        args: ["--line-length", "119"]
+  - repo: https://github.com/kynan/nbstripout
+    rev: 0.7.1
+    hooks:
+      - id: nbstripout
+        args:
+          [
+            "--extra-keys",
+            "metadata.interpreter metadata.kernelspec cell.metadata.pycharm",
+          ]
+  - repo: https://github.com/nbQA-dev/nbQA
+    rev: 1.7.1
+    hooks:
+      - id: nbqa-black
+      - id: nbqa-pyupgrade
+        args: ["--py37-plus"]
+      - id: nbqa-isort
+        args: ["--float-to-top"]
.style.yapf DELETED
@@ -1,5 +0,0 @@
-[style]
-based_on_style = pep8
-blank_line_before_nested_class_or_def = false
-spaces_before_comment = 2
-split_before_logical_operator = true
.vscode/settings.json CHANGED
@@ -1,18 +1,30 @@
 {
-  "python.linting.enabled": true,
-  "python.linting.flake8Enabled": true,
-  "python.linting.pylintEnabled": false,
-  "python.linting.lintOnSave": true,
-  "python.formatting.provider": "yapf",
-  "python.formatting.yapfArgs": [
-    "--style={based_on_style: pep8, indent_width: 4, blank_line_before_nested_class_or_def: false, spaces_before_comment: 2, split_before_logical_operator: true}"
-  ],
+  "editor.formatOnSave": true,
+  "files.insertFinalNewline": false,
   "[python]": {
+    "editor.defaultFormatter": "ms-python.black-formatter",
     "editor.formatOnType": true,
     "editor.codeActionsOnSave": {
-      "source.organizeImports": true
+      "source.organizeImports": "explicit"
     }
   },
-  "editor.formatOnSave": true,
-  "files.insertFinalNewline": true
+  "[jupyter]": {
+    "files.insertFinalNewline": false
+  },
+  "black-formatter.args": [
+    "--line-length=119"
+  ],
+  "isort.args": ["--profile", "black"],
+  "flake8.args": [
+    "--max-line-length=119"
+  ],
+  "ruff.lint.args": [
+    "--line-length=119"
+  ],
+  "notebook.output.scrolling": true,
+  "notebook.formatOnCellExecution": true,
+  "notebook.formatOnSave.enabled": true,
+  "notebook.codeActionsOnSave": {
+    "source.organizeImports": "explicit"
+  }
 }
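The rewritten editor settings delegate Python formatting to the ms-python.black-formatter extension and pin the same 119-character limit for black, flake8, and ruff that the pre-commit hooks use. A small illustration of that limit (not part of the commit; assumes the black package is installed, and the snippet is purely hypothetical):

    import black

    # Roughly 90 characters: under a 119-character limit, black keeps the call on one line.
    SOURCE = "result = some_function(argument_one, argument_two, argument_three, argument_four)\n"
    print(black.format_str(SOURCE, mode=black.Mode(line_length=119)))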
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🏃
 colorFrom: gray
 colorTo: purple
 sdk: gradio
-sdk_version: 3.37.0
+sdk_version: 4.19.2
 app_file: app.py
 pinned: false
 ---
app.py CHANGED
@@ -13,31 +13,26 @@ import numpy as np
 import PIL.Image
 import tensorflow as tf
 
-DESCRIPTION = '# [KichangKim/DeepDanbooru](https://github.com/KichangKim/DeepDanbooru)'
+DESCRIPTION = "# [KichangKim/DeepDanbooru](https://github.com/KichangKim/DeepDanbooru)"
 
 
 def load_sample_image_paths() -> list[pathlib.Path]:
-    image_dir = pathlib.Path('images')
+    image_dir = pathlib.Path("images")
     if not image_dir.exists():
-        path = huggingface_hub.hf_hub_download(
-            'public-data/sample-images-TADNE',
-            'images.tar.gz',
-            repo_type='dataset')
+        path = huggingface_hub.hf_hub_download("public-data/sample-images-TADNE", "images.tar.gz", repo_type="dataset")
         with tarfile.open(path) as f:
             f.extractall()
-    return sorted(image_dir.glob('*'))
+    return sorted(image_dir.glob("*"))
 
 
 def load_model() -> tf.keras.Model:
-    path = huggingface_hub.hf_hub_download('public-data/DeepDanbooru',
-                                           'model-resnet_custom_v3.h5')
+    path = huggingface_hub.hf_hub_download("public-data/DeepDanbooru", "model-resnet_custom_v3.h5")
     model = tf.keras.models.load_model(path)
     return model
 
 
 def load_labels() -> list[str]:
-    path = huggingface_hub.hf_hub_download('public-data/DeepDanbooru',
-                                           'tags.txt')
+    path = huggingface_hub.hf_hub_download("public-data/DeepDanbooru", "tags.txt")
     with open(path) as f:
         labels = [line.strip() for line in f.readlines()]
     return labels
@@ -47,18 +42,13 @@ model = load_model()
 labels = load_labels()
 
 
-def predict(
-    image: PIL.Image.Image, score_threshold: float
-) -> tuple[dict[str, float], dict[str, float], str]:
+def predict(image: PIL.Image.Image, score_threshold: float) -> tuple[dict[str, float], dict[str, float], str]:
     _, height, width, _ = model.input_shape
     image = np.asarray(image)
-    image = tf.image.resize(image,
-                            size=(height, width),
-                            method=tf.image.ResizeMethod.AREA,
-                            preserve_aspect_ratio=True)
+    image = tf.image.resize(image, size=(height, width), method=tf.image.ResizeMethod.AREA, preserve_aspect_ratio=True)
     image = image.numpy()
     image = dd.image.transform_and_pad_image(image, width, height)
-    image = image / 255.
+    image = image / 255.0
     probs = model.predict(image[None, ...])[0]
     probs = probs.astype(float)
 
@@ -72,45 +62,42 @@ def predict(
         if prob < score_threshold:
             break
         result_threshold[label] = prob
-    result_text = ', '.join(result_all.keys())
+    result_text = ", ".join(result_all.keys())
     return result_threshold, result_all, result_text
 
 
 image_paths = load_sample_image_paths()
 examples = [[path.as_posix(), 0.5] for path in image_paths]
 
-with gr.Blocks(css='style.css') as demo:
+with gr.Blocks(css="style.css") as demo:
     gr.Markdown(DESCRIPTION)
     with gr.Row():
         with gr.Column():
-            image = gr.Image(label='Input', type='pil')
-            score_threshold = gr.Slider(label='Score threshold',
-                                        minimum=0,
-                                        maximum=1,
-                                        step=0.05,
-                                        value=0.5)
-            run_button = gr.Button('Run')
+            image = gr.Image(label="Input", type="pil")
+            score_threshold = gr.Slider(label="Score threshold", minimum=0, maximum=1, step=0.05, value=0.5)
+            run_button = gr.Button("Run")
         with gr.Column():
             with gr.Tabs():
-                with gr.Tab(label='Output'):
-                    result = gr.Label(label='Output', show_label=False)
-                with gr.Tab(label='JSON'):
-                    result_json = gr.JSON(label='JSON output',
-                                          show_label=False)
-                with gr.Tab(label='Text'):
-                    result_text = gr.Text(label='Text output',
-                                          show_label=False,
-                                          lines=5)
-    gr.Examples(examples=examples,
-                inputs=[image, score_threshold],
-                outputs=[result, result_json, result_text],
-                fn=predict,
-                cache_examples=os.getenv('CACHE_EXAMPLES') == '1')
+                with gr.Tab(label="Output"):
+                    result = gr.Label(label="Output", show_label=False)
+                with gr.Tab(label="JSON"):
+                    result_json = gr.JSON(label="JSON output", show_label=False)
+                with gr.Tab(label="Text"):
+                    result_text = gr.Text(label="Text output", show_label=False, lines=5)
+    gr.Examples(
+        examples=examples,
+        inputs=[image, score_threshold],
+        outputs=[result, result_json, result_text],
+        fn=predict,
+        cache_examples=os.getenv("CACHE_EXAMPLES") == "1",
+    )
 
     run_button.click(
         fn=predict,
        inputs=[image, score_threshold],
         outputs=[result, result_json, result_text],
-        api_name='predict',
+        api_name="predict",
     )
-    demo.queue().launch()
+
+if __name__ == "__main__":
+    demo.queue(max_size=20).launch()
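Besides the reformatting to black style, app.py now guards the launch behind `__main__`, so importing the module builds the Blocks UI (and downloads the model) without starting a server, and `predict()` stays callable on its own. A hypothetical smoke test under those assumptions (not part of the commit; "sample.png" is a placeholder file name):

    import PIL.Image

    from app import predict  # builds the UI and loads the model, but does not launch

    image = PIL.Image.open("sample.png").convert("RGB")
    tags_above_threshold, all_tags, tags_text = predict(image, score_threshold=0.5)
    print(tags_text)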
requirements.txt CHANGED
@@ -1,4 +1,3 @@
 git+https://github.com/KichangKim/DeepDanbooru@v3-20200915-sgd-e30#egg=deepdanbooru
-pillow==10.0.0
-pydantic==1.10.11
+pillow==10.2.0
 tensorflow==2.13.0
style.css CHANGED
@@ -1,3 +1,11 @@
 h1 {
   text-align: center;
+  display: block;
+}
+
+#duplicate-button {
+  margin: auto;
+  color: #fff;
+  background: #1565c0;
+  border-radius: 100vh;
 }