hysts (HF staff) committed
Commit 5721855
1 Parent(s): 4a63b72
Files changed (8)
  1. .pre-commit-config.yaml +59 -35
  2. .style.yapf +0 -5
  3. .vscode/settings.json +30 -0
  4. README.md +1 -1
  5. app.py +29 -37
  6. model.py +15 -25
  7. requirements.txt +3 -3
  8. style.css +6 -6
.pre-commit-config.yaml CHANGED
@@ -1,36 +1,60 @@
-exclude: ^StyleSwin
 repos:
-- repo: https://github.com/pre-commit/pre-commit-hooks
-  rev: v4.2.0
-  hooks:
-  - id: check-executables-have-shebangs
-  - id: check-json
-  - id: check-merge-conflict
-  - id: check-shebang-scripts-are-executable
-  - id: check-toml
-  - id: check-yaml
-  - id: double-quote-string-fixer
-  - id: end-of-file-fixer
-  - id: mixed-line-ending
-    args: ['--fix=lf']
-  - id: requirements-txt-fixer
-  - id: trailing-whitespace
-- repo: https://github.com/myint/docformatter
-  rev: v1.4
-  hooks:
-  - id: docformatter
-    args: ['--in-place']
-- repo: https://github.com/pycqa/isort
-  rev: 5.12.0
-  hooks:
-  - id: isort
-- repo: https://github.com/pre-commit/mirrors-mypy
-  rev: v0.991
-  hooks:
-  - id: mypy
-    args: ['--ignore-missing-imports']
-- repo: https://github.com/google/yapf
-  rev: v0.32.0
-  hooks:
-  - id: yapf
-    args: ['--parallel', '--in-place']
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.5.0
+    hooks:
+      - id: check-executables-have-shebangs
+      - id: check-json
+      - id: check-merge-conflict
+      - id: check-shebang-scripts-are-executable
+      - id: check-toml
+      - id: check-yaml
+      - id: end-of-file-fixer
+      - id: mixed-line-ending
+        args: ["--fix=lf"]
+      - id: requirements-txt-fixer
+      - id: trailing-whitespace
+  - repo: https://github.com/myint/docformatter
+    rev: v1.7.5
+    hooks:
+      - id: docformatter
+        args: ["--in-place"]
+  - repo: https://github.com/pycqa/isort
+    rev: 5.13.2
+    hooks:
+      - id: isort
+        args: ["--profile", "black"]
+  - repo: https://github.com/pre-commit/mirrors-mypy
+    rev: v1.8.0
+    hooks:
+      - id: mypy
+        args: ["--ignore-missing-imports"]
+        additional_dependencies:
+          [
+            "types-python-slugify",
+            "types-requests",
+            "types-PyYAML",
+            "types-pytz",
+          ]
+  - repo: https://github.com/psf/black
+    rev: 24.2.0
+    hooks:
+      - id: black
+        language_version: python3.10
+        args: ["--line-length", "119"]
+  - repo: https://github.com/kynan/nbstripout
+    rev: 0.7.1
+    hooks:
+      - id: nbstripout
+        args:
+          [
+            "--extra-keys",
+            "metadata.interpreter metadata.kernelspec cell.metadata.pycharm",
+          ]
+  - repo: https://github.com/nbQA-dev/nbQA
+    rev: 1.7.1
+    hooks:
+      - id: nbqa-black
+      - id: nbqa-pyupgrade
+        args: ["--py37-plus"]
+      - id: nbqa-isort
+        args: ["--float-to-top"]
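Formatting note: the hook set drops yapf and double-quote-string-fixer in favor of black at line length 119 (with isort switched to the black profile), which accounts for the purely stylistic quote and line-wrapping churn in the app.py and model.py hunks below. The same Dropdown call from app.py, before and after:

    # before: yapf wrapping and single quotes
    model_name = gr.Dropdown(model.MODEL_NAMES,
                             value=model.MODEL_NAMES[3],
                             label='Model')

    # after: black at --line-length 119, double quotes
    model_name = gr.Dropdown(model.MODEL_NAMES, value=model.MODEL_NAMES[3], label="Model")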
.style.yapf DELETED
@@ -1,5 +0,0 @@
-[style]
-based_on_style = pep8
-blank_line_before_nested_class_or_def = false
-spaces_before_comment = 2
-split_before_logical_operator = true
.vscode/settings.json ADDED
@@ -0,0 +1,30 @@
+{
+  "editor.formatOnSave": true,
+  "files.insertFinalNewline": false,
+  "[python]": {
+    "editor.defaultFormatter": "ms-python.black-formatter",
+    "editor.formatOnType": true,
+    "editor.codeActionsOnSave": {
+      "source.organizeImports": "explicit"
+    }
+  },
+  "[jupyter]": {
+    "files.insertFinalNewline": false
+  },
+  "black-formatter.args": [
+    "--line-length=119"
+  ],
+  "isort.args": ["--profile", "black"],
+  "flake8.args": [
+    "--max-line-length=119"
+  ],
+  "ruff.lint.args": [
+    "--line-length=119"
+  ],
+  "notebook.output.scrolling": true,
+  "notebook.formatOnCellExecution": true,
+  "notebook.formatOnSave.enabled": true,
+  "notebook.codeActionsOnSave": {
+    "source.organizeImports": "explicit"
+  }
+}
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 👁
 colorFrom: blue
 colorTo: red
 sdk: gradio
-sdk_version: 3.36.1
+sdk_version: 4.19.2
 app_file: app.py
 pinned: false
 suggested_hardware: t4-small
app.py CHANGED
@@ -7,71 +7,63 @@ import numpy as np
 
 from model import Model
 
-DESCRIPTION = '# [StyleSwin](https://github.com/microsoft/StyleSwin)'
+DESCRIPTION = "# [StyleSwin](https://github.com/microsoft/StyleSwin)"
 
 
 def get_sample_image_url(name: str) -> str:
-    sample_image_dir = 'https://huggingface.co/spaces/hysts/StyleSwin/resolve/main/samples'
-    return f'{sample_image_dir}/{name}.jpg'
+    sample_image_dir = "https://huggingface.co/spaces/hysts/StyleSwin/resolve/main/samples"
+    return f"{sample_image_dir}/{name}.jpg"
 
 
 def get_sample_image_markdown(name: str) -> str:
     url = get_sample_image_url(name)
-    if name == 'celeba-hq':
+    if name == "celeba-hq":
         size = 1024
-    elif name == 'ffhq':
+    elif name == "ffhq":
         size = 1024
-    elif name == 'lsun-church':
+    elif name == "lsun-church":
         size = 256
     else:
         raise ValueError
-    seed = '0-99'
-    return f'''
+    seed = "0-99"
+    return f"""
 - size: {size}x{size}
 - seed: {seed}
-![sample images]({url})'''
+![sample images]({url})"""
 
 
 model = Model()
 
-with gr.Blocks(css='style.css') as demo:
+with gr.Blocks(css="style.css") as demo:
     gr.Markdown(DESCRIPTION)
 
     with gr.Tabs():
-        with gr.TabItem('App'):
+        with gr.TabItem("App"):
             with gr.Row():
                 with gr.Column():
-                    model_name = gr.Dropdown(model.MODEL_NAMES,
-                                             value=model.MODEL_NAMES[3],
-                                             label='Model')
-                    seed = gr.Slider(0,
-                                     np.iinfo(np.uint32).max,
-                                     step=1,
-                                     value=0,
-                                     label='Seed')
-                    run_button = gr.Button('Run')
+                    model_name = gr.Dropdown(model.MODEL_NAMES, value=model.MODEL_NAMES[3], label="Model")
+                    seed = gr.Slider(0, np.iinfo(np.uint32).max, step=1, value=0, label="Seed")
+                    run_button = gr.Button("Run")
                 with gr.Column():
-                    result = gr.Image(label='Result', elem_id='result')
+                    result = gr.Image(label="Result", elem_id="result")
 
-        with gr.TabItem('Sample Images'):
+        with gr.TabItem("Sample Images"):
             with gr.Row():
-                model_name2 = gr.Dropdown([
-                    'celeba-hq',
-                    'ffhq',
-                    'lsun-church',
-                ],
-                                          value='celeba-hq',
-                                          label='Model')
+                model_name2 = gr.Dropdown(
+                    [
+                        "celeba-hq",
+                        "ffhq",
+                        "lsun-church",
+                    ],
+                    value="celeba-hq",
+                    label="Model",
+                )
             with gr.Row():
                 text = get_sample_image_markdown(model_name2.value)
                 sample_images = gr.Markdown(text)
 
-    run_button.click(fn=model.set_model_and_generate_image,
-                     inputs=[model_name, seed],
-                     outputs=result,
-                     api_name='run')
-    model_name2.change(fn=get_sample_image_markdown,
-                       inputs=model_name2,
-                       outputs=sample_images)
+    run_button.click(fn=model.set_model_and_generate_image, inputs=[model_name, seed], outputs=result, api_name="run")
+    model_name2.change(fn=get_sample_image_markdown, inputs=model_name2, outputs=sample_images)
 
-demo.queue(max_size=15).launch()
+
+if __name__ == "__main__":
+    demo.queue(max_size=15).launch()
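Since run_button.click keeps api_name="run", the Space remains callable programmatically after the Gradio 4 migration. A minimal sketch using gradio_client (assumptions for illustration: the Space id is hysts/StyleSwin, as in the sample-image URL above, and the Image output is returned as a local file path, the usual Gradio 4 client behavior):

    from gradio_client import Client

    client = Client("hysts/StyleSwin")
    # Inputs mirror the UI: a name from Model.MODEL_NAMES and a seed within uint32 range.
    result = client.predict("FFHQ_1024", 42, api_name="/run")
    print(result)  # path to the generated image file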
model.py CHANGED
@@ -9,12 +9,12 @@ import numpy as np
 import torch
 import torch.nn as nn
 
-if os.getenv('SYSTEM') == 'spaces':
+if os.getenv("SYSTEM") == "spaces":
     os.system("sed -i '14,21d' StyleSwin/op/fused_act.py")
     os.system("sed -i '12,19d' StyleSwin/op/upfirdn2d.py")
 
 current_dir = pathlib.Path(__file__).parent
-submodule_dir = current_dir / 'StyleSwin'
+submodule_dir = current_dir / "StyleSwin"
 sys.path.insert(0, submodule_dir.as_posix())
 
 from models.generator import Generator
@@ -22,38 +22,29 @@ from models.generator import Generator
 
 class Model:
     MODEL_NAMES = [
-        'CelebAHQ_256',
-        'FFHQ_256',
-        'LSUNChurch_256',
-        'CelebAHQ_1024',
-        'FFHQ_1024',
+        "CelebAHQ_256",
+        "FFHQ_256",
+        "LSUNChurch_256",
+        "CelebAHQ_1024",
+        "FFHQ_1024",
     ]
 
     def __init__(self):
-        self.device = torch.device(
-            'cuda:0' if torch.cuda.is_available() else 'cpu')
+        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
         self._download_all_models()
         self.model_name = self.MODEL_NAMES[3]
         self.model = self._load_model(self.model_name)
 
-        self.std = torch.FloatTensor([0.229, 0.224,
-                                      0.225])[None, :, None,
-                                              None].to(self.device)
-        self.mean = torch.FloatTensor([0.485, 0.456,
-                                       0.406])[None, :, None,
-                                               None].to(self.device)
+        self.std = torch.FloatTensor([0.229, 0.224, 0.225])[None, :, None, None].to(self.device)
+        self.mean = torch.FloatTensor([0.485, 0.456, 0.406])[None, :, None, None].to(self.device)
 
     def _load_model(self, model_name: str) -> nn.Module:
-        size = int(model_name.split('_')[1])
+        size = int(model_name.split("_")[1])
         channel_multiplier = 1 if size == 1024 else 2
-        model = Generator(size,
-                          style_dim=512,
-                          n_mlp=8,
-                          channel_multiplier=channel_multiplier)
-        ckpt_path = huggingface_hub.hf_hub_download('public-data/StyleSwin',
-                                                    f'models/{model_name}.pt')
+        model = Generator(size, style_dim=512, n_mlp=8, channel_multiplier=channel_multiplier)
+        ckpt_path = huggingface_hub.hf_hub_download("public-data/StyleSwin", f"models/{model_name}.pt")
         ckpt = torch.load(ckpt_path)
-        model.load_state_dict(ckpt['g_ema'])
+        model.load_state_dict(ckpt["g_ema"])
         model.to(self.device)
         model.eval()
         return model
@@ -86,7 +77,6 @@ class Model:
         out = self.postprocess(out)
         return out[0]
 
-    def set_model_and_generate_image(self, model_name: str,
-                                     seed: int) -> np.ndarray:
+    def set_model_and_generate_image(self, model_name: str, seed: int) -> np.ndarray:
         self.set_model(model_name)
         return self.generate_image(seed)
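The reflowed set_model_and_generate_image keeps its original signature, so direct use outside Gradio is unchanged. A rough sketch, assuming the StyleSwin submodule is importable and the checkpoints can be downloaded from public-data/StyleSwin:

    import numpy as np

    from model import Model

    model = Model()  # defaults to MODEL_NAMES[3], i.e. "CelebAHQ_1024"
    image = model.set_model_and_generate_image("FFHQ_1024", 0)  # returns an np.ndarray
    print(image.shape, image.dtype)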
requirements.txt CHANGED
@@ -1,5 +1,5 @@
-numpy==1.23.5
-Pillow==9.5.0
-timm==0.9.2
+numpy==1.26.4
+Pillow==10.2.0
+timm==0.9.16
 torch==2.0.1
 torchvision==0.15.2
style.css CHANGED
@@ -1,11 +1,11 @@
 h1 {
   text-align: center;
-}
-div#result {
-  max-width: 600px;
-  max-height: 600px;
-}
-img#visitor-badge {
   display: block;
+}
+
+#duplicate-button {
   margin: auto;
+  color: #fff;
+  background: #1565c0;
+  border-radius: 100vh;
 }
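The new #duplicate-button rule has no matching element in the app.py hunk of this commit; in comparable Spaces that id usually belongs to a gr.DuplicateButton. A hypothetical illustration only, not part of this diff:

    import gradio as gr

    with gr.Blocks(css="style.css") as demo:
        # elem_id matches the #duplicate-button selector in style.css
        gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")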