hysts (HF staff) committed

Commit 8575998
1 Parent(s): 70a9f38

Files changed (8):
  1. .pre-commit-config.yaml +59 -34
  2. .style.yapf +0 -5
  3. .vscode/settings.json +30 -0
  4. README.md +1 -1
  5. app.py +25 -32
  6. model.py +14 -18
  7. requirements.txt +3 -3
  8. style.css +8 -0
.pre-commit-config.yaml CHANGED
@@ -1,35 +1,60 @@
 repos:
-  - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.2.0
-    hooks:
-      - id: check-executables-have-shebangs
-      - id: check-json
-      - id: check-merge-conflict
-      - id: check-shebang-scripts-are-executable
-      - id: check-toml
-      - id: check-yaml
-      - id: double-quote-string-fixer
-      - id: end-of-file-fixer
-      - id: mixed-line-ending
-        args: ['--fix=lf']
-      - id: requirements-txt-fixer
-      - id: trailing-whitespace
-  - repo: https://github.com/myint/docformatter
-    rev: v1.4
-    hooks:
-      - id: docformatter
-        args: ['--in-place']
-  - repo: https://github.com/pycqa/isort
-    rev: 5.12.0
-    hooks:
-      - id: isort
-  - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: v0.991
-    hooks:
-      - id: mypy
-        args: ['--ignore-missing-imports']
-  - repo: https://github.com/google/yapf
-    rev: v0.32.0
-    hooks:
-      - id: yapf
-        args: ['--parallel', '--in-place']
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.6.0
+    hooks:
+      - id: check-executables-have-shebangs
+      - id: check-json
+      - id: check-merge-conflict
+      - id: check-shebang-scripts-are-executable
+      - id: check-toml
+      - id: check-yaml
+      - id: end-of-file-fixer
+      - id: mixed-line-ending
+        args: ["--fix=lf"]
+      - id: requirements-txt-fixer
+      - id: trailing-whitespace
+  - repo: https://github.com/myint/docformatter
+    rev: v1.7.5
+    hooks:
+      - id: docformatter
+        args: ["--in-place"]
+  - repo: https://github.com/pycqa/isort
+    rev: 5.13.2
+    hooks:
+      - id: isort
+        args: ["--profile", "black"]
+  - repo: https://github.com/pre-commit/mirrors-mypy
+    rev: v1.10.0
+    hooks:
+      - id: mypy
+        args: ["--ignore-missing-imports"]
+        additional_dependencies:
+          [
+            "types-python-slugify",
+            "types-requests",
+            "types-PyYAML",
+            "types-pytz",
+          ]
+  - repo: https://github.com/psf/black
+    rev: 24.4.2
+    hooks:
+      - id: black
+        language_version: python3.10
+        args: ["--line-length", "119"]
+  - repo: https://github.com/kynan/nbstripout
+    rev: 0.7.1
+    hooks:
+      - id: nbstripout
+        args:
+          [
+            "--extra-keys",
+            "metadata.interpreter metadata.kernelspec cell.metadata.pycharm",
+          ]
+  - repo: https://github.com/nbQA-dev/nbQA
+    rev: 1.8.5
+    hooks:
+      - id: nbqa-black
+      - id: nbqa-pyupgrade
+        args: ["--py37-plus"]
+      - id: nbqa-isort
+        args: ["--float-to-top"]
.style.yapf DELETED
@@ -1,5 +0,0 @@
-[style]
-based_on_style = pep8
-blank_line_before_nested_class_or_def = false
-spaces_before_comment = 2
-split_before_logical_operator = true
.vscode/settings.json ADDED
@@ -0,0 +1,30 @@
+{
+  "editor.formatOnSave": true,
+  "files.insertFinalNewline": false,
+  "[python]": {
+    "editor.defaultFormatter": "ms-python.black-formatter",
+    "editor.formatOnType": true,
+    "editor.codeActionsOnSave": {
+      "source.organizeImports": "explicit"
+    }
+  },
+  "[jupyter]": {
+    "files.insertFinalNewline": false
+  },
+  "black-formatter.args": [
+    "--line-length=119"
+  ],
+  "isort.args": ["--profile", "black"],
+  "flake8.args": [
+    "--max-line-length=119"
+  ],
+  "ruff.lint.args": [
+    "--line-length=119"
+  ],
+  "notebook.output.scrolling": true,
+  "notebook.formatOnCellExecution": true,
+  "notebook.formatOnSave.enabled": true,
+  "notebook.codeActionsOnSave": {
+    "source.organizeImports": "explicit"
+  }
+}
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 📚
 colorFrom: indigo
 colorTo: pink
 sdk: gradio
-sdk_version: 3.36.1
+sdk_version: 4.33.0
 app_file: app.py
 pinned: false
 suggested_hardware: t4-small
app.py CHANGED
@@ -12,25 +12,25 @@ from huggingface_hub import hf_hub_download
 
 from model import Model
 
-DESCRIPTION = '# [MobileStyleGAN](https://github.com/bes-dev/MobileStyleGAN.pytorch)'
-SAMPLE_IMAGE_DIR = 'https://huggingface.co/spaces/hysts/MobileStyleGAN/resolve/main/samples'
-ARTICLE = f'''## Generated images
+DESCRIPTION = "# [MobileStyleGAN](https://github.com/bes-dev/MobileStyleGAN.pytorch)"
+SAMPLE_IMAGE_DIR = "https://huggingface.co/spaces/hysts/MobileStyleGAN/resolve/main/samples"
+ARTICLE = f"""## Generated images
 ### FFHQ
 - size: 1024x1024
 - seed: 0-99
 - truncation: 1.0
 ![FFHQ]({SAMPLE_IMAGE_DIR}/ffhq.jpg)
-'''
+"""
 
 
 def generate_z(z_dim: int, seed: int, device: torch.device) -> torch.Tensor:
-    return torch.from_numpy(np.random.RandomState(seed).randn(
-        1, z_dim)).to(device).float()
+    return torch.from_numpy(np.random.RandomState(seed).randn(1, z_dim)).to(device).float()
 
 
 @torch.inference_mode()
-def generate_image(seed: int, truncation_psi: float, generator: str,
-                   model: nn.Module, device: torch.device) -> np.ndarray:
+def generate_image(
+    seed: int, truncation_psi: float, generator: str, model: nn.Module, device: torch.device
+) -> np.ndarray:
     seed = int(np.clip(seed, 0, np.iinfo(np.uint32).max))
 
     z = generate_z(model.mapping_net.style_dim, seed, device)
@@ -41,11 +41,10 @@ def generate_image(seed: int, truncation_psi: float, generator: str,
 
 
 def load_model(device: torch.device) -> nn.Module:
-    path = hf_hub_download('public-data/MobileStyleGAN',
-                           'models/mobilestylegan_ffhq_v2.pth')
+    path = hf_hub_download("public-data/MobileStyleGAN", "models/mobilestylegan_ffhq_v2.pth")
     ckpt = torch.load(path)
     model = Model()
-    model.load_state_dict(ckpt['state_dict'], strict=False)
+    model.load_state_dict(ckpt["state_dict"], strict=False)
     model.eval()
     model.to(device)
     with torch.inference_mode():
@@ -54,36 +53,30 @@ def load_model(device: torch.device) -> nn.Module:
     return model
 
 
-device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
+device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 model = load_model(device)
 
 fn = functools.partial(generate_image, model=model, device=device)
 
-with gr.Blocks(css='style.css') as demo:
+with gr.Blocks(css="style.css") as demo:
     gr.Markdown(DESCRIPTION)
     with gr.Row():
         with gr.Column():
             with gr.Group():
-                seed = gr.Slider(label='Seed',
-                                 minimum=0,
-                                 maximum=100000,
-                                 step=1,
-                                 value=0,
-                                 randomize=True)
-                psi = gr.Slider(label='Truncation psi',
-                                minimum=0,
-                                maximum=2,
-                                step=0.05,
-                                value=1.0)
-                generator = gr.Radio(label='Generator',
-                                     choices=['student', 'teacher'],
-                                     type='value',
-                                     value='student')
-                run_button = gr.Button('Run')
+                seed = gr.Slider(label="Seed", minimum=0, maximum=100000, step=1, value=0, randomize=True)
+                psi = gr.Slider(label="Truncation psi", minimum=0, maximum=2, step=0.05, value=1.0)
+                generator = gr.Radio(label="Generator", choices=["student", "teacher"], type="value", value="student")
+                run_button = gr.Button("Run")
         with gr.Column():
-            result = gr.Image(label='Output', type='numpy')
+            result = gr.Image(label="Output", type="numpy")
     with gr.Row():
         gr.Markdown(ARTICLE)
 
-    run_button.click(fn=fn, inputs=[seed, psi, generator], outputs=result)
-demo.queue(max_size=10).launch()
+    run_button.click(
+        fn=fn,
+        inputs=[seed, psi, generator],
+        outputs=result,
+    )
+
+if __name__ == "__main__":
+    demo.queue(max_size=10).launch()
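Note: the app.py changes are a mechanical reformat (double quotes, black-style wrapping) plus one behavioral tweak: `demo.queue(max_size=10).launch()` now sits behind an `if __name__ == "__main__":` guard, so importing the module builds the UI without starting the server. A hypothetical sketch of driving the generation path directly, assuming the Space's dependencies and checkpoint download are available:

```python
# Hypothetical usage, not part of the commit. Importing app loads the
# model and builds the Blocks UI, but the new __main__ guard means the
# Gradio server is not launched on import.
import app

# app.fn is generate_image with model and device pre-bound via functools.partial.
image = app.fn(seed=0, truncation_psi=1.0, generator="student")
print(image.shape)  # FFHQ resolution, e.g. (1024, 1024, 3)
```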
model.py CHANGED
@@ -3,7 +3,7 @@ import sys
 import torch
 import torch.nn as nn
 
-sys.path.insert(0, 'MobileStyleGAN.pytorch')
+sys.path.insert(0, "MobileStyleGAN.pytorch")
 
 from core.models.mapping_network import MappingNetwork
 from core.models.mobile_synthesis_network import MobileSynthesisNetwork
@@ -14,33 +14,29 @@ class Model(nn.Module):
     def __init__(self):
         super().__init__()
         # teacher model
-        mapping_net_params = {'style_dim': 512, 'n_layers': 8, 'lr_mlp': 0.01}
+        mapping_net_params = {"style_dim": 512, "n_layers": 8, "lr_mlp": 0.01}
         synthesis_net_params = {
-            'size': 1024,
-            'style_dim': 512,
-            'blur_kernel': [1, 3, 3, 1],
-            'channels': [512, 512, 512, 512, 512, 256, 128, 64, 32]
+            "size": 1024,
+            "style_dim": 512,
+            "blur_kernel": [1, 3, 3, 1],
+            "channels": [512, 512, 512, 512, 512, 256, 128, 64, 32],
         }
         self.mapping_net = MappingNetwork(**mapping_net_params).eval()
         self.synthesis_net = SynthesisNetwork(**synthesis_net_params).eval()
         # student network
         self.student = MobileSynthesisNetwork(
-            style_dim=self.mapping_net.style_dim,
-            channels=synthesis_net_params['channels'][:-1])
+            style_dim=self.mapping_net.style_dim, channels=synthesis_net_params["channels"][:-1]
+        )
 
-        self.style_mean = nn.Parameter(torch.zeros((1, 512)),
-                                       requires_grad=False)
+        self.style_mean = nn.Parameter(torch.zeros((1, 512)), requires_grad=False)
 
-    def forward(self,
-                var: torch.Tensor,
-                truncation_psi: float = 0.5,
-                generator: str = 'student') -> torch.Tensor:
+    def forward(self, var: torch.Tensor, truncation_psi: float = 0.5, generator: str = "student") -> torch.Tensor:
         style = self.mapping_net(var)
         style = self.style_mean + truncation_psi * (style - self.style_mean)
-        if generator == 'student':
-            img = self.student(style)['img']
-        elif generator == 'teacher':
-            img = self.synthesis_net(style)['img']
+        if generator == "student":
+            img = self.student(style)["img"]
+        elif generator == "teacher":
+            img = self.synthesis_net(style)["img"]
         else:
             raise ValueError
         return img
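Note: the model.py edits are stylistic only; the wrapper's behavior is unchanged. A rough interface sketch (hypothetical; it assumes the MobileStyleGAN.pytorch checkout is importable and that trained weights are loaded, since randomly initialized weights produce noise):

```python
# Hypothetical interface sketch, not part of the commit.
import torch

from model import Model

model = Model().eval()
# In the app, load_model() fills these weights from ckpt["state_dict"];
# without that step the output is random noise.

z = torch.randn(1, 512)  # matches style_dim=512 in mapping_net_params
with torch.inference_mode():
    img = model(z, truncation_psi=0.7, generator="student")  # or "teacher"
print(img.shape)
```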
requirements.txt CHANGED
@@ -1,8 +1,8 @@
 git+https://github.com/fbcotter/pytorch_wavelets.git
-numpy==1.23.5
-Pillow==10.0.0
+numpy==1.26.4
+Pillow==10.3.0
 piq==0.6.0
 PyWavelets==1.2.0
-scipy==1.10.1
+scipy==1.13.1
 torch==2.0.1
 torchvision==0.15.2
style.css CHANGED
@@ -1,3 +1,11 @@
 h1 {
   text-align: center;
+  display: block;
+}
+
+#duplicate-button {
+  margin: auto;
+  color: #fff;
+  background: #1565c0;
+  border-radius: 100vh;
 }