hysts committed
Commit e858292
1 parent: c448dd2
Files changed (4)
  1. .gitmodules +3 -0
  2. StyleGAN-Human +1 -0
  3. app.py +138 -0
  4. requirements.txt +5 -0
.gitmodules ADDED
@@ -0,0 +1,3 @@
+ [submodule "StyleGAN-Human"]
+ 	path = StyleGAN-Human
+ 	url = https://github.com/stylegan-human/StyleGAN-Human
StyleGAN-Human ADDED
@@ -0,0 +1 @@
+ Subproject commit d2514c145a451453804f60a12de8f13d40c9fe4f
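
app.py (below) prepends the submodule directory to sys.path, so the pinned StyleGAN-Human checkout must exist before the app starts. A minimal sketch of fetching it after cloning this repo, assuming git is available on PATH:

# Minimal sketch, assuming git is on PATH: fetch the StyleGAN-Human
# submodule pinned above so that app.py can import from it.
import subprocess

subprocess.run(
    ['git', 'submodule', 'update', '--init', '--recursive'],
    check=True,
)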
app.py ADDED
@@ -0,0 +1,138 @@
+ #!/usr/bin/env python
+
+ from __future__ import annotations
+
+ import argparse
+ import functools
+ import os
+ import pickle
+ import sys
+
+ import gradio as gr
+ import numpy as np
+ import torch
+ import torch.nn as nn
+ from huggingface_hub import hf_hub_download
+
+ sys.path.insert(0, 'StyleGAN-Human')
+
+ TITLE = 'StyleGAN-Human (Interpolation)'
+ DESCRIPTION = 'This is a demo for https://github.com/stylegan-human/StyleGAN-Human.'
+ ARTICLE = None
+
+ TOKEN = os.environ['TOKEN']
+
+
+ def parse_args() -> argparse.Namespace:
+     parser = argparse.ArgumentParser()
+     parser.add_argument('--device', type=str, default='cpu')
+     parser.add_argument('--theme', type=str)
+     parser.add_argument('--live', action='store_true')
+     parser.add_argument('--share', action='store_true')
+     parser.add_argument('--port', type=int)
+     parser.add_argument('--disable-queue',
+                         dest='enable_queue',
+                         action='store_false')
+     parser.add_argument('--allow-flagging', type=str, default='never')
+     parser.add_argument('--allow-screenshot', action='store_true')
+     return parser.parse_args()
+
+
+ def load_model(file_name: str, device: torch.device) -> nn.Module:
+     path = hf_hub_download('hysts/StyleGAN-Human',
+                            f'models/{file_name}',
+                            use_auth_token=TOKEN)
+     with open(path, 'rb') as f:
+         model = pickle.load(f)['G_ema']
+     model.eval()
+     model.to(device)
+     with torch.inference_mode():
+         z = torch.zeros((1, model.z_dim)).to(device)
+         label = torch.zeros([1, model.c_dim], device=device)
+         model(z, label, force_fp32=True)
+     return model
+
+
+ def generate_z(z_dim: int, seed: int, device: torch.device) -> torch.Tensor:
+     return torch.from_numpy(np.random.RandomState(seed).randn(
+         1, z_dim)).to(device).float()
+
+
+ @torch.inference_mode()
+ def generate_interpolated_images(
+         seed0: int, psi0: float, seed1: int, psi1: float,
+         num_intermediate: int, model: nn.Module,
+         device: torch.device) -> tuple[list[np.ndarray], np.ndarray]:
+     seed0 = int(np.clip(seed0, 0, np.iinfo(np.uint32).max))
+     seed1 = int(np.clip(seed1, 0, np.iinfo(np.uint32).max))
+
+     z0 = generate_z(model.z_dim, seed0, device)
+     z1 = generate_z(model.z_dim, seed1, device)
+     vec = z1 - z0
+     dvec = vec / (num_intermediate + 1)
+     zs = [z0 + dvec * i for i in range(num_intermediate + 2)]
+     dpsi = (psi1 - psi0) / (num_intermediate + 1)
+     psis = [psi0 + dpsi * i for i in range(num_intermediate + 2)]
+
+     label = torch.zeros([1, model.c_dim], device=device)
+
+     res = []
+     for z, psi in zip(zs, psis):
+         out = model(z, label, truncation_psi=psi, force_fp32=True)
+         out = (out.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(
+             torch.uint8)
+         out = out[0].cpu().numpy()
+         res.append(out)
+     concatenated = np.hstack(res)
+     return res, concatenated
+
+
+ def main():
+     gr.close_all()
+
+     args = parse_args()
+     device = torch.device(args.device)
+
+     model = load_model('stylegan_human_v2_1024.pkl', device)
+
+     func = functools.partial(generate_interpolated_images,
+                              model=model,
+                              device=device)
+     func = functools.update_wrapper(func, generate_interpolated_images)
+
+     gr.Interface(
+         func,
+         [
+             gr.inputs.Number(default=0, label='Seed 1'),
+             gr.inputs.Slider(
+                 0, 2, step=0.05, default=0.7, label='Truncation psi 1'),
+             gr.inputs.Number(default=1, label='Seed 2'),
+             gr.inputs.Slider(
+                 0, 2, step=0.05, default=0.7, label='Truncation psi 2'),
+             gr.inputs.Slider(0,
+                              21,
+                              step=1,
+                              default=7,
+                              label='Number of Intermediate Frames'),
+         ],
+         [
+             gr.outputs.Carousel(gr.outputs.Image(type='numpy'),
+                                 label='Output Images'),
+             gr.outputs.Image(type='numpy', label='Concatenated'),
+         ],
+         title=TITLE,
+         description=DESCRIPTION,
+         article=ARTICLE,
+         theme=args.theme,
+         allow_screenshot=args.allow_screenshot,
+         allow_flagging=args.allow_flagging,
+         live=args.live,
+     ).launch(
+         enable_queue=args.enable_queue,
+         server_port=args.port,
+         share=args.share,
+     )
+
+
+ if __name__ == '__main__':
+     main()
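
The interpolation in generate_interpolated_images is plain linear interpolation: the latent vector and the truncation psi are each stepped in num_intermediate + 1 equal increments, producing num_intermediate + 2 frames including both endpoints. A minimal standalone sketch of the same arithmetic, assuming a hypothetical z_dim of 512 and no model:

# Minimal sketch of the interpolation arithmetic above (NumPy only).
# z_dim = 512 is a hypothetical value for illustration.
import numpy as np

z_dim = 512
num_intermediate = 7

# Seeded latents, matching how generate_z derives z from a seed.
z0 = np.random.RandomState(0).randn(1, z_dim)
z1 = np.random.RandomState(1).randn(1, z_dim)
psi0, psi1 = 0.7, 1.0

# num_intermediate + 2 points: both endpoints plus the frames between.
steps = num_intermediate + 2
dvec = (z1 - z0) / (num_intermediate + 1)
dpsi = (psi1 - psi0) / (num_intermediate + 1)
zs = [z0 + dvec * i for i in range(steps)]
psis = [psi0 + dpsi * i for i in range(steps)]

# The first and last frames reproduce the two seeds and psis exactly.
assert np.allclose(zs[0], z0) and np.allclose(zs[-1], z1)
assert np.isclose(psis[0], psi0) and np.isclose(psis[-1], psi1)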
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ numpy==1.22.3
+ Pillow==9.1.0
+ scipy==1.8.0
+ torch==1.11.0
+ torchvision==0.12.0
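
A quick sanity check that the pinned versions above are the ones actually installed; a minimal sketch using only the standard library (Python 3.8+):

# Minimal sketch: report installed versions of the pinned packages.
from importlib.metadata import version

for pkg in ['numpy', 'Pillow', 'scipy', 'torch', 'torchvision']:
    print(f'{pkg}=={version(pkg)}')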