Elle McFarlane committed
Commit da5b2c1 · 1 Parent(s): 19ec9f5

fix avantgan

Files changed (2)
  1. app.py +0 -1
  2. model.py +53 -59
app.py CHANGED
@@ -39,7 +39,6 @@ with gr.Blocks(css="style.css") as demo:
                 run_button = gr.Button()
             with gr.Column():
                 result = gr.Image(label="Result", elem_id="result", width=300, height=300)
-                print("RESULT", result, type(result), result.__dict__)
 
         with gr.TabItem("Sample Images"):
             with gr.Row():
model.py CHANGED
@@ -27,66 +27,66 @@ dcgan_img_size = 64
 nc = 3
 
 
-class Generator(nn.Module):
-    def __init__(self, ngpu, nz):
-        super(Generator, self).__init__()
-        self.ngpu = ngpu
-        self.main = nn.Sequential(
-            # input is Z, going into a convolution
-            nn.ConvTranspose2d( nz, ngf * 8, 4, 1, 0, bias=False),
-            nn.BatchNorm2d(ngf * 8),
-            nn.LeakyReLU(0.2, inplace=True),
-            # state size. (ngf*8) x 4 x 4
-            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
-            nn.BatchNorm2d(ngf * 4),
-            nn.LeakyReLU(0.2, inplace=True),
-            # state size. (ngf*4) x 8 x 8
-            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
-            nn.BatchNorm2d(ngf * 2),
-            nn.LeakyReLU(0.2, inplace=True),
-            # state size. (ngf*2) x 16 x 16
-            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
-            nn.BatchNorm2d(ngf),
-            nn.LeakyReLU(0.2, inplace=True),
-            # state size. (ngf) x 32 x 32
-            nn.ConvTranspose2d( ngf, nc, 4, 2, 1, bias=False),
-            nn.Tanh()
-            # state size. (nc) x 64 x 64
-        )
-
-    def forward(self, input):
-        return self.main(input)
-
 # class Generator(nn.Module):
-#     def __init__(self, n_gen_feats, n_gpu, z_dim, n_channels):
+#     def __init__(self, ngpu, nz):
 #         super(Generator, self).__init__()
-#         self.n_gpu = n_gpu
+#         self.ngpu = ngpu
 #         self.main = nn.Sequential(
 #             # input is Z, going into a convolution
-#             nn.ConvTranspose2d(z_dim, n_gen_feats * 8, 4, 1, 0, bias=False),
-#             nn.BatchNorm2d(n_gen_feats * 8),
+#             nn.ConvTranspose2d( nz, ngf * 8, 4, 1, 0, bias=False),
+#             nn.BatchNorm2d(ngf * 8),
 #             nn.LeakyReLU(0.2, inplace=True),
-#             # state size. (n_gen_feats*8) x 4 x 4
-#             nn.ConvTranspose2d(n_gen_feats * 8, n_gen_feats * 4, 4, 2, 1, bias=False),
-#             nn.BatchNorm2d(n_gen_feats * 4),
+#             # state size. (ngf*8) x 4 x 4
+#             nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
+#             nn.BatchNorm2d(ngf * 4),
 #             nn.LeakyReLU(0.2, inplace=True),
-#             # state size. (n_gen_feats*4) x 8 x 8
-#             nn.ConvTranspose2d(n_gen_feats * 4, n_gen_feats * 2, 4, 2, 1, bias=False),
-#             nn.BatchNorm2d(n_gen_feats * 2),
+#             # state size. (ngf*4) x 8 x 8
+#             nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
+#             nn.BatchNorm2d(ngf * 2),
 #             nn.LeakyReLU(0.2, inplace=True),
-#             # state size. (n_gen_feats*2) x 16 x 16
-#             nn.ConvTranspose2d(n_gen_feats * 2, n_gen_feats, 4, 2, 1, bias=False),
-#             nn.BatchNorm2d(n_gen_feats),
+#             # state size. (ngf*2) x 16 x 16
+#             nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
+#             nn.BatchNorm2d(ngf),
 #             nn.LeakyReLU(0.2, inplace=True),
-#             # state size. (n_gen_feats) x 32 x 32
-#             nn.ConvTranspose2d(n_gen_feats, n_channels, 4, 2, 1, bias=False),
+#             # state size. (ngf) x 32 x 32
+#             nn.ConvTranspose2d( ngf, nc, 4, 2, 1, bias=False),
 #             nn.Tanh()
-#             # state size. (n_channels) x 64 x 64
+#             # state size. (nc) x 64 x 64
 #         )
 
 #     def forward(self, input):
 #         return self.main(input)
 
+class Generator(nn.Module):
+    def __init__(self, n_gen_feats, n_gpu, z_dim, n_channels):
+        super(Generator, self).__init__()
+        self.n_gpu = n_gpu
+        self.main = nn.Sequential(
+            # input is Z, going into a convolution
+            nn.ConvTranspose2d(z_dim, n_gen_feats * 8, 4, 1, 0, bias=False),
+            nn.BatchNorm2d(n_gen_feats * 8),
+            nn.LeakyReLU(0.2, inplace=True),
+            # state size. (n_gen_feats*8) x 4 x 4
+            nn.ConvTranspose2d(n_gen_feats * 8, n_gen_feats * 4, 4, 2, 1, bias=False),
+            nn.BatchNorm2d(n_gen_feats * 4),
+            nn.LeakyReLU(0.2, inplace=True),
+            # state size. (n_gen_feats*4) x 8 x 8
+            nn.ConvTranspose2d(n_gen_feats * 4, n_gen_feats * 2, 4, 2, 1, bias=False),
+            nn.BatchNorm2d(n_gen_feats * 2),
+            nn.LeakyReLU(0.2, inplace=True),
+            # state size. (n_gen_feats*2) x 16 x 16
+            nn.ConvTranspose2d(n_gen_feats * 2, n_gen_feats, 4, 2, 1, bias=False),
+            nn.BatchNorm2d(n_gen_feats),
+            nn.LeakyReLU(0.2, inplace=True),
+            # state size. (n_gen_feats) x 32 x 32
+            nn.ConvTranspose2d(n_gen_feats, n_channels, 4, 2, 1, bias=False),
+            nn.Tanh()
+            # state size. (n_channels) x 64 x 64
+        )
+
+    def forward(self, input):
+        return self.main(input)
+
 class Model:
     MODEL_DICT = {
         "stylegan3-abstract": {"name": "abstract-560eps.pkl", "repo": "avantStyleGAN3"},
@@ -109,9 +109,9 @@ class Model:
                 model = pickle.load(f)["G_ema"]
         else:
             # todo (elle): don't hardcode the config
-            # model = Generator(dcgan_gen_feats, 1, dcgan_z_dim, 3)
-            print("WAS HERE")
-            model = Generator(0, 100)
+            model = Generator(dcgan_gen_feats, 1, dcgan_z_dim, 3)
+            # model = Generator(0, 100)
+            model.load_state_dict(torch.load(path, map_location=self.device))
 
         model.eval()
         model.to(self.device)
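
Note: this hunk is the heart of the "fix avantgan" commit. The old branch constructed Generator(0, 100) with freshly initialized weights and never loaded a checkpoint, so the DCGAN path rendered untrained output; the new branch builds the intended architecture and restores the trained weights. The same pattern in isolation, as a hedged sketch (the checkpoint filename and feature count are illustrative, not from the repo):

# Sketch of the load pattern added above; the .pth path is hypothetical.
import torch
from model import Generator  # assumes model.py is importable

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = Generator(64, 1, 100, 3)  # n_gen_feats=64 assumed; z_dim=100 and 3 channels per the diff
state_dict = torch.load("dcgan_generator.pth", map_location=device)  # map_location remaps GPU-saved tensors
model.load_state_dict(state_dict)  # raises RuntimeError if keys or shapes mismatch
model.eval()
model.to(device)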
@@ -149,10 +149,6 @@ class Model:
         tensor = (tensor.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
         return tensor.cpu().numpy()
 
-    def dcgan_postprocess(self, tensor: torch.Tensor) -> np.ndarray:
-        tensor = (tensor.permute(0, 2, 3, 1)).clamp(0, 255).to(torch.uint8)
-        return tensor.cpu().numpy()
-
     def set_transform(self, tx: float = 0, ty: float = 0, angle: float = 0) -> None:
         mat = self.make_transform((tx, ty), angle)
         mat = np.linalg.inv(mat)
@@ -173,20 +169,18 @@
         return out[0]
 
     def dcgan_generate_image(self, seed: int) -> np.ndarray:
-        dcgan_img_size = 64
-        dcgan_z_dim = 100
+        torch.manual_seed(seed)
+        if self.device == 'cuda':
+            torch.cuda.manual_seed(seed)
 
         with torch.no_grad():
             n_images = 1
             z = torch.randn(n_images, dcgan_z_dim, 1, 1, device=self.device)
             fake_images = self.model(z.to(self.device)).cpu()
             fake_images = fake_images.view(fake_images.size(0), 3, dcgan_img_size, dcgan_img_size)
-
-            print('fake', fake_images)
-            print(fake_images.min(), fake_images.max())
             # Create a grid of images
             grid = vutils.make_grid(fake_images, normalize=True)
-            print('grid', grid)
+
             # Plot the grid and save it to a buffer
             fig, ax = plt.subplots()
             ax.imshow(grid.permute(1, 2, 0))  # Convert from CHW to HWC for imshow
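
Note: the added torch.manual_seed(seed) / torch.cuda.manual_seed(seed) calls make the seed argument actually control the output: torch.randn is drawn immediately afterwards, so the same seed reproduces the same latent z and therefore the same image. A self-contained illustration of that property (not repo code):

# Sketch: seeding the global RNG makes the sampled latent deterministic.
import torch

def sample_z(seed: int, z_dim: int = 100) -> torch.Tensor:
    torch.manual_seed(seed)
    return torch.randn(1, z_dim, 1, 1)

assert torch.equal(sample_z(7), sample_z(7))      # same seed -> identical latent
assert not torch.equal(sample_z(7), sample_z(8))  # different seed -> different latent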
 