# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#	  http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.utils import BaseOutput

# `apply_forward_hook` lives in different modules across diffusers versions.
try:
	from diffusers.utils import apply_forward_hook
except ImportError:
	from diffusers.utils.accelerate_utils import apply_forward_hook

from diffusers.models.modeling_utils import ModelMixin
from diffusers.models.vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder


@dataclass
class AutoencoderKLOutput(BaseOutput):
	"""
	Output of AutoencoderKL encoding method.

	Args:
		latent_dist (`DiagonalGaussianDistribution`):
			Encoded outputs of `Encoder` represented as the mean and logvar of `DiagonalGaussianDistribution`.
			`DiagonalGaussianDistribution` allows for sampling latents from the distribution.
	"""

	latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
	r"""Variational Autoencoder (VAE) model with KL loss from the paper Auto-Encoding Variational Bayes by Diederik P. Kingma
	and Max Welling.

	This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the
	library implements for all models (such as downloading or saving).

	Parameters:
		in_channels (`int`, *optional*, defaults to 3): Number of channels in the input image.
		out_channels (`int`, *optional*, defaults to 3): Number of channels in the output.
		down_block_types (`Tuple[str]`, *optional*, defaults to `("DownEncoderBlock2D",)`):
			Tuple of downsample block types.
		up_block_types (`Tuple[str]`, *optional*, defaults to `("UpDecoderBlock2D",)`):
			Tuple of upsample block types.
		block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`):
			Tuple of block output channels.
		layers_per_block (`int`, *optional*, defaults to 1): Number of layers per block.
		act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
		latent_channels (`int`, *optional*, defaults to 4): Number of channels in the latent space.
		norm_num_groups (`int`, *optional*, defaults to 32): Number of groups for the normalization layers.
		sample_size (`int`, *optional*, defaults to 32):
			Expected sample (image) input size; also used as the tile size when tiled encoding/decoding is
			enabled.
		scaling_factor (`float`, *optional*, defaults to 0.18215):
			The component-wise standard deviation of the trained latent space computed using the first batch of the
			training set. This is used to scale the latent space to have unit variance when training the diffusion
			model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
			diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1
			/ scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image
			Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper.
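
	Example:
		A minimal sketch of how `scaling_factor` is typically applied outside this class (the `vae`,
		`image`, and `latents` names are illustrative, not part of this module):

			latents = vae.encode(image).latent_dist.sample()
			latents = latents * vae.config.scaling_factor                    # z = z * scaling_factor
			image = vae.decode(latents / vae.config.scaling_factor).sample   # z = (1 / scaling_factor) * z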
	"""

	_supports_gradient_checkpointing = True

	@register_to_config
	def __init__(
		self,
		in_channels: int = 3,
		out_channels: int = 3,
		down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
		up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
		block_out_channels: Tuple[int] = (64,),
		layers_per_block: int = 1,
		act_fn: str = "silu",
		latent_channels: int = 4,
		norm_num_groups: int = 32,
		sample_size: int = 32,
		scaling_factor: float = 0.18215,
	):
		super().__init__()

		# pass init params to Encoder
		self.encoder = Encoder(
			in_channels=in_channels,
			out_channels=latent_channels,
			down_block_types=down_block_types,
			block_out_channels=block_out_channels,
			layers_per_block=layers_per_block,
			act_fn=act_fn,
			norm_num_groups=norm_num_groups,
			double_z=True,
		)

		# pass init params to Decoder
		self.decoder = Decoder(
			in_channels=latent_channels,
			out_channels=out_channels,
			up_block_types=up_block_types,
			block_out_channels=block_out_channels,
			layers_per_block=layers_per_block,
			norm_num_groups=norm_num_groups,
			act_fn=act_fn,
		)

		self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
		self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

		self.use_slicing = False
		self.use_tiling = False

		# only relevant if vae tiling is enabled
		self.tile_sample_min_size = self.config.sample_size
		sample_size = (
			self.config.sample_size[0]
			if isinstance(self.config.sample_size, (list, tuple))
			else self.config.sample_size
		)
		self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
		self.tile_overlap_factor = 0.25

	def _set_gradient_checkpointing(self, module, value=False):
		if isinstance(module, (Encoder, Decoder)):
			module.gradient_checkpointing = value

	def enable_tiling(self, use_tiling: bool = True):
		r"""
		Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
		compute decoding and encoding in several steps. This is useful to save a large amount of memory and to allow
		the processing of larger images.
		"""
		self.use_tiling = use_tiling

	def disable_tiling(self):
		r"""
		Disable tiled VAE decoding. If `enable_tiling` was previously invoked, this method will go back to
		computing decoding in one step.
		"""
		self.enable_tiling(False)

	def enable_slicing(self):
		r"""
		Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
		compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
		"""
		self.use_slicing = True

	def disable_slicing(self):
		r"""
		Disable sliced VAE decoding. If `enable_slicing` was previously invoked, this method will go back to computing
		decoding in one step.
		"""
		self.use_slicing = False
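
	# A hedged usage sketch for the toggles above (the `vae` and `image` names are illustrative,
	# not part of this module):
	#
	#	vae = AutoencoderKL(sample_size=512)
	#	vae.enable_tiling()    # large inputs now route through tiled_encode()/tiled_decode()
	#	vae.enable_slicing()   # decode() processes batched latents one sample at a time
	#	latents = vae.encode(image).latent_dist.sample()
	#	vae.disable_tiling()
	#	vae.disable_slicing()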

	@apply_forward_hook
	def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
		if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
			return self.tiled_encode(x, return_dict=return_dict)

		h = self.encoder(x)
		moments = self.quant_conv(h)
		posterior = DiagonalGaussianDistribution(moments)

		if not return_dict:
			return (posterior,)

		return AutoencoderKLOutput(latent_dist=posterior)

	def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
		if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
			return self.tiled_decode(z, return_dict=return_dict)

		z = self.post_quant_conv(z)
		dec = self.decoder(z)

		if not return_dict:
			return (dec,)

		return DecoderOutput(sample=dec)

	@apply_forward_hook
	def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
		if self.use_slicing and z.shape[0] > 1:
			decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
			decoded = torch.cat(decoded_slices)
		else:
			decoded = self._decode(z).sample

		if not return_dict:
			return (decoded,)

		return DecoderOutput(sample=decoded)

	def blend_v(self, a, b, blend_extent):
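		# Cross-fade the bottom `blend_extent` rows of tile `a` into the top rows of tile `b`:
		# row y of `b` gets weight y / blend_extent, the matching row of `a` gets the remainder.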
		for y in range(min(a.shape[2], b.shape[2], blend_extent)):
			b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
		return b

	def blend_h(self, a, b, blend_extent):
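		# Cross-fade the rightmost `blend_extent` columns of tile `a` into the leftmost columns of
		# tile `b`: column x of `b` gets weight x / blend_extent, the matching column of `a` the rest.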
		for x in range(min(a.shape[3], b.shape[3], blend_extent)):
			b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
		return b

	def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
		r"""Encode a batch of images using a tiled encoder.

		Args:
		When this option is enabled, the VAE will split the input tensor into tiles to compute encoding in several
		steps. This is useful to keep memory use constant regardless of image size. The end result of tiled encoding is:
		different from non-tiled encoding due to each tile using a different encoder. To avoid tiling artifacts, the
		tiles overlap and are blended together to form a smooth output. You may still see tile-sized changes in the
		look of the output, but they should be much less noticeable.
			x (`torch.FloatTensor`): Input batch of images. return_dict (`bool`, *optional*, defaults to `True`):
				Whether or not to return a [`AutoencoderKLOutput`] instead of a plain tuple.
		"""
		overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
		blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
		row_limit = self.tile_latent_min_size - blend_extent
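		# Consecutive tiles start `overlap_size` pixels apart, so adjacent tiles share roughly
		# `tile_overlap_factor` of their area; `blend_extent` latent rows/columns are cross-faded with
		# each neighbour before every tile is cropped to `row_limit` rows/columns.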

		# Split the image into overlapping tiles of size `tile_sample_min_size` and encode them separately.
		rows = []
		for i in range(0, x.shape[2], overlap_size):
			row = []
			for j in range(0, x.shape[3], overlap_size):
				tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
				tile = self.encoder(tile)
				tile = self.quant_conv(tile)
				row.append(tile)
			rows.append(row)
		result_rows = []
		for i, row in enumerate(rows):
			result_row = []
			for j, tile in enumerate(row):
				# blend the above tile and the left tile
				# to the current tile and add the current tile to the result row
				if i > 0:
					tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
				if j > 0:
					tile = self.blend_h(row[j - 1], tile, blend_extent)
				result_row.append(tile[:, :, :row_limit, :row_limit])
			result_rows.append(torch.cat(result_row, dim=3))

		moments = torch.cat(result_rows, dim=2)
		posterior = DiagonalGaussianDistribution(moments)

		if not return_dict:
			return (posterior,)

		return AutoencoderKLOutput(latent_dist=posterior)

	def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
		r"""Decode a batch of images using a tiled decoder.

		Args:
		When this option is enabled, the VAE will split the input tensor into tiles to compute decoding in several
		steps. This is useful to keep memory use constant regardless of image size. The end result of tiled decoding is:
		different from non-tiled decoding due to each tile using a different decoder. To avoid tiling artifacts, the
		tiles overlap and are blended together to form a smooth output. You may still see tile-sized changes in the
		look of the output, but they should be much less noticeable.
			z (`torch.FloatTensor`): Input batch of latent vectors. return_dict (`bool`, *optional*, defaults to
			`True`):
				Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
		"""
		overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
		blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
		row_limit = self.tile_sample_min_size - blend_extent
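		# Consecutive latent tiles start `overlap_size` latent pixels apart; after decoding,
		# `blend_extent` image rows/columns are cross-faded with each neighbour and every tile is
		# cropped to `row_limit` rows/columns before stitching.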

		# Split z into overlapping tiles of size `tile_latent_min_size` and decode them separately.
		# The tiles overlap to avoid visible seams between them.
		rows = []
		for i in range(0, z.shape[2], overlap_size):
			row = []
			for j in range(0, z.shape[3], overlap_size):
				tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
				tile = self.post_quant_conv(tile)
				decoded = self.decoder(tile)
				row.append(decoded)
			rows.append(row)
		result_rows = []
		for i, row in enumerate(rows):
			result_row = []
			for j, tile in enumerate(row):
				# blend the above tile and the left tile
				# to the current tile and add the current tile to the result row
				if i > 0:
					tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
				if j > 0:
					tile = self.blend_h(row[j - 1], tile, blend_extent)
				result_row.append(tile[:, :, :row_limit, :row_limit])
			result_rows.append(torch.cat(result_row, dim=3))

		dec = torch.cat(result_rows, dim=2)
		if not return_dict:
			return (dec,)

		return DecoderOutput(sample=dec)

	def forward(
		self,
		sample: torch.FloatTensor,
		sample_posterior: bool = False,
		return_dict: bool = True,
		generator: Optional[torch.Generator] = None,
	) -> Union[DecoderOutput, torch.FloatTensor]:
		r"""
		Args:
			sample (`torch.FloatTensor`): Input sample.
			sample_posterior (`bool`, *optional*, defaults to `False`):
				Whether to sample from the posterior.
			return_dict (`bool`, *optional*, defaults to `True`):
				Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
			generator (`torch.Generator`, *optional*):
				Random number generator used when sampling from the posterior.
		"""
		x = sample
		posterior = self.encode(x).latent_dist
		if sample_posterior:
			z = posterior.sample(generator=generator)
		else:
			z = posterior.mode()
		dec = self.decode(z).sample

		if not return_dict:
			return (dec,)

		return DecoderOutput(sample=dec)
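

if __name__ == "__main__":
	# Minimal smoke test (a sketch, not part of the original module): build a small randomly
	# initialized VAE and round-trip a random image through encode()/decode().
	vae = AutoencoderKL(block_out_channels=(32,), norm_num_groups=32, sample_size=64)
	image = torch.randn(1, 3, 64, 64)
	latents = vae.encode(image).latent_dist.sample()
	reconstruction = vae.decode(latents).sample
	print(latents.shape, reconstruction.shape)  # expect (1, 4, 64, 64) and (1, 3, 64, 64)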