Create README.md
README.md
ADDED
@@ -0,0 +1,31 @@
image_size = 128  # the generated image resolution
train_batch_size = 16
eval_batch_size = 16  # how many images to sample during evaluation
num_epochs = 15000
gradient_accumulation_steps = 1
learning_rate = 1e-5
lr_warmup_steps = 500
save_image_epochs = 50
save_model_epochs = 50
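These hyperparameters read like the fields of a training configuration object; the preprocessing snippet further down refers to them as `config.image_size`. A minimal sketch of how they could be grouped, assuming a `TrainingConfig` dataclass in the style of the Diffusers unconditional image generation example (the class name and dataclass layout are assumptions, only the values come from this README):

```python
from dataclasses import dataclass

@dataclass
class TrainingConfig:
    image_size: int = 128          # the generated image resolution
    train_batch_size: int = 16
    eval_batch_size: int = 16      # how many images to sample during evaluation
    num_epochs: int = 15000
    gradient_accumulation_steps: int = 1
    learning_rate: float = 1e-5
    lr_warmup_steps: int = 500
    save_image_epochs: int = 50
    save_model_epochs: int = 50

config = TrainingConfig()  # the `config` object referenced by the preprocessing code below
```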
block_out_channels=(128, 128, 256, 256, 512, 512),  # the number of output channels for each UNet block
down_block_types=(
    "DownBlock2D",  # a regular ResNet downsampling block
    "DownBlock2D",
    "DownBlock2D",
    "DownBlock2D",
    "AttnDownBlock2D",  # a ResNet downsampling block with spatial self-attention
    "DownBlock2D",
),
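These two keyword arguments describe only part of a UNet and are presumably passed to `diffusers.UNet2DModel`. A sketch of how the full model might be constructed; the `in_channels`, `out_channels`, `layers_per_block`, and `up_block_types` values are assumptions (a mirror of the downsampling path), only the two arguments above come from this README:

```python
from diffusers import UNet2DModel

model = UNet2DModel(
    sample_size=config.image_size,  # 128, from the training configuration above
    in_channels=3,                  # assumed: RGB input images
    out_channels=3,                 # assumed: predicted noise has the same shape
    layers_per_block=2,             # assumed: ResNet layers per UNet block
    block_out_channels=(128, 128, 256, 256, 512, 512),  # from the README
    down_block_types=(
        "DownBlock2D",
        "DownBlock2D",
        "DownBlock2D",
        "DownBlock2D",
        "AttnDownBlock2D",          # spatial self-attention at the 512-channel stage
        "DownBlock2D",
    ),
    up_block_types=(                # assumed mirror of the downsampling path
        "UpBlock2D",
        "AttnUpBlock2D",
        "UpBlock2D",
        "UpBlock2D",
        "UpBlock2D",
        "UpBlock2D",
    ),
)
```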
preprocess = transforms.Compose(
    [
        transforms.Resize((config.image_size, config.image_size)),
        transforms.RandomRotation(45),
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)
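The pipeline above assumes `transforms` is torchvision's module and `config` is the training configuration; it resizes each image to 128×128, applies rotation and flip augmentation, and normalizes pixel values to [-1, 1]. A sketch of how it might be wired to a Hugging Face `datasets` image dataset; the dataset name and the `image`/`images` column names are hypothetical, since the README does not name the training data:

```python
from torchvision import transforms
from datasets import load_dataset

# Hypothetical dataset; the README does not say which data was used.
dataset = load_dataset("huggan/smithsonian_butterflies_subset", split="train")

def transform(examples):
    # Run the `preprocess` pipeline defined above on every PIL image in the batch.
    images = [preprocess(image.convert("RGB")) for image in examples["image"]]
    return {"images": images}

dataset.set_transform(transform)  # applied on the fly whenever a batch is read
```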