Muhammad Rama Nurimani committed
Commit 9ba7ddb · 1 Parent(s): 0286864

test deploy

This view is limited to 50 files because it contains too many changes. See the raw diff for the full list.
Files changed (50)
  1. .replit +2 -0
  2. CycleGAN.ipynb +273 -0
  3. LICENSE +58 -0
  4. README.md +0 -13
  5. app.py +62 -120
  6. latest_net_G.pth → checkpoints/color_pix2pix/latest_net_G.pth +0 -0
  7. checkpoints/color_pix2pix/test_opt.txt +41 -0
  8. data/__init__.py +93 -0
  9. data/__pycache__/__init__.cpython-311.pyc +0 -0
  10. data/__pycache__/base_dataset.cpython-311.pyc +0 -0
  11. data/__pycache__/colorization_dataset.cpython-311.pyc +0 -0
  12. data/__pycache__/image_folder.cpython-311.pyc +0 -0
  13. data/aligned_dataset.py +60 -0
  14. data/base_dataset.py +167 -0
  15. data/colorization_dataset.py +68 -0
  16. data/image_folder.py +65 -0
  17. data/single_dataset.py +40 -0
  18. data/template_dataset.py +75 -0
  19. data/unaligned_dataset.py +71 -0
  20. datasets/bibtex/cityscapes.tex +6 -0
  21. datasets/bibtex/facades.tex +7 -0
  22. datasets/bibtex/handbags.tex +13 -0
  23. datasets/bibtex/shoes.tex +14 -0
  24. datasets/bibtex/transattr.tex +8 -0
  25. datasets/combine_A_and_B.py +67 -0
  26. datasets/data/test/input_image.png +0 -0
  27. datasets/data/test/test_demo_1.png +0 -0
  28. datasets/download_cyclegan_dataset.sh +21 -0
  29. datasets/download_pix2pix_dataset.sh +22 -0
  30. datasets/make_dataset_aligned.py +63 -0
  31. datasets/prepare_cityscapes_dataset.py +99 -0
  32. debug_images/L_channel.png +0 -0
  33. debug_images/input_grayscale.png +0 -0
  34. debug_images/input_original.png +0 -0
  35. debug_images/lab_image.png +0 -0
  36. debug_images/lab_image_debug.png +0 -0
  37. debug_images/lab_to_rgb_debug.png +0 -0
  38. debug_images/output_rgb.png +0 -0
  39. debug_images/postprocessed_output.png +0 -0
  40. debug_images/preprocessed_input.pt +1 -1
  41. debug_images/raw_AB_channels.png +0 -0
  42. debug_images/raw_output.pt +1 -1
  43. docs/Dockerfile +16 -0
  44. docs/README_es.md +238 -0
  45. docs/datasets.md +44 -0
  46. docs/docker.md +38 -0
  47. docs/overview.md +45 -0
  48. docs/qa.md +148 -0
  49. docs/tips.md +74 -0
  50. environment.yml +17 -0
.replit ADDED
@@ -0,0 +1,2 @@
1
+ language = "python3"
2
+ run = "<p><a href=\"https://github.com/affinelayer/pix2pix-tensorflow\"> [Tensorflow]</a> (by Christopher Hesse), <a href=\"https://github.com/Eyyub/tensorflow-pix2pix\">[Tensorflow]</a> (by Eyyüb Sariu), <a href=\"https://github.com/datitran/face2face-demo\"> [Tensorflow (face2face)]</a> (by Dat Tran), <a href=\"https://github.com/awjuliani/Pix2Pix-Film\"> [Tensorflow (film)]</a> (by Arthur Juliani), <a href=\"https://github.com/kaonashi-tyc/zi2zi\">[Tensorflow (zi2zi)]</a> (by Yuchen Tian), <a href=\"https://github.com/pfnet-research/chainer-pix2pix\">[Chainer]</a> (by mattya), <a href=\"https://github.com/tjwei/GANotebooks\">[tf/torch/keras/lasagne]</a> (by tjwei), <a href=\"https://github.com/taey16/pix2pixBEGAN.pytorch\">[Pytorch]</a> (by taey16) </p> </ul>"
CycleGAN.ipynb ADDED
@@ -0,0 +1,273 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {
6
+ "colab_type": "text",
7
+ "id": "view-in-github"
8
+ },
9
+ "source": [
10
+ "<a href=\"https://colab.research.google.com/github/bkkaggle/pytorch-CycleGAN-and-pix2pix/blob/master/CycleGAN.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
11
+ ]
12
+ },
13
+ {
14
+ "cell_type": "markdown",
15
+ "metadata": {
16
+ "colab_type": "text",
17
+ "id": "5VIGyIus8Vr7"
18
+ },
19
+ "source": [
20
+ "Take a look at the [repository](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix) for more information"
21
+ ]
22
+ },
23
+ {
24
+ "cell_type": "markdown",
25
+ "metadata": {
26
+ "colab_type": "text",
27
+ "id": "7wNjDKdQy35h"
28
+ },
29
+ "source": [
30
+ "# Install"
31
+ ]
32
+ },
33
+ {
34
+ "cell_type": "code",
35
+ "execution_count": null,
36
+ "metadata": {
37
+ "colab": {},
38
+ "colab_type": "code",
39
+ "id": "TRm-USlsHgEV"
40
+ },
41
+ "outputs": [],
42
+ "source": [
43
+ "!git clone https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix"
44
+ ]
45
+ },
46
+ {
47
+ "cell_type": "code",
48
+ "execution_count": null,
49
+ "metadata": {
50
+ "colab": {},
51
+ "colab_type": "code",
52
+ "id": "Pt3igws3eiVp"
53
+ },
54
+ "outputs": [],
55
+ "source": [
56
+ "import os\n",
57
+ "os.chdir('pytorch-CycleGAN-and-pix2pix/')"
58
+ ]
59
+ },
60
+ {
61
+ "cell_type": "code",
62
+ "execution_count": null,
63
+ "metadata": {
64
+ "colab": {},
65
+ "colab_type": "code",
66
+ "id": "z1EySlOXwwoa"
67
+ },
68
+ "outputs": [],
69
+ "source": [
70
+ "!pip install -r requirements.txt"
71
+ ]
72
+ },
73
+ {
74
+ "cell_type": "markdown",
75
+ "metadata": {
76
+ "colab_type": "text",
77
+ "id": "8daqlgVhw29P"
78
+ },
79
+ "source": [
80
+ "# Datasets\n",
81
+ "\n",
82
+ "Download one of the official datasets with:\n",
83
+ "\n",
84
+ "- `bash ./datasets/download_cyclegan_dataset.sh [apple2orange, summer2winter_yosemite, horse2zebra, monet2photo, cezanne2photo, ukiyoe2photo, vangogh2photo, maps, cityscapes, facades, iphone2dslr_flower, ae_photos]`\n",
85
+ "\n",
86
+ "Or use your own dataset by creating the appropriate folders and adding in the images.\n",
87
+ "\n",
88
+ "- Create a dataset folder under `/dataset` for your dataset.\n",
89
+ "- Create subfolders `testA`, `testB`, `trainA`, and `trainB` under your dataset's folder. Place any images you want to transform from a to b (cat2dog) in the `testA` folder, images you want to transform from b to a (dog2cat) in the `testB` folder, and do the same for the `trainA` and `trainB` folders."
90
+ ]
91
+ },
92
+ {
93
+ "cell_type": "code",
94
+ "execution_count": null,
95
+ "metadata": {
96
+ "colab": {},
97
+ "colab_type": "code",
98
+ "id": "vrdOettJxaCc"
99
+ },
100
+ "outputs": [],
101
+ "source": [
102
+ "!bash ./datasets/download_cyclegan_dataset.sh horse2zebra"
103
+ ]
104
+ },
105
+ {
106
+ "cell_type": "markdown",
107
+ "metadata": {
108
+ "colab_type": "text",
109
+ "id": "gdUz4116xhpm"
110
+ },
111
+ "source": [
112
+ "# Pretrained models\n",
113
+ "\n",
114
+ "Download one of the official pretrained models with:\n",
115
+ "\n",
116
+ "- `bash ./scripts/download_cyclegan_model.sh [apple2orange, orange2apple, summer2winter_yosemite, winter2summer_yosemite, horse2zebra, zebra2horse, monet2photo, style_monet, style_cezanne, style_ukiyoe, style_vangogh, sat2map, map2sat, cityscapes_photo2label, cityscapes_label2photo, facades_photo2label, facades_label2photo, iphone2dslr_flower]`\n",
117
+ "\n",
118
+ "Or add your own pretrained model to `./checkpoints/{NAME}_pretrained/latest_net_G.pt`"
119
+ ]
120
+ },
121
+ {
122
+ "cell_type": "code",
123
+ "execution_count": null,
124
+ "metadata": {
125
+ "colab": {},
126
+ "colab_type": "code",
127
+ "id": "B75UqtKhxznS"
128
+ },
129
+ "outputs": [],
130
+ "source": [
131
+ "!bash ./scripts/download_cyclegan_model.sh horse2zebra"
132
+ ]
133
+ },
134
+ {
135
+ "cell_type": "markdown",
136
+ "metadata": {
137
+ "colab_type": "text",
138
+ "id": "yFw1kDQBx3LN"
139
+ },
140
+ "source": [
141
+ "# Training\n",
142
+ "\n",
143
+ "- `python train.py --dataroot ./datasets/horse2zebra --name horse2zebra --model cycle_gan`\n",
144
+ "\n",
145
+ "Change the `--dataroot` and `--name` to your own dataset's path and model's name. Use `--gpu_ids 0,1,..` to train on multiple GPUs and `--batch_size` to change the batch size. I've found that a batch size of 16 fits onto 4 V100s and can finish training an epoch in ~90s.\n",
146
+ "\n",
147
+ "Once your model has trained, copy over the last checkpoint to a format that the testing model can automatically detect:\n",
148
+ "\n",
149
+ "Use `cp ./checkpoints/horse2zebra/latest_net_G_A.pth ./checkpoints/horse2zebra/latest_net_G.pth` if you want to transform images from class A to class B and `cp ./checkpoints/horse2zebra/latest_net_G_B.pth ./checkpoints/horse2zebra/latest_net_G.pth` if you want to transform images from class B to class A.\n"
150
+ ]
151
+ },
152
+ {
153
+ "cell_type": "code",
154
+ "execution_count": null,
155
+ "metadata": {
156
+ "colab": {},
157
+ "colab_type": "code",
158
+ "id": "0sp7TCT2x9dB"
159
+ },
160
+ "outputs": [],
161
+ "source": [
162
+ "!python train.py --dataroot ./datasets/horse2zebra --name horse2zebra --model cycle_gan --display_id -1"
163
+ ]
164
+ },
165
+ {
166
+ "cell_type": "markdown",
167
+ "metadata": {
168
+ "colab_type": "text",
169
+ "id": "9UkcaFZiyASl"
170
+ },
171
+ "source": [
172
+ "# Testing\n",
173
+ "\n",
174
+ "- `python test.py --dataroot datasets/horse2zebra/testA --name horse2zebra_pretrained --model test --no_dropout`\n",
175
+ "\n",
176
+ "Change the `--dataroot` and `--name` to be consistent with your trained model's configuration.\n",
177
+ "\n",
178
+ "> from https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix:\n",
179
+ "> The option --model test is used for generating results of CycleGAN only for one side. This option will automatically set --dataset_mode single, which only loads the images from one set. On the contrary, using --model cycle_gan requires loading and generating results in both directions, which is sometimes unnecessary. The results will be saved at ./results/. Use --results_dir {directory_path_to_save_result} to specify the results directory.\n",
180
+ "\n",
181
+ "> For your own experiments, you might want to specify --netG, --norm, --no_dropout to match the generator architecture of the trained model."
182
+ ]
183
+ },
184
+ {
185
+ "cell_type": "code",
186
+ "execution_count": null,
187
+ "metadata": {
188
+ "colab": {},
189
+ "colab_type": "code",
190
+ "id": "uCsKkEq0yGh0"
191
+ },
192
+ "outputs": [],
193
+ "source": [
194
+ "!python test.py --dataroot datasets/horse2zebra/testA --name horse2zebra_pretrained --model test --no_dropout"
195
+ ]
196
+ },
197
+ {
198
+ "cell_type": "markdown",
199
+ "metadata": {
200
+ "colab_type": "text",
201
+ "id": "OzSKIPUByfiN"
202
+ },
203
+ "source": [
204
+ "# Visualize"
205
+ ]
206
+ },
207
+ {
208
+ "cell_type": "code",
209
+ "execution_count": null,
210
+ "metadata": {
211
+ "colab": {},
212
+ "colab_type": "code",
213
+ "id": "9Mgg8raPyizq"
214
+ },
215
+ "outputs": [],
216
+ "source": [
217
+ "import matplotlib.pyplot as plt\n",
218
+ "\n",
219
+ "img = plt.imread('./results/horse2zebra_pretrained/test_latest/images/n02381460_1010_fake.png')\n",
220
+ "plt.imshow(img)"
221
+ ]
222
+ },
223
+ {
224
+ "cell_type": "code",
225
+ "execution_count": null,
226
+ "metadata": {
227
+ "colab": {},
228
+ "colab_type": "code",
229
+ "id": "0G3oVH9DyqLQ"
230
+ },
231
+ "outputs": [],
232
+ "source": [
233
+ "import matplotlib.pyplot as plt\n",
234
+ "\n",
235
+ "img = plt.imread('./results/horse2zebra_pretrained/test_latest/images/n02381460_1010_real.png')\n",
236
+ "plt.imshow(img)"
237
+ ]
238
+ }
239
+ ],
240
+ "metadata": {
241
+ "accelerator": "GPU",
242
+ "colab": {
243
+ "collapsed_sections": [],
244
+ "include_colab_link": true,
245
+ "name": "CycleGAN",
246
+ "provenance": []
247
+ },
248
+ "environment": {
249
+ "name": "tf2-gpu.2-3.m74",
250
+ "type": "gcloud",
251
+ "uri": "gcr.io/deeplearning-platform-release/tf2-gpu.2-3:m74"
252
+ },
253
+ "kernelspec": {
254
+ "display_name": "Python 3",
255
+ "language": "python",
256
+ "name": "python3"
257
+ },
258
+ "language_info": {
259
+ "codemirror_mode": {
260
+ "name": "ipython",
261
+ "version": 3
262
+ },
263
+ "file_extension": ".py",
264
+ "mimetype": "text/x-python",
265
+ "name": "python",
266
+ "nbconvert_exporter": "python",
267
+ "pygments_lexer": "ipython3",
268
+ "version": "3.7.10"
269
+ }
270
+ },
271
+ "nbformat": 4,
272
+ "nbformat_minor": 4
273
+ }
LICENSE ADDED
@@ -0,0 +1,58 @@
1
+ Copyright (c) 2017, Jun-Yan Zhu and Taesung Park
2
+ All rights reserved.
3
+
4
+ Redistribution and use in source and binary forms, with or without
5
+ modification, are permitted provided that the following conditions are met:
6
+
7
+ * Redistributions of source code must retain the above copyright notice, this
8
+ list of conditions and the following disclaimer.
9
+
10
+ * Redistributions in binary form must reproduce the above copyright notice,
11
+ this list of conditions and the following disclaimer in the documentation
12
+ and/or other materials provided with the distribution.
13
+
14
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
17
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
18
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
20
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
21
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
22
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24
+
25
+
26
+ --------------------------- LICENSE FOR pix2pix --------------------------------
27
+ BSD License
28
+
29
+ For pix2pix software
30
+ Copyright (c) 2016, Phillip Isola and Jun-Yan Zhu
31
+ All rights reserved.
32
+
33
+ Redistribution and use in source and binary forms, with or without
34
+ modification, are permitted provided that the following conditions are met:
35
+
36
+ * Redistributions of source code must retain the above copyright notice, this
37
+ list of conditions and the following disclaimer.
38
+
39
+ * Redistributions in binary form must reproduce the above copyright notice,
40
+ this list of conditions and the following disclaimer in the documentation
41
+ and/or other materials provided with the distribution.
42
+
43
+ ----------------------------- LICENSE FOR DCGAN --------------------------------
44
+ BSD License
45
+
46
+ For dcgan.torch software
47
+
48
+ Copyright (c) 2015, Facebook, Inc. All rights reserved.
49
+
50
+ Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
51
+
52
+ Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
53
+
54
+ Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
55
+
56
+ Neither the name Facebook nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
57
+
58
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Grayscale Coloritzation
3
- emoji: 📚
4
- colorFrom: gray
5
- colorTo: blue
6
- sdk: gradio
7
- sdk_version: 5.9.1
8
- app_file: app.py
9
- pinned: false
10
- short_description: 'colorization with pix2pix GAN '
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -1,132 +1,74 @@
1
- import gradio as gr
2
- import torch
3
- from torchvision import transforms
4
- from skimage import color
5
- from PIL import Image
6
- from models.networks import define_G
7
  import os
8
- import numpy as np
9
-
10
- def load_model(model_path):
11
- """
12
- Load the pre-trained pix2pix model.
13
- """
14
- # Define the generator model with matching training configuration
15
- model = define_G(
16
- input_nc=1, # Grayscale input (L channel)
17
- output_nc=2, # AB output (color channels)
18
- ngf=64, # Number of generator filters
19
- netG='unet_256', # U-Net architecture
20
- norm='batch', # Batch normalization
21
- init_gain=0.02, # Initialization gain
22
- init_type='normal', # Initialization type
23
- gpu_ids=[] # Use CPU
24
- )
25
-
26
- # Ensure the model checkpoint exists
27
- if not os.path.exists(model_path):
28
- raise FileNotFoundError(f"Model file not found at: {model_path}")
29
-
30
- # Load the state dictionary
31
- state_dict = torch.load(model_path, map_location="cpu")
32
- try:
33
- model.load_state_dict(state_dict)
34
- except RuntimeError as e:
35
- print(f"Error loading state_dict: {e}")
36
- raise
37
-
38
- model.eval() # Set the model to evaluation mode
39
- return model
40
-
41
- def lab_to_rgb(L, AB, debug_dir):
42
- """
43
- Convert the L and AB channels from LAB to RGB.
44
- """
45
- # Scale LAB channels to proper ranges
46
- AB2 = AB * 110.0 # Scale to [-128, 127]
47
- L2 = (L + 1.0) * 50.0 # Scale to [0, 100]
48
-
49
- # Combine L and AB channels
50
- Lab = torch.cat([L2, AB2], dim=1).squeeze(0).permute(1, 2, 0).cpu().numpy()
51
-
52
- # Debug: Save LAB visualization
53
- Lab_debug = Lab.copy()
54
- Lab_debug[:, :, 0] = (Lab_debug[:, :, 0] / 100 * 255).astype("uint8") # Normalize for visualization
55
- Lab_debug[:, :, 1:] = (Lab_debug[:, :, 1:] + 128).astype("uint8") # Offset for visualization
56
- lab_debug_path = os.path.join(debug_dir, "lab_image_debug.png")
57
- Image.fromarray(Lab_debug.astype("uint8")).save(lab_debug_path)
58
-
59
- # Ensure values are clipped to valid LAB ranges
60
- Lab[:, :, 0] = np.clip(Lab[:, :, 0], 0, 100)
61
- Lab[:, :, 1:] = np.clip(Lab[:, :, 1:], -128, 127)
62
-
63
- # Convert LAB to RGB
64
- rgb = color.lab2rgb(Lab) * 255 # Convert and scale to [0, 255]
65
- rgb_image = rgb.astype("uint8")
66
-
67
- return rgb_image
68
-
69
- def colorize_image(input_image):
70
- """
71
- Colorize the input black-and-white image.
72
- """
73
- # Create a debug directory for saving intermediate results
74
- debug_dir = "debug_images"
75
- os.makedirs(debug_dir, exist_ok=True)
76
-
77
- # Save original input image
78
- input_image.save(os.path.join(debug_dir, "input_original.png"))
79
-
80
- # Ensure the image is grayscale
81
- if input_image.mode != "L":
82
- input_image = input_image.convert("L")
83
- input_image.save(os.path.join(debug_dir, "input_grayscale.png"))
84
-
85
- # Preprocess the image
86
- transform = transforms.Compose([
87
- transforms.Resize((256, 256)), # Resize to 256x256
88
- transforms.ToTensor(), # Convert to tensor
89
- transforms.Normalize(mean=[0.5], std=[0.5]) # Normalize L channel to [-1, 1]
90
- ])
91
- input_tensor = transform(input_image).unsqueeze(0)
92
-
93
- # Save preprocessed input tensor for debugging
94
- preprocessed_input_path = os.path.join(debug_dir, "preprocessed_input.pt")
95
- torch.save(input_tensor, preprocessed_input_path)
96
-
97
- # Load the pre-trained model
98
- model_path = "./latest_net_G.pth" # Adjust to your model's path
99
- model = load_model(model_path)
100
-
101
- # Pass through the model
102
- with torch.no_grad():
103
- output_tensor = model(input_tensor)
104
-
105
- # Save raw output tensor for debugging
106
- raw_output_path = os.path.join(debug_dir, "raw_output.pt")
107
- torch.save(output_tensor, raw_output_path)
108
 
109
- # Post-process the output tensor
110
- L = input_tensor[:, 0:1, :, :] # Extract L channel from input
111
- AB = output_tensor # AB channels from model output
112
 
113
- # Convert LAB to RGB
114
- rgb_image = lab_to_rgb(L, AB, debug_dir)
115
 
116
- # Save post-processed output image
117
- postprocessed_output_path = os.path.join(debug_dir, "postprocessed_output.png")
118
- Image.fromarray(rgb_image).save(postprocessed_output_path)
 
119
 
120
- return Image.fromarray(rgb_image)
 
 
121
 
122
- # Define Gradio interface
123
  iface = gr.Interface(
124
- fn=colorize_image,
125
  inputs=gr.Image(type="pil"),
126
  outputs=gr.Image(type="pil"),
127
  live=True,
128
- title="Colorization",
129
- description="Colorize your black and white photos with pix2pix."
130
  )
131
 
132
- iface.launch()
1
+ import subprocess
2
  import os
3
+ from PIL import Image
4
+ import gradio as gr
5
 
6
+ def save_image(image, path):
7
+ image.save(path)
 
8
 
9
+ def load_image(path):
10
+ return Image.open(path)
11
 
12
+ def process_image(input_image):
13
+ input_path = "./datasets/data/test/input_image.png"
14
+ output_dir = "./results/demo/color_pix2pix/test_latest"
15
+ output_image_path = os.path.join(output_dir, "images", "fake_B.png")
16
 
17
+ # Save the input image
18
+ save_image(input_image, input_path)
19
+
20
 
21
+ cmd = [
22
+ "python", "test.py", # Command to run the test script
23
+ "--dataroot", "./datasets/data", # Adjust path as needed
24
+ "--name", "color_pix2pix", # Model name (set according to your setup)
25
+ "--model", "colorization", # Model type (colorization)
26
+ "--dataset_mode", "colorization", # Dataset mode
27
+ "--num_test", "1", # Number of tests
28
+ "--results_dir", "./results/demo",
29
+ "--gpu_ids", "-1" # Use CPU
30
+ ]
31
+
32
+ try:
33
+ # Execute the command to process the image
34
+ subprocess.run(cmd, check=True)
35
+ except subprocess.CalledProcessError as e:
36
+ print(f"Error while running command: {e}")
37
+ return None
38
+
39
+ # Check if the output directory exists
40
+ if not os.path.exists(output_dir):
41
+ print(f"Error: Output directory {output_dir} does not exist.")
42
+ return None
43
+
44
+ # After processing, load the output image from the results directory
45
+ output_files = [f for f in os.listdir(os.path.join(output_dir, "images")) if f.endswith('fake_B_rgb.png')]
46
+ print(output_files)
47
+ if not output_files:
48
+ print(f"Error: No output files found in {os.path.join(output_dir, 'images')}.")
49
+ return None
50
+
51
+ print(output_files)
52
+
53
+ newest_file = max(output_files, key=lambda f: os.path.getctime(os.path.join(output_dir, "images", f)))
54
+ output_image_path = os.path.join(output_dir, "images", newest_file)
55
+ if os.path.exists(output_image_path):
56
+ print(f"Output image saved at {output_image_path}")
57
+ return load_image(output_image_path)
58
+ else:
59
+ print(f"Error: Output image {output_image_path} not found.")
60
+ return None
61
+
62
+ # Define the Gradio interface
63
  iface = gr.Interface(
64
+ fn=process_image,
65
  inputs=gr.Image(type="pil"),
66
  outputs=gr.Image(type="pil"),
67
  live=True,
68
+ title="Pix2Pix Colorization",
69
+ description="Upload an image, which will be processed using Pix2Pix model and the output will be displayed."
70
  )
71
 
72
+
73
+ # Launch the app
74
+ iface.launch()
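
The reworked app.py no longer loads the generator in-process; it saves the upload to `datasets/data/test/input_image.png`, shells out to `test.py` with the `color_pix2pix` checkpoint, and returns the newest `*fake_B_rgb.png` from the results directory. A minimal sketch of the same flow outside Gradio, assuming the repository root as the working directory and the bundled `datasets/data/test/test_demo_1.png` as input:

```python
# Sketch of what process_image() does, runnable from the repo root without Gradio.
# Paths and flags mirror the command built in app.py; test_demo_1.png ships with this commit.
import os
import subprocess
from PIL import Image

Image.open("datasets/data/test/test_demo_1.png").save("datasets/data/test/input_image.png")

subprocess.run([
    "python", "test.py",
    "--dataroot", "./datasets/data",
    "--name", "color_pix2pix",
    "--model", "colorization",
    "--dataset_mode", "colorization",
    "--num_test", "1",
    "--results_dir", "./results/demo",
    "--gpu_ids", "-1",          # CPU only, as in the Gradio handler
], check=True)

images_dir = "./results/demo/color_pix2pix/test_latest/images"
outputs = [f for f in os.listdir(images_dir) if f.endswith("fake_B_rgb.png")]
print("colorized outputs:", sorted(outputs))
```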
latest_net_G.pth → checkpoints/color_pix2pix/latest_net_G.pth RENAMED
File without changes
checkpoints/color_pix2pix/test_opt.txt ADDED
@@ -0,0 +1,41 @@
1
+ ----------------- Options ---------------
2
+ aspect_ratio: 1.0
3
+ batch_size: 1
4
+ checkpoints_dir: ./checkpoints
5
+ crop_size: 256
6
+ dataroot: ./datasets/data [default: None]
7
+ dataset_mode: colorization
8
+ direction: AtoB
9
+ display_winsize: 256
10
+ epoch: latest
11
+ eval: False
12
+ gpu_ids: -1 [default: 0]
13
+ init_gain: 0.02
14
+ init_type: normal
15
+ input_nc: 1
16
+ isTrain: False [default: None]
17
+ load_iter: 0 [default: 0]
18
+ load_size: 256
19
+ max_dataset_size: inf
20
+ model: colorization [default: test]
21
+ n_layers_D: 3
22
+ name: color_pix2pix [default: experiment_name]
23
+ ndf: 64
24
+ netD: basic
25
+ netG: unet_256
26
+ ngf: 64
27
+ no_dropout: False
28
+ no_flip: False
29
+ norm: batch
30
+ num_test: 1 [default: 50]
31
+ num_threads: 4
32
+ output_nc: 2
33
+ phase: test
34
+ preprocess: resize_and_crop
35
+ results_dir: ./results/demo [default: ./results/]
36
+ serial_batches: False
37
+ suffix:
38
+ use_wandb: False
39
+ verbose: False
40
+ wandb_project_name: CycleGAN-and-pix2pix
41
+ ----------------- End -------------------
data/__init__.py ADDED
@@ -0,0 +1,93 @@
1
+ """This package includes all the modules related to data loading and preprocessing
2
+
3
+ To add a custom dataset class called 'dummy', you need to add a file called 'dummy_dataset.py' and define a subclass 'DummyDataset' inherited from BaseDataset.
4
+ You need to implement four functions:
5
+ -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
6
+ -- <__len__>: return the size of dataset.
7
+ -- <__getitem__>: get a data point from data loader.
8
+ -- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
9
+
10
+ Now you can use the dataset class by specifying flag '--dataset_mode dummy'.
11
+ See our template dataset class 'template_dataset.py' for more details.
12
+ """
13
+ import importlib
14
+ import torch.utils.data
15
+ from data.base_dataset import BaseDataset
16
+
17
+
18
+ def find_dataset_using_name(dataset_name):
19
+ """Import the module "data/[dataset_name]_dataset.py".
20
+
21
+ In the file, the class called DatasetNameDataset() will
22
+ be instantiated. It has to be a subclass of BaseDataset,
23
+ and it is case-insensitive.
24
+ """
25
+ dataset_filename = "data." + dataset_name + "_dataset"
26
+ datasetlib = importlib.import_module(dataset_filename)
27
+
28
+ dataset = None
29
+ target_dataset_name = dataset_name.replace('_', '') + 'dataset'
30
+ for name, cls in datasetlib.__dict__.items():
31
+ if name.lower() == target_dataset_name.lower() \
32
+ and issubclass(cls, BaseDataset):
33
+ dataset = cls
34
+
35
+ if dataset is None:
36
+ raise NotImplementedError("In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase." % (dataset_filename, target_dataset_name))
37
+
38
+ return dataset
39
+
40
+
41
+ def get_option_setter(dataset_name):
42
+ """Return the static method <modify_commandline_options> of the dataset class."""
43
+ dataset_class = find_dataset_using_name(dataset_name)
44
+ return dataset_class.modify_commandline_options
45
+
46
+
47
+ def create_dataset(opt):
48
+ """Create a dataset given the option.
49
+
50
+ This function wraps the class CustomDatasetDataLoader.
51
+ This is the main interface between this package and 'train.py'/'test.py'
52
+
53
+ Example:
54
+ >>> from data import create_dataset
55
+ >>> dataset = create_dataset(opt)
56
+ """
57
+ data_loader = CustomDatasetDataLoader(opt)
58
+ dataset = data_loader.load_data()
59
+ return dataset
60
+
61
+
62
+ class CustomDatasetDataLoader():
63
+ """Wrapper class of Dataset class that performs multi-threaded data loading"""
64
+
65
+ def __init__(self, opt):
66
+ """Initialize this class
67
+
68
+ Step 1: create a dataset instance given the name [dataset_mode]
69
+ Step 2: create a multi-threaded data loader.
70
+ """
71
+ self.opt = opt
72
+ dataset_class = find_dataset_using_name(opt.dataset_mode)
73
+ self.dataset = dataset_class(opt)
74
+ print("dataset [%s] was created" % type(self.dataset).__name__)
75
+ self.dataloader = torch.utils.data.DataLoader(
76
+ self.dataset,
77
+ batch_size=opt.batch_size,
78
+ shuffle=not opt.serial_batches,
79
+ num_workers=int(opt.num_threads))
80
+
81
+ def load_data(self):
82
+ return self
83
+
84
+ def __len__(self):
85
+ """Return the number of data in the dataset"""
86
+ return min(len(self.dataset), self.opt.max_dataset_size)
87
+
88
+ def __iter__(self):
89
+ """Return a batch of data"""
90
+ for i, data in enumerate(self.dataloader):
91
+ if i * self.opt.batch_size >= self.opt.max_dataset_size:
92
+ break
93
+ yield data
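
For orientation, a short sketch of how `train.py`/`test.py` are expected to drive this loader. `TestOptions` is an assumption here: it comes from the upstream pytorch-CycleGAN-and-pix2pix `options/` package that this commit vendors (not shown in this 50-file view), and the flags echo `checkpoints/color_pix2pix/test_opt.txt`.

```python
# Sketch of the intended use of create_dataset (mirrors the docstring example above).
from options.test_options import TestOptions  # assumed upstream module, not in this diff
from data import create_dataset

if __name__ == "__main__":
    opt = TestOptions().parse()          # e.g. --dataroot ./datasets/data --dataset_mode colorization
    dataset = create_dataset(opt)        # CustomDatasetDataLoader wrapping the chosen *_dataset.py class
    print("#images =", len(dataset))
    for i, batch in enumerate(dataset):  # __iter__ stops once max_dataset_size is reached
        print(i, batch["A"].shape, batch["B"].shape)
        break
```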
data/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (5.45 kB).
data/__pycache__/base_dataset.cpython-311.pyc ADDED
Binary file (9.86 kB).
data/__pycache__/colorization_dataset.cpython-311.pyc ADDED
Binary file (4.58 kB).
data/__pycache__/image_folder.cpython-311.pyc ADDED
Binary file (3.93 kB).
data/aligned_dataset.py ADDED
@@ -0,0 +1,60 @@
1
+ import os
2
+ from data.base_dataset import BaseDataset, get_params, get_transform
3
+ from data.image_folder import make_dataset
4
+ from PIL import Image
5
+
6
+
7
+ class AlignedDataset(BaseDataset):
8
+ """A dataset class for paired image dataset.
9
+
10
+ It assumes that the directory '/path/to/data/train' contains image pairs in the form of {A,B}.
11
+ During test time, you need to prepare a directory '/path/to/data/test'.
12
+ """
13
+
14
+ def __init__(self, opt):
15
+ """Initialize this dataset class.
16
+
17
+ Parameters:
18
+ opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
19
+ """
20
+ BaseDataset.__init__(self, opt)
21
+ self.dir_AB = os.path.join(opt.dataroot, opt.phase) # get the image directory
22
+ self.AB_paths = sorted(make_dataset(self.dir_AB, opt.max_dataset_size)) # get image paths
23
+ assert(self.opt.load_size >= self.opt.crop_size) # crop_size should be smaller than the size of loaded image
24
+ self.input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc
25
+ self.output_nc = self.opt.input_nc if self.opt.direction == 'BtoA' else self.opt.output_nc
26
+
27
+ def __getitem__(self, index):
28
+ """Return a data point and its metadata information.
29
+
30
+ Parameters:
31
+ index - - a random integer for data indexing
32
+
33
+ Returns a dictionary that contains A, B, A_paths and B_paths
34
+ A (tensor) - - an image in the input domain
35
+ B (tensor) - - its corresponding image in the target domain
36
+ A_paths (str) - - image paths
37
+ B_paths (str) - - image paths (same as A_paths)
38
+ """
39
+ # read an image given a random integer index
40
+ AB_path = self.AB_paths[index]
41
+ AB = Image.open(AB_path).convert('RGB')
42
+ # split AB image into A and B
43
+ w, h = AB.size
44
+ w2 = int(w / 2)
45
+ A = AB.crop((0, 0, w2, h))
46
+ B = AB.crop((w2, 0, w, h))
47
+
48
+ # apply the same transform to both A and B
49
+ transform_params = get_params(self.opt, A.size)
50
+ A_transform = get_transform(self.opt, transform_params, grayscale=(self.input_nc == 1))
51
+ B_transform = get_transform(self.opt, transform_params, grayscale=(self.output_nc == 1))
52
+
53
+ A = A_transform(A)
54
+ B = B_transform(B)
55
+
56
+ return {'A': A, 'B': B, 'A_paths': AB_path, 'B_paths': AB_path}
57
+
58
+ def __len__(self):
59
+ """Return the total number of images in the dataset."""
60
+ return len(self.AB_paths)
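
AlignedDataset assumes every image file already contains A and B side by side. A small PIL sketch of building one such pair (the same thing `datasets/combine_A_and_B.py`, later in this diff, does with OpenCV); the input paths are placeholders and both images are assumed to share the same size:

```python
# Build one {A,B} pair the way AlignedDataset expects it: A on the left half, B on the right.
from PIL import Image

def make_pair(path_A: str, path_B: str, path_AB: str) -> None:
    im_A = Image.open(path_A).convert("RGB")
    im_B = Image.open(path_B).convert("RGB")
    w, h = im_A.size
    canvas = Image.new("RGB", (w * 2, h))
    canvas.paste(im_A, (0, 0))
    canvas.paste(im_B, (w, 0))
    canvas.save(path_AB)
```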
data/base_dataset.py ADDED
@@ -0,0 +1,167 @@
1
+ """This module implements an abstract base class (ABC) 'BaseDataset' for datasets.
2
+
3
+ It also includes common transformation functions (e.g., get_transform, __scale_width), which can be later used in subclasses.
4
+ """
5
+ import random
6
+ import numpy as np
7
+ import torch.utils.data as data
8
+ from PIL import Image
9
+ import torchvision.transforms as transforms
10
+ from abc import ABC, abstractmethod
11
+
12
+
13
+ class BaseDataset(data.Dataset, ABC):
14
+ """This class is an abstract base class (ABC) for datasets.
15
+
16
+ To create a subclass, you need to implement the following four functions:
17
+ -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
18
+ -- <__len__>: return the size of dataset.
19
+ -- <__getitem__>: get a data point.
20
+ -- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
21
+ """
22
+
23
+ def __init__(self, opt):
24
+ """Initialize the class; save the options in the class
25
+
26
+ Parameters:
27
+ opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
28
+ """
29
+ self.opt = opt
30
+ self.root = opt.dataroot
31
+
32
+ @staticmethod
33
+ def modify_commandline_options(parser, is_train):
34
+ """Add new dataset-specific options, and rewrite default values for existing options.
35
+
36
+ Parameters:
37
+ parser -- original option parser
38
+ is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
39
+
40
+ Returns:
41
+ the modified parser.
42
+ """
43
+ return parser
44
+
45
+ @abstractmethod
46
+ def __len__(self):
47
+ """Return the total number of images in the dataset."""
48
+ return 0
49
+
50
+ @abstractmethod
51
+ def __getitem__(self, index):
52
+ """Return a data point and its metadata information.
53
+
54
+ Parameters:
55
+ index - - a random integer for data indexing
56
+
57
+ Returns:
58
+ a dictionary of data with their names. It usually contains the data itself and its metadata information.
59
+ """
60
+ pass
61
+
62
+
63
+ def get_params(opt, size):
64
+ w, h = size
65
+ new_h = h
66
+ new_w = w
67
+ if opt.preprocess == 'resize_and_crop':
68
+ new_h = new_w = opt.load_size
69
+ elif opt.preprocess == 'scale_width_and_crop':
70
+ new_w = opt.load_size
71
+ new_h = opt.load_size * h // w
72
+
73
+ x = random.randint(0, np.maximum(0, new_w - opt.crop_size))
74
+ y = random.randint(0, np.maximum(0, new_h - opt.crop_size))
75
+
76
+ flip = random.random() > 0.5
77
+
78
+ return {'crop_pos': (x, y), 'flip': flip}
79
+
80
+
81
+ def get_transform(opt, params=None, grayscale=False, method=transforms.InterpolationMode.BICUBIC, convert=True):
82
+ transform_list = []
83
+ if grayscale:
84
+ transform_list.append(transforms.Grayscale(1))
85
+ if 'resize' in opt.preprocess:
86
+ osize = [opt.load_size, opt.load_size]
87
+ transform_list.append(transforms.Resize(osize, method))
88
+ elif 'scale_width' in opt.preprocess:
89
+ transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size, opt.crop_size, method)))
90
+
91
+ if 'crop' in opt.preprocess:
92
+ if params is None:
93
+ transform_list.append(transforms.RandomCrop(opt.crop_size))
94
+ else:
95
+ transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.crop_size)))
96
+
97
+ if opt.preprocess == 'none':
98
+ transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base=4, method=method)))
99
+
100
+ if not opt.no_flip:
101
+ if params is None:
102
+ transform_list.append(transforms.RandomHorizontalFlip())
103
+ elif params['flip']:
104
+ transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip'])))
105
+
106
+ if convert:
107
+ transform_list += [transforms.ToTensor()]
108
+ if grayscale:
109
+ transform_list += [transforms.Normalize((0.5,), (0.5,))]
110
+ else:
111
+ transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
112
+ return transforms.Compose(transform_list)
113
+
114
+
115
+ def __transforms2pil_resize(method):
116
+ mapper = {transforms.InterpolationMode.BILINEAR: Image.BILINEAR,
117
+ transforms.InterpolationMode.BICUBIC: Image.BICUBIC,
118
+ transforms.InterpolationMode.NEAREST: Image.NEAREST,
119
+ transforms.InterpolationMode.LANCZOS: Image.LANCZOS,}
120
+ return mapper[method]
121
+
122
+
123
+ def __make_power_2(img, base, method=transforms.InterpolationMode.BICUBIC):
124
+ method = __transforms2pil_resize(method)
125
+ ow, oh = img.size
126
+ h = int(round(oh / base) * base)
127
+ w = int(round(ow / base) * base)
128
+ if h == oh and w == ow:
129
+ return img
130
+
131
+ __print_size_warning(ow, oh, w, h)
132
+ return img.resize((w, h), method)
133
+
134
+
135
+ def __scale_width(img, target_size, crop_size, method=transforms.InterpolationMode.BICUBIC):
136
+ method = __transforms2pil_resize(method)
137
+ ow, oh = img.size
138
+ if ow == target_size and oh >= crop_size:
139
+ return img
140
+ w = target_size
141
+ h = int(max(target_size * oh / ow, crop_size))
142
+ return img.resize((w, h), method)
143
+
144
+
145
+ def __crop(img, pos, size):
146
+ ow, oh = img.size
147
+ x1, y1 = pos
148
+ tw = th = size
149
+ if (ow > tw or oh > th):
150
+ return img.crop((x1, y1, x1 + tw, y1 + th))
151
+ return img
152
+
153
+
154
+ def __flip(img, flip):
155
+ if flip:
156
+ return img.transpose(Image.FLIP_LEFT_RIGHT)
157
+ return img
158
+
159
+
160
+ def __print_size_warning(ow, oh, w, h):
161
+ """Print warning information about image size(only print once)"""
162
+ if not hasattr(__print_size_warning, 'has_printed'):
163
+ print("The image size needs to be a multiple of 4. "
164
+ "The loaded image size was (%d, %d), so it was adjusted to "
165
+ "(%d, %d). This adjustment will be done to all images "
166
+ "whose sizes are not multiples of 4" % (ow, oh, w, h))
167
+ __print_size_warning.has_printed = True
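
As a quick illustration of `get_params`/`get_transform` working together (the pattern `AlignedDataset` uses so A and B receive the same crop and flip), here is a sketch; the `SimpleNamespace` is a stand-in for the real options object and only carries the fields these helpers read:

```python
# Apply the shared resize/crop/flip pipeline to one image, from the repository root.
from types import SimpleNamespace
from PIL import Image
from data.base_dataset import get_params, get_transform

opt = SimpleNamespace(preprocess="resize_and_crop", load_size=286, crop_size=256, no_flip=False)
img = Image.open("datasets/data/test/test_demo_1.png").convert("RGB")
params = get_params(opt, img.size)        # shared crop position and flip decision
tensor = get_transform(opt, params)(img)  # 3 x 256 x 256, normalized to [-1, 1]
print(tensor.shape, tensor.min().item(), tensor.max().item())
```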
data/colorization_dataset.py ADDED
@@ -0,0 +1,68 @@
1
+ import os
2
+ from data.base_dataset import BaseDataset, get_transform
3
+ from data.image_folder import make_dataset
4
+ from skimage import color # require skimage
5
+ from PIL import Image
6
+ import numpy as np
7
+ import torchvision.transforms as transforms
8
+
9
+
10
+ class ColorizationDataset(BaseDataset):
11
+ """This dataset class can load a set of natural images in RGB, and convert RGB format into (L, ab) pairs in Lab color space.
12
+
13
+ This dataset is required by pix2pix-based colorization model ('--model colorization')
14
+ """
15
+ @staticmethod
16
+ def modify_commandline_options(parser, is_train):
17
+ """Add new dataset-specific options, and rewrite default values for existing options.
18
+
19
+ Parameters:
20
+ parser -- original option parser
21
+ is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
22
+
23
+ Returns:
24
+ the modified parser.
25
+
26
+ By default, the number of channels for input image is 1 (L) and
27
+ the number of channels for output image is 2 (ab). The direction is from A to B
28
+ """
29
+ parser.set_defaults(input_nc=1, output_nc=2, direction='AtoB')
30
+ return parser
31
+
32
+ def __init__(self, opt):
33
+ """Initialize this dataset class.
34
+
35
+ Parameters:
36
+ opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
37
+ """
38
+ BaseDataset.__init__(self, opt)
39
+ self.dir = os.path.join(opt.dataroot, opt.phase)
40
+ self.AB_paths = sorted(make_dataset(self.dir, opt.max_dataset_size))
41
+ assert(opt.input_nc == 1 and opt.output_nc == 2 and opt.direction == 'AtoB')
42
+ self.transform = get_transform(self.opt, convert=False)
43
+
44
+ def __getitem__(self, index):
45
+ """Return a data point and its metadata information.
46
+
47
+ Parameters:
48
+ index - - a random integer for data indexing
49
+
50
+ Returns a dictionary that contains A, B, A_paths and B_paths
51
+ A (tensor) - - the L channel of an image
52
+ B (tensor) - - the ab channels of the same image
53
+ A_paths (str) - - image paths
54
+ B_paths (str) - - image paths (same as A_paths)
55
+ """
56
+ path = self.AB_paths[index]
57
+ im = Image.open(path).convert('RGB')
58
+ im = self.transform(im)
59
+ im = np.array(im)
60
+ lab = color.rgb2lab(im).astype(np.float32)
61
+ lab_t = transforms.ToTensor()(lab)
62
+ A = lab_t[[0], ...] / 50.0 - 1.0
63
+ B = lab_t[[1, 2], ...] / 110.0
64
+ return {'A': A, 'B': B, 'A_paths': path, 'B_paths': path}
65
+
66
+ def __len__(self):
67
+ """Return the total number of images in the dataset."""
68
+ return len(self.AB_paths)
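
The scaling above maps L from [0, 100] to [-1, 1] and ab from roughly [-110, 110] to [-1, 1]. For reference, a sketch of the inverse step used when a predicted (A, B) pair is turned back into an RGB image; this is a standalone helper written for illustration, not code from this commit:

```python
# Invert ColorizationDataset's scaling: A in [-1, 1] -> L in [0, 100],
# B in [-1, 1] -> ab in [-110, 110], then Lab -> RGB via skimage.
import numpy as np
import torch
from skimage import color

def lab_tensors_to_rgb(A: torch.Tensor, B: torch.Tensor) -> np.ndarray:
    L = (A + 1.0) * 50.0                      # 1 x H x W
    ab = B * 110.0                            # 2 x H x W
    lab = torch.cat([L, ab], dim=0).permute(1, 2, 0).cpu().numpy().astype(np.float64)
    rgb = color.lab2rgb(lab)                  # float64 in [0, 1]
    return (rgb * 255).astype(np.uint8)
```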
data/image_folder.py ADDED
@@ -0,0 +1,65 @@
1
+ """A modified image folder class
2
+
3
+ We modify the official PyTorch image folder (https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py)
4
+ so that this class can load images from both current directory and its subdirectories.
5
+ """
6
+
7
+ import torch.utils.data as data
8
+
9
+ from PIL import Image
10
+ import os
11
+
12
+ IMG_EXTENSIONS = [
13
+ '.jpg', '.JPG', '.jpeg', '.JPEG',
14
+ '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
15
+ '.tif', '.TIF', '.tiff', '.TIFF',
16
+ ]
17
+
18
+
19
+ def is_image_file(filename):
20
+ return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
21
+
22
+
23
+ def make_dataset(dir, max_dataset_size=float("inf")):
24
+ images = []
25
+ assert os.path.isdir(dir), '%s is not a valid directory' % dir
26
+
27
+ for root, _, fnames in sorted(os.walk(dir)):
28
+ for fname in fnames:
29
+ if is_image_file(fname):
30
+ path = os.path.join(root, fname)
31
+ images.append(path)
32
+ return images[:min(max_dataset_size, len(images))]
33
+
34
+
35
+ def default_loader(path):
36
+ return Image.open(path).convert('RGB')
37
+
38
+
39
+ class ImageFolder(data.Dataset):
40
+
41
+ def __init__(self, root, transform=None, return_paths=False,
42
+ loader=default_loader):
43
+ imgs = make_dataset(root)
44
+ if len(imgs) == 0:
45
+ raise(RuntimeError("Found 0 images in: " + root + "\n"
46
+ "Supported image extensions are: " + ",".join(IMG_EXTENSIONS)))
47
+
48
+ self.root = root
49
+ self.imgs = imgs
50
+ self.transform = transform
51
+ self.return_paths = return_paths
52
+ self.loader = loader
53
+
54
+ def __getitem__(self, index):
55
+ path = self.imgs[index]
56
+ img = self.loader(path)
57
+ if self.transform is not None:
58
+ img = self.transform(img)
59
+ if self.return_paths:
60
+ return img, path
61
+ else:
62
+ return img
63
+
64
+ def __len__(self):
65
+ return len(self.imgs)
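
A one-line sketch of `make_dataset` on the test folder added in this commit; it walks subdirectories and keeps only files whose extensions appear in `IMG_EXTENSIONS`:

```python
# List images recursively under the demo test directory shipped with this commit.
from data.image_folder import make_dataset

paths = make_dataset("./datasets/data/test")
print(len(paths), paths[:3])
```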
data/single_dataset.py ADDED
@@ -0,0 +1,40 @@
1
+ from data.base_dataset import BaseDataset, get_transform
2
+ from data.image_folder import make_dataset
3
+ from PIL import Image
4
+
5
+
6
+ class SingleDataset(BaseDataset):
7
+ """This dataset class can load a set of images specified by the path --dataroot /path/to/data.
8
+
9
+ It can be used for generating CycleGAN results only for one side with the model option '--model test'.
10
+ """
11
+
12
+ def __init__(self, opt):
13
+ """Initialize this dataset class.
14
+
15
+ Parameters:
16
+ opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
17
+ """
18
+ BaseDataset.__init__(self, opt)
19
+ self.A_paths = sorted(make_dataset(opt.dataroot, opt.max_dataset_size))
20
+ input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc
21
+ self.transform = get_transform(opt, grayscale=(input_nc == 1))
22
+
23
+ def __getitem__(self, index):
24
+ """Return a data point and its metadata information.
25
+
26
+ Parameters:
27
+ index - - a random integer for data indexing
28
+
29
+ Returns a dictionary that contains A and A_paths
30
+ A(tensor) - - an image in one domain
31
+ A_paths(str) - - the path of the image
32
+ """
33
+ A_path = self.A_paths[index]
34
+ A_img = Image.open(A_path).convert('RGB')
35
+ A = self.transform(A_img)
36
+ return {'A': A, 'A_paths': A_path}
37
+
38
+ def __len__(self):
39
+ """Return the total number of images in the dataset."""
40
+ return len(self.A_paths)
data/template_dataset.py ADDED
@@ -0,0 +1,75 @@
1
+ """Dataset class template
2
+
3
+ This module provides a template for users to implement custom datasets.
4
+ You can specify '--dataset_mode template' to use this dataset.
5
+ The class name should be consistent with both the filename and its dataset_mode option.
6
+ The filename should be <dataset_mode>_dataset.py
7
+ The class name should be <Dataset_mode>Dataset
8
+ You need to implement the following functions:
9
+ -- <modify_commandline_options>: Add dataset-specific options and rewrite default values for existing options.
10
+ -- <__init__>: Initialize this dataset class.
11
+ -- <__getitem__>: Return a data point and its metadata information.
12
+ -- <__len__>: Return the number of images.
13
+ """
14
+ from data.base_dataset import BaseDataset, get_transform
15
+ # from data.image_folder import make_dataset
16
+ # from PIL import Image
17
+
18
+
19
+ class TemplateDataset(BaseDataset):
20
+ """A template dataset class for you to implement custom datasets."""
21
+ @staticmethod
22
+ def modify_commandline_options(parser, is_train):
23
+ """Add new dataset-specific options, and rewrite default values for existing options.
24
+
25
+ Parameters:
26
+ parser -- original option parser
27
+ is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
28
+
29
+ Returns:
30
+ the modified parser.
31
+ """
32
+ parser.add_argument('--new_dataset_option', type=float, default=1.0, help='new dataset option')
33
+ parser.set_defaults(max_dataset_size=10, new_dataset_option=2.0) # specify dataset-specific default values
34
+ return parser
35
+
36
+ def __init__(self, opt):
37
+ """Initialize this dataset class.
38
+
39
+ Parameters:
40
+ opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
41
+
42
+ A few things can be done here.
43
+ - save the options (have been done in BaseDataset)
44
+ - get image paths and meta information of the dataset.
45
+ - define the image transformation.
46
+ """
47
+ # save the option and dataset root
48
+ BaseDataset.__init__(self, opt)
49
+ # get the image paths of your dataset;
50
+ self.image_paths = [] # You can call sorted(make_dataset(self.root, opt.max_dataset_size)) to get all the image paths under the directory self.root
51
+ # define the default transform function. You can use <base_dataset.get_transform>; You can also define your custom transform function
52
+ self.transform = get_transform(opt)
53
+
54
+ def __getitem__(self, index):
55
+ """Return a data point and its metadata information.
56
+
57
+ Parameters:
58
+ index -- a random integer for data indexing
59
+
60
+ Returns:
61
+ a dictionary of data with their names. It usually contains the data itself and its metadata information.
62
+
63
+ Step 1: get a random image path: e.g., path = self.image_paths[index]
64
+ Step 2: load your data from the disk: e.g., image = Image.open(path).convert('RGB').
65
+ Step 3: convert your data to a PyTorch tensor. You can use helper functions such as self.transform. e.g., data = self.transform(image)
66
+ Step 4: return a data point as a dictionary.
67
+ """
68
+ path = 'temp' # needs to be a string
69
+ data_A = None # needs to be a tensor
70
+ data_B = None # needs to be a tensor
71
+ return {'data_A': data_A, 'data_B': data_B, 'path': path}
72
+
73
+ def __len__(self):
74
+ """Return the total number of images."""
75
+ return len(self.image_paths)
data/unaligned_dataset.py ADDED
@@ -0,0 +1,71 @@
1
+ import os
2
+ from data.base_dataset import BaseDataset, get_transform
3
+ from data.image_folder import make_dataset
4
+ from PIL import Image
5
+ import random
6
+
7
+
8
+ class UnalignedDataset(BaseDataset):
9
+ """
10
+ This dataset class can load unaligned/unpaired datasets.
11
+
12
+ It requires two directories to host training images from domain A '/path/to/data/trainA'
13
+ and from domain B '/path/to/data/trainB' respectively.
14
+ You can train the model with the dataset flag '--dataroot /path/to/data'.
15
+ Similarly, you need to prepare two directories:
16
+ '/path/to/data/testA' and '/path/to/data/testB' during test time.
17
+ """
18
+
19
+ def __init__(self, opt):
20
+ """Initialize this dataset class.
21
+
22
+ Parameters:
23
+ opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
24
+ """
25
+ BaseDataset.__init__(self, opt)
26
+ self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A') # create a path '/path/to/data/trainA'
27
+ self.dir_B = os.path.join(opt.dataroot, opt.phase + 'B') # create a path '/path/to/data/trainB'
28
+
29
+ self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size)) # load images from '/path/to/data/trainA'
30
+ self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size)) # load images from '/path/to/data/trainB'
31
+ self.A_size = len(self.A_paths) # get the size of dataset A
32
+ self.B_size = len(self.B_paths) # get the size of dataset B
33
+ btoA = self.opt.direction == 'BtoA'
34
+ input_nc = self.opt.output_nc if btoA else self.opt.input_nc # get the number of channels of input image
35
+ output_nc = self.opt.input_nc if btoA else self.opt.output_nc # get the number of channels of output image
36
+ self.transform_A = get_transform(self.opt, grayscale=(input_nc == 1))
37
+ self.transform_B = get_transform(self.opt, grayscale=(output_nc == 1))
38
+
39
+ def __getitem__(self, index):
40
+ """Return a data point and its metadata information.
41
+
42
+ Parameters:
43
+ index (int) -- a random integer for data indexing
44
+
45
+ Returns a dictionary that contains A, B, A_paths and B_paths
46
+ A (tensor) -- an image in the input domain
47
+ B (tensor) -- its corresponding image in the target domain
48
+ A_paths (str) -- image paths
49
+ B_paths (str) -- image paths
50
+ """
51
+ A_path = self.A_paths[index % self.A_size] # make sure index is within the range
52
+ if self.opt.serial_batches: # use the same (wrapped) index for domain B
53
+ index_B = index % self.B_size
54
+ else: # randomize the index for domain B to avoid fixed pairs.
55
+ index_B = random.randint(0, self.B_size - 1)
56
+ B_path = self.B_paths[index_B]
57
+ A_img = Image.open(A_path).convert('RGB')
58
+ B_img = Image.open(B_path).convert('RGB')
59
+ # apply image transformation
60
+ A = self.transform_A(A_img)
61
+ B = self.transform_B(B_img)
62
+
63
+ return {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path}
64
+
65
+ def __len__(self):
66
+ """Return the total number of images in the dataset.
67
+
68
+ As we have two datasets with potentially different number of images,
69
+ we take a maximum of
70
+ """
71
+ return max(self.A_size, self.B_size)
datasets/bibtex/cityscapes.tex ADDED
@@ -0,0 +1,6 @@
1
+ @inproceedings{Cordts2016Cityscapes,
2
+ title={The Cityscapes Dataset for Semantic Urban Scene Understanding},
3
+ author={Cordts, Marius and Omran, Mohamed and Ramos, Sebastian and Rehfeld, Timo and Enzweiler, Markus and Benenson, Rodrigo and Franke, Uwe and Roth, Stefan and Schiele, Bernt},
4
+ booktitle={Proc. of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
5
+ year={2016}
6
+ }
datasets/bibtex/facades.tex ADDED
@@ -0,0 +1,7 @@
1
+ @INPROCEEDINGS{Tylecek13,
2
+ author = {Radim Tyle{\v c}ek, Radim {\v S}{\' a}ra},
3
+ title = {Spatial Pattern Templates for Recognition of Objects with Regular Structure},
4
+ booktitle = {Proc. GCPR},
5
+ year = {2013},
6
+ address = {Saarbrucken, Germany},
7
+ }
datasets/bibtex/handbags.tex ADDED
@@ -0,0 +1,13 @@
1
+ @inproceedings{zhu2016generative,
2
+ title={Generative Visual Manipulation on the Natural Image Manifold},
3
+ author={Zhu, Jun-Yan and Kr{\"a}henb{\"u}hl, Philipp and Shechtman, Eli and Efros, Alexei A.},
4
+ booktitle={Proceedings of European Conference on Computer Vision (ECCV)},
5
+ year={2016}
6
+ }
7
+
8
+ @InProceedings{xie15hed,
9
+ author = {"Xie, Saining and Tu, Zhuowen"},
10
+ Title = {Holistically-Nested Edge Detection},
11
+ Booktitle = "Proceedings of IEEE International Conference on Computer Vision",
12
+ Year = {2015},
13
+ }
datasets/bibtex/shoes.tex ADDED
@@ -0,0 +1,14 @@
1
+ @InProceedings{fine-grained,
2
+ author = {A. Yu and K. Grauman},
3
+ title = {{F}ine-{G}rained {V}isual {C}omparisons with {L}ocal {L}earning},
4
+ booktitle = {Computer Vision and Pattern Recognition (CVPR)},
5
+ month = {June},
6
+ year = {2014}
7
+ }
8
+
9
+ @InProceedings{xie15hed,
10
+ author = {"Xie, Saining and Tu, Zhuowen"},
11
+ Title = {Holistically-Nested Edge Detection},
12
+ Booktitle = "Proceedings of IEEE International Conference on Computer Vision",
13
+ Year = {2015},
14
+ }
datasets/bibtex/transattr.tex ADDED
@@ -0,0 +1,8 @@
1
+ @article {Laffont14,
2
+ title = {Transient Attributes for High-Level Understanding and Editing of Outdoor Scenes},
3
+ author = {Pierre-Yves Laffont and Zhile Ren and Xiaofeng Tao and Chao Qian and James Hays},
4
+ journal = {ACM Transactions on Graphics (proceedings of SIGGRAPH)},
5
+ volume = {33},
6
+ number = {4},
7
+ year = {2014}
8
+ }
datasets/combine_A_and_B.py ADDED
@@ -0,0 +1,67 @@
1
+ import os
2
+ import numpy as np
3
+ import cv2
4
+ import argparse
5
+ from multiprocessing import Pool
6
+
7
+
8
+ def image_write(path_A, path_B, path_AB):
9
+ im_A = cv2.imread(path_A, 1) # python2: cv2.CV_LOAD_IMAGE_COLOR; python3: cv2.IMREAD_COLOR
10
+ im_B = cv2.imread(path_B, 1) # python2: cv2.CV_LOAD_IMAGE_COLOR; python3: cv2.IMREAD_COLOR
11
+ im_AB = np.concatenate([im_A, im_B], 1)
12
+ cv2.imwrite(path_AB, im_AB)
13
+
14
+
15
+ parser = argparse.ArgumentParser('create image pairs')
16
+ parser.add_argument('--fold_A', dest='fold_A', help='input directory for image A', type=str, default='../dataset/50kshoes_edges')
17
+ parser.add_argument('--fold_B', dest='fold_B', help='input directory for image B', type=str, default='../dataset/50kshoes_jpg')
18
+ parser.add_argument('--fold_AB', dest='fold_AB', help='output directory', type=str, default='../dataset/test_AB')
19
+ parser.add_argument('--num_imgs', dest='num_imgs', help='number of images', type=int, default=1000000)
20
+ parser.add_argument('--use_AB', dest='use_AB', help='if true: (0001_A, 0001_B) to (0001_AB)', action='store_true')
21
+ parser.add_argument('--no_multiprocessing', dest='no_multiprocessing', help='If used, chooses single CPU execution instead of parallel execution', action='store_true',default=False)
22
+ args = parser.parse_args()
23
+
24
+ for arg in vars(args):
25
+ print('[%s] = ' % arg, getattr(args, arg))
26
+
27
+ splits = os.listdir(args.fold_A)
28
+
29
+ if not args.no_multiprocessing:
30
+ pool=Pool()
31
+
32
+ for sp in splits:
33
+ img_fold_A = os.path.join(args.fold_A, sp)
34
+ img_fold_B = os.path.join(args.fold_B, sp)
35
+ img_list = os.listdir(img_fold_A)
36
+ if args.use_AB:
37
+ img_list = [img_path for img_path in img_list if '_A.' in img_path]
38
+
39
+ num_imgs = min(args.num_imgs, len(img_list))
40
+ print('split = %s, use %d/%d images' % (sp, num_imgs, len(img_list)))
41
+ img_fold_AB = os.path.join(args.fold_AB, sp)
42
+ if not os.path.isdir(img_fold_AB):
43
+ os.makedirs(img_fold_AB)
44
+ print('split = %s, number of images = %d' % (sp, num_imgs))
45
+ for n in range(num_imgs):
46
+ name_A = img_list[n]
47
+ path_A = os.path.join(img_fold_A, name_A)
48
+ if args.use_AB:
49
+ name_B = name_A.replace('_A.', '_B.')
50
+ else:
51
+ name_B = name_A
52
+ path_B = os.path.join(img_fold_B, name_B)
53
+ if os.path.isfile(path_A) and os.path.isfile(path_B):
54
+ name_AB = name_A
55
+ if args.use_AB:
56
+ name_AB = name_AB.replace('_A.', '.') # remove _A
57
+ path_AB = os.path.join(img_fold_AB, name_AB)
58
+ if not args.no_multiprocessing:
59
+ pool.apply_async(image_write, args=(path_A, path_B, path_AB))
60
+ else:
61
+ im_A = cv2.imread(path_A, 1) # python2: cv2.CV_LOAD_IMAGE_COLOR; python3: cv2.IMREAD_COLOR
62
+ im_B = cv2.imread(path_B, 1) # python2: cv2.CV_LOAD_IMAGE_COLOR; python3: cv2.IMREAD_COLOR
63
+ im_AB = np.concatenate([im_A, im_B], 1)
64
+ cv2.imwrite(path_AB, im_AB)
65
+ if not args.no_multiprocessing:
66
+ pool.close()
67
+ pool.join()
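
For reference, a minimal sketch (not part of the script above) of how a combined AB image written by `combine_A_and_B.py` can be split back into its A and B halves; the path below is hypothetical:

```python
import cv2

# Hypothetical path to one combined image produced by combine_A_and_B.py.
path_AB = "../dataset/test_AB/train/0001.jpg"

im_AB = cv2.imread(path_AB, cv2.IMREAD_COLOR)   # H x (2*W) x 3, BGR
h, w = im_AB.shape[:2]

# The script concatenates along axis 1 (width), so A is the left half and B the right half.
im_A = im_AB[:, : w // 2]
im_B = im_AB[:, w // 2 :]
print(im_A.shape, im_B.shape)                   # both are H x W x 3
```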
datasets/data/test/input_image.png ADDED
datasets/data/test/test_demo_1.png ADDED
datasets/download_cyclegan_dataset.sh ADDED
@@ -0,0 +1,21 @@
1
+ FILE=$1
2
+
3
+ if [[ $FILE != "ae_photos" && $FILE != "apple2orange" && $FILE != "summer2winter_yosemite" && $FILE != "horse2zebra" && $FILE != "monet2photo" && $FILE != "cezanne2photo" && $FILE != "ukiyoe2photo" && $FILE != "vangogh2photo" && $FILE != "maps" && $FILE != "cityscapes" && $FILE != "facades" && $FILE != "iphone2dslr_flower" && $FILE != "mini" && $FILE != "mini_pix2pix" && $FILE != "mini_colorization" ]]; then
4
+ echo "Available datasets are: apple2orange, summer2winter_yosemite, horse2zebra, monet2photo, cezanne2photo, ukiyoe2photo, vangogh2photo, maps, cityscapes, facades, iphone2dslr_flower, ae_photos"
5
+ exit 1
6
+ fi
7
+
8
+ if [[ $FILE == "cityscapes" ]]; then
9
+ echo "Due to license issue, we cannot provide the Cityscapes dataset from our repository. Please download the Cityscapes dataset from https://cityscapes-dataset.com, and use the script ./datasets/prepare_cityscapes_dataset.py."
10
+ echo "You need to download gtFine_trainvaltest.zip and leftImg8bit_trainvaltest.zip. For further instruction, please read ./datasets/prepare_cityscapes_dataset.py"
11
+ exit 1
12
+ fi
13
+
14
+ echo "Specified [$FILE]"
15
+ URL=http://efrosgans.eecs.berkeley.edu/cyclegan/datasets/$FILE.zip
16
+ ZIP_FILE=./datasets/$FILE.zip
17
+ TARGET_DIR=./datasets/$FILE/
18
+ wget -N $URL -O $ZIP_FILE
19
+ mkdir $TARGET_DIR
20
+ unzip $ZIP_FILE -d ./datasets/
21
+ rm $ZIP_FILE
datasets/download_pix2pix_dataset.sh ADDED
@@ -0,0 +1,22 @@
1
+ FILE=$1
2
+
3
+ if [[ $FILE != "cityscapes" && $FILE != "night2day" && $FILE != "edges2handbags" && $FILE != "edges2shoes" && $FILE != "facades" && $FILE != "maps" ]]; then
4
+ echo "Available datasets are cityscapes, night2day, edges2handbags, edges2shoes, facades, maps"
5
+ exit 1
6
+ fi
7
+
8
+ if [[ $FILE == "cityscapes" ]]; then
9
+ echo "Due to license issue, we cannot provide the Cityscapes dataset from our repository. Please download the Cityscapes dataset from https://cityscapes-dataset.com, and use the script ./datasets/prepare_cityscapes_dataset.py."
10
+ echo "You need to download gtFine_trainvaltest.zip and leftImg8bit_trainvaltest.zip. For further instruction, please read ./datasets/prepare_cityscapes_dataset.py"
11
+ exit 1
12
+ fi
13
+
14
+ echo "Specified [$FILE]"
15
+
16
+ URL=http://efrosgans.eecs.berkeley.edu/pix2pix/datasets/$FILE.tar.gz
17
+ TAR_FILE=./datasets/$FILE.tar.gz
18
+ TARGET_DIR=./datasets/$FILE/
19
+ wget -N $URL -O $TAR_FILE
20
+ mkdir -p $TARGET_DIR
21
+ tar -zxvf $TAR_FILE -C ./datasets/
22
+ rm $TAR_FILE
datasets/make_dataset_aligned.py ADDED
@@ -0,0 +1,63 @@
1
+ import os
2
+
3
+ from PIL import Image
4
+
5
+
6
+ def get_file_paths(folder):
7
+ image_file_paths = []
8
+ for root, dirs, filenames in os.walk(folder):
9
+ filenames = sorted(filenames)
10
+ for filename in filenames:
11
+ input_path = os.path.abspath(root)
12
+ file_path = os.path.join(input_path, filename)
13
+ if filename.endswith('.png') or filename.endswith('.jpg'):
14
+ image_file_paths.append(file_path)
15
+
16
+ break # prevent descending into subfolders
17
+ return image_file_paths
18
+
19
+
20
+ def align_images(a_file_paths, b_file_paths, target_path):
21
+ if not os.path.exists(target_path):
22
+ os.makedirs(target_path)
23
+
24
+ for i in range(len(a_file_paths)):
25
+ img_a = Image.open(a_file_paths[i])
26
+ img_b = Image.open(b_file_paths[i])
27
+ assert(img_a.size == img_b.size)
28
+
29
+ aligned_image = Image.new("RGB", (img_a.size[0] * 2, img_a.size[1]))
30
+ aligned_image.paste(img_a, (0, 0))
31
+ aligned_image.paste(img_b, (img_a.size[0], 0))
32
+ aligned_image.save(os.path.join(target_path, '{:04d}.jpg'.format(i)))
33
+
34
+
35
+ if __name__ == '__main__':
36
+ import argparse
37
+ parser = argparse.ArgumentParser()
38
+ parser.add_argument(
39
+ '--dataset-path',
40
+ dest='dataset_path',
41
+ help='Which folder to process (it should have subfolders testA, testB, trainA and trainB)'
42
+ )
43
+ args = parser.parse_args()
44
+
45
+ dataset_folder = args.dataset_path
46
+ print(dataset_folder)
47
+
48
+ test_a_path = os.path.join(dataset_folder, 'testA')
49
+ test_b_path = os.path.join(dataset_folder, 'testB')
50
+ test_a_file_paths = get_file_paths(test_a_path)
51
+ test_b_file_paths = get_file_paths(test_b_path)
52
+ assert(len(test_a_file_paths) == len(test_b_file_paths))
53
+ test_path = os.path.join(dataset_folder, 'test')
54
+
55
+ train_a_path = os.path.join(dataset_folder, 'trainA')
56
+ train_b_path = os.path.join(dataset_folder, 'trainB')
57
+ train_a_file_paths = get_file_paths(train_a_path)
58
+ train_b_file_paths = get_file_paths(train_b_path)
59
+ assert(len(train_a_file_paths) == len(train_b_file_paths))
60
+ train_path = os.path.join(dataset_folder, 'train')
61
+
62
+ align_images(test_a_file_paths, test_b_file_paths, test_path)
63
+ align_images(train_a_file_paths, train_b_file_paths, train_path)
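
The script above assumes a specific folder layout. A small, hedged sketch that generates a dummy dataset in that layout so the script can be smoke-tested (the root path, image size, and colors are arbitrary illustrations):

```python
import os
from PIL import Image

root = "./tmp_aligned_demo"  # hypothetical dataset root for a quick smoke test

for split in ("trainA", "trainB", "testA", "testB"):
    os.makedirs(os.path.join(root, split), exist_ok=True)

# Matching filenames and identical sizes in the A/B folders, as the assert above requires.
for i in range(2):
    for split in ("trainA", "trainB", "testA", "testB"):
        Image.new("RGB", (256, 256), color=(40 * i, 80, 160)).save(
            os.path.join(root, split, "%04d.png" % i)
        )

# Then: python datasets/make_dataset_aligned.py --dataset-path ./tmp_aligned_demo
```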
datasets/prepare_cityscapes_dataset.py ADDED
@@ -0,0 +1,99 @@
1
+ import os
2
+ import glob
3
+ from PIL import Image
4
+
5
+ help_msg = """
6
+ The dataset can be downloaded from https://cityscapes-dataset.com.
7
+ Please download the datasets [gtFine_trainvaltest.zip] and [leftImg8bit_trainvaltest.zip] and unzip them.
8
+ gtFine contains the semantic segmentations. Use --gtFine_dir to specify the path to the unzipped gtFine_trainvaltest directory.
9
+ leftImg8bit contains the dashcam photographs. Use --leftImg8bit_dir to specify the path to the unzipped leftImg8bit_trainvaltest directory.
10
+ The processed images will be placed at --output_dir.
11
+
12
+ Example usage:
13
+
14
+ python prepare_cityscapes_dataset.py --gtFine_dir ./gtFine/ --leftImg8bit_dir ./leftImg8bit --output_dir ./datasets/cityscapes/
15
+ """
16
+
17
+ def load_resized_img(path):
18
+ return Image.open(path).convert('RGB').resize((256, 256))
19
+
20
+ def check_matching_pair(segmap_path, photo_path):
21
+ segmap_identifier = os.path.basename(segmap_path).replace('_gtFine_color', '')
22
+ photo_identifier = os.path.basename(photo_path).replace('_leftImg8bit', '')
23
+
24
+ assert segmap_identifier == photo_identifier, \
25
+ "[%s] and [%s] don't seem to be matching. Aborting." % (segmap_path, photo_path)
26
+
27
+
28
+ def process_cityscapes(gtFine_dir, leftImg8bit_dir, output_dir, phase):
29
+ save_phase = 'test' if phase == 'val' else 'train'
30
+ savedir = os.path.join(output_dir, save_phase)
31
+ os.makedirs(savedir, exist_ok=True)
32
+ os.makedirs(savedir + 'A', exist_ok=True)
33
+ os.makedirs(savedir + 'B', exist_ok=True)
34
+ print("Directory structure prepared at %s" % output_dir)
35
+
36
+ segmap_expr = os.path.join(gtFine_dir, phase) + "/*/*_color.png"
37
+ segmap_paths = glob.glob(segmap_expr)
38
+ segmap_paths = sorted(segmap_paths)
39
+
40
+ photo_expr = os.path.join(leftImg8bit_dir, phase) + "/*/*_leftImg8bit.png"
41
+ photo_paths = glob.glob(photo_expr)
42
+ photo_paths = sorted(photo_paths)
43
+
44
+ assert len(segmap_paths) == len(photo_paths), \
45
+ "%d images that match [%s], and %d images that match [%s]. Aborting." % (len(segmap_paths), segmap_expr, len(photo_paths), photo_expr)
46
+
47
+ for i, (segmap_path, photo_path) in enumerate(zip(segmap_paths, photo_paths)):
48
+ check_matching_pair(segmap_path, photo_path)
49
+ segmap = load_resized_img(segmap_path)
50
+ photo = load_resized_img(photo_path)
51
+
52
+ # data for pix2pix where the two images are placed side-by-side
53
+ sidebyside = Image.new('RGB', (512, 256))
54
+ sidebyside.paste(segmap, (256, 0))
55
+ sidebyside.paste(photo, (0, 0))
56
+ savepath = os.path.join(savedir, "%d.jpg" % i)
57
+ sidebyside.save(savepath, format='JPEG', subsampling=0, quality=100)
58
+
59
+ # data for cyclegan where the two images are stored at two distinct directories
60
+ savepath = os.path.join(savedir + 'A', "%d_A.jpg" % i)
61
+ photo.save(savepath, format='JPEG', subsampling=0, quality=100)
62
+ savepath = os.path.join(savedir + 'B', "%d_B.jpg" % i)
63
+ segmap.save(savepath, format='JPEG', subsampling=0, quality=100)
64
+
65
+ if i % (len(segmap_paths) // 10) == 0:
66
+ print("%d / %d: last image saved at %s, " % (i, len(segmap_paths), savepath))
67
+
68
+
69
+
70
+
71
+
72
+
73
+
74
+
75
+
76
+
77
+ if __name__ == '__main__':
78
+ import argparse
79
+ parser = argparse.ArgumentParser()
80
+ parser.add_argument('--gtFine_dir', type=str, required=True,
81
+ help='Path to the Cityscapes gtFine directory.')
82
+ parser.add_argument('--leftImg8bit_dir', type=str, required=True,
83
+ help='Path to the Cityscapes leftImg8bit_trainvaltest directory.')
84
+ parser.add_argument('--output_dir', type=str, required=True,
85
+ default='./datasets/cityscapes',
86
+ help='Directory the output images will be written to.')
87
+ opt = parser.parse_args()
88
+
89
+ print(help_msg)
90
+
91
+ print('Preparing Cityscapes Dataset for val phase')
92
+ process_cityscapes(opt.gtFine_dir, opt.leftImg8bit_dir, opt.output_dir, "val")
93
+ print('Preparing Cityscapes Dataset for train phase')
94
+ process_cityscapes(opt.gtFine_dir, opt.leftImg8bit_dir, opt.output_dir, "train")
95
+
96
+ print('Done')
97
+
98
+
99
+
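For orientation, a short sketch of how one of the side-by-side pairs written above is laid out (the photo is pasted at x=0 and the segmentation map at x=256, which is consistent with using `--direction BtoA` to map labels to photos); the path is a hypothetical output of this script:

```python
from PIL import Image

pair = Image.open("./datasets/cityscapes/train/0.jpg")  # hypothetical output of this script
w, h = pair.size                                        # expected to be (512, 256)

photo  = pair.crop((0, 0, w // 2, h))       # left half: dashcam photo
segmap = pair.crop((w // 2, 0, w, h))       # right half: color segmentation map
photo.save("photo_0.jpg")
segmap.save("segmap_0.jpg")
```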
debug_images/L_channel.png DELETED
Binary file (40.6 kB)
 
debug_images/input_grayscale.png CHANGED
debug_images/input_original.png CHANGED
debug_images/lab_image.png DELETED
Binary file (184 kB)
 
debug_images/lab_image_debug.png CHANGED
debug_images/lab_to_rgb_debug.png ADDED
debug_images/output_rgb.png DELETED
Binary file (115 kB)
 
debug_images/postprocessed_output.png CHANGED
debug_images/preprocessed_input.pt CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:19ff643efd3bccca7dd4cf004c1cce21165e2eddb466d96016b76776a512bd41
3
  size 263379
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:04041cf7927f4cbe177bf6336049899c67d6476d70f12a347201b0321f14b534
3
  size 263379
debug_images/raw_AB_channels.png DELETED
Binary file (37.9 kB)
 
debug_images/raw_output.pt CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:5c83fc6de1312d1708ecce582d714de19b51821e9203fb9e557e4df7990cd8c3
3
  size 525483
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bf2f5f035f110680b6b7de488b174093a5b34f0967294ea1ef91ed5de20ee60f
3
  size 525483
docs/Dockerfile ADDED
@@ -0,0 +1,16 @@
1
+ FROM nvidia/cuda:10.1-base
2
+
3
+ #Nvidia Public GPG Key
4
+ RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub
5
+
6
+ RUN apt update && apt install -y wget unzip curl bzip2 git
7
+ RUN curl -LO http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh
8
+ RUN bash Miniconda3-latest-Linux-x86_64.sh -p /miniconda -b
9
+ RUN rm Miniconda3-latest-Linux-x86_64.sh
10
+ ENV PATH=/miniconda/bin:${PATH}
11
+ RUN conda update -y conda
12
+
13
+ RUN conda install -y pytorch torchvision -c pytorch
14
+ RUN mkdir /workspace/ && cd /workspace/ && git clone https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix.git && cd pytorch-CycleGAN-and-pix2pix && pip install -r requirements.txt
15
+
16
+ WORKDIR /workspace
docs/README_es.md ADDED
@@ -0,0 +1,238 @@
1
+ <img src='https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/raw/master/imgs/horse2zebra.gif' align="right" width=384>
2
+
3
+ <br><br><br>
4
+
5
+ # CycleGAN y pix2pix en PyTorch
6
+
7
+ Implementacion en PyTorch de Unpaired Image-to-Image Translation.
8
+
9
+ Este codigo fue escrito por [Jun-Yan Zhu](https://github.com/junyanz) y [Taesung Park](https://github.com/taesung), y con ayuda de [Tongzhou Wang](https://ssnl.github.io/).
10
+
11
+ Esta implementación de PyTorch produce resultados comparables o mejores que nuestro software original de Torch. Si te gustaría reproducir los mismos resultados que en el artículo oficial, echa un vistazo al código original de [CycleGAN Torch](https://github.com/junyanz/CycleGAN) y [pix2pix Torch](https://github.com/phillipi/pix2pix).
12
+
13
+ **Aviso**: El software actual funciona correctamente en PyTorch 0.41+. Para soporte en PyTorch 0.1-0.3: [branch](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/tree/pytorch0.3.1).
14
+
15
+ Puede encontrar información útil en [training/test tips](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md) y [preguntas frecuentes](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md). Para implementar modelos y conjuntos de datos personalizados, consulte nuestro [templates](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/README_es.md#modelo-y-dataset-personalizado). Para ayudar a los usuarios a comprender y adaptar mejor nuestra base de código, proporcionamos un [overview](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/overview.md) de la estructura de código de este repositorio.
16
+
17
+ **CycleGAN: [Proyecto](https://junyanz.github.io/CycleGAN/) | [PDF](https://arxiv.org/pdf/1703.10593.pdf) | [Torch](https://github.com/junyanz/CycleGAN) |
18
+ [Guia de Tensorflow Core](https://www.tensorflow.org/tutorials/generative/cyclegan) | [PyTorch Colab](https://colab.research.google.com/github/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/CycleGAN.ipynb)**
19
+
20
+ <img src="https://junyanz.github.io/CycleGAN/images/teaser_high_res.jpg" width="800"/>
21
+
22
+ **Pix2pix: [Proyecto](https://phillipi.github.io/pix2pix/) | [PDF](https://arxiv.org/pdf/1611.07004.pdf) | [Torch](https://github.com/phillipi/pix2pix) |
23
+ [Guia de Tensorflow Core](https://www.tensorflow.org/tutorials/generative/cyclegan) | [PyTorch Colab](https://colab.research.google.com/github/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/pix2pix.ipynb)**
24
+
25
+ <img src="https://phillipi.github.io/pix2pix/images/teaser_v3.png" width="800px"/>
26
+
27
+
28
+ **[EdgesCats Demo](https://affinelayer.com/pixsrv/) | [pix2pix-tensorflow](https://github.com/affinelayer/pix2pix-tensorflow) | por [Christopher Hesse](https://twitter.com/christophrhesse)**
29
+
30
+ <img src='https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/imgs/edges2cats.jpg' width="400px"/>
31
+
32
+ Si usa este código para su investigación, cite:
33
+
34
+ Unpaired Image-to-Image Translation usando Cycle-Consistent Adversarial Networks.<br>
35
+ [Jun-Yan Zhu](https://www.cs.cmu.edu/~junyanz/)\*, [Taesung Park](https://taesung.me/)\*, [Phillip Isola](https://people.eecs.berkeley.edu/~isola/), [Alexei A. Efros](https://people.eecs.berkeley.edu/~efros). In ICCV 2017. (* contribucion igualitaria) [[Bibtex]](https://junyanz.github.io/CycleGAN/CycleGAN.txt)
36
+
37
+
38
+ Image-to-Image Translation usando Conditional Adversarial Networks.<br>
39
+ [Phillip Isola](https://people.eecs.berkeley.edu/~isola), [Jun-Yan Zhu](https://www.cs.cmu.edu/~junyanz/), [Tinghui Zhou](https://people.eecs.berkeley.edu/~tinghuiz), [Alexei A. Efros](https://people.eecs.berkeley.edu/~efros). In CVPR 2017. [[Bibtex]](https://www.cs.cmu.edu/~junyanz/projects/pix2pix/pix2pix.bib)
40
+
41
+ ## Charlas y curso
42
+ Presentacion en PowerPoint de Pix2pix: [keynote](http://efrosgans.eecs.berkeley.edu/CVPR18_slides/pix2pix.key) | [pdf](http://efrosgans.eecs.berkeley.edu/CVPR18_slides/pix2pix.pdf),
43
+ Presentacion en PowerPoint de CycleGAN: [pptx](http://efrosgans.eecs.berkeley.edu/CVPR18_slides/CycleGAN.pptx) | [pdf](http://efrosgans.eecs.berkeley.edu/CVPR18_slides/CycleGAN.pdf)
44
+
45
+ Asignación del curso CycleGAN [codigo](http://www.cs.toronto.edu/~rgrosse/courses/csc321_2018/assignments/a4-code.zip) y [handout](http://www.cs.toronto.edu/~rgrosse/courses/csc321_2018/assignments/a4-handout.pdf) diseñado por el Prof. [Roger Grosse](http://www.cs.toronto.edu/~rgrosse/) for [CSC321](http://www.cs.toronto.edu/~rgrosse/courses/csc321_2018/) "Intro to Neural Networks and Machine Learning" en la universidad de Toronto. Póngase en contacto con el instructor si desea adoptarlo en su curso.
46
+
47
+ ## Colab Notebook
48
+ TensorFlow Core CycleGAN Tutorial: [Google Colab](https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/generative/cyclegan.ipynb) | [Codigo](https://github.com/tensorflow/docs/blob/master/site/en/tutorials/generative/cyclegan.ipynb)
49
+
50
+ Guia de TensorFlow Core pix2pix : [Google Colab](https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/generative/pix2pix.ipynb) | [Codigo](https://github.com/tensorflow/docs/blob/master/site/en/tutorials/generative/pix2pix.ipynb)
51
+
52
+ PyTorch Colab notebook: [CycleGAN](https://colab.research.google.com/github/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/CycleGAN.ipynb) y [pix2pix](https://colab.research.google.com/github/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/pix2pix.ipynb)
53
+
54
+ ## Otras implementaciones
55
+ ### CycleGAN
56
+ <p><a href="https://github.com/leehomyc/cyclegan-1"> [Tensorflow]</a> (por Harry Yang),
57
+ <a href="https://github.com/architrathore/CycleGAN/">[Tensorflow]</a> (por Archit Rathore),
58
+ <a href="https://github.com/vanhuyz/CycleGAN-TensorFlow">[Tensorflow]</a> (por Van Huy),
59
+ <a href="https://github.com/XHUJOY/CycleGAN-tensorflow">[Tensorflow]</a> (por Xiaowei Hu),
60
+ <a href="https://github.com/LynnHo/CycleGAN-Tensorflow-Simple"> [Tensorflow-simple]</a> (por Zhenliang He),
61
+ <a href="https://github.com/luoxier/CycleGAN_Tensorlayer"> [TensorLayer]</a> (por luoxier),
62
+ <a href="https://github.com/Aixile/chainer-cyclegan">[Chainer]</a> (por Yanghua Jin),
63
+ <a href="https://github.com/yunjey/mnist-svhn-transfer">[Minimal PyTorch]</a> (por yunjey),
64
+ <a href="https://github.com/Ldpe2G/DeepLearningForFun/tree/master/Mxnet-Scala/CycleGAN">[Mxnet]</a> (por Ldpe2G),
65
+ <a href="https://github.com/tjwei/GANotebooks">[lasagne/Keras]</a> (por tjwei),
66
+ <a href="https://github.com/simontomaskarlsson/CycleGAN-Keras">[Keras]</a> (por Simon Karlsson)
67
+ </p>
68
+ </ul>
69
+
70
+ ### pix2pix
71
+ <p><a href="https://github.com/affinelayer/pix2pix-tensorflow"> [Tensorflow]</a> (por Christopher Hesse),
72
+ <a href="https://github.com/Eyyub/tensorflow-pix2pix">[Tensorflow]</a> (por Eyyüb Sariu),
73
+ <a href="https://github.com/datitran/face2face-demo"> [Tensorflow (face2face)]</a> (por Dat Tran),
74
+ <a href="https://github.com/awjuliani/Pix2Pix-Film"> [Tensorflow (film)]</a> (por Arthur Juliani),
75
+ <a href="https://github.com/kaonashi-tyc/zi2zi">[Tensorflow (zi2zi)]</a> (por Yuchen Tian),
76
+ <a href="https://github.com/pfnet-research/chainer-pix2pix">[Chainer]</a> (por mattya),
77
+ <a href="https://github.com/tjwei/GANotebooks">[tf/torch/keras/lasagne]</a> (por tjwei),
78
+ <a href="https://github.com/taey16/pix2pixBEGAN.pytorch">[Pytorch]</a> (por taey16)
79
+ </p>
80
+ </ul>
81
+
82
+ ## Requerimientos
83
+ - Linux o macOS
84
+ - Python 3
85
+ - CPU o NVIDIA GPU usando CUDA CuDNN
86
+
87
+ ## Inicio
88
+ ### Instalación
89
+
90
+ - Clone este repositorio:
91
+ ```bash
92
+ git clone https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix
93
+ cd pytorch-CycleGAN-and-pix2pix
94
+ ```
95
+
96
+ - Instale [PyTorch](http://pytorch.org) 0.4+ y sus otras dependencias (e.g., torchvision, [visdom](https://github.com/facebookresearch/visdom) y [dominate](https://github.com/Knio/dominate)).
97
+ - Para uso de pip, por favor escriba el comando `pip install -r requirements.txt`.
98
+ - Para uso de Conda, proporcionamos un script de instalación `./scripts/conda_deps.sh`. De forma alterna, puede crear un nuevo entorno Conda usando `conda env create -f environment.yml`.
99
+ - Para uso de Docker, Proporcionamos la imagen Docker y el archivo Docker preconstruidos. Por favor, consulte nuestra página
100
+ [Docker](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/docker.md).
101
+
102
+ ### CycleGAN entrenamiento/test
103
+ - Descargar el dataset de CycleGAN (e.g. maps):
104
+ ```bash
105
+ bash ./datasets/download_cyclegan_dataset.sh maps
106
+ ```
107
+ - Para ver los resultados del entrenamiento y las gráficas de pérdidas, ejecute `python -m visdom.server` y haga clic en la URL
108
+ http://localhost:8097.
109
+ - Entrenar el modelo:
110
+ ```bash
111
+ #!./scripts/train_cyclegan.sh
112
+ python train.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan
113
+ ```
114
+ Para ver más resultados intermedios, consulte `./checkpoints/maps_cyclegan/web/index.html`.
115
+ - Pruebe el modelo:
116
+ ```bash
117
+ #!./scripts/test_cyclegan.sh
118
+ python test.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan
119
+ ```
120
+ - Los resultados de la prueba se guardarán en un archivo html aquí: `./results/maps_cyclegan/latest_test/index.html`.
121
+
122
+ ### pix2pix entrenamiento/test
123
+ - Descargue el dataset de pix2pix (e.g.[facades](http://cmp.felk.cvut.cz/~tylecr1/facade/)):
124
+ ```bash
125
+ bash ./datasets/download_pix2pix_dataset.sh facades
126
+ ```
127
+ - Para ver los resultados del entrenamiento y las gráficas de pérdidas, ejecute `python -m visdom.server` y haga clic en la URL http://localhost:8097.
128
+ - Para entrenar el modelo:
129
+ ```bash
130
+ #!./scripts/train_pix2pix.sh
131
+ python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA
132
+ ```
133
+ Para ver más resultados intermedios, consulte `./checkpoints/facades_pix2pix/web/index.html`.
134
+
135
+ - Pruebe el modelo (`bash ./scripts/test_pix2pix.sh`):
136
+ ```bash
137
+ #!./scripts/test_pix2pix.sh
138
+ python test.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA
139
+ ```
140
+ - Los resultados de la prueba se guardarán en un archivo html aquí: `./results/facades_pix2pix/test_latest/index.html`. Puede encontrar más scripts en `scripts` directory.
141
+ - Para entrenar y probar modelos de colorización basados en pix2pix, agregue la linea `--model colorization` y `--dataset_mode colorization`. Para más detalles de nuestro entrenamiento [tips](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md#notes-on-colorization).
142
+
143
+ ### Aplicar un modelo pre-entrenado (CycleGAN)
144
+ - Puedes descargar un modelo previamente entrenado (e.g. horse2zebra) con el siguiente script:
145
+ ```bash
146
+ bash ./scripts/download_cyclegan_model.sh horse2zebra
147
+ ```
148
+ - El modelo pre-entrenado se guarda en `./checkpoints/{name}_pretrained/latest_net_G.pth`. Revise [aqui](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/scripts/download_cyclegan_model.sh#L3) para todos los modelos CycleGAN disponibles.
149
+
150
+ - Para probar el modelo, también debe descargar el dataset horse2zebra:
151
+ ```bash
152
+ bash ./datasets/download_cyclegan_dataset.sh horse2zebra
153
+ ```
154
+
155
+ - Luego genere los resultados usando:
156
+ ```bash
157
+ python test.py --dataroot datasets/horse2zebra/testA --name horse2zebra_pretrained --model test --no_dropout
158
+ ```
159
+ - La opción `--model test` se usa para generar resultados de CycleGAN de un solo lado. Esta opción configurará automáticamente
160
+ `--dataset_mode single`, carga solo las imágenes de un conjunto. Por el contrario, el uso de `--model cycle_gan` requiere cargar y generar resultados en ambas direcciones, lo que a veces es innecesario. Los resultados se guardarán en `./results/`. Use `--results_dir {directory_path_to_save_result}` para especificar el directorio de resultados.
161
+
162
+ - Para sus propios experimentos, es posible que desee especificar `--netG`, `--norm`, `--no_dropout` para que coincida con la arquitectura del generador del modelo entrenado.
163
+
164
+ ### Aplicar un modelo pre-entrenado (pix2pix)
165
+ Descargue un modelo pre-entrenado con `./scripts/download_pix2pix_model.sh`.
166
+
167
+ - Revise [aqui](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/scripts/download_pix2pix_model.sh#L3) para todos los modelos pix2pix disponibles. Por ejemplo, si desea descargar el modelo label2photo en el dataset:
168
+ ```bash
169
+ bash ./scripts/download_pix2pix_model.sh facades_label2photo
170
+ ```
171
+ - Descarga el dataset facades de pix2pix:
172
+ ```bash
173
+ bash ./datasets/download_pix2pix_dataset.sh facades
174
+ ```
175
+ - Luego genere los resultados usando:
176
+ ```bash
177
+ python test.py --dataroot ./datasets/facades/ --direction BtoA --model pix2pix --name facades_label2photo_pretrained
178
+ ```
179
+ - Tenga en cuenta que se especifica `--direction BtoA` porque, en el dataset de Facades, la dirección de A a B va de fotos a etiquetas.
180
+
181
+ - Si desea aplicar un modelo previamente entrenado a una colección de imágenes de entrada (en lugar de pares de imágenes), use la opción `--model test`. Vea `./scripts/test_single.sh` sobre cómo aplicar un modelo a los Facade label maps (almacenados en el directorio `facades/testB`).
182
+
183
+ - Vea una lista de los modelos disponibles actualmente en `./scripts/download_pix2pix_model.sh`
184
+
185
+ ## [Docker](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/docker.md)
186
+ Proporcionamos la imagen Docker y el archivo Docker preconstruidos que pueden ejecutar este repositorio de código. Ver [docker](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/docker.md).
187
+
188
+ ## [Datasets](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/datasets.md)
189
+ Descargue los conjuntos de datos pix2pix / CycleGAN y cree sus propios conjuntos de datos.
190
+
191
+ ## [Entrenamiento/Test Tips](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md)
192
+ Las mejores prácticas para entrenar y probar sus modelos.
193
+
194
+ ## [Preguntas frecuentes](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md)
195
+ Antes de publicar una nueva pregunta, primero mire las preguntas y respuestas anteriores y los problemas existentes de GitHub.
196
+
197
+ ## Modelo y Dataset personalizado
198
+ Si planea implementar modelos y conjuntos de datos personalizados para sus nuevas aplicaciones, proporcionamos un conjunto de datos [template](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/data/template_dataset.py) y un modelo [template](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/template_model.py) como punto de partida.
199
+
200
+
201
+ ## [Estructura de codigo](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/overview.md)
202
+ Para ayudar a los usuarios a comprender mejor y usar nuestro código, presentamos brevemente la funcionalidad e implementación de cada paquete y cada módulo.
203
+
204
+ ## Solicitud de Pull
205
+ Siempre puede contribuir a este repositorio enviando un [pull request](https://help.github.com/articles/about-pull-requests/).
206
+ Por favor ejecute `flake8 --ignore E501 .` y `python ./scripts/test_before_push.py` antes de realizar un Pull en el código, asegure de también actualizar la estructura del código [overview](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/overview.md) en consecuencia si agrega o elimina archivos.
207
+
208
+
209
+ ## Citación
210
+ Si utiliza este código para su investigación, cite nuestros documentos.
211
+ ```
212
+ @inproceedings{CycleGAN2017,
213
+ title={Unpaired Image-to-Image Translation using Cycle-Consistent Adversarial Networks},
214
+ author={Zhu, Jun-Yan and Park, Taesung and Isola, Phillip and Efros, Alexei A},
215
+ booktitle={Computer Vision (ICCV), 2017 IEEE International Conference on},
216
+ year={2017}
217
+ }
218
+
219
+
220
+ @inproceedings{isola2017image,
221
+ title={Image-to-Image Translation with Conditional Adversarial Networks},
222
+ author={Isola, Phillip and Zhu, Jun-Yan and Zhou, Tinghui and Efros, Alexei A},
223
+ booktitle={Computer Vision and Pattern Recognition (CVPR), 2017 IEEE Conference on},
224
+ year={2017}
225
+ }
226
+ ```
227
+
228
+ ## Proyectos relacionados
229
+ **[CycleGAN-Torch](https://github.com/junyanz/CycleGAN) |
230
+ [pix2pix-Torch](https://github.com/phillipi/pix2pix) | [pix2pixHD](https://github.com/NVIDIA/pix2pixHD)|
231
+ [BicycleGAN](https://github.com/junyanz/BicycleGAN) | [vid2vid](https://tcwang0509.github.io/vid2vid/) | [SPADE/GauGAN](https://github.com/NVlabs/SPADE)**<br>
232
+ **[iGAN](https://github.com/junyanz/iGAN) | [GAN Dissection](https://github.com/CSAILVision/GANDissect) | [GAN Paint](http://ganpaint.io/)**
233
+
234
+ ## Cat Paper Collection
235
+ Si amas a los gatos y te encanta leer gráficos geniales, computer vision y documentos de aprendizaje, echa un vistazo a Cat Paper [Collection](https://github.com/junyanz/CatPapers).
236
+
237
+ ## Agradecimientos
238
+ Nuestro código fue inspirado en [pytorch-DCGAN](https://github.com/pytorch/examples/tree/master/dcgan).
docs/datasets.md ADDED
@@ -0,0 +1,44 @@
1
+
2
+
3
+ ### CycleGAN Datasets
4
+ Download the CycleGAN datasets using the following script. Some of the datasets are collected by other researchers. Please cite their papers if you use the data.
5
+ ```bash
6
+ bash ./datasets/download_cyclegan_dataset.sh dataset_name
7
+ ```
8
+ - `facades`: 400 images from the [CMP Facades dataset](http://cmp.felk.cvut.cz/~tylecr1/facade). [[Citation](../datasets/bibtex/facades.tex)]
9
+ - `cityscapes`: 2975 images from the [Cityscapes training set](https://www.cityscapes-dataset.com). [[Citation](../datasets/bibtex/cityscapes.tex)]. Note: Due to license issue, we cannot directly provide the Cityscapes dataset. Please download the Cityscapes dataset from [https://cityscapes-dataset.com](https://cityscapes-dataset.com) and use the script `./datasets/prepare_cityscapes_dataset.py`.
10
+ - `maps`: 1096 training images scraped from Google Maps.
11
+ - `horse2zebra`: 939 horse images and 1177 zebra images downloaded from [ImageNet](http://www.image-net.org) using keywords `wild horse` and `zebra`
12
+ - `apple2orange`: 996 apple images and 1020 orange images downloaded from [ImageNet](http://www.image-net.org) using keywords `apple` and `navel orange`.
13
+ - `summer2winter_yosemite`: 1273 summer Yosemite images and 854 winter Yosemite images were downloaded using Flickr API. See more details in our paper.
14
+ - `monet2photo`, `vangogh2photo`, `ukiyoe2photo`, `cezanne2photo`: The art images were downloaded from [Wikiart](https://www.wikiart.org/). The real photos are downloaded from Flickr using the combination of the tags *landscape* and *landscapephotography*. The training set size of each class is Monet:1074, Cezanne:584, Van Gogh:401, Ukiyo-e:1433, Photographs:6853.
15
+ - `iphone2dslr_flower`: both classes of images were downloaded from Flickr. The training set size of each class is iPhone:1813, DSLR:3316. See more details in our paper.
16
+
17
+ To train a model on your own datasets, you need to create a data folder with two subdirectories `trainA` and `trainB` that contain images from domain A and B. You can test your model on your training set by setting `--phase train` in `test.py`. You can also create subdirectories `testA` and `testB` if you have test data.
18
+
19
+ You should **not** expect our method to work on just any random combination of input and output datasets (e.g. `cats<->keyboards`). From our experiments, we find it works better if two datasets share similar visual content. For example, `landscape painting<->landscape photographs` works much better than `portrait painting <-> landscape photographs`. `zebras<->horses` achieves compelling results while `cats<->dogs` completely fails.
20
+
21
+ ### pix2pix datasets
22
+ Download the pix2pix datasets using the following script. Some of the datasets are collected by other researchers. Please cite their papers if you use the data.
23
+ ```bash
24
+ bash ./datasets/download_pix2pix_dataset.sh dataset_name
25
+ ```
26
+ - `facades`: 400 images from [CMP Facades dataset](http://cmp.felk.cvut.cz/~tylecr1/facade). [[Citation](../datasets/bibtex/facades.tex)]
27
+ - `cityscapes`: 2975 images from the [Cityscapes training set](https://www.cityscapes-dataset.com). [[Citation](../datasets/bibtex/cityscapes.tex)]
28
+ - `maps`: 1096 training images scraped from Google Maps
29
+ - `edges2shoes`: 50k training images from [UT Zappos50K dataset](http://vision.cs.utexas.edu/projects/finegrained/utzap50k). Edges are computed by [HED](https://github.com/s9xie/hed) edge detector + post-processing. [[Citation](datasets/bibtex/shoes.tex)]
30
+ - `edges2handbags`: 137K Amazon Handbag images from [iGAN project](https://github.com/junyanz/iGAN). Edges are computed by [HED](https://github.com/s9xie/hed) edge detector + post-processing. [[Citation](datasets/bibtex/handbags.tex)]
31
+ - `night2day`: around 20K natural scene images from [Transient Attributes dataset](http://transattr.cs.brown.edu/) [[Citation](datasets/bibtex/transattr.tex)]. To train a `day2night` pix2pix model, you need to add `--direction BtoA`.
32
+
33
+ We provide a python script to generate pix2pix training data in the form of pairs of images {A,B}, where A and B are two different depictions of the same underlying scene. For example, these might be pairs {label map, photo} or {bw image, color image}. Then we can learn to translate A to B or B to A:
34
+
35
+ Create folder `/path/to/data` with subfolders `A` and `B`. `A` and `B` should each have their own subfolders `train`, `val`, `test`, etc. In `/path/to/data/A/train`, put training images in style A. In `/path/to/data/B/train`, put the corresponding images in style B. Repeat same for other data splits (`val`, `test`, etc).
36
+
37
+ Corresponding images in a pair {A,B} must be the same size and have the same filename, e.g., `/path/to/data/A/train/1.jpg` is considered to correspond to `/path/to/data/B/train/1.jpg`.
38
+
39
+ Once the data is formatted this way, call:
40
+ ```bash
41
+ python datasets/combine_A_and_B.py --fold_A /path/to/data/A --fold_B /path/to/data/B --fold_AB /path/to/data
42
+ ```
43
+
44
+ This will combine each pair of images (A,B) into a single image file, ready for training.
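
Before running `combine_A_and_B.py`, it can help to sanity-check that the filenames in the two folders really correspond; a minimal sketch, reusing the placeholder paths from the example above:

```python
import os

fold_A = "/path/to/data/A/train"   # placeholder paths from the example above
fold_B = "/path/to/data/B/train"

names_A = set(os.listdir(fold_A))
names_B = set(os.listdir(fold_B))

only_A = sorted(names_A - names_B)
only_B = sorted(names_B - names_A)
print("in A but not in B:", only_A[:10])
print("in B but not in A:", only_B[:10])
assert not only_A and not only_B, "A/B filenames do not match"
```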
docs/docker.md ADDED
@@ -0,0 +1,38 @@
1
+ # Docker image with pytorch-CycleGAN-and-pix2pix
2
+
3
+ We provide both Dockerfile and pre-built Docker container that can run this code repo.
4
+
5
+ ## Prerequisite
6
+
7
+ - Install [docker-ce](https://docs.docker.com/install/linux/docker-ce/ubuntu/)
8
+ - Install [nvidia-docker](https://github.com/NVIDIA/nvidia-docker#quickstart)
9
+
10
+ ## Running pre-built Dockerfile
11
+
12
+ - Pull the pre-built docker file
13
+
14
+ ```bash
15
+ docker pull taesungp/pytorch-cyclegan-and-pix2pix
16
+ ```
17
+
18
+ - Start an interactive docker session. `-p 8097:8097` option is needed if you want to run `visdom` server on the Docker container.
19
+
20
+ ```bash
21
+ nvidia-docker run -it -p 8097:8097 taesungp/pytorch-cyclegan-and-pix2pix
22
+ ```
23
+
24
+ - Now you are in the Docker environment. Go to our code repo and start running things.
25
+ ```bash
26
+ cd /workspace/pytorch-CycleGAN-and-pix2pix
27
+ bash datasets/download_pix2pix_dataset.sh facades
28
+ python -m visdom.server &
29
+ bash scripts/train_pix2pix.sh
30
+ ```
31
+
32
+ ## Running with Dockerfile
33
+
34
+ We also posted the [Dockerfile](Dockerfile). To build the Docker image yourself, download the Dockerfile in this directory and run
35
+ ```bash
36
+ docker build -t [target_tag] .
37
+ ```
38
+ in the directory that contains the Dockerfile.
docs/overview.md ADDED
@@ -0,0 +1,45 @@
1
+ ## Overview of Code Structure
2
+ To help users better understand and use our codebase, we briefly overview the functionality and implementation of each package and each module. Please see the documentation in each file for more details. If you have questions, you may find useful information in [training/test tips](tips.md) and [frequently asked questions](qa.md).
3
+
4
+ [train.py](../train.py) is a general-purpose training script. It works for various models (with option `--model`: e.g., `pix2pix`, `cyclegan`, `colorization`) and different datasets (with option `--dataset_mode`: e.g., `aligned`, `unaligned`, `single`, `colorization`). See the main [README](../README.md) and [training/test tips](tips.md) for more details.
5
+
6
+ [test.py](../test.py) is a general-purpose test script. Once you have trained your model with `train.py`, you can use this script to test the model. It will load a saved model from `--checkpoints_dir` and save the results to `--results_dir`. See the main [README](../README.md) and [training/test tips](tips.md) for more details.
7
+
8
+
9
+ The [data](../data) directory contains all the modules related to data loading and preprocessing. To add a custom dataset class called `dummy`, you need to add a file called `dummy_dataset.py` and define a subclass `DummyDataset` inherited from `BaseDataset`. You need to implement four functions: `__init__` (initialize the class; you need to first call `BaseDataset.__init__(self, opt)`), `__len__` (return the size of the dataset), `__getitem__` (get a data point), and optionally `modify_commandline_options` (add dataset-specific options and set default options). Now you can use the dataset class by specifying the flag `--dataset_mode dummy`. See our template dataset [class](../data/template_dataset.py) for an example. Below we explain each file in detail.
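
As a hedged illustration of the four functions listed above, a minimal `dummy` dataset might look roughly like this (it follows the pattern of the existing single-image dataset; treat it as a sketch rather than a drop-in file):

```python
# data/dummy_dataset.py -- hypothetical file name used in the paragraph above
from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset
from PIL import Image


class DummyDataset(BaseDataset):
    """A toy dataset that returns single RGB images found under --dataroot."""

    def __init__(self, opt):
        BaseDataset.__init__(self, opt)                  # required first call
        self.paths = sorted(make_dataset(opt.dataroot))  # collect image paths
        self.transform = get_transform(opt)              # reuse the common transforms

    def __getitem__(self, index):
        path = self.paths[index]
        image = Image.open(path).convert('RGB')
        return {'A': self.transform(image), 'A_paths': path}

    def __len__(self):
        return len(self.paths)
```

It would then be selected with `--dataset_mode dummy`, as described above.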
10
+
11
+ * [\_\_init\_\_.py](../data/__init__.py) implements the interface between this package and training and test scripts. `train.py` and `test.py` call `from data import create_dataset` and `dataset = create_dataset(opt)` to create a dataset given the option `opt`.
12
+ * [base_dataset.py](../data/base_dataset.py) implements an abstract base class ([ABC](https://docs.python.org/3/library/abc.html)) for datasets. It also includes common transformation functions (e.g., `get_transform`, `__scale_width`), which can be later used in subclasses.
13
+ * [image_folder.py](../data/image_folder.py) implements an image folder class. We modify the official PyTorch image folder [code](https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py) so that this class can load images from both the current directory and its subdirectories.
14
+ * [template_dataset.py](../data/template_dataset.py) provides a dataset template with detailed documentation. Check out this file if you plan to implement your own dataset.
15
+ * [aligned_dataset.py](../data/aligned_dataset.py) includes a dataset class that can load image pairs. It assumes a single image directory `/path/to/data/train`, which contains image pairs in the form of {A,B}. See [here](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md#prepare-your-own-datasets-for-pix2pix) on how to prepare aligned datasets. During test time, you need to prepare a directory `/path/to/data/test` as test data.
16
+ * [unaligned_dataset.py](../data/unaligned_dataset.py) includes a dataset class that can load unaligned/unpaired datasets. It assumes two directories that host training images from domain A `/path/to/data/trainA` and from domain B `/path/to/data/trainB` respectively. Then you can train the model with the dataset flag `--dataroot /path/to/data`. Similarly, you need to prepare two directories `/path/to/data/testA` and `/path/to/data/testB` during test time.
17
+ * [single_dataset.py](../data/single_dataset.py) includes a dataset class that can load a set of single images specified by the path `--dataroot /path/to/data`. It can be used for generating CycleGAN results only for one side with the model option `--model test`.
18
+ * [colorization_dataset.py](../data/colorization_dataset.py) implements a dataset class that can load a set of nature images in RGB, and convert RGB format into (L, ab) pairs in [Lab](https://en.wikipedia.org/wiki/CIELAB_color_space) color space. It is required by pix2pix-based colorization model (`--model colorization`).
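
Because the RGB-to-Lab split above is the core of the colorization data pipeline, here is a hedged sketch of that conversion using `scikit-image` (the normalization constants are illustrative; `colorization_dataset.py` defines the exact scaling used in training):

```python
import numpy as np
from PIL import Image
from skimage import color

rgb = np.array(Image.open('example.jpg').convert('RGB'))  # hypothetical input photo
lab = color.rgb2lab(rgb).astype(np.float32)

L  = lab[:, :, :1]   # lightness channel, roughly in [0, 100]
ab = lab[:, :, 1:]   # color channels, roughly in [-110, 110]

# Scale to approximately [-1, 1] before feeding the network (illustrative normalization).
L_in  = L / 50.0 - 1.0
ab_in = ab / 110.0
print(L_in.shape, ab_in.shape)   # (H, W, 1) and (H, W, 2)
```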
19
+
20
+
21
+ The [models](../models) directory contains modules related to objective functions, optimizations, and network architectures. To add a custom model class called `dummy`, you need to add a file called `dummy_model.py` and define a subclass `DummyModel` inherited from `BaseModel`. You need to implement four functions: `__init__` (initialize the class; you need to first call `BaseModel.__init__(self, opt)`), `set_input` (unpack data from the dataset and apply preprocessing), `forward` (generate intermediate results), `optimize_parameters` (calculate loss, gradients, and update network weights), and optionally `modify_commandline_options` (add model-specific options and set default options). Now you can use the model class by specifying the flag `--model dummy`. See our template model [class](../models/template_model.py) for an example. Below we explain each file in detail.
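
Similarly, a hedged sketch of a minimal `dummy` model implementing the functions listed above (a plain L1 regression between paired images, not one of the repository's models):

```python
# models/dummy_model.py -- hypothetical file name used in the paragraph above
import torch
from models.base_model import BaseModel
from models import networks


class DummyModel(BaseModel):
    """A toy model that trains a generator with an L1 loss on aligned pairs."""

    def __init__(self, opt):
        BaseModel.__init__(self, opt)                    # required first call
        self.loss_names = ['G_L1']
        self.model_names = ['G']
        self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf,
                                      opt.netG, gpu_ids=self.gpu_ids)
        if self.isTrain:
            self.criterionL1 = torch.nn.L1Loss()
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr)
            self.optimizers = [self.optimizer_G]

    def set_input(self, input):
        self.real_A = input['A'].to(self.device)         # unpack and move to device
        self.real_B = input['B'].to(self.device)

    def forward(self):
        self.fake_B = self.netG(self.real_A)             # generate intermediate results

    def optimize_parameters(self):
        self.forward()
        self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B)
        self.optimizer_G.zero_grad()
        self.loss_G_L1.backward()
        self.optimizer_G.step()
```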
22
+
23
+ * [\_\_init\_\_.py](../models/__init__.py) implements the interface between this package and training and test scripts. `train.py` and `test.py` call `from models import create_model` and `model = create_model(opt)` to create a model given the option `opt`. You also need to call `model.setup(opt)` to properly initialize the model.
24
+ * [base_model.py](../models/base_model.py) implements an abstract base class ([ABC](https://docs.python.org/3/library/abc.html)) for models. It also includes commonly used helper functions (e.g., `setup`, `test`, `update_learning_rate`, `save_networks`, `load_networks`), which can be later used in subclasses.
25
+ * [template_model.py](../models/template_model.py) provides a model template with detailed documentation. Check out this file if you plan to implement your own model.
26
+ * [pix2pix_model.py](../models/pix2pix_model.py) implements the pix2pix [model](https://phillipi.github.io/pix2pix/), for learning a mapping from input images to output images given paired data. The model training requires `--dataset_mode aligned` dataset. By default, it uses a `--netG unet256` [U-Net](https://arxiv.org/pdf/1505.04597.pdf) generator, a `--netD basic` discriminator (PatchGAN), and a `--gan_mode vanilla` GAN loss (standard cross-entropy objective).
27
+ * [colorization_model.py](../models/colorization_model.py) implements a subclass of `Pix2PixModel` for image colorization (black & white image to colorful image). The model training requires the `--dataset_mode colorization` dataset. It trains a pix2pix model, mapping from the L channel to the ab channels in [Lab](https://en.wikipedia.org/wiki/CIELAB_color_space) color space. By default, the `colorization` dataset will automatically set `--input_nc 1` and `--output_nc 2`.
28
+ * [cycle_gan_model.py](../models/cycle_gan_model.py) implements the CycleGAN [model](https://junyanz.github.io/CycleGAN/), for learning image-to-image translation without paired data. The model training requires `--dataset_mode unaligned` dataset. By default, it uses a `--netG resnet_9blocks` ResNet generator, a `--netD basic` discriminator (PatchGAN introduced by pix2pix), and a least-square GANs [objective](https://arxiv.org/abs/1611.04076) (`--gan_mode lsgan`).
29
+ * [networks.py](../models/networks.py) module implements network architectures (both generators and discriminators), as well as normalization layers, initialization methods, optimization scheduler (i.e., learning rate policy), and GAN objective function (`vanilla`, `lsgan`, `wgangp`).
30
+ * [test_model.py](../models/test_model.py) implements a model that can be used to generate CycleGAN results for only one direction. This model will automatically set `--dataset_mode single`, which only loads the images from one set. See the test [instruction](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix#apply-a-pre-trained-model-cyclegan) for more details.
31
+
32
+ [options](../options) directory includes our option modules: training options, test options, and basic options (used in both training and test). `TrainOptions` and `TestOptions` are both subclasses of `BaseOptions`. They will reuse the options defined in `BaseOptions`.
33
+ * [\_\_init\_\_.py](../options/__init__.py) is required to make Python treat the directory `options` as a package.
34
+ * [base_options.py](../options/base_options.py) includes options that are used in both training and test. It also implements a few helper functions such as parsing, printing, and saving the options. It also gathers additional options defined in `modify_commandline_options` functions in both dataset class and model class.
35
+ * [train_options.py](../options/train_options.py) includes options that are only used during training time.
36
+ * [test_options.py](../options/test_options.py) includes options that are only used during test time.
37
+
38
+
39
+ [util](../util) directory includes a miscellaneous collection of useful helper functions.
40
+ * [\_\_init\_\_.py](../util/__init__.py) is required to make Python treat the directory `util` as a package.
41
+ * [get_data.py](../util/get_data.py) provides a Python script for downloading CycleGAN and pix2pix datasets. Alternatively, You can also use bash scripts such as [download_pix2pix_model.sh](../scripts/download_pix2pix_model.sh) and [download_cyclegan_model.sh](../scripts/download_cyclegan_model.sh).
42
+ * [html.py](../util/html.py) implements a module that saves images into a single HTML file. It consists of functions such as `add_header` (add a text header to the HTML file), `add_images` (add a row of images to the HTML file), `save` (save the HTML to the disk). It is based on Python library `dominate`, a Python library for creating and manipulating HTML documents using a DOM API.
43
+ * [image_pool.py](../util/image_pool.py) implements an image buffer that stores previously generated images. This buffer enables us to update discriminators using a history of generated images rather than the ones produced by the latest generators. The original idea was discussed in this [paper](http://openaccess.thecvf.com/content_cvpr_2017/papers/Shrivastava_Learning_From_Simulated_CVPR_2017_paper.pdf). The size of the buffer is controlled by the flag `--pool_size`.
44
+ * [visualizer.py](../util/visualizer.py) includes several functions that can display/save images and print/save logging information. It uses a Python library `visdom` for display and a Python library `dominate` (wrapped in `HTML`) for creating HTML files with images.
45
+ * [util.py](../util/util.py) consists of simple helper functions such as `tensor2im` (convert a tensor array to a numpy image array), `diagnose_network` (calculate and print the mean of average absolute value of gradients), and `mkdirs` (create multiple directories).
docs/qa.md ADDED
@@ -0,0 +1,148 @@
1
+ ## Frequently Asked Questions
2
+ Before you post a new question, please first look at the following Q & A and existing GitHub issues. You may also want to read [Training/Test tips](tips.md) for more suggestions.
3
+
4
+ #### Connection Error:HTTPConnectionPool ([#230](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/230), [#24](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/24), [#38](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/38))
5
+ Similar error messages include “Failed to establish a new connection/Connection refused”.
6
+
7
+ Please start the visdom server before starting the training:
8
+ ```bash
9
+ python -m visdom.server
10
+ ```
11
+ To install the visdom, you can use the following command:
12
+ ```bash
13
+ pip install visdom
14
+ ```
15
+ You can also disable the visdom by setting `--display_id 0`.
16
+
17
+ #### My PyTorch errors on CUDA related code.
18
+ Try to run the following code snippet to make sure that CUDA is working (assuming using PyTorch >= 0.4):
19
+ ```python
20
+ import torch
21
+ torch.cuda.init()
22
+ print(torch.randn(1, device='cuda'))
23
+ ```
24
+
25
+ If you encounter an error, it is likely that your PyTorch build does not work with CUDA, e.g., it is installed from the official macOS binary, or you have a GPU that is too old and no longer supported. You may run the code on the CPU using `--gpu_ids -1`.
26
+
27
+ #### TypeError: Object of type 'Tensor' is not JSON serializable ([#258](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/258))
28
+ Similar errors: AttributeError: module 'torch' has no attribute 'device' ([#314](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/314))
29
+
30
+ The current code only works with PyTorch 0.4+. An earlier PyTorch version can often cause the above errors.
31
+
32
+ #### ValueError: empty range for randrange() ([#390](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/390), [#376](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/376), [#194](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/194))
33
+ Similar error messages include "ConnectionRefusedError: [Errno 111] Connection refused"
34
+
35
+ It is related to the data augmentation step. It often happens when you use `--preprocess crop`. The program will crop random `crop_size x crop_size` patches out of the input training images. But if some of your image sizes (e.g., `256x384`) are smaller than the `crop_size` (e.g., 512), you will get this error. A simple fix is to use other data augmentation methods such as `resize_and_crop` or `scale_width_and_crop`. Our program will automatically resize the images according to `load_size` before applying the `crop_size x crop_size` cropping. Make sure that `load_size >= crop_size`.
36
+
37
+
38
+ #### Can I continue/resume my training? ([#350](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/350), [#275](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/275), [#234](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/234), [#87](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/87))
39
+ You can use the option `--continue_train`. Also set `--epoch_count` to specify a different starting epoch count. See more discussion in [training/test tips](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md#trainingtest-tips).
40
+
41
+ #### Why does my training loss not converge? ([#335](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/335), [#164](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/164), [#30](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/30))
42
+ Many GAN losses do not converge (exceptions: WGAN, WGAN-GP, etc.) due to the nature of minimax optimization. For the DCGAN and LSGAN objectives, it is quite normal for the G and D losses to go up and down. It should be fine as long as they do not blow up.
43
+
44
+ #### How can I make it work for my own data (e.g., 16-bit png, tiff, hyperspectral images)? ([#309](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/309), [#320](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/), [#202](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/202))
45
+ The current code only supports RGB and grayscale images. If you would like to train the model on other data types, please follow the following steps:
46
+
47
+ - change the parameters `--input_nc` and `--output_nc` to the number of channels in your input/output images.
48
+ - Write your own custom data loader (it is easy as long as you know how to load your data with Python; see the sketch after this list). If you write a new data loader class, you need to change the flag `--dataset_mode` accordingly. Alternatively, you can modify the existing data loader. For aligned datasets, change this [line](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/data/aligned_dataset.py#L41); for unaligned datasets, change these two [lines](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/data/unaligned_dataset.py#L57).
49
+
50
+ - If you use visdom and HTML to visualize the results, you may also need to change the visualization code.
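
As a hedged sketch of the custom-loader route mentioned above, a 16-bit single-channel image could be read and scaled to roughly [-1, 1] like this (the file name and scaling factor are illustrative, not part of the repository):

```python
import numpy as np
import torch
from PIL import Image

path = 'sample_16bit.png'                              # hypothetical 16-bit input image
arr = np.array(Image.open(path)).astype(np.float32)    # uint16 values in [0, 65535]

tensor = torch.from_numpy(arr / 32767.5 - 1.0)         # rescale to [-1, 1]
if tensor.dim() == 2:                                  # add a channel axis for single-channel data
    tensor = tensor.unsqueeze(0)
print(tensor.shape, tensor.min().item(), tensor.max().item())
```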
51
+
52
+ #### Multi-GPU Training ([#327](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/327), [#292](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/292), [#137](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/137), [#35](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/35))
53
+ You can use Multi-GPU training by setting `--gpu_ids` (e.g., `--gpu_ids 0,1,2,3` for the first four GPUs on your machine.) To fully utilize all the GPUs, you need to increase your batch size. Try `--batch_size 4`, `--batch_size 16`, or even a larger batch_size. Each GPU will process batch_size/#GPUs images. The optimal batch size depends on the number of GPUs you have, GPU memory per GPU, and the resolution of your training images.
54
+
55
+ We also recommend that you use the instance normalization for multi-GPU training by setting `--norm instance`. The current batch normalization might not work for multi-GPUs as the batchnorm parameters are not shared across different GPUs. Advanced users can try [synchronized batchnorm](https://github.com/vacancy/Synchronized-BatchNorm-PyTorch).
56
+
57
+
58
+ #### Can I run the model on CPU? ([#310](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/310))
59
+ Yes, you can set `--gpu_ids -1`. See [training/test tips](tips.md) for more details.
60
+
61
+
62
+ #### Are pre-trained models available? ([#10](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/10))
63
+ Yes, you can download pretrained models with the bash script `./scripts/download_cyclegan_model.sh`. See [here](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix#apply-a-pre-trained-model-cyclegan) for more details. We are slowly adding more models to the repo.
64
+
65
+ #### Out of memory ([#174](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/174))
66
+ CycleGAN is more memory-intensive than pix2pix as it requires two generators and two discriminators. If you would like to produce high-resolution images, you can do the following.
67
+
68
+ - During training, train CycleGAN on cropped images of the training set. Please be careful not to change the aspect ratio or the scale of the original image, as this can lead to the training/test gap. You can usually do this by using `--preprocess crop` option, or `--preprocess scale_width_and_crop`.
69
+
70
+ - Then at test time, you can load only one generator to produce the results in a single direction. This greatly saves GPU memory as you are not loading the discriminators and the other generator in the opposite direction. You can probably take the whole image as input. You can do this using `--model test --dataroot [path to the directory that contains your test images (e.g., ./datasets/horse2zebra/trainA)] --model_suffix _A --preprocess none`. You can use either `--preprocess none` or `--preprocess scale_width --crop_size [your_desired_image_width]`. Please see the [model_suffix](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/test_model.py#L16) and [preprocess](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/data/base_dataset.py#L24) for more details.
71
+
72
+ #### RuntimeError: Error(s) in loading state_dict ([#812](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/812), [#671](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/671),[#461](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/461), [#296](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/296))
73
+ If you get the above errors when loading the generator at test time, you probably used different network configurations for training and test. There are a few things to check: (1) the network architecture `--netG`: you will get an error if you use `--netG unet256` during training and `--netG resnet_6blocks` during test. Make sure that the flag is the same. (2) the normalization parameters `--norm`: we use different default `--norm` parameters for `--model cycle_gan`, `--model pix2pix`, and `--model test`. They might be different from the ones you used at training time. Make sure that you add the `--norm` flag in your test command. (3) dropout: if you used dropout during training, make sure that you use the same dropout setting at test time. Check the flag `--no_dropout`.
74
+
75
+ Note that we use different default generators, normalization, and dropout options for different models. The model file can overwrite the default arguments and add new arguments. For example, this [line](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/pix2pix_model.py#L32) adds and changes default arguments for pix2pix. For CycleGAN, the default is `--netG resnet_9blocks --no_dropout --norm instance --dataset_mode unaligned`. For pix2pix, the default is `--netG unet_256 --norm batch --dataset_mode aligned`. For model testing with a single direction (`--model test`), the default is `--netG resnet_9blocks --norm instance --dataset_mode single`. To make sure that your training and test follow the same setting, you are encouraged to explicitly specify `--netG`, `--norm`, `--dataset_mode`, and `--no_dropout` (or not) in your scripts.
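+
+ If you are not sure which architecture a checkpoint was trained with, a quick sanity check is to print a few keys of the saved state dict and compare them against the network you are constructing (the path below is an example; the key patterns mentioned in the comment are approximate):
+ ```python
+ import torch
+
+ state_dict = torch.load('checkpoints/horse2zebra/latest_net_G.pth', map_location='cpu')
+
+ # ResNet generators typically contain keys such as 'model.10.conv_block.1.weight',
+ # while U-Net generators contain nested keys such as 'model.model.1.model.2.weight'.
+ for key in list(state_dict.keys())[:10]:
+     print(key, tuple(state_dict[key].shape))
+ ```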
76
+
77
+ #### NotSupportedError ([#829](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/829))
78
+ The error message states that `slicing multiple dimensions at the same time isn't supported yet proposals (Tensor): boxes to be encoded`. It is not related to our repo. It is often caused by an incompatibility between your `torchvision` version and your `pytorch` version. You need to re-install or upgrade your `torchvision` to be compatible with your `pytorch` version.
79
+
80
+
81
+ #### What is the identity loss? ([#322](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/322), [#373](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/373), [#362](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/pull/362))
82
+ We use the identity loss for our photo-to-painting application. The identity loss regularizes the generator to be close to an identity mapping when fed with real samples from the *target* domain. If an input already looks like it comes from the target domain, the generator should preserve it without making additional changes. A generator trained with this loss is often more conservative on unknown content. Please see more details in Sec 5.2 ''Photo generation from paintings'' and Figure 12 in the CycleGAN [paper](https://arxiv.org/pdf/1703.10593.pdf). The loss was first proposed in Equation 6 of the prior work [[Taigman et al., 2017]](https://arxiv.org/pdf/1611.02200.pdf).
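+
+ As a minimal sketch (with placeholder generators and data, and the usual default weights assumed; not the exact code of `models/cycle_gan_model.py`), the identity term amounts to:
+ ```python
+ import torch
+ import torch.nn as nn
+
+ # Placeholders standing in for the trained generators and data batches.
+ netG_A = nn.Identity()   # A -> B generator
+ netG_B = nn.Identity()   # B -> A generator
+ real_A = torch.rand(1, 3, 256, 256)
+ real_B = torch.rand(1, 3, 256, 256)
+ lambda_A, lambda_B, lambda_identity = 10.0, 10.0, 0.5
+
+ criterion_idt = nn.L1Loss()
+
+ # G_A should change a real image from domain B as little as possible, and vice versa.
+ loss_idt_A = criterion_idt(netG_A(real_B), real_B) * lambda_B * lambda_identity
+ loss_idt_B = criterion_idt(netG_B(real_A), real_A) * lambda_A * lambda_identity
+ ```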
83
+
84
+ #### The color gets inverted from the beginning of training ([#249](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/249))
85
+ The authors also observe that the generator unnecessarily inverts the color of the input image early in training, and then never learns to undo the inversion. In this case, you can try two things.
86
+
87
+ - First, try using the identity loss `--lambda_identity 1.0` or `--lambda_identity 0.1`. We observe that the identity loss makes the generator more conservative, so it makes fewer unnecessary changes. However, because of this, the change may not be as dramatic.
88
+
89
+ - Second, try smaller variance when initializing weights by changing `--init_gain`. We observe that a smaller variance in weight initialization results in less color inversion.
90
+
91
+ #### For labels2photo Cityscapes evaluation, why does the pretrained FCN-8s model not work well on the original Cityscapes input images? ([#150](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/150))
92
+ The model was trained on 256x256 images that are resized/upsampled to 1024x2048, so the expected input images to the network are very blurry. The purpose of the resizing was to 1) keep the label maps in the original high resolution untouched and 2) avoid the need to change the standard FCN training code for Cityscapes.
93
+
94
+ #### How do I get the `ground-truth` numbers on the labels2photo Cityscapes evaluation? ([#150](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/150))
95
+ You need to resize the original Cityscapes images to 256x256 before running the evaluation code.
96
+
97
+ #### What is a good evaluation metric for CycleGAN? ([#730](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/730), [#716](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/716), [#166](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/166))
98
+ The evaluation metric highly depends on your specific task and dataset. There is no single metric that works for all the datasets and tasks.
99
+
100
+ There are a few popular choices: (1) we often evaluate CycleGAN on paired datasets (e.g., the Cityscapes dataset with the mean IoU metric used in the CycleGAN paper), even though the model was trained without pairs. (2) Many researchers have adopted standard GAN metrics such as FID. Note that FID only evaluates the output images and ignores the correspondence between output and input. (3) A user study regarding photorealism might be helpful. Please check out the details of a user study in the CycleGAN paper (Section 5.1.1).
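+
+ For example, if you decide to report FID, a minimal sketch using the `torchmetrics` package (an assumption; it is not a dependency of this repo, and any FID implementation will do) could look like:
+ ```python
+ import torch
+ from torchmetrics.image.fid import FrechetInceptionDistance
+
+ # Dummy uint8 batches standing in for real test images and CycleGAN outputs;
+ # in practice use many more images, since FID is unreliable for small sample sizes.
+ real_images = torch.randint(0, 256, (16, 3, 256, 256), dtype=torch.uint8)
+ fake_images = torch.randint(0, 256, (16, 3, 256, 256), dtype=torch.uint8)
+
+ fid = FrechetInceptionDistance(feature=2048)   # Inception pool3 features
+ fid.update(real_images, real=True)
+ fid.update(fake_images, real=False)
+ print(float(fid.compute()))   # lower is better; FID ignores input-output correspondence
+ ```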
101
+
102
+ In summary, how to automatically evaluate the results remains an open research problem for GANs. For many creative applications, the results are subjective and hard to quantify without humans in the loop.
103
+
104
+
105
+ #### What does the CycleGAN loss look like if training goes well? ([#1096](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/1096), [#1086](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/1086), [#288](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/288), [#30](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/30))
106
+ Typically, the cycle-consistency loss and identity loss decrease during training, while the GAN losses oscillate. To evaluate the quality of your results, you need to apply additional evaluation metrics to your training and test images. See the Q&A above.
107
+
108
+
109
+ #### Using resize-conv to reduce checkerboard artifacts ([#190](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/190), [#64](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/64))
110
+ This Distill [blog](https://distill.pub/2016/deconv-checkerboard/) discusses one of the potential causes of checkerboard artifacts. You can fix the issue by switching from "deconvolution" to upsampling (nearest-neighbor or bilinear) followed by a regular convolution. Here is one implementation provided by [@SsnL](https://github.com/SsnL). You can replace the `ConvTranspose2d` with the following layers.
111
+ ```python
112
+ nn.Upsample(scale_factor = 2, mode='bilinear'),
113
+ nn.ReflectionPad2d(1),
114
+ nn.Conv2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=1, padding=0),
115
+ ```
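+
+ As a quick sanity check that this replacement keeps the decoder's tensor shapes unchanged, you can compare it with the transposed convolution it stands in for (the `ConvTranspose2d` arguments below are assumed to match the ResNet generator's upsampling layers):
+ ```python
+ import torch
+ import torch.nn as nn
+
+ ngf, mult = 64, 4                        # example channel configuration at one upsampling stage
+ x = torch.rand(1, ngf * mult, 64, 64)
+
+ deconv = nn.ConvTranspose2d(ngf * mult, ngf * mult // 2, kernel_size=3,
+                             stride=2, padding=1, output_padding=1)
+
+ resize_conv = nn.Sequential(
+     nn.Upsample(scale_factor=2, mode='bilinear'),
+     nn.ReflectionPad2d(1),
+     nn.Conv2d(ngf * mult, ngf * mult // 2, kernel_size=3, stride=1, padding=0),
+ )
+
+ print(deconv(x).shape)        # torch.Size([1, 128, 128, 128])
+ print(resize_conv(x).shape)   # torch.Size([1, 128, 128, 128])
+ ```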
116
+ We have also noticed that sometimes the checkerboard artifacts will go away if you train long enough. Maybe you can try training your model a bit longer.
117
+
118
+ #### pix2pix/CycleGAN has no random noise z ([#152](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/152))
119
+ The current pix2pix/CycleGAN model does not take z as input. In both pix2pix and CycleGAN, we tried to add z to the generator: e.g., adding z to a latent state, concatenating with a latent state, applying dropout, etc., but often found the output did not vary significantly as a function of z. Conditional GANs do not need noise as long as the input is sufficiently complex so that the input can kind of play the role of noise. Without noise, the mapping is deterministic.
120
+
121
+ Please check out the following papers that show ways of getting z to actually have a substantial effect: e.g., [BicycleGAN](https://github.com/junyanz/BicycleGAN), [AugmentedCycleGAN](https://arxiv.org/abs/1802.10151), [MUNIT](https://arxiv.org/abs/1804.04732), [DRIT](https://arxiv.org/pdf/1808.00948.pdf), etc.
122
+
123
+ #### Experiment details (e.g., BW->color) ([#306](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/306))
124
+ You can find more training details and hyperparameter settings in the appendix of [CycleGAN](https://arxiv.org/abs/1703.10593) and [pix2pix](https://arxiv.org/abs/1611.07004) papers.
125
+
126
+ #### Results with [Cycada](https://arxiv.org/pdf/1711.03213.pdf)
127
+ We generated the [result of translating GTA images to Cityscapes-style images](https://junyanz.github.io/CycleGAN/) using our Torch repo. Our PyTorch and Torch implementations seem to produce slightly different results, although we have not measured the FCN score using the PyTorch-trained model. To reproduce the results of Cycada, please use the Torch repo for now.
128
+
129
+ #### Loading and using the saved model in your code
130
+ You can load and use the saved generator in your own code with the snippet below:
131
+
132
+ ```python
133
+ import torch
134
+ from models.networks import define_G
135
+ from collections import OrderedDict
136
+
137
+ model_dict = torch.load("checkpoints/stars_pix2pix/latest_net_G.pth")
138
+ new_dict = OrderedDict()
139
+ for k, v in model_dict.items():
140
+ # define_G with gpu_ids wraps the network in DataParallel, so load_state_dict expects keys with the prefix 'module.'
141
+ new_dict["module." + k] = v
142
+
143
+ # make sure you pass the correct parameters to the define_G method
144
+ generator_model = define_G(input_nc=1, output_nc=1, ngf=64, netG="resnet_9blocks",
145
+                            norm="batch", use_dropout=True, init_gain=0.02, gpu_ids=[0])
146
+ generator_model.load_state_dict(new_dict)
147
+ ```
148
+ If everything goes well you should see a '\<All keys matched successfully\>' message.
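+
+ As a follow-up, here is a hedged sketch of running inference with the generator loaded above (it assumes the single-channel setup from that snippet and the usual [-1, 1] normalization used by this repo; adapt the channel count, image size, and paths to your own model):
+ ```python
+ import numpy as np
+ import torch
+ import torchvision.transforms as transforms
+ from PIL import Image
+
+ transform = transforms.Compose([
+     transforms.Grayscale(1),               # the example generator expects 1 input channel
+     transforms.Resize((256, 256)),
+     transforms.ToTensor(),
+     transforms.Normalize((0.5,), (0.5,)),  # map [0, 1] to [-1, 1]
+ ])
+
+ image = Image.open('your_image.png')       # example path
+ input_tensor = transform(image).unsqueeze(0)               # shape (1, 1, 256, 256)
+ device = next(generator_model.parameters()).device
+
+ generator_model.eval()
+ with torch.no_grad():
+     output = generator_model(input_tensor.to(device))
+
+ # Map the output back from [-1, 1] to a uint8 image and save it.
+ arr = output[0, 0].cpu().numpy()
+ arr = np.clip((arr + 1.0) / 2.0 * 255.0, 0, 255).astype(np.uint8)
+ Image.fromarray(arr).save('result.png')
+ ```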
docs/tips.md ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ## Training/test Tips
2
+ #### Training/test options
3
+ Please see `options/train_options.py` and `options/base_options.py` for the training flags; see `options/test_options.py` and `options/base_options.py` for the test flags. There are some model-specific flags as well, which are added in the model files, such as the `--lambda_A` option in `models/cycle_gan_model.py`. The default values of these options are also adjusted in the model files.
4
+ #### CPU/GPU (default `--gpu_ids 0`)
5
+ Please set `--gpu_ids -1` to use CPU mode; set `--gpu_ids 0,1,2` for multi-GPU mode. You need a large batch size (e.g., `--batch_size 32`) to benefit from multiple GPUs.
6
+
7
+ #### Visualization
8
+ During training, the current results can be viewed in two ways. First, if you set `--display_id` > 0, the results and loss plot will appear on a local web server launched by [visdom](https://github.com/facebookresearch/visdom). To do this, you should have `visdom` installed and a server running via the command `python -m visdom.server`. The default server URL is `http://localhost:8097`. `display_id` corresponds to the window ID that is displayed on the `visdom` server. The `visdom` display functionality is turned on by default. To avoid the extra overhead of communicating with `visdom`, set `--display_id -1`. Second, the intermediate results are saved to `[opt.checkpoints_dir]/[opt.name]/web/` as an HTML file. To avoid this, set `--no_html`.
9
+
10
+ #### Preprocessing
11
+ Images can be resized and cropped in different ways using the `--preprocess` option. The default option `'resize_and_crop'` resizes the image to `(opt.load_size, opt.load_size)` and then takes a random crop of size `(opt.crop_size, opt.crop_size)`. `'crop'` skips the resizing step and only performs random cropping. `'scale_width'` resizes the image to have width `opt.crop_size` while keeping the aspect ratio. `'scale_width_and_crop'` first resizes the image to have width `opt.load_size` and then does random cropping of size `(opt.crop_size, opt.crop_size)`. `'none'` tries to skip all these preprocessing steps. However, if the image size is not a multiple of some number that depends on the number of downsamplings in the generator, you will get an error because the size of the output image may differ from the size of the input image. Therefore, the `'none'` option still adjusts the image size to be a multiple of 4. You might need a bigger adjustment if you change the generator architecture. Please see `data/base_dataset.py` to see how all of this is implemented.
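+
+ As a rough illustration (not the actual implementation), the default `'resize_and_crop'` behaviour is close to the following torchvision pipeline, here assuming the default `--load_size 286` and `--crop_size 256`:
+ ```python
+ import torchvision.transforms as transforms
+
+ load_size, crop_size = 286, 256   # default --load_size / --crop_size values
+
+ resize_and_crop = transforms.Compose([
+     transforms.Resize((load_size, load_size)),   # first scale to load_size x load_size
+     transforms.RandomCrop(crop_size),            # then take a random crop_size crop
+     transforms.RandomHorizontalFlip(),           # flipping is enabled unless --no_flip is set
+     transforms.ToTensor(),
+     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),  # map images to [-1, 1]
+ ])
+ ```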
12
+
13
+ #### Fine-tuning/resume training
14
+ To fine-tune a pre-trained model, or resume the previous training, use the `--continue_train` flag. The program will then load the model based on `epoch`. By default, the program will initialize the epoch count as 1. Set `--epoch_count <int>` to specify a different starting epoch count.
15
+
16
+
17
+ #### Prepare your own datasets for CycleGAN
18
+ You need to create two directories to host images from domain A `/path/to/data/trainA` and from domain B `/path/to/data/trainB`. Then you can train the model with the dataset flag `--dataroot /path/to/data`. Optionally, you can create hold-out test datasets at `/path/to/data/testA` and `/path/to/data/testB` to test your model on unseen images.
19
+
20
+ #### Prepare your own datasets for pix2pix
21
+ Pix2pix's training requires paired data. We provide a python script to generate training data in the form of pairs of images {A,B}, where A and B are two different depictions of the same underlying scene. For example, these might be pairs {label map, photo} or {bw image, color image}. Then we can learn to translate A to B or B to A:
22
+
23
+ Create a folder `/path/to/data` with subdirectories `A` and `B`. `A` and `B` should each have their own subdirectories `train`, `val`, `test`, etc. In `/path/to/data/A/train`, put training images in style A. In `/path/to/data/B/train`, put the corresponding images in style B. Repeat the same for the other data splits (`val`, `test`, etc.).
24
+
25
+ Corresponding images in a pair {A,B} must be the same size and have the same filename, e.g., `/path/to/data/A/train/1.jpg` is considered to correspond to `/path/to/data/B/train/1.jpg`.
26
+
27
+ Once the data is formatted this way, call:
28
+ ```bash
29
+ python datasets/combine_A_and_B.py --fold_A /path/to/data/A --fold_B /path/to/data/B --fold_AB /path/to/data
30
+ ```
31
+
32
+ This will combine each pair of images (A,B) into a single image file, ready for training.
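+
+ Before running the script, a small sanity check that the two folders really are paired by filename can save a confusing failure later (a sketch; replace the example paths with your own):
+ ```python
+ import os
+
+ fold_A = '/path/to/data/A/train'   # example paths
+ fold_B = '/path/to/data/B/train'
+
+ names_A = set(os.listdir(fold_A))
+ names_B = set(os.listdir(fold_B))
+
+ # Every A image needs a B image with exactly the same filename, and vice versa.
+ print('missing in B:', sorted(names_A - names_B)[:10])
+ print('missing in A:', sorted(names_B - names_A)[:10])
+ ```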
33
+
34
+
35
+ #### About image size
36
+ Since the generator architecture in CycleGAN involves a series of downsampling / upsampling operations, the sizes of the input and output images may not match if the input image size is not a multiple of 4. As a result, you may get a runtime error because the L1 identity loss cannot be enforced on images of different sizes. Therefore, we slightly resize images so that their sides are multiples of 4, even with the `--preprocess none` option. For the same reason, `--crop_size` needs to be a multiple of 4.
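+
+ The adjustment is roughly the following (a sketch of the idea, not the exact code in `data/base_dataset.py`): each side is rounded to the nearest multiple of 4 before the image is fed to the network.
+ ```python
+ from PIL import Image
+
+ def make_multiple_of(img, base=4):
+     """Resize img so that both width and height are multiples of `base`."""
+     w, h = img.size
+     new_w = max(base, int(round(w / base)) * base)
+     new_h = max(base, int(round(h / base)) * base)
+     if (new_w, new_h) == (w, h):
+         return img                     # already fine, nothing to do
+     return img.resize((new_w, new_h), Image.BICUBIC)
+ ```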
37
+
38
+ #### Training/Testing with high res images
39
+ CycleGAN is quite memory-intensive as four networks (two generators and two discriminators) need to be loaded on one GPU, so a large image cannot be entirely loaded. In this case, we recommend training with cropped images. For example, to generate 1024px results, you can train with `--preprocess scale_width_and_crop --load_size 1024 --crop_size 360`, and test with `--preprocess scale_width --load_size 1024`. This ensures that training and test are run at the same scale. At test time, you can afford a higher resolution because you do not need to load all networks.
40
+
41
+ #### Training/Testing with rectangular images
42
+ Both pix2pix and CycleGAN can work for rectangular images. To make them work, you need to use different preprocessing flags. Let's say that you are working with `360x256` images. During training, you can specify `--preprocess crop` and `--crop_size 256`. This will allow your model to be trained on randomly cropped `256x256` images during training time. During test time, you can apply the model on `360x256` images with the flag `--preprocess none`.
43
+
44
+ There are practical restrictions regarding image sizes for each generator architecture. For `unet256`, it only supports images whose width and height are divisible by 256. For `unet128`, the width and height need to be divisible by 128. For `resnet_6blocks` and `resnet_9blocks`, the width and height need to be divisible by 4.
45
+
46
+ #### About loss curve
47
+ Unfortunately, the loss curve does not reveal much information in training GANs, and CycleGAN is no exception. To check whether the training has converged or not, we recommend periodically generating a few samples and looking at them.
48
+
49
+ #### About batch size
50
+ For all experiments in the paper, we set the batch size to 1. If memory permits, you can use a larger batch size with batch norm or instance norm. (Note that the default batchnorm does not work well with multi-GPU training. You may consider using [synchronized batchnorm](https://github.com/vacancy/Synchronized-BatchNorm-PyTorch) instead.) But please be aware that it can impact the training. In particular, even with instance normalization, different batch sizes can lead to different results. Moreover, increasing `--crop_size` may be a good alternative to increasing the batch size.
51
+
52
+
53
+ #### Notes on Colorization
54
+ No need to run `combine_A_and_B.py` for colorization. Instead, you need to prepare natural images and set `--dataset_mode colorization` and `--model colorization` in the script. The program will automatically convert each RGB image into Lab color space and create the `L -> ab` image pair during training. Also set `--input_nc 1` and `--output_nc 2`. The training and test directories should be organized as `/your/data/train` and `/your/data/test`. See the example scripts `scripts/train_colorization.sh` and `scripts/test_colorization.sh` for more details.
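+
+ For reference, the `L -> ab` pairing amounts to roughly the following (a sketch assuming `scikit-image` is available; the normalization constants are approximate):
+ ```python
+ import numpy as np
+ import torchvision.transforms as transforms
+ from PIL import Image
+ from skimage import color
+
+ rgb = np.array(Image.open('your_image.jpg').convert('RGB'))   # example path
+ lab = color.rgb2lab(rgb).astype(np.float32)    # L in [0, 100], a/b roughly in [-110, 110]
+
+ lab_t = transforms.ToTensor()(lab)             # shape (3, H, W); float input is not rescaled
+ L = lab_t[[0], ...] / 50.0 - 1.0               # network input  (--input_nc 1), in [-1, 1]
+ ab = lab_t[[1, 2], ...] / 110.0                # network target (--output_nc 2), roughly in [-1, 1]
+ ```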
55
+
56
+ #### Notes on Extracting Edges
57
+ We provide python and Matlab scripts to extract coarse edges from photos. Run `scripts/edges/batch_hed.py` to compute [HED](https://github.com/s9xie/hed) edges. Run `scripts/edges/PostprocessHED.m` to simplify edges with additional post-processing steps. Check the code documentation for more details.
58
+
59
+ #### Evaluating Labels2Photos on Cityscapes
60
+ We provide scripts for running the evaluation of the Labels2Photos task on the Cityscapes **validation** set. We assume that you have installed `caffe` (and `pycaffe`) in your system. If not, see the [official website](http://caffe.berkeleyvision.org/installation.html) for installation instructions. Once `caffe` is successfully installed, download the pre-trained FCN-8s semantic segmentation model (512MB) by running
61
+ ```bash
62
+ bash ./scripts/eval_cityscapes/download_fcn8s.sh
63
+ ```
64
+ Then make sure `./scripts/eval_cityscapes/` is in your system's python path. If not, run the following command to add it
65
+ ```bash
66
+ export PYTHONPATH=${PYTHONPATH}:./scripts/eval_cityscapes/
67
+ ```
68
+ Now you can run the following command to evaluate your predictions:
69
+ ```bash
70
+ python ./scripts/eval_cityscapes/evaluate.py --cityscapes_dir /path/to/original/cityscapes/dataset/ --result_dir /path/to/your/predictions/ --output_dir /path/to/output/directory/
71
+ ```
72
+ Images stored under `--result_dir` should contain your model predictions on the Cityscapes **validation** split, and have the original Cityscapes naming convention (e.g., `frankfurt_000001_038418_leftImg8bit.png`). The script will output a text file under `--output_dir` containing the metric.
73
+
74
+ **Further notes**: Our pre-trained FCN model is **not** supposed to work on Cityscapes at the original resolution (1024x2048), as it was trained on 256x256 images that are then upsampled to 1024x2048 during training. The purpose of the resizing during training was to 1) keep the label maps in the original high resolution untouched and 2) avoid the need to change the standard FCN training code and architecture for Cityscapes. At test time, you need to synthesize 256x256 results. Our test code will automatically upsample your results to 1024x2048 before feeding them to the pre-trained FCN model. The output is at 1024x2048 resolution and will be compared to 1024x2048 ground truth labels. You do not need to resize the ground truth labels. The best way to verify that everything is correct is to first reproduce the numbers for real images reported in the paper. To achieve this, you need to resize the original/real Cityscapes images (**not** labels) to 256x256 and feed them to the evaluation code.
environment.yml ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: pytorch-CycleGAN-and-pix2pix
2
+ channels:
3
+ - pytorch
4
+ - defaults
5
+ dependencies:
6
+ - python=3.8
7
+ - pytorch=1.8.1
8
+ - scipy
9
+ - pip
10
+ - pip:
11
+ - dominate==2.6.0
12
+ - torchvision==0.9.1
13
+ - Pillow==8.0.1
14
+ - numpy==1.19.2
15
+ - visdom==0.1.8
16
+ - wandb==0.12.18
17
+