Keiser41 committed
Commit 96eb931
1 Parent(s): 5b0413d

Upload 201 files

This view is limited to 50 files because the commit contains too many changes.

Files changed (50)
  1. .gitattributes +53 -0
  2. .gitignore +46 -0
  3. .replit +2 -0
  4. CycleGAN.ipynb +273 -0
  5. LICENSE +58 -0
  6. README.md +246 -0
  7. checkpoints/bw2color/115_net_D.pth +3 -0
  8. checkpoints/bw2color/115_net_G.pth +3 -0
  9. checkpoints/bw2color/bw2color.pth +3 -0
  10. checkpoints/bw2color/latest_net_D.pth +3 -0
  11. checkpoints/bw2color/latest_net_G_A.pth +3 -0
  12. checkpoints/bw2color/loss_log.txt +17 -0
  13. checkpoints/bw2color/opt.txt +35 -0
  14. checkpoints/bw2color/test_opt.txt +45 -0
  15. checkpoints/bw2color/web/images/epoch004_fake_A.png +0 -0
  16. checkpoints/bw2color/web/images/epoch004_fake_B.png +0 -0
  17. checkpoints/bw2color/web/images/epoch004_idt_A.png +0 -0
  18. checkpoints/bw2color/web/images/epoch004_idt_B.png +0 -0
  19. checkpoints/bw2color/web/images/epoch004_real_A.png +0 -0
  20. checkpoints/bw2color/web/images/epoch004_real_B.png +0 -0
  21. checkpoints/bw2color/web/images/epoch004_rec_A.png +0 -0
  22. checkpoints/bw2color/web/images/epoch004_rec_B.png +0 -0
  23. checkpoints/bw2color/web/images/epoch008_fake_A.png +0 -0
  24. checkpoints/bw2color/web/images/epoch008_fake_B.png +0 -0
  25. checkpoints/bw2color/web/images/epoch008_idt_A.png +0 -0
  26. checkpoints/bw2color/web/images/epoch008_idt_B.png +0 -0
  27. checkpoints/bw2color/web/images/epoch008_real_A.png +0 -0
  28. checkpoints/bw2color/web/images/epoch008_real_B.png +0 -0
  29. checkpoints/bw2color/web/images/epoch008_rec_A.png +0 -0
  30. checkpoints/bw2color/web/images/epoch008_rec_B.png +0 -0
  31. checkpoints/bw2color/web/images/epoch012_fake_A.png +0 -0
  32. checkpoints/bw2color/web/images/epoch012_fake_B.png +0 -0
  33. checkpoints/bw2color/web/images/epoch012_idt_A.png +0 -0
  34. checkpoints/bw2color/web/images/epoch012_idt_B.png +0 -0
  35. checkpoints/bw2color/web/images/epoch012_real_A.png +0 -0
  36. checkpoints/bw2color/web/images/epoch012_real_B.png +0 -0
  37. checkpoints/bw2color/web/images/epoch012_rec_A.png +0 -0
  38. checkpoints/bw2color/web/images/epoch012_rec_B.png +0 -0
  39. checkpoints/bw2color/web/images/epoch016_fake_A.png +0 -0
  40. checkpoints/bw2color/web/images/epoch016_fake_B.png +0 -0
  41. checkpoints/bw2color/web/images/epoch016_idt_A.png +0 -0
  42. checkpoints/bw2color/web/images/epoch016_idt_B.png +0 -0
  43. checkpoints/bw2color/web/images/epoch016_real_A.png +0 -0
  44. checkpoints/bw2color/web/images/epoch016_real_B.png +0 -0
  45. checkpoints/bw2color/web/images/epoch016_rec_A.png +0 -0
  46. checkpoints/bw2color/web/images/epoch016_rec_B.png +0 -0
  47. checkpoints/bw2color/web/images/epoch029_fake_B_rgb.png +0 -0
  48. checkpoints/bw2color/web/images/epoch029_real_A.png +0 -0
  49. checkpoints/bw2color/web/images/epoch029_real_B_rgb.png +0 -0
  50. checkpoints/bw2color/web/images/epoch058_fake_B_rgb.png +0 -0
.gitattributes CHANGED
@@ -33,3 +33,56 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/testA/11_3.png filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/testA/12_12.png filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/testA/13_8.png filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](12).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](188).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](192).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](197).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](215).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](231).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](238).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](254).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](281).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](288).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](302).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](307).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](310).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](329).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](33).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](374).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](392).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](41).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](445).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](449).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](454).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](512).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](517).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](529).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](533).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](54).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](565).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](576).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](587).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](642).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](648).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](670).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](672).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](732).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](751).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](757).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](759).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](825).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](834).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](845).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](846).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](849).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](880).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](884).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](90).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](902).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](93).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](940).jpg filter=lfs diff=lfs merge=lfs -text
+ datasets/bw2color/trainB/a[[:space:]](95).jpg filter=lfs diff=lfs merge=lfs -text
+ imgs/horse2zebra.gif filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,46 @@
+ .DS_Store
+ debug*
+ datasets/
+ checkpoints/
+ results/
+ build/
+ dist/
+ *.png
+ torch.egg-info/
+ */**/__pycache__
+ torch/version.py
+ torch/csrc/generic/TensorMethods.cpp
+ torch/lib/*.so*
+ torch/lib/*.dylib*
+ torch/lib/*.h
+ torch/lib/build
+ torch/lib/tmp_install
+ torch/lib/include
+ torch/lib/torch_shm_manager
+ torch/csrc/cudnn/cuDNN.cpp
+ torch/csrc/nn/THNN.cwrap
+ torch/csrc/nn/THNN.cpp
+ torch/csrc/nn/THCUNN.cwrap
+ torch/csrc/nn/THCUNN.cpp
+ torch/csrc/nn/THNN_generic.cwrap
+ torch/csrc/nn/THNN_generic.cpp
+ torch/csrc/nn/THNN_generic.h
+ docs/src/**/*
+ test/data/legacy_modules.t7
+ test/data/gpu_tensors.pt
+ test/htmlcov
+ test/.coverage
+ */*.pyc
+ */**/*.pyc
+ */**/**/*.pyc
+ */**/**/**/*.pyc
+ */**/**/**/**/*.pyc
+ */*.so*
+ */**/*.so*
+ */**/*.dylib*
+ test/data/legacy_serialized.pt
+ *~
+ .idea
+
+ #Ignore Wandb
+ wandb/
.replit ADDED
@@ -0,0 +1,2 @@
+ language = "python3"
+ run = "<p><a href=\"https://github.com/affinelayer/pix2pix-tensorflow\"> [Tensorflow]</a> (by Christopher Hesse), <a href=\"https://github.com/Eyyub/tensorflow-pix2pix\">[Tensorflow]</a> (by Eyyüb Sariu), <a href=\"https://github.com/datitran/face2face-demo\"> [Tensorflow (face2face)]</a> (by Dat Tran), <a href=\"https://github.com/awjuliani/Pix2Pix-Film\"> [Tensorflow (film)]</a> (by Arthur Juliani), <a href=\"https://github.com/kaonashi-tyc/zi2zi\">[Tensorflow (zi2zi)]</a> (by Yuchen Tian), <a href=\"https://github.com/pfnet-research/chainer-pix2pix\">[Chainer]</a> (by mattya), <a href=\"https://github.com/tjwei/GANotebooks\">[tf/torch/keras/lasagne]</a> (by tjwei), <a href=\"https://github.com/taey16/pix2pixBEGAN.pytorch\">[Pytorch]</a> (by taey16) </p> </ul>"
CycleGAN.ipynb ADDED
@@ -0,0 +1,273 @@
+ {
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "view-in-github"
+ },
+ "source": [
+ "<a href=\"https://colab.research.google.com/github/bkkaggle/pytorch-CycleGAN-and-pix2pix/blob/master/CycleGAN.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "5VIGyIus8Vr7"
+ },
+ "source": [
+ "Take a look at the [repository](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix) for more information"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "7wNjDKdQy35h"
+ },
+ "source": [
+ "# Install"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "TRm-USlsHgEV"
+ },
+ "outputs": [],
+ "source": [
+ "!git clone https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "Pt3igws3eiVp"
+ },
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "os.chdir('pytorch-CycleGAN-and-pix2pix/')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "z1EySlOXwwoa"
+ },
+ "outputs": [],
+ "source": [
+ "!pip install -r requirements.txt"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "8daqlgVhw29P"
+ },
+ "source": [
+ "# Datasets\n",
+ "\n",
+ "Download one of the official datasets with:\n",
+ "\n",
+ "- `bash ./datasets/download_cyclegan_dataset.sh [apple2orange, summer2winter_yosemite, horse2zebra, monet2photo, cezanne2photo, ukiyoe2photo, vangogh2photo, maps, cityscapes, facades, iphone2dslr_flower, ae_photos]`\n",
+ "\n",
+ "Or use your own dataset by creating the appropriate folders and adding in the images.\n",
+ "\n",
+ "- Create a dataset folder under `/dataset` for your dataset.\n",
+ "- Create subfolders `testA`, `testB`, `trainA`, and `trainB` under your dataset's folder. Place any images you want to transform from a to b (cat2dog) in the `testA` folder, images you want to transform from b to a (dog2cat) in the `testB` folder, and do the same for the `trainA` and `trainB` folders."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "vrdOettJxaCc"
+ },
+ "outputs": [],
+ "source": [
+ "!bash ./datasets/download_cyclegan_dataset.sh horse2zebra"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "gdUz4116xhpm"
+ },
+ "source": [
+ "# Pretrained models\n",
+ "\n",
+ "Download one of the official pretrained models with:\n",
+ "\n",
+ "- `bash ./scripts/download_cyclegan_model.sh [apple2orange, orange2apple, summer2winter_yosemite, winter2summer_yosemite, horse2zebra, zebra2horse, monet2photo, style_monet, style_cezanne, style_ukiyoe, style_vangogh, sat2map, map2sat, cityscapes_photo2label, cityscapes_label2photo, facades_photo2label, facades_label2photo, iphone2dslr_flower]`\n",
+ "\n",
+ "Or add your own pretrained model to `./checkpoints/{NAME}_pretrained/latest_net_G.pt`"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "B75UqtKhxznS"
+ },
+ "outputs": [],
+ "source": [
+ "!bash ./scripts/download_cyclegan_model.sh horse2zebra"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "yFw1kDQBx3LN"
+ },
+ "source": [
+ "# Training\n",
+ "\n",
+ "- `python train.py --dataroot ./datasets/horse2zebra --name horse2zebra --model cycle_gan`\n",
+ "\n",
+ "Change the `--dataroot` and `--name` to your own dataset's path and model's name. Use `--gpu_ids 0,1,..` to train on multiple GPUs and `--batch_size` to change the batch size. I've found that a batch size of 16 fits onto 4 V100s and can finish training an epoch in ~90s.\n",
+ "\n",
+ "Once your model has trained, copy over the last checkpoint to a format that the testing model can automatically detect:\n",
+ "\n",
+ "Use `cp ./checkpoints/horse2zebra/latest_net_G_A.pth ./checkpoints/horse2zebra/latest_net_G.pth` if you want to transform images from class A to class B and `cp ./checkpoints/horse2zebra/latest_net_G_B.pth ./checkpoints/horse2zebra/latest_net_G.pth` if you want to transform images from class B to class A.\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "0sp7TCT2x9dB"
+ },
+ "outputs": [],
+ "source": [
+ "!python train.py --dataroot ./datasets/horse2zebra --name horse2zebra --model cycle_gan --display_id -1"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "9UkcaFZiyASl"
+ },
+ "source": [
+ "# Testing\n",
+ "\n",
+ "- `python test.py --dataroot datasets/horse2zebra/testA --name horse2zebra_pretrained --model test --no_dropout`\n",
+ "\n",
+ "Change the `--dataroot` and `--name` to be consistent with your trained model's configuration.\n",
+ "\n",
+ "> from https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix:\n",
+ "> The option --model test is used for generating results of CycleGAN only for one side. This option will automatically set --dataset_mode single, which only loads the images from one set. On the contrary, using --model cycle_gan requires loading and generating results in both directions, which is sometimes unnecessary. The results will be saved at ./results/. Use --results_dir {directory_path_to_save_result} to specify the results directory.\n",
+ "\n",
+ "> For your own experiments, you might want to specify --netG, --norm, --no_dropout to match the generator architecture of the trained model."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "uCsKkEq0yGh0"
+ },
+ "outputs": [],
+ "source": [
+ "!python test.py --dataroot datasets/horse2zebra/testA --name horse2zebra_pretrained --model test --no_dropout"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "OzSKIPUByfiN"
+ },
+ "source": [
+ "# Visualize"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "9Mgg8raPyizq"
+ },
+ "outputs": [],
+ "source": [
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "img = plt.imread('./results/horse2zebra_pretrained/test_latest/images/n02381460_1010_fake.png')\n",
+ "plt.imshow(img)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "0G3oVH9DyqLQ"
+ },
+ "outputs": [],
+ "source": [
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "img = plt.imread('./results/horse2zebra_pretrained/test_latest/images/n02381460_1010_real.png')\n",
+ "plt.imshow(img)"
+ ]
+ }
+ ],
+ "metadata": {
+ "accelerator": "GPU",
+ "colab": {
+ "collapsed_sections": [],
+ "include_colab_link": true,
+ "name": "CycleGAN",
+ "provenance": []
+ },
+ "environment": {
+ "name": "tf2-gpu.2-3.m74",
+ "type": "gcloud",
+ "uri": "gcr.io/deeplearning-platform-release/tf2-gpu.2-3:m74"
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.7.10"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+ }
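The notebook's "Datasets" cell above describes the `trainA`/`trainB`/`testA`/`testB` layout that custom datasets (including the `bw2color` data in this commit) are expected to follow. A minimal sketch of setting up that layout; the `my_dataset` name and the `raw/` source folders are only illustrative, not part of the commit:

```python
# Sketch of the trainA/trainB/testA/testB layout described in the Datasets cell above.
# The dataset name and raw/ source folders are hypothetical placeholders.
from pathlib import Path
import shutil

root = Path("datasets/my_dataset")
for split in ("trainA", "trainB", "testA", "testB"):
    (root / split).mkdir(parents=True, exist_ok=True)

# Domain A images (inputs of the A->B translation) go in *A, domain B images in *B.
for src in Path("raw/domainA").glob("*.png"):
    shutil.copy(src, root / "trainA" / src.name)
for src in Path("raw/domainB").glob("*.jpg"):
    shutil.copy(src, root / "trainB" / src.name)
```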
LICENSE ADDED
@@ -0,0 +1,58 @@
+ Copyright (c) 2017, Jun-Yan Zhu and Taesung Park
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+ --------------------------- LICENSE FOR pix2pix --------------------------------
+ BSD License
+
+ For pix2pix software
+ Copyright (c) 2016, Phillip Isola and Jun-Yan Zhu
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ ----------------------------- LICENSE FOR DCGAN --------------------------------
+ BSD License
+
+ For dcgan.torch software
+
+ Copyright (c) 2015, Facebook, Inc. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+ Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+ Neither the name Facebook nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
README.md ADDED
@@ -0,0 +1,246 @@
+
+ <img src='imgs/horse2zebra.gif' align="right" width=384>
+
+ <br><br><br>
+
+ # CycleGAN and pix2pix in PyTorch
+
+ **New**: Please check out [contrastive-unpaired-translation](https://github.com/taesungp/contrastive-unpaired-translation) (CUT), our new unpaired image-to-image translation model that enables fast and memory-efficient training.
+
+ We provide PyTorch implementations for both unpaired and paired image-to-image translation.
+
+ The code was written by [Jun-Yan Zhu](https://github.com/junyanz) and [Taesung Park](https://github.com/taesungp), and supported by [Tongzhou Wang](https://github.com/SsnL).
+
+ This PyTorch implementation produces results comparable to or better than our original Torch software. If you would like to reproduce the same results as in the papers, check out the original [CycleGAN Torch](https://github.com/junyanz/CycleGAN) and [pix2pix Torch](https://github.com/phillipi/pix2pix) code in Lua/Torch.
+
+ **Note**: The current software works well with PyTorch 1.4. Check out the older [branch](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/tree/pytorch0.3.1) that supports PyTorch 0.1-0.3.
+
+ You may find useful information in [training/test tips](docs/tips.md) and [frequently asked questions](docs/qa.md). To implement custom models and datasets, check out our [templates](#custom-model-and-dataset). To help users better understand and adapt our codebase, we provide an [overview](docs/overview.md) of the code structure of this repository.
+
+ **CycleGAN: [Project](https://junyanz.github.io/CycleGAN/) | [Paper](https://arxiv.org/pdf/1703.10593.pdf) | [Torch](https://github.com/junyanz/CycleGAN) |
+ [Tensorflow Core Tutorial](https://www.tensorflow.org/tutorials/generative/cyclegan) | [PyTorch Colab](https://colab.research.google.com/github/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/CycleGAN.ipynb)**
+
+ <img src="https://junyanz.github.io/CycleGAN/images/teaser_high_res.jpg" width="800"/>
+
+ **Pix2pix: [Project](https://phillipi.github.io/pix2pix/) | [Paper](https://arxiv.org/pdf/1611.07004.pdf) | [Torch](https://github.com/phillipi/pix2pix) |
+ [Tensorflow Core Tutorial](https://www.tensorflow.org/tutorials/generative/pix2pix) | [PyTorch Colab](https://colab.research.google.com/github/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/pix2pix.ipynb)**
+
+ <img src="https://phillipi.github.io/pix2pix/images/teaser_v3.png" width="800px"/>
+
+
+ **[EdgesCats Demo](https://affinelayer.com/pixsrv/) | [pix2pix-tensorflow](https://github.com/affinelayer/pix2pix-tensorflow) | by [Christopher Hesse](https://twitter.com/christophrhesse)**
+
+ <img src='imgs/edges2cats.jpg' width="400px"/>
+
+ If you use this code for your research, please cite:
+
+ Unpaired Image-to-Image Translation using Cycle-Consistent Adversarial Networks.<br>
+ [Jun-Yan Zhu](https://www.cs.cmu.edu/~junyanz/)\*, [Taesung Park](https://taesung.me/)\*, [Phillip Isola](https://people.eecs.berkeley.edu/~isola/), [Alexei A. Efros](https://people.eecs.berkeley.edu/~efros). In ICCV 2017. (* equal contributions) [[Bibtex]](https://junyanz.github.io/CycleGAN/CycleGAN.txt)
+
+
+ Image-to-Image Translation with Conditional Adversarial Networks.<br>
+ [Phillip Isola](https://people.eecs.berkeley.edu/~isola), [Jun-Yan Zhu](https://www.cs.cmu.edu/~junyanz/), [Tinghui Zhou](https://people.eecs.berkeley.edu/~tinghuiz), [Alexei A. Efros](https://people.eecs.berkeley.edu/~efros). In CVPR 2017. [[Bibtex]](https://www.cs.cmu.edu/~junyanz/projects/pix2pix/pix2pix.bib)
+
+ ## Talks and Course
+ pix2pix slides: [keynote](http://efrosgans.eecs.berkeley.edu/CVPR18_slides/pix2pix.key) | [pdf](http://efrosgans.eecs.berkeley.edu/CVPR18_slides/pix2pix.pdf),
+ CycleGAN slides: [pptx](http://efrosgans.eecs.berkeley.edu/CVPR18_slides/CycleGAN.pptx) | [pdf](http://efrosgans.eecs.berkeley.edu/CVPR18_slides/CycleGAN.pdf)
+
+ CycleGAN course assignment [code](http://www.cs.toronto.edu/~rgrosse/courses/csc321_2018/assignments/a4-code.zip) and [handout](http://www.cs.toronto.edu/~rgrosse/courses/csc321_2018/assignments/a4-handout.pdf) designed by Prof. [Roger Grosse](http://www.cs.toronto.edu/~rgrosse/) for [CSC321](http://www.cs.toronto.edu/~rgrosse/courses/csc321_2018/) "Intro to Neural Networks and Machine Learning" at University of Toronto. Please contact the instructor if you would like to adopt it in your course.
+
+ ## Colab Notebook
+ TensorFlow Core CycleGAN Tutorial: [Google Colab](https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/generative/cyclegan.ipynb) | [Code](https://github.com/tensorflow/docs/blob/master/site/en/tutorials/generative/cyclegan.ipynb)
+
+ TensorFlow Core pix2pix Tutorial: [Google Colab](https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/generative/pix2pix.ipynb) | [Code](https://github.com/tensorflow/docs/blob/master/site/en/tutorials/generative/pix2pix.ipynb)
+
+ PyTorch Colab notebook: [CycleGAN](https://colab.research.google.com/github/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/CycleGAN.ipynb) and [pix2pix](https://colab.research.google.com/github/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/pix2pix.ipynb)
+
+ ZeroCostDL4Mic Colab notebook: [CycleGAN](https://colab.research.google.com/github/HenriquesLab/ZeroCostDL4Mic/blob/master/Colab_notebooks_Beta/CycleGAN_ZeroCostDL4Mic.ipynb) and [pix2pix](https://colab.research.google.com/github/HenriquesLab/ZeroCostDL4Mic/blob/master/Colab_notebooks_Beta/pix2pix_ZeroCostDL4Mic.ipynb)
+
+ ## Other implementations
+ ### CycleGAN
+ <p><a href="https://github.com/leehomyc/cyclegan-1"> [Tensorflow]</a> (by Harry Yang),
+ <a href="https://github.com/architrathore/CycleGAN/">[Tensorflow]</a> (by Archit Rathore),
+ <a href="https://github.com/vanhuyz/CycleGAN-TensorFlow">[Tensorflow]</a> (by Van Huy),
+ <a href="https://github.com/XHUJOY/CycleGAN-tensorflow">[Tensorflow]</a> (by Xiaowei Hu),
+ <a href="https://github.com/LynnHo/CycleGAN-Tensorflow-2"> [Tensorflow2]</a> (by Zhenliang He),
+ <a href="https://github.com/luoxier/CycleGAN_Tensorlayer"> [TensorLayer1.0]</a> (by luoxier),
+ <a href="https://github.com/tensorlayer/cyclegan"> [TensorLayer2.0]</a> (by zsdonghao),
+ <a href="https://github.com/Aixile/chainer-cyclegan">[Chainer]</a> (by Yanghua Jin),
+ <a href="https://github.com/yunjey/mnist-svhn-transfer">[Minimal PyTorch]</a> (by yunjey),
+ <a href="https://github.com/Ldpe2G/DeepLearningForFun/tree/master/Mxnet-Scala/CycleGAN">[Mxnet]</a> (by Ldpe2G),
+ <a href="https://github.com/tjwei/GANotebooks">[lasagne/Keras]</a> (by tjwei),
+ <a href="https://github.com/simontomaskarlsson/CycleGAN-Keras">[Keras]</a> (by Simon Karlsson),
+ <a href="https://github.com/Ldpe2G/DeepLearningForFun/tree/master/Oneflow-Python/CycleGAN">[OneFlow]</a> (by Ldpe2G)
+ </p>
+ </ul>
+
+ ### pix2pix
+ <p><a href="https://github.com/affinelayer/pix2pix-tensorflow"> [Tensorflow]</a> (by Christopher Hesse),
+ <a href="https://github.com/Eyyub/tensorflow-pix2pix">[Tensorflow]</a> (by Eyyüb Sariu),
+ <a href="https://github.com/datitran/face2face-demo"> [Tensorflow (face2face)]</a> (by Dat Tran),
+ <a href="https://github.com/awjuliani/Pix2Pix-Film"> [Tensorflow (film)]</a> (by Arthur Juliani),
+ <a href="https://github.com/kaonashi-tyc/zi2zi">[Tensorflow (zi2zi)]</a> (by Yuchen Tian),
+ <a href="https://github.com/pfnet-research/chainer-pix2pix">[Chainer]</a> (by mattya),
+ <a href="https://github.com/tjwei/GANotebooks">[tf/torch/keras/lasagne]</a> (by tjwei),
+ <a href="https://github.com/taey16/pix2pixBEGAN.pytorch">[Pytorch]</a> (by taey16)
+ </p>
+ </ul>
+
+ ## Prerequisites
+ - Linux or macOS
+ - Python 3
+ - CPU or NVIDIA GPU + CUDA CuDNN
+
+ ## Getting Started
+ ### Installation
+
+ - Clone this repo:
+ ```bash
+ git clone https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix
+ cd pytorch-CycleGAN-and-pix2pix
+ ```
+
+ - Install [PyTorch](http://pytorch.org) and 0.4+ and other dependencies (e.g., torchvision, [visdom](https://github.com/facebookresearch/visdom) and [dominate](https://github.com/Knio/dominate)).
+ - For pip users, please type the command `pip install -r requirements.txt`.
+ - For Conda users, you can create a new Conda environment using `conda env create -f environment.yml`.
+ - For Docker users, we provide the pre-built Docker image and Dockerfile. Please refer to our [Docker](docs/docker.md) page.
+ - For Repl users, please click [![Run on Repl.it](https://repl.it/badge/github/junyanz/pytorch-CycleGAN-and-pix2pix)](https://repl.it/github/junyanz/pytorch-CycleGAN-and-pix2pix).
+
+ ### CycleGAN train/test
+ - Download a CycleGAN dataset (e.g. maps):
+ ```bash
+ bash ./datasets/download_cyclegan_dataset.sh maps
+ ```
+ - To view training results and loss plots, run `python -m visdom.server` and click the URL http://localhost:8097.
+ - To log training progress and test images to W&B dashboard, set the `--use_wandb` flag with train and test script
+ - Train a model:
+ ```bash
+ #!./scripts/train_cyclegan.sh
+ python train.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan
+ ```
+ To see more intermediate results, check out `./checkpoints/maps_cyclegan/web/index.html`.
+ - Test the model:
+ ```bash
+ #!./scripts/test_cyclegan.sh
+ python test.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan
+ ```
+ - The test results will be saved to a html file here: `./results/maps_cyclegan/latest_test/index.html`.
+
+ ### pix2pix train/test
+ - Download a pix2pix dataset (e.g.[facades](http://cmp.felk.cvut.cz/~tylecr1/facade/)):
+ ```bash
+ bash ./datasets/download_pix2pix_dataset.sh facades
+ ```
+ - To view training results and loss plots, run `python -m visdom.server` and click the URL http://localhost:8097.
+ - To log training progress and test images to W&B dashboard, set the `--use_wandb` flag with train and test script
+ - Train a model:
+ ```bash
+ #!./scripts/train_pix2pix.sh
+ python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA
+ ```
+ To see more intermediate results, check out `./checkpoints/facades_pix2pix/web/index.html`.
+
+ - Test the model (`bash ./scripts/test_pix2pix.sh`):
+ ```bash
+ #!./scripts/test_pix2pix.sh
+ python test.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA
+ ```
+ - The test results will be saved to a html file here: `./results/facades_pix2pix/test_latest/index.html`. You can find more scripts at `scripts` directory.
+ - To train and test pix2pix-based colorization models, please add `--model colorization` and `--dataset_mode colorization`. See our training [tips](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md#notes-on-colorization) for more details.
+
+ ### Apply a pre-trained model (CycleGAN)
+ - You can download a pretrained model (e.g. horse2zebra) with the following script:
+ ```bash
+ bash ./scripts/download_cyclegan_model.sh horse2zebra
+ ```
+ - The pretrained model is saved at `./checkpoints/{name}_pretrained/latest_net_G.pth`. Check [here](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/scripts/download_cyclegan_model.sh#L3) for all the available CycleGAN models.
+ - To test the model, you also need to download the horse2zebra dataset:
+ ```bash
+ bash ./datasets/download_cyclegan_dataset.sh horse2zebra
+ ```
+
+ - Then generate the results using
+ ```bash
+ python test.py --dataroot datasets/horse2zebra/testA --name horse2zebra_pretrained --model test --no_dropout
+ ```
+ - The option `--model test` is used for generating results of CycleGAN only for one side. This option will automatically set `--dataset_mode single`, which only loads the images from one set. On the contrary, using `--model cycle_gan` requires loading and generating results in both directions, which is sometimes unnecessary. The results will be saved at `./results/`. Use `--results_dir {directory_path_to_save_result}` to specify the results directory.
+
+ - For pix2pix and your own models, you need to explicitly specify `--netG`, `--norm`, `--no_dropout` to match the generator architecture of the trained model. See this [FAQ](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md#runtimeerror-errors-in-loading-state_dict-812-671461-296) for more details.
+
+ ### Apply a pre-trained model (pix2pix)
+ Download a pre-trained model with `./scripts/download_pix2pix_model.sh`.
+
+ - Check [here](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/scripts/download_pix2pix_model.sh#L3) for all the available pix2pix models. For example, if you would like to download label2photo model on the Facades dataset,
+ ```bash
+ bash ./scripts/download_pix2pix_model.sh facades_label2photo
+ ```
+ - Download the pix2pix facades datasets:
+ ```bash
+ bash ./datasets/download_pix2pix_dataset.sh facades
+ ```
+ - Then generate the results using
+ ```bash
+ python test.py --dataroot ./datasets/facades/ --direction BtoA --model pix2pix --name facades_label2photo_pretrained
+ ```
+ - Note that we specified `--direction BtoA` as Facades dataset's A to B direction is photos to labels.
+
+ - If you would like to apply a pre-trained model to a collection of input images (rather than image pairs), please use `--model test` option. See `./scripts/test_single.sh` for how to apply a model to Facade label maps (stored in the directory `facades/testB`).
+
+ - See a list of currently available models at `./scripts/download_pix2pix_model.sh`
+
+ ## [Docker](docs/docker.md)
+ We provide the pre-built Docker image and Dockerfile that can run this code repo. See [docker](docs/docker.md).
+
+ ## [Datasets](docs/datasets.md)
+ Download pix2pix/CycleGAN datasets and create your own datasets.
+
+ ## [Training/Test Tips](docs/tips.md)
+ Best practice for training and testing your models.
+
+ ## [Frequently Asked Questions](docs/qa.md)
+ Before you post a new question, please first look at the above Q & A and existing GitHub issues.
+
+ ## Custom Model and Dataset
+ If you plan to implement custom models and dataset for your new applications, we provide a dataset [template](data/template_dataset.py) and a model [template](models/template_model.py) as a starting point.
+
+ ## [Code structure](docs/overview.md)
+ To help users better understand and use our code, we briefly overview the functionality and implementation of each package and each module.
+
+ ## Pull Request
+ You are always welcome to contribute to this repository by sending a [pull request](https://help.github.com/articles/about-pull-requests/).
+ Please run `flake8 --ignore E501 .` and `python ./scripts/test_before_push.py` before you commit the code. Please also update the code structure [overview](docs/overview.md) accordingly if you add or remove files.
+
+ ## Citation
+ If you use this code for your research, please cite our papers.
+ ```
+ @inproceedings{CycleGAN2017,
+ title={Unpaired Image-to-Image Translation using Cycle-Consistent Adversarial Networks},
+ author={Zhu, Jun-Yan and Park, Taesung and Isola, Phillip and Efros, Alexei A},
+ booktitle={Computer Vision (ICCV), 2017 IEEE International Conference on},
+ year={2017}
+ }
+
+
+ @inproceedings{isola2017image,
+ title={Image-to-Image Translation with Conditional Adversarial Networks},
+ author={Isola, Phillip and Zhu, Jun-Yan and Zhou, Tinghui and Efros, Alexei A},
+ booktitle={Computer Vision and Pattern Recognition (CVPR), 2017 IEEE Conference on},
+ year={2017}
+ }
+ ```
+
+ ## Other Languages
+ [Spanish](docs/README_es.md)
+
+ ## Related Projects
+ **[contrastive-unpaired-translation](https://github.com/taesungp/contrastive-unpaired-translation) (CUT)**<br>
+ **[CycleGAN-Torch](https://github.com/junyanz/CycleGAN) |
+ [pix2pix-Torch](https://github.com/phillipi/pix2pix) | [pix2pixHD](https://github.com/NVIDIA/pix2pixHD)|
+ [BicycleGAN](https://github.com/junyanz/BicycleGAN) | [vid2vid](https://tcwang0509.github.io/vid2vid/) | [SPADE/GauGAN](https://github.com/NVlabs/SPADE)**<br>
+ **[iGAN](https://github.com/junyanz/iGAN) | [GAN Dissection](https://github.com/CSAILVision/GANDissect) | [GAN Paint](http://ganpaint.io/)**
+
+ ## Cat Paper Collection
+ If you love cats, and love reading cool graphics, vision, and learning papers, please check out the Cat Paper [Collection](https://github.com/junyanz/CatPapers).
+
+ ## Acknowledgments
+ Our code is inspired by [pytorch-DCGAN](https://github.com/pytorch/examples/tree/master/dcgan).
checkpoints/bw2color/115_net_D.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bcd750209fe24f61e92b68560120825b73082495a4640d9e8c01a9dadd7c52e5
+ size 11076872
checkpoints/bw2color/115_net_G.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cd30259282564b9026db75345ad672bff504350a552c19d3413c019d7ee7fdd9
+ size 217710092
checkpoints/bw2color/bw2color.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:110fc38ccd54ecae7ff81aa131f519a9eed1b839eaf5c4716cb194ee4a8d68e8
+ size 217710350
checkpoints/bw2color/latest_net_D.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a5b83cdcf7625bfb5e196dc9c6d47333a361095690db62f2ad1cf92d9a2986ee
+ size 11076950
checkpoints/bw2color/latest_net_G_A.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c607ba5bee252b896387d91691222f5d1e4df3f83e7cd27704629621330cab81
+ size 217710350
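The `.pth` entries above are Git LFS pointer files: each records the sha256 `oid` and byte `size` of the actual checkpoint stored in LFS. A small sketch for checking that a locally resolved file matches its pointer; the `oid` and `size` values below are copied from the `latest_net_G_A.pth` pointer, while the verification helper itself is not part of the commit:

```python
# Sketch: verify a downloaded checkpoint against its Git LFS pointer metadata.
import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk: int = 1 << 20) -> str:
    """Stream the file in 1 MiB chunks and return its hex sha256 digest."""
    h = hashlib.sha256()
    with path.open("rb") as f:
        while True:
            block = f.read(chunk)
            if not block:
                break
            h.update(block)
    return h.hexdigest()

ckpt = Path("checkpoints/bw2color/latest_net_G_A.pth")  # the resolved file, not the pointer
expected_oid = "c607ba5bee252b896387d91691222f5d1e4df3f83e7cd27704629621330cab81"
expected_size = 217710350

assert ckpt.stat().st_size == expected_size, "size mismatch"
assert sha256_of(ckpt) == expected_oid, "sha256 mismatch"
```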
checkpoints/bw2color/loss_log.txt ADDED
@@ -0,0 +1,17 @@
+ ================ Training Loss (Sat Nov 4 23:03:46 2023) ================
+ (epoch: 8, iters: 2, time: 0.198, data: 0.717) G_GAN: 1.010 G_L1: 7.272 D_real: 0.533 D_fake: 0.516
+ (epoch: 15, iters: 4, time: 0.199, data: 0.001) G_GAN: 1.228 G_L1: 4.958 D_real: 0.364 D_fake: 0.573
+ (epoch: 22, iters: 6, time: 0.227, data: 0.003) G_GAN: 1.043 G_L1: 4.291 D_real: 0.214 D_fake: 0.797
+ (epoch: 29, iters: 8, time: 2.188, data: 0.001) G_GAN: 0.901 G_L1: 2.646 D_real: 0.669 D_fake: 0.546
+ (epoch: 36, iters: 10, time: 0.257, data: 0.005) G_GAN: 1.026 G_L1: 2.751 D_real: 0.526 D_fake: 0.560
+ (epoch: 43, iters: 12, time: 0.242, data: 0.001) G_GAN: 1.380 G_L1: 3.614 D_real: 0.305 D_fake: 0.586
+ (epoch: 50, iters: 14, time: 0.256, data: 0.003) G_GAN: 0.709 G_L1: 2.387 D_real: 0.763 D_fake: 0.772
+ (epoch: 58, iters: 2, time: 2.606, data: 0.526) G_GAN: 0.973 G_L1: 3.211 D_real: 0.583 D_fake: 0.805
+ (epoch: 65, iters: 4, time: 0.239, data: 0.005) G_GAN: 0.849 G_L1: 2.521 D_real: 0.685 D_fake: 0.521
+ (epoch: 72, iters: 6, time: 0.227, data: 0.004) G_GAN: 0.768 G_L1: 2.132 D_real: 0.898 D_fake: 0.606
+ (epoch: 79, iters: 8, time: 0.186, data: 0.003) G_GAN: 0.764 G_L1: 1.370 D_real: 0.824 D_fake: 0.625
+ (epoch: 86, iters: 10, time: 1.048, data: 0.020) G_GAN: 1.167 G_L1: 3.618 D_real: 0.286 D_fake: 0.943
+ (epoch: 93, iters: 12, time: 0.256, data: 0.001) G_GAN: 0.800 G_L1: 1.420 D_real: 0.879 D_fake: 0.532
+ (epoch: 100, iters: 14, time: 0.250, data: 0.003) G_GAN: 0.689 G_L1: 1.218 D_real: 0.590 D_fake: 0.869
+ (epoch: 108, iters: 2, time: 0.168, data: 0.382) G_GAN: 0.871 G_L1: 2.465 D_real: 0.585 D_fake: 0.526
+ (epoch: 115, iters: 4, time: 1.077, data: 0.006) G_GAN: 0.732 G_L1: 1.168 D_real: 0.869 D_fake: 0.569
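Each line of `loss_log.txt` follows the fixed `(epoch: ..., iters: ..., time: ..., data: ...) G_GAN: ... G_L1: ... D_real: ... D_fake: ...` layout shown above. A minimal parsing sketch, assuming that layout stays exactly as printed (the script itself is not part of the commit):

```python
# Sketch: parse loss_log.txt lines of the form shown above into per-entry dicts.
import re
from pathlib import Path

LINE = re.compile(
    r"\(epoch: (?P<epoch>\d+), iters: (?P<iters>\d+), time: (?P<time>[\d.]+), data: (?P<data>[\d.]+)\) "
    r"G_GAN: (?P<G_GAN>[\d.]+) G_L1: (?P<G_L1>[\d.]+) D_real: (?P<D_real>[\d.]+) D_fake: (?P<D_fake>[\d.]+)"
)

records = []
for line in Path("checkpoints/bw2color/loss_log.txt").read_text().splitlines():
    m = LINE.search(line)
    if m:  # skips the "================ Training Loss ..." header line
        records.append({k: float(v) for k, v in m.groupdict().items()})

print(len(records), "entries; last G_L1 =", records[-1]["G_L1"])
```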
checkpoints/bw2color/opt.txt ADDED
@@ -0,0 +1,35 @@
+ ------------ Options -------------
+ align_data: False
+ aspect_ratio: 1.0
+ batchSize: 1
+ checkpoints_dir: ./checkpoints
+ dataroot: None
+ display_id: 1
+ display_winsize: 256
+ fineSize: 256
+ gpu_ids: []
+ how_many: 50
+ identity: 0.0
+ image_path: C:\Users\thera\Downloads\DataSet\09.png
+ input_nc: 3
+ isTrain: False
+ loadSize: 286
+ max_dataset_size: inf
+ model: colorization
+ nThreads: 2
+ n_layers_D: 3
+ name: bw2color
+ ndf: 64
+ ngf: 64
+ norm: instance
+ ntest: inf
+ output_nc: 3
+ phase: test
+ results_dir: ./results/
+ serial_batches: False
+ use_dropout: True
+ which_direction: AtoB
+ which_epoch: latest
+ which_model_netD: basic
+ which_model_netG: resnet_9blocks
+ -------------- End ----------------
checkpoints/bw2color/test_opt.txt ADDED
@@ -0,0 +1,45 @@
+ ----------------- Options ---------------
+ aspect_ratio: 1.0
+ batch_size: 1
+ checkpoints_dir: ./checkpoints
+ crop_size: None
+ dataroot: None
+ dataset_mode: colorization
+ direction: AtoB
+ display_winsize: 256
+ epoch: latest
+ eval: False
+ gpu_ids: -1 [default: 0]
+ how_many: 50
+ image_path: C:\Users\thera\Downloads\55Sin título.png [default: None]
+ init_gain: 0.02
+ init_type: normal
+ input_nc: 1
+ isTrain: False [default: None]
+ load_iter: 0 [default: 0]
+ load_size: None
+ max_dataset_size: inf
+ model: colorization
+ n_layers_D: 3
+ name: bw2color [default: experiment_name]
+ ndf: 64
+ netD: basic
+ netG: unet_256
+ ngf: 64
+ no_dropout: False
+ no_flip: False
+ norm: batch
+ ntest: inf
+ num_test: 50
+ num_threads: 4
+ output_nc: 2
+ phase: test
+ preprocess: resize_and_crop
+ results_dir: ./results/
+ serial_batches: False
+ suffix:
+ use_wandb: False
+ verbose: False
+ wandb_project_name: CycleGAN-and-pix2pix
+ which_epoch: latest
+ ----------------- End -------------------
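Both `opt.txt` and `test_opt.txt` above use a plain `key: value` dump, framed by dashed header/footer lines, with optional `[default: ...]` annotations on overridden options. A small sketch for reading such a dump back into a dict; the helper is hypothetical and not part of the commit:

```python
# Sketch: load an options dump like opt.txt / test_opt.txt into a dict,
# skipping the dashed ruler lines and stripping "[default: ...]" annotations.
from pathlib import Path

def load_opts(path: str) -> dict:
    opts = {}
    for line in Path(path).read_text().splitlines():
        if ":" not in line or line.strip().startswith("-"):
            continue  # skip "----- Options -----" / "----- End -----" rulers
        key, _, value = line.partition(":")
        value = value.split("[default:")[0].strip()  # drop the default annotation
        opts[key.strip()] = value
    return opts

opts = load_opts("checkpoints/bw2color/test_opt.txt")
print(opts["netG"], opts["norm"], opts["model"])  # unet_256 batch colorization
```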
checkpoints/bw2color/web/images/epoch004_fake_A.png ADDED
checkpoints/bw2color/web/images/epoch004_fake_B.png ADDED
checkpoints/bw2color/web/images/epoch004_idt_A.png ADDED
checkpoints/bw2color/web/images/epoch004_idt_B.png ADDED
checkpoints/bw2color/web/images/epoch004_real_A.png ADDED
checkpoints/bw2color/web/images/epoch004_real_B.png ADDED
checkpoints/bw2color/web/images/epoch004_rec_A.png ADDED
checkpoints/bw2color/web/images/epoch004_rec_B.png ADDED
checkpoints/bw2color/web/images/epoch008_fake_A.png ADDED
checkpoints/bw2color/web/images/epoch008_fake_B.png ADDED
checkpoints/bw2color/web/images/epoch008_idt_A.png ADDED
checkpoints/bw2color/web/images/epoch008_idt_B.png ADDED
checkpoints/bw2color/web/images/epoch008_real_A.png ADDED
checkpoints/bw2color/web/images/epoch008_real_B.png ADDED
checkpoints/bw2color/web/images/epoch008_rec_A.png ADDED
checkpoints/bw2color/web/images/epoch008_rec_B.png ADDED
checkpoints/bw2color/web/images/epoch012_fake_A.png ADDED
checkpoints/bw2color/web/images/epoch012_fake_B.png ADDED
checkpoints/bw2color/web/images/epoch012_idt_A.png ADDED
checkpoints/bw2color/web/images/epoch012_idt_B.png ADDED
checkpoints/bw2color/web/images/epoch012_real_A.png ADDED
checkpoints/bw2color/web/images/epoch012_real_B.png ADDED
checkpoints/bw2color/web/images/epoch012_rec_A.png ADDED
checkpoints/bw2color/web/images/epoch012_rec_B.png ADDED
checkpoints/bw2color/web/images/epoch016_fake_A.png ADDED
checkpoints/bw2color/web/images/epoch016_fake_B.png ADDED
checkpoints/bw2color/web/images/epoch016_idt_A.png ADDED
checkpoints/bw2color/web/images/epoch016_idt_B.png ADDED
checkpoints/bw2color/web/images/epoch016_real_A.png ADDED
checkpoints/bw2color/web/images/epoch016_real_B.png ADDED
checkpoints/bw2color/web/images/epoch016_rec_A.png ADDED
checkpoints/bw2color/web/images/epoch016_rec_B.png ADDED
checkpoints/bw2color/web/images/epoch029_fake_B_rgb.png ADDED
checkpoints/bw2color/web/images/epoch029_real_A.png ADDED
checkpoints/bw2color/web/images/epoch029_real_B_rgb.png ADDED
checkpoints/bw2color/web/images/epoch058_fake_B_rgb.png ADDED