Alican committed on
Commit
13090e3
1 Parent(s): 697c3d3

Version 2 has been released.

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. LICENSE +58 -0
  2. app.py +119 -56
  3. cache.gif +0 -0
  4. cache.mp4 +0 -0
  5. data/__init__.py +93 -0
  6. data/__pycache__/__init__.cpython-38.pyc +0 -0
  7. data/__pycache__/__init__.cpython-39.pyc +0 -0
  8. data/__pycache__/base_dataset.cpython-38.pyc +0 -0
  9. data/__pycache__/base_dataset.cpython-39.pyc +0 -0
  10. data/__pycache__/image_folder.cpython-38.pyc +0 -0
  11. data/__pycache__/image_folder.cpython-39.pyc +0 -0
  12. data/__pycache__/single_dataset.cpython-38.pyc +0 -0
  13. data/__pycache__/single_dataset.cpython-39.pyc +0 -0
  14. data/base_dataset.py +167 -0
  15. data/image_folder.py +65 -0
  16. data/single_dataset.py +40 -0
  17. examples/GANexample1.ipynb +0 -0
  18. examples/pixelArt/__pycache__/combine.cpython-38.pyc +0 -0
  19. examples/pixelArt/combine.py +0 -29
  20. img/example_1.jpg +0 -0
  21. img/logo.jpg +0 -0
  22. img/method_1.png +0 -0
  23. imgs/dragon.jpg +0 -0
  24. imgs/landscape.jpg +0 -0
  25. imgs/landscape_2.jpg +0 -0
  26. imgs/landscape_3.jpg +0 -0
  27. methods/__pycache__/img2pixl.cpython-38.pyc +0 -0
  28. methods/__pycache__/media.cpython-38.pyc +0 -0
  29. methods/media.py +0 -35
  30. models/__init__.py +67 -0
  31. models/__pycache__/__init__.cpython-38.pyc +0 -0
  32. models/__pycache__/__init__.cpython-39.pyc +0 -0
  33. models/__pycache__/base_model.cpython-38.pyc +0 -0
  34. models/__pycache__/base_model.cpython-39.pyc +0 -0
  35. models/__pycache__/networks.cpython-38.pyc +0 -0
  36. models/__pycache__/networks.cpython-39.pyc +0 -0
  37. models/__pycache__/test_model.cpython-38.pyc +0 -0
  38. models/__pycache__/test_model.cpython-39.pyc +0 -0
  39. models/base_model.py +230 -0
  40. models/networks.py +616 -0
  41. models/pixera_CYCLEGAN/latest_net_G.pth +3 -0
  42. models/pixera_CYCLEGAN/test_opt.txt +42 -0
  43. models/test_model.py +69 -0
  44. options/__init__.py +1 -0
  45. options/__pycache__/__init__.cpython-38.pyc +0 -0
  46. options/__pycache__/__init__.cpython-39.pyc +0 -0
  47. options/__pycache__/base_options.cpython-38.pyc +0 -0
  48. options/__pycache__/base_options.cpython-39.pyc +0 -0
  49. options/__pycache__/test_options.cpython-38.pyc +0 -0
  50. options/__pycache__/test_options.cpython-39.pyc +0 -0
LICENSE ADDED
@@ -0,0 +1,58 @@
+ Copyright (c) 2017, Jun-Yan Zhu and Taesung Park
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+ --------------------------- LICENSE FOR pix2pix --------------------------------
+ BSD License
+
+ For pix2pix software
+ Copyright (c) 2016, Phillip Isola and Jun-Yan Zhu
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ ----------------------------- LICENSE FOR DCGAN --------------------------------
+ BSD License
+
+ For dcgan.torch software
+
+ Copyright (c) 2015, Facebook, Inc. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+ Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+ Neither the name Facebook nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
app.py CHANGED
@@ -1,62 +1,125 @@
  import os
  import cv2
  import torch
- import warnings
  import numpy as np
  import gradio as gr
- import paddlehub as hub
- from PIL import Image
- from methods.img2pixl import pixL
- from examples.pixelArt.combine import combine
- from methods.media import Media
-
- warnings.filterwarnings("ignore")
-
- U2Net = hub.Module(name='U2Net')
- device = "cuda" if torch.cuda.is_available() else "cpu"
- face2paint = torch.hub.load("bryandlee/animegan2-pytorch:main", "face2paint", device=device, size=512)
- model = torch.hub.load("bryandlee/animegan2-pytorch", "generator", device=device).eval()
-
-
- def initilize(media, pixel_size, checkbox1):
-     # Author: Alican Akca
-     if media.name.endswith('.gif'):
-         return Media().split(media.name, pixel_size, 'gif')
-     elif media.name.endswith('.mp4'):
-         return None  # Media().split(media.name, pixel_size, "video")
-     else:
-         media = Image.open(media.name).convert("RGB")
-         media = cv2.cvtColor(np.asarray(face2paint(model, media)), cv2.COLOR_BGR2RGB)
-         if checkbox1:
-             result = U2Net.Segmentation(images=[media],
-                                         paths=None,
-                                         batch_size=1,
-                                         input_size=320,
-                                         output_dir='output',
-                                         visualization=True)
-             result = combine().combiner(images=pixL().toThePixL([result[0]['front'][:, :, ::-1], result[0]['mask']],
-                                                                 pixel_size),
-                                         background_image=media)
-         else:
-             result = pixL().toThePixL([media], pixel_size)
-         result = Image.fromarray(result)
-         result.save('cache.png')
-         return [None, result, 'cache.png']
-
- inputs = [gr.File(label="Media"),
-           gr.Slider(4, 100, value=12, step=2, label="Pixel Size"),
-           gr.Checkbox(label="Object-Oriented Inference", value=False)]
-
- outputs = [gr.Video(label="Pixed Media"),
-            gr.Image(label="Pixed Media"),
-            gr.File(label="Download")]
-
- title = "Pixera: Create your own Pixel Art"
- description = """Object-Oriented Inference is currently only available for images. Also, Video Processing has currently suspended."""
-
- gr.Interface(fn=initilize,
-              inputs=inputs,
-              outputs=outputs,
-              title=title,
-              description=description).launch()
  import os
  import cv2
  import torch
+ import random
  import numpy as np
  import gradio as gr
+ from util import util
+ from util.img2pixl import pixL
+ from data import create_dataset
+ from models import create_model
+ from options.test_options import TestOptions
+ from wandb.sdk.data_types.image import Image

+ opt = TestOptions().parse()
+ opt.num_threads = 0
+ opt.batch_size = 1
+ opt.display_id = -1
+ opt.no_dropout = True
+
+ model = create_model(opt)
+ model.setup(opt)
+
+ num_inferences = 0
+
+ def preprocess(image):
+
+     im_type = None
+     imgH, imgW = image.shape[:2]
+     aspect_ratio = imgW / imgH
+
+     if 0.75 <= aspect_ratio <= 1.75:
+
+         image = cv2.resize(image, (512, 512))
+         image = pixL().toThePixL([image], 6)
+         image = image[0]
+
+         image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+         image = np.asarray([image])
+         image = np.transpose(image, (0, 3, 1, 2))
+
+         image = inference(image)
+
+         return image
+
+     elif 1.75 <= aspect_ratio:  # upper boundary
+
+         image = cv2.resize(image, (1024, 512))
+         middlePoint = image.shape[1] // 2
+         half_1 = image[:, :middlePoint]
+         half_2 = image[:, middlePoint:]
+         images = pixL().toThePixL([half_1, half_2], 6)
+
+         # run each half through the generator and keep the results
+         halves = []
+         for half in images:
+             half = np.asarray([half])
+             half = np.transpose(half, (0, 3, 1, 2))
+             halves.append(postprocess(inference(half)))
+
+         image = cv2.hconcat([halves[0], halves[1]])
+         image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+         return image
+
+     elif 0.00 <= aspect_ratio <= 0.75:
+
+         image = cv2.resize(image, (512, 1024))
+         middlePoint = image.shape[0] // 2
+         half_1 = image[:middlePoint, :]
+         half_2 = image[middlePoint:, :]
+         images = pixL().toThePixL([half_1, half_2], 6)
+
+         # run each half through the generator and keep the results
+         halves = []
+         for half in images:
+             half = np.asarray([half])
+             half = np.transpose(half, (0, 3, 1, 2))
+             halves.append(postprocess(inference(half)))
+
+         image = cv2.vconcat([halves[0], halves[1]])
+         image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+         return image
+
+ def postprocess(image):
+     image = util.tensor2im(image)
+     return image
+
+ def inference(image):
+     global model
+
+     data = {"A": None, "A_paths": None}
+     data['A'] = torch.FloatTensor(image)
+
+     model.set_input(data)
+     model.test()
+
+     image = model.get_current_visuals()['fake']
+
+     return image
+
+ def pixera_CYCLEGAN(image):
+     global num_inferences
+
+     image = preprocess(image)
+
+     image = postprocess(image)
+
+     num_inferences += 1
+     print(num_inferences)
+
+     return image
+
+ title_ = "Pixera: Create your own Pixel Art"
+ description_ = ""
+
+ examples_path = f"{os.getcwd()}/imgs"
+ examples_ = os.listdir(examples_path)
+ random.shuffle(examples_)
+ examples_ = [[f"{examples_path}/{example}"] for example in examples_]
+
+
+ demo = gr.Interface(pixera_CYCLEGAN, inputs=[gr.Image(show_label=False)],
+                     outputs=[gr.Image(show_label=False)],
+                     examples=examples_,
+                     title=title_,
+                     description=description_)
+ demo.launch(debug=True, share=True)
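For a quick local check of this pipeline, a minimal sketch; the input path and output name are illustrative, and it assumes the bundled pixera_CYCLEGAN checkpoint is in place:

    # Hypothetical smoke test for the pipeline above.
    import cv2

    img = cv2.imread("imgs/landscape.jpg")   # BGR uint8 array, as OpenCV loads it
    out = pixera_CYCLEGAN(img)               # pixelate, translate, convert to image
    cv2.imwrite("out.png", cv2.cvtColor(out, cv2.COLOR_RGB2BGR))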
cache.gif DELETED
Binary file (35.8 kB)
cache.mp4 DELETED
Binary file (6.99 kB)
data/__init__.py ADDED
@@ -0,0 +1,93 @@
+ """This package includes all the modules related to data loading and preprocessing
+
+ To add a custom dataset class called 'dummy', you need to add a file called 'dummy_dataset.py' and define a subclass 'DummyDataset' inherited from BaseDataset.
+ You need to implement four functions:
+     -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
+     -- <__len__>: return the size of dataset.
+     -- <__getitem__>: get a data point from data loader.
+     -- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
+
+ Now you can use the dataset class by specifying flag '--dataset_mode dummy'.
+ See our template dataset class 'template_dataset.py' for more details.
+ """
+ import importlib
+ import torch.utils.data
+ from data.base_dataset import BaseDataset
+
+
+ def find_dataset_using_name(dataset_name):
+     """Import the module "data/[dataset_name]_dataset.py".
+
+     In the file, the class called DatasetNameDataset() will
+     be instantiated. It has to be a subclass of BaseDataset,
+     and it is case-insensitive.
+     """
+     dataset_filename = "data." + dataset_name + "_dataset"
+     datasetlib = importlib.import_module(dataset_filename)
+
+     dataset = None
+     target_dataset_name = dataset_name.replace('_', '') + 'dataset'
+     for name, cls in datasetlib.__dict__.items():
+         if name.lower() == target_dataset_name.lower() \
+            and issubclass(cls, BaseDataset):
+             dataset = cls
+
+     if dataset is None:
+         raise NotImplementedError("In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase." % (dataset_filename, target_dataset_name))
+
+     return dataset
+
+
+ def get_option_setter(dataset_name):
+     """Return the static method <modify_commandline_options> of the dataset class."""
+     dataset_class = find_dataset_using_name(dataset_name)
+     return dataset_class.modify_commandline_options
+
+
+ def create_dataset(opt):
+     """Create a dataset given the option.
+
+     This function wraps the class CustomDatasetDataLoader.
+     This is the main interface between this package and 'train.py'/'test.py'
+
+     Example:
+         >>> from data import create_dataset
+         >>> dataset = create_dataset(opt)
+     """
+     data_loader = CustomDatasetDataLoader(opt)
+     dataset = data_loader.load_data()
+     return dataset
+
+
+ class CustomDatasetDataLoader():
+     """Wrapper class of Dataset class that performs multi-threaded data loading"""
+
+     def __init__(self, opt):
+         """Initialize this class
+
+         Step 1: create a dataset instance given the name [dataset_mode]
+         Step 2: create a multi-threaded data loader.
+         """
+         self.opt = opt
+         dataset_class = find_dataset_using_name(opt.dataset_mode)
+         self.dataset = dataset_class(opt)
+         print("dataset [%s] was created" % type(self.dataset).__name__)
+         self.dataloader = torch.utils.data.DataLoader(
+             self.dataset,
+             batch_size=opt.batch_size,
+             shuffle=not opt.serial_batches,
+             num_workers=int(opt.num_threads))
+
+     def load_data(self):
+         return self
+
+     def __len__(self):
+         """Return the number of data in the dataset"""
+         return min(len(self.dataset), self.opt.max_dataset_size)
+
+     def __iter__(self):
+         """Return a batch of data"""
+         for i, data in enumerate(self.dataloader):
+             if i * self.opt.batch_size >= self.opt.max_dataset_size:
+                 break
+             yield data
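Following the recipe in the docstring above, a minimal sketch of a custom dataset; the file name, class, and placeholder data are illustrative:

    # data/dummy_dataset.py -- selected at runtime via '--dataset_mode dummy'
    from data.base_dataset import BaseDataset

    class DummyDataset(BaseDataset):
        """Ten integer data points; stands in for real image loading."""

        @staticmethod
        def modify_commandline_options(parser, is_train):
            return parser  # no extra options needed

        def __init__(self, opt):
            BaseDataset.__init__(self, opt)
            self.items = list(range(10))

        def __getitem__(self, index):
            return {'A': self.items[index], 'A_paths': str(index)}

        def __len__(self):
            return len(self.items)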
data/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (4.01 kB)
data/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (3.99 kB)
data/__pycache__/base_dataset.cpython-38.pyc ADDED
Binary file (6.16 kB)
data/__pycache__/base_dataset.cpython-39.pyc ADDED
Binary file (6.15 kB)
data/__pycache__/image_folder.cpython-38.pyc ADDED
Binary file (2.49 kB)
data/__pycache__/image_folder.cpython-39.pyc ADDED
Binary file (2.42 kB)
data/__pycache__/single_dataset.cpython-38.pyc ADDED
Binary file (1.99 kB)
data/__pycache__/single_dataset.cpython-39.pyc ADDED
Binary file (1.97 kB)
data/base_dataset.py ADDED
@@ -0,0 +1,167 @@
+ """This module implements an abstract base class (ABC) 'BaseDataset' for datasets.
+
+ It also includes common transformation functions (e.g., get_transform, __scale_width), which can be later used in subclasses.
+ """
+ import random
+ import numpy as np
+ import torch.utils.data as data
+ from PIL import Image
+ import torchvision.transforms as transforms
+ from abc import ABC, abstractmethod
+
+
+ class BaseDataset(data.Dataset, ABC):
+     """This class is an abstract base class (ABC) for datasets.
+
+     To create a subclass, you need to implement the following four functions:
+     -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
+     -- <__len__>: return the size of dataset.
+     -- <__getitem__>: get a data point.
+     -- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
+     """
+
+     def __init__(self, opt):
+         """Initialize the class; save the options in the class
+
+         Parameters:
+             opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
+         """
+         self.opt = opt
+         self.root = opt.dataroot
+
+     @staticmethod
+     def modify_commandline_options(parser, is_train):
+         """Add new dataset-specific options, and rewrite default values for existing options.
+
+         Parameters:
+             parser -- original option parser
+             is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
+
+         Returns:
+             the modified parser.
+         """
+         return parser
+
+     @abstractmethod
+     def __len__(self):
+         """Return the total number of images in the dataset."""
+         return 0
+
+     @abstractmethod
+     def __getitem__(self, index):
+         """Return a data point and its metadata information.
+
+         Parameters:
+             index -- a random integer for data indexing
+
+         Returns:
+             a dictionary of data with their names. It usually contains the data itself and its metadata information.
+         """
+         pass
+
+
+ def get_params(opt, size):
+     w, h = size
+     new_h = h
+     new_w = w
+     if opt.preprocess == 'resize_and_crop':
+         new_h = new_w = opt.load_size
+     elif opt.preprocess == 'scale_width_and_crop':
+         new_w = opt.load_size
+         new_h = opt.load_size * h // w
+
+     x = random.randint(0, np.maximum(0, new_w - opt.crop_size))
+     y = random.randint(0, np.maximum(0, new_h - opt.crop_size))
+
+     flip = random.random() > 0.5
+
+     return {'crop_pos': (x, y), 'flip': flip}
+
+
+ def get_transform(opt, params=None, grayscale=False, method=transforms.InterpolationMode.BICUBIC, convert=True):
+     transform_list = []
+     if grayscale:
+         transform_list.append(transforms.Grayscale(1))
+     if 'resize' in opt.preprocess:
+         osize = [opt.load_size, opt.load_size]
+         transform_list.append(transforms.Resize(osize, method))
+     elif 'scale_width' in opt.preprocess:
+         transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size, opt.crop_size, method)))
+
+     if 'crop' in opt.preprocess:
+         if params is None:
+             transform_list.append(transforms.RandomCrop(opt.crop_size))
+         else:
+             transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.crop_size)))
+
+     if opt.preprocess == 'none':
+         transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base=4, method=method)))
+
+     if not opt.no_flip:
+         if params is None:
+             transform_list.append(transforms.RandomHorizontalFlip())
+         elif params['flip']:
+             transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip'])))
+
+     if convert:
+         transform_list += [transforms.ToTensor()]
+         if grayscale:
+             transform_list += [transforms.Normalize((0.5,), (0.5,))]
+         else:
+             transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
+     return transforms.Compose(transform_list)
+
+
+ def __transforms2pil_resize(method):
+     mapper = {transforms.InterpolationMode.BILINEAR: Image.BILINEAR,
+               transforms.InterpolationMode.BICUBIC: Image.BICUBIC,
+               transforms.InterpolationMode.NEAREST: Image.NEAREST,
+               transforms.InterpolationMode.LANCZOS: Image.LANCZOS}
+     return mapper[method]
+
+
+ def __make_power_2(img, base, method=transforms.InterpolationMode.BICUBIC):
+     method = __transforms2pil_resize(method)
+     ow, oh = img.size
+     h = int(round(oh / base) * base)
+     w = int(round(ow / base) * base)
+     if h == oh and w == ow:
+         return img
+
+     __print_size_warning(ow, oh, w, h)
+     return img.resize((w, h), method)
+
+
+ def __scale_width(img, target_size, crop_size, method=transforms.InterpolationMode.BICUBIC):
+     method = __transforms2pil_resize(method)
+     ow, oh = img.size
+     if ow == target_size and oh >= crop_size:
+         return img
+     w = target_size
+     h = int(max(target_size * oh / ow, crop_size))
+     return img.resize((w, h), method)
+
+
+ def __crop(img, pos, size):
+     ow, oh = img.size
+     x1, y1 = pos
+     tw = th = size
+     if (ow > tw or oh > th):
+         return img.crop((x1, y1, x1 + tw, y1 + th))
+     return img
+
+
+ def __flip(img, flip):
+     if flip:
+         return img.transpose(Image.FLIP_LEFT_RIGHT)
+     return img
+
+
+ def __print_size_warning(ow, oh, w, h):
+     """Print warning information about image size (only print once)"""
+     if not hasattr(__print_size_warning, 'has_printed'):
+         print("The image size needs to be a multiple of 4. "
+               "The loaded image size was (%d, %d), so it was adjusted to "
+               "(%d, %d). This adjustment will be done to all images "
+               "whose sizes are not multiples of 4" % (ow, oh, w, h))
+         __print_size_warning.has_printed = True
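A short sketch of how get_params and get_transform cooperate so that one set of random crop/flip decisions can be reused across paired images; the option values here are illustrative:

    from argparse import Namespace
    from PIL import Image
    from data.base_dataset import get_params, get_transform

    opt = Namespace(preprocess='resize_and_crop', load_size=286, crop_size=256, no_flip=False)
    A = Image.open('imgs/landscape.jpg').convert('RGB')
    params = get_params(opt, A.size)        # draw crop position and flip once
    transform = get_transform(opt, params)  # reuse the same params to keep a pair aligned
    A_tensor = transform(A)                 # 3x256x256 float tensor in [-1, 1]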
data/image_folder.py ADDED
@@ -0,0 +1,65 @@
+ """A modified image folder class
+
+ We modify the official PyTorch image folder (https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py)
+ so that this class can load images from both current directory and its subdirectories.
+ """
+
+ import torch.utils.data as data
+
+ from PIL import Image
+ import os
+
+ IMG_EXTENSIONS = [
+     '.jpg', '.JPG', '.jpeg', '.JPEG',
+     '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
+     '.tif', '.TIF', '.tiff', '.TIFF',
+ ]
+
+
+ def is_image_file(filename):
+     return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
+
+
+ def make_dataset(dir, max_dataset_size=float("inf")):
+     images = []
+     assert os.path.isdir(dir), '%s is not a valid directory' % dir
+
+     for root, _, fnames in sorted(os.walk(dir)):
+         for fname in fnames:
+             if is_image_file(fname):
+                 path = os.path.join(root, fname)
+                 images.append(path)
+     return images[:min(max_dataset_size, len(images))]
+
+
+ def default_loader(path):
+     return Image.open(path).convert('RGB')
+
+
+ class ImageFolder(data.Dataset):
+
+     def __init__(self, root, transform=None, return_paths=False,
+                  loader=default_loader):
+         imgs = make_dataset(root)
+         if len(imgs) == 0:
+             raise(RuntimeError("Found 0 images in: " + root + "\n"
+                                "Supported image extensions are: " + ",".join(IMG_EXTENSIONS)))
+
+         self.root = root
+         self.imgs = imgs
+         self.transform = transform
+         self.return_paths = return_paths
+         self.loader = loader
+
+     def __getitem__(self, index):
+         path = self.imgs[index]
+         img = self.loader(path)
+         if self.transform is not None:
+             img = self.transform(img)
+         if self.return_paths:
+             return img, path
+         else:
+             return img
+
+     def __len__(self):
+         return len(self.imgs)
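For instance, make_dataset can be used on its own to enumerate every image below a directory; the path is illustrative:

    from data.image_folder import make_dataset

    paths = make_dataset('imgs')   # recursive walk, filtered by IMG_EXTENSIONS
    print(len(paths), sorted(paths)[:2])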
data/single_dataset.py ADDED
@@ -0,0 +1,40 @@
+ from data.base_dataset import BaseDataset, get_transform
+ from data.image_folder import make_dataset
+ from PIL import Image
+
+
+ class SingleDataset(BaseDataset):
+     """This dataset class can load a set of images specified by the path --dataroot /path/to/data.
+
+     It can be used for generating CycleGAN results only for one side with the model option '--model test'.
+     """
+
+     def __init__(self, opt):
+         """Initialize this dataset class.
+
+         Parameters:
+             opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
+         """
+         BaseDataset.__init__(self, opt)
+         self.A_paths = sorted(make_dataset(opt.dataroot, opt.max_dataset_size))
+         input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc
+         self.transform = get_transform(opt, grayscale=(input_nc == 1))
+
+     def __getitem__(self, index):
+         """Return a data point and its metadata information.
+
+         Parameters:
+             index -- a random integer for data indexing
+
+         Returns a dictionary that contains A and A_paths
+             A (tensor) -- an image in one domain
+             A_paths (str) -- the path of the image
+         """
+         A_path = self.A_paths[index]
+         A_img = Image.open(A_path).convert('RGB')
+         A = self.transform(A_img)
+         return {'A': A, 'A_paths': A_path}
+
+     def __len__(self):
+         """Return the total number of images in the dataset."""
+         return len(self.A_paths)
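End to end, this class is reached through create_dataset; a sketch assuming the usual test flags are passed on the command line:

    from options.test_options import TestOptions
    from data import create_dataset

    opt = TestOptions().parse()     # e.g. --dataroot imgs --dataset_mode single
    dataset = create_dataset(opt)   # resolves data/single_dataset.py -> SingleDataset
    for batch in dataset:
        print(batch['A'].shape, batch['A_paths'])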
examples/GANexample1.ipynb DELETED
The diff for this file is too large to render.
examples/pixelArt/__pycache__/combine.cpython-38.pyc DELETED
Binary file (1.27 kB)
examples/pixelArt/combine.py DELETED
@@ -1,29 +0,0 @@
- import cv2
- import numpy as np
-
- class combine:
-     # Author: Alican Akca
-     def __init__(self, size=(400, 300), images=[], background_image=None):
-         self.size = size
-         self.images = images
-         self.background_image = background_image
-
-     def combiner(self, images, background_image):
-         original = images[0]
-         masked = images[1]
-         background = cv2.resize(background_image, (images[0].shape[1], images[0].shape[0]))
-         result = blend_images_using_mask(original, background, masked)
-         return result
-
- def mix_pixel(pix_1, pix_2, perc):
-
-     return (perc / 255 * pix_1) + ((255 - perc) / 255 * pix_2)
-
- def blend_images_using_mask(img_orig, img_for_overlay, img_mask):
-
-     if len(img_mask.shape) != 3:
-         img_mask = cv2.cvtColor(img_mask, cv2.COLOR_GRAY2BGR)
-
-     img_res = mix_pixel(img_orig, img_for_overlay, img_mask)
-
-     return cv2.cvtColor(img_res.astype(np.uint8), cv2.COLOR_BGR2RGB)
img/example_1.jpg DELETED
Binary file (40.6 kB)
img/logo.jpg DELETED
Binary file (26.3 kB)
img/method_1.png DELETED
Binary file (322 kB)
imgs/dragon.jpg ADDED
imgs/landscape.jpg ADDED
imgs/landscape_2.jpg ADDED
imgs/landscape_3.jpg ADDED
methods/__pycache__/img2pixl.cpython-38.pyc DELETED
Binary file (2.38 kB)
methods/__pycache__/media.cpython-38.pyc DELETED
Binary file (1.33 kB)
methods/media.py DELETED
@@ -1,35 +0,0 @@
1
- import cv2
2
- import torch
3
- import imageio
4
- from methods.img2pixl import pixL
5
-
6
-
7
- device = "cuda" if torch.cuda.is_available() else "cpu"
8
- face2paint = torch.hub.load("bryandlee/animegan2-pytorch:main", "face2paint", device=device, size=512)
9
- model = torch.hub.load("bryandlee/animegan2-pytorch", "generator", device=device).eval()
10
-
11
- class Media:
12
- #Author: Alican Akca
13
- def __init__(self,fname = None,pixel_size = None):
14
- self.fname = fname
15
- self.pixel_size = pixel_size
16
-
17
- def split(self,fname,pixel_size, mediaType):
18
- media = cv2.VideoCapture(fname)
19
- frames = []
20
- while True:
21
- ret, cv2Image = media.read()
22
- if not ret:
23
- break
24
- frames.append(cv2Image)
25
- frames = pixL().toThePixL(frames, pixel_size)
26
- if mediaType == 'gif':
27
- imageio.mimsave('cache.gif', frames)
28
- return [None, 'cache.gif', 'cache.gif']
29
- else:
30
- output_file = "cache.mp4"
31
- out = cv2.VideoWriter(output_file,cv2.VideoWriter_fourcc(*'h264'), 15, (frames[0].shape[1],frames[0].shape[0]))
32
- for i in range(len(frames)):
33
- out.write(frames[i])
34
- out.release()
35
- return [output_file, None, output_file]
models/__init__.py ADDED
@@ -0,0 +1,67 @@
+ """This package contains modules related to objective functions, optimizations, and network architectures.
+
+ To add a custom model class called 'dummy', you need to add a file called 'dummy_model.py' and define a subclass DummyModel inherited from BaseModel.
+ You need to implement the following five functions:
+     -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
+     -- <set_input>: unpack data from dataset and apply preprocessing.
+     -- <forward>: produce intermediate results.
+     -- <optimize_parameters>: calculate loss, gradients, and update network weights.
+     -- <modify_commandline_options>: (optionally) add model-specific options and set default options.
+
+ In the function <__init__>, you need to define four lists:
+     -- self.loss_names (str list): specify the training losses that you want to plot and save.
+     -- self.model_names (str list): define networks used in our training.
+     -- self.visual_names (str list): specify the images that you want to display and save.
+     -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
+
+ Now you can use the model class by specifying flag '--model dummy'.
+ See our template model class 'template_model.py' for more details.
+ """
+
+ import importlib
+ from models.base_model import BaseModel
+
+
+ def find_model_using_name(model_name):
+     """Import the module "models/[model_name]_model.py".
+
+     In the file, the class called DatasetNameModel() will
+     be instantiated. It has to be a subclass of BaseModel,
+     and it is case-insensitive.
+     """
+     model_filename = "models." + model_name + "_model"
+     modellib = importlib.import_module(model_filename)
+     model = None
+     target_model_name = model_name.replace('_', '') + 'model'
+     for name, cls in modellib.__dict__.items():
+         if name.lower() == target_model_name.lower() \
+            and issubclass(cls, BaseModel):
+             model = cls
+
+     if model is None:
+         print("In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase." % (model_filename, target_model_name))
+         exit(0)
+
+     return model
+
+
+ def get_option_setter(model_name):
+     """Return the static method <modify_commandline_options> of the model class."""
+     model_class = find_model_using_name(model_name)
+     return model_class.modify_commandline_options
+
+
+ def create_model(opt):
+     """Create a model given the option.
+
+     This function wraps the model class.
+     This is the main interface between this package and 'train.py'/'test.py'
+
+     Example:
+         >>> from models import create_model
+         >>> model = create_model(opt)
+     """
+     model = find_model_using_name(opt.model)
+     instance = model(opt)
+     print("model [%s] was created" % type(instance).__name__)
+     return instance
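This factory is exactly what the new app.py calls at startup; a condensed sketch with illustrative flags:

    from options.test_options import TestOptions
    from models import create_model

    opt = TestOptions().parse()   # e.g. --model test --name pixera_CYCLEGAN --no_dropout
    model = create_model(opt)     # resolves models/test_model.py -> TestModel
    model.setup(opt)              # loads the checkpoint and prints the networks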
models/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (3.25 kB)
models/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (3.25 kB)
models/__pycache__/base_model.cpython-38.pyc ADDED
Binary file (10 kB)
models/__pycache__/base_model.cpython-39.pyc ADDED
Binary file (10 kB)
models/__pycache__/networks.cpython-38.pyc ADDED
Binary file (23.3 kB)
models/__pycache__/networks.cpython-39.pyc ADDED
Binary file (23.3 kB)
models/__pycache__/test_model.cpython-38.pyc ADDED
Binary file (3.15 kB)
models/__pycache__/test_model.cpython-39.pyc ADDED
Binary file (3.13 kB)
models/base_model.py ADDED
@@ -0,0 +1,230 @@
+ import os
+ import torch
+ from collections import OrderedDict
+ from abc import ABC, abstractmethod
+ from . import networks
+
+
+ class BaseModel(ABC):
+     """This class is an abstract base class (ABC) for models.
+     To create a subclass, you need to implement the following five functions:
+         -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
+         -- <set_input>: unpack data from dataset and apply preprocessing.
+         -- <forward>: produce intermediate results.
+         -- <optimize_parameters>: calculate losses, gradients, and update network weights.
+         -- <modify_commandline_options>: (optionally) add model-specific options and set default options.
+     """
+
+     def __init__(self, opt):
+         """Initialize the BaseModel class.
+
+         Parameters:
+             opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
+
+         When creating your custom class, you need to implement your own initialization.
+         In this function, you should first call <BaseModel.__init__(self, opt)>
+         Then, you need to define four lists:
+             -- self.loss_names (str list): specify the training losses that you want to plot and save.
+             -- self.model_names (str list): define networks used in our training.
+             -- self.visual_names (str list): specify the images that you want to display and save.
+             -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
+         """
+         self.opt = opt
+         self.gpu_ids = opt.gpu_ids
+         self.isTrain = opt.isTrain
+         self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')  # get device name: CPU or GPU
+         self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)  # save all the checkpoints to save_dir
+         if opt.preprocess != 'scale_width':  # with [scale_width], input images might have different sizes, which hurts the performance of cudnn.benchmark.
+             torch.backends.cudnn.benchmark = True
+         self.loss_names = []
+         self.model_names = []
+         self.visual_names = []
+         self.optimizers = []
+         self.image_paths = []
+         self.metric = 0  # used for learning rate policy 'plateau'
+
+     @staticmethod
+     def modify_commandline_options(parser, is_train):
+         """Add new model-specific options, and rewrite default values for existing options.
+
+         Parameters:
+             parser -- original option parser
+             is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
+
+         Returns:
+             the modified parser.
+         """
+         return parser
+
+     @abstractmethod
+     def set_input(self, input):
+         """Unpack input data from the dataloader and perform necessary pre-processing steps.
+
+         Parameters:
+             input (dict): includes the data itself and its metadata information.
+         """
+         pass
+
+     @abstractmethod
+     def forward(self):
+         """Run forward pass; called by both functions <optimize_parameters> and <test>."""
+         pass
+
+     @abstractmethod
+     def optimize_parameters(self):
+         """Calculate losses, gradients, and update network weights; called in every training iteration"""
+         pass
+
+     def setup(self, opt):
+         """Load and print networks; create schedulers
+
+         Parameters:
+             opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
+         """
+         if self.isTrain:
+             self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
+         if not self.isTrain or opt.continue_train:
+             load_suffix = 'iter_%d' % opt.load_iter if opt.load_iter > 0 else opt.epoch
+             self.load_networks(load_suffix)
+         self.print_networks(opt.verbose)
+
+     def eval(self):
+         """Make models eval mode during test time"""
+         for name in self.model_names:
+             if isinstance(name, str):
+                 net = getattr(self, 'net' + name)
+                 net.eval()
+
+     def test(self):
+         """Forward function used in test time.
+
+         This function wraps <forward> function in no_grad() so we don't save intermediate steps for backprop
+         It also calls <compute_visuals> to produce additional visualization results
+         """
+         with torch.no_grad():
+             self.forward()
+             self.compute_visuals()
+
+     def compute_visuals(self):
+         """Calculate additional output images for visdom and HTML visualization"""
+         pass
+
+     def get_image_paths(self):
+         """Return image paths that are used to load current data"""
+         return self.image_paths
+
+     def update_learning_rate(self):
+         """Update learning rates for all the networks; called at the end of every epoch"""
+         old_lr = self.optimizers[0].param_groups[0]['lr']
+         for scheduler in self.schedulers:
+             if self.opt.lr_policy == 'plateau':
+                 scheduler.step(self.metric)
+             else:
+                 scheduler.step()
+
+         lr = self.optimizers[0].param_groups[0]['lr']
+         print('learning rate %.7f -> %.7f' % (old_lr, lr))
+
+     def get_current_visuals(self):
+         """Return visualization images. train.py will display these images with visdom, and save the images to an HTML"""
+         visual_ret = OrderedDict()
+         for name in self.visual_names:
+             if isinstance(name, str):
+                 visual_ret[name] = getattr(self, name)
+         return visual_ret
+
+     def get_current_losses(self):
+         """Return training losses / errors. train.py will print out these errors on console, and save them to a file"""
+         errors_ret = OrderedDict()
+         for name in self.loss_names:
+             if isinstance(name, str):
+                 errors_ret[name] = float(getattr(self, 'loss_' + name))  # float(...) works for both scalar tensor and float number
+         return errors_ret
+
+     def save_networks(self, epoch):
+         """Save all the networks to the disk.
+
+         Parameters:
+             epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
+         """
+         for name in self.model_names:
+             if isinstance(name, str):
+                 save_filename = '%s_net_%s.pth' % (epoch, name)
+                 save_path = os.path.join(self.save_dir, save_filename)
+                 net = getattr(self, 'net' + name)
+
+                 if len(self.gpu_ids) > 0 and torch.cuda.is_available():
+                     torch.save(net.module.cpu().state_dict(), save_path)
+                     net.cuda(self.gpu_ids[0])
+                 else:
+                     torch.save(net.cpu().state_dict(), save_path)
+
+     def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
+         """Fix InstanceNorm checkpoints incompatibility (prior to 0.4)"""
+         key = keys[i]
+         if i + 1 == len(keys):  # at the end, pointing to a parameter/buffer
+             if module.__class__.__name__.startswith('InstanceNorm') and \
+                     (key == 'running_mean' or key == 'running_var'):
+                 if getattr(module, key) is None:
+                     state_dict.pop('.'.join(keys))
+             if module.__class__.__name__.startswith('InstanceNorm') and \
+                     (key == 'num_batches_tracked'):
+                 state_dict.pop('.'.join(keys))
+         else:
+             self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)
+
+     def load_networks(self, epoch):
+         """Load all the networks from the disk.
+
+         Parameters:
+             epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
+         """
+         for name in self.model_names:
+             if isinstance(name, str):
+                 load_filename = '%s_net_%s.pth' % (epoch, name)
+                 load_path = os.path.join(self.save_dir, load_filename)
+                 net = getattr(self, 'net' + name)
+                 if isinstance(net, torch.nn.DataParallel):
+                     net = net.module
+                 print('loading the model from %s' % load_path)
+                 # if you are using PyTorch newer than 0.4 (e.g., built from
+                 # GitHub source), you can remove str() on self.device
+                 state_dict = torch.load(load_path, map_location=str(self.device))
+                 if hasattr(state_dict, '_metadata'):
+                     del state_dict._metadata
+
+                 # patch InstanceNorm checkpoints prior to 0.4
+                 for key in list(state_dict.keys()):  # need to copy keys here because we mutate in loop
+                     self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))
+                 net.load_state_dict(state_dict)
+
+     def print_networks(self, verbose):
+         """Print the total number of parameters in the network and (if verbose) network architecture
+
+         Parameters:
+             verbose (bool) -- if verbose: print the network architecture
+         """
+         print('---------- Networks initialized -------------')
+         for name in self.model_names:
+             if isinstance(name, str):
+                 net = getattr(self, 'net' + name)
+                 num_params = 0
+                 for param in net.parameters():
+                     num_params += param.numel()
+                 if verbose:
+                     print(net)
+                 print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
+         print('-----------------------------------------------')
+
+     def set_requires_grad(self, nets, requires_grad=False):
+         """Set requires_grad=False for all the networks to avoid unnecessary computations
+         Parameters:
+             nets (network list) -- a list of networks
+             requires_grad (bool) -- whether the networks require gradients or not
+         """
+         if not isinstance(nets, list):
+             nets = [nets]
+         for net in nets:
+             if net is not None:
+                 for param in net.parameters():
+                     param.requires_grad = requires_grad
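The inference() helper in the new app.py drives this API directly; a condensed sketch, assuming a model built by create_model and a 'fake' entry in the model's visual_names:

    import torch

    data = {'A': torch.randn(1, 3, 256, 256), 'A_paths': None}  # stand-in batch
    model.set_input(data)     # implemented by the concrete subclass (e.g. TestModel)
    model.test()              # forward() under torch.no_grad()
    fake = model.get_current_visuals()['fake']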
models/networks.py ADDED
@@ -0,0 +1,616 @@
+ import torch
+ import torch.nn as nn
+ from torch.nn import init
+ import functools
+ from torch.optim import lr_scheduler
+
+
+ ###############################################################################
+ # Helper Functions
+ ###############################################################################
+
+
+ class Identity(nn.Module):
+     def forward(self, x):
+         return x
+
+
+ def get_norm_layer(norm_type='instance'):
+     """Return a normalization layer
+
+     Parameters:
+         norm_type (str) -- the name of the normalization layer: batch | instance | none
+
+     For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
+     For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
+     """
+     if norm_type == 'batch':
+         norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
+     elif norm_type == 'instance':
+         norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
+     elif norm_type == 'none':
+         def norm_layer(x):
+             return Identity()
+     else:
+         raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
+     return norm_layer
+
+
+ def get_scheduler(optimizer, opt):
+     """Return a learning rate scheduler
+
+     Parameters:
+         optimizer -- the optimizer of the network
+         opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
+                               opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
+
+     For 'linear', we keep the same learning rate for the first <opt.n_epochs> epochs
+     and linearly decay the rate to zero over the next <opt.n_epochs_decay> epochs.
+     For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
+     See https://pytorch.org/docs/stable/optim.html for more details.
+     """
+     if opt.lr_policy == 'linear':
+         def lambda_rule(epoch):
+             lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)
+             return lr_l
+         scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
+     elif opt.lr_policy == 'step':
+         scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
+     elif opt.lr_policy == 'plateau':
+         scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
+     elif opt.lr_policy == 'cosine':
+         scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)
+     else:
+         raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
+     return scheduler
+
+
+ def init_weights(net, init_type='normal', init_gain=0.02):
+     """Initialize network weights.
+
+     Parameters:
+         net (network)     -- network to be initialized
+         init_type (str)   -- the name of an initialization method: normal | xavier | kaiming | orthogonal
+         init_gain (float) -- scaling factor for normal, xavier and orthogonal.
+
+     We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
+     work better for some applications. Feel free to try yourself.
+     """
+     def init_func(m):  # define the initialization function
+         classname = m.__class__.__name__
+         if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
+             if init_type == 'normal':
+                 init.normal_(m.weight.data, 0.0, init_gain)
+             elif init_type == 'xavier':
+                 init.xavier_normal_(m.weight.data, gain=init_gain)
+             elif init_type == 'kaiming':
+                 init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
+             elif init_type == 'orthogonal':
+                 init.orthogonal_(m.weight.data, gain=init_gain)
+             else:
+                 raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
+             if hasattr(m, 'bias') and m.bias is not None:
+                 init.constant_(m.bias.data, 0.0)
+         elif classname.find('BatchNorm2d') != -1:  # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
+             init.normal_(m.weight.data, 1.0, init_gain)
+             init.constant_(m.bias.data, 0.0)
+
+     print('initialize network with %s' % init_type)
+     net.apply(init_func)  # apply the initialization function <init_func>
+
+
+ def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
+     """Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights
+     Parameters:
+         net (network)      -- the network to be initialized
+         init_type (str)    -- the name of an initialization method: normal | xavier | kaiming | orthogonal
+         gain (float)       -- scaling factor for normal, xavier and orthogonal.
+         gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
+
+     Return an initialized network.
+     """
+     if len(gpu_ids) > 0:
+         assert(torch.cuda.is_available())
+         net.to(gpu_ids[0])
+         net = torch.nn.DataParallel(net, gpu_ids)  # multi-GPUs
+     init_weights(net, init_type, init_gain=init_gain)
+     return net
+
+
+ def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]):
+     """Create a generator
+
+     Parameters:
+         input_nc (int)     -- the number of channels in input images
+         output_nc (int)    -- the number of channels in output images
+         ngf (int)          -- the number of filters in the last conv layer
+         netG (str)         -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128
+         norm (str)         -- the name of normalization layers used in the network: batch | instance | none
+         use_dropout (bool) -- if use dropout layers.
+         init_type (str)    -- the name of our initialization method.
+         init_gain (float)  -- scaling factor for normal, xavier and orthogonal.
+         gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
+
+     Returns a generator
+
+     Our current implementation provides two types of generators:
+         U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images)
+         The original U-Net paper: https://arxiv.org/abs/1505.04597
+
+         Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks)
+         Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations.
+         We adapt Torch code from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style).
+
+     The generator has been initialized by <init_net>. It uses ReLU for non-linearity.
+     """
+     net = None
+     norm_layer = get_norm_layer(norm_type=norm)
+
+     if netG == 'resnet_9blocks':
+         net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)
+     elif netG == 'resnet_6blocks':
+         net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6)
+     elif netG == 'unet_128':
+         net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
+     elif netG == 'unet_256':
+         net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
+     else:
+         raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
+     return init_net(net, init_type, init_gain, gpu_ids)
+
+
+ def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, gpu_ids=[]):
+     """Create a discriminator
+
+     Parameters:
+         input_nc (int)     -- the number of channels in input images
+         ndf (int)          -- the number of filters in the first conv layer
+         netD (str)         -- the architecture's name: basic | n_layers | pixel
+         n_layers_D (int)   -- the number of conv layers in the discriminator; effective when netD=='n_layers'
+         norm (str)         -- the type of normalization layers used in the network.
+         init_type (str)    -- the name of the initialization method.
+         init_gain (float)  -- scaling factor for normal, xavier and orthogonal.
+         gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
+
+     Returns a discriminator
+
+     Our current implementation provides three types of discriminators:
+         [basic]: 'PatchGAN' classifier described in the original pix2pix paper.
+         It can classify whether 70×70 overlapping patches are real or fake.
+         Such a patch-level discriminator architecture has fewer parameters
+         than a full-image discriminator and can work on arbitrarily-sized images
+         in a fully convolutional fashion.
+
+         [n_layers]: With this mode, you can specify the number of conv layers in the discriminator
+         with the parameter <n_layers_D> (default=3 as used in [basic] (PatchGAN).)
+
+         [pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.
+         It encourages greater color diversity but has no effect on spatial statistics.
+
+     The discriminator has been initialized by <init_net>. It uses Leaky ReLU for non-linearity.
+     """
+     net = None
+     norm_layer = get_norm_layer(norm_type=norm)
+
+     if netD == 'basic':  # default PatchGAN classifier
+         net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer)
+     elif netD == 'n_layers':  # more options
+         net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer)
+     elif netD == 'pixel':  # classify if each pixel is real or fake
+         net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
+     else:
+         raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
+     return init_net(net, init_type, init_gain, gpu_ids)
+
+
+ ##############################################################################
208
+ # Classes
209
+ ##############################################################################
210
+ class GANLoss(nn.Module):
211
+ """Define different GAN objectives.
212
+
213
+ The GANLoss class abstracts away the need to create the target label tensor
214
+ that has the same size as the input.
215
+ """
216
+
217
+ def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
218
+ """ Initialize the GANLoss class.
219
+
220
+ Parameters:
221
+ gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
222
+ target_real_label (bool) - - label for a real image
223
+ target_fake_label (bool) - - label of a fake image
224
+
225
+ Note: Do not use sigmoid as the last layer of Discriminator.
226
+ LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
227
+ """
228
+ super(GANLoss, self).__init__()
229
+ self.register_buffer('real_label', torch.tensor(target_real_label))
230
+ self.register_buffer('fake_label', torch.tensor(target_fake_label))
231
+ self.gan_mode = gan_mode
232
+ if gan_mode == 'lsgan':
233
+ self.loss = nn.MSELoss()
234
+ elif gan_mode == 'vanilla':
235
+ self.loss = nn.BCEWithLogitsLoss()
236
+ elif gan_mode in ['wgangp']:
237
+ self.loss = None
238
+ else:
239
+ raise NotImplementedError('gan mode %s not implemented' % gan_mode)
240
+
241
+ def get_target_tensor(self, prediction, target_is_real):
242
+ """Create label tensors with the same size as the input.
243
+
244
+ Parameters:
245
+ prediction (tensor) - - tpyically the prediction from a discriminator
246
+ target_is_real (bool) - - if the ground truth label is for real images or fake images
247
+
248
+ Returns:
249
+ A label tensor filled with ground truth label, and with the size of the input
250
+ """
251
+
252
+ if target_is_real:
253
+ target_tensor = self.real_label
254
+ else:
255
+ target_tensor = self.fake_label
256
+ return target_tensor.expand_as(prediction)
257
+
258
+ def __call__(self, prediction, target_is_real):
259
+ """Calculate loss given Discriminator's output and grount truth labels.
260
+
261
+ Parameters:
262
+ prediction (tensor) - - tpyically the prediction output from a discriminator
263
+ target_is_real (bool) - - if the ground truth label is for real images or fake images
264
+
265
+ Returns:
266
+ the calculated loss.
267
+ """
268
+ if self.gan_mode in ['lsgan', 'vanilla']:
269
+ target_tensor = self.get_target_tensor(prediction, target_is_real)
270
+ loss = self.loss(prediction, target_tensor)
271
+ elif self.gan_mode == 'wgangp':
272
+ if target_is_real:
273
+ loss = -prediction.mean()
274
+ else:
275
+ loss = prediction.mean()
276
+ return loss
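Because get_target_tensor expands the scalar label to the prediction's shape, one GANLoss instance works equally for the patch map of NLayerDiscriminator and the per-pixel map of PixelDiscriminator. A minimal sketch (tensor shapes are illustrative):

    import torch
    from models.networks import GANLoss

    criterionGAN = GANLoss('lsgan')
    pred_fake = torch.randn(2, 1, 30, 30)          # e.g. a PatchGAN prediction map
    loss_D_fake = criterionGAN(pred_fake, False)   # MSE against an all-zeros map
    loss_G = criterionGAN(pred_fake, True)         # generator wants the map judged "real"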
+
+
+ def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
+     """Calculate the gradient penalty loss, used in the WGAN-GP paper https://arxiv.org/abs/1704.00028
+
+     Arguments:
+         netD (network)           -- discriminator network
+         real_data (tensor array) -- real images
+         fake_data (tensor array) -- generated images from the generator
+         device (str)             -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
+         type (str)               -- whether we mix real and fake data or not [real | fake | mixed].
+         constant (float)         -- the constant used in the formula (||gradient||_2 - constant)^2
+         lambda_gp (float)        -- weight for this loss
+
+     Returns the gradient penalty loss
+     """
+     if lambda_gp > 0.0:
+         if type == 'real':  # either use real images, fake images, or a linear interpolation of the two.
+             interpolatesv = real_data
+         elif type == 'fake':
+             interpolatesv = fake_data
+         elif type == 'mixed':
+             alpha = torch.rand(real_data.shape[0], 1, device=device)
+             alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)
+             interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
+         else:
+             raise NotImplementedError('{} not implemented'.format(type))
+         interpolatesv.requires_grad_(True)
+         disc_interpolates = netD(interpolatesv)
+         gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,
+                                         grad_outputs=torch.ones(disc_interpolates.size()).to(device),
+                                         create_graph=True, retain_graph=True, only_inputs=True)
+         gradients = gradients[0].view(real_data.size(0), -1)  # flatten the data
+         gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp  # added eps
+         return gradient_penalty, gradients
+     else:
+         return 0.0, None
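For context, a self-contained sketch of where this penalty sits in a WGAN-GP discriminator step; the training loop itself is not part of this commit, so the discriminator, data, and shapes below are illustrative:

    import torch
    from models.networks import GANLoss, NLayerDiscriminator, cal_gradient_penalty

    device = torch.device('cpu')
    netD = NLayerDiscriminator(3).to(device)
    criterionGAN = GANLoss('wgangp').to(device)
    real = torch.randn(2, 3, 64, 64, device=device)
    fake = torch.randn(2, 3, 64, 64, device=device)   # stands in for G(input).detach()

    # -D(real).mean() + D(fake).mean(), plus the penalty on mixed samples
    loss_D = criterionGAN(netD(real), True) + criterionGAN(netD(fake), False)
    gp, _ = cal_gradient_penalty(netD, real, fake, device,
                                 type='mixed', constant=1.0, lambda_gp=10.0)
    (loss_D + gp).backward()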
+
+
+ class ResnetGenerator(nn.Module):
+     """Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
+
+     We adapt Torch code and ideas from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style).
+     """
+
+     def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):
+         """Construct a Resnet-based generator
+
+         Parameters:
+             input_nc (int)      -- the number of channels in input images
+             output_nc (int)     -- the number of channels in output images
+             ngf (int)           -- the number of filters in the last conv layer
+             norm_layer          -- normalization layer
+             use_dropout (bool)  -- if use dropout layers
+             n_blocks (int)      -- the number of ResNet blocks
+             padding_type (str)  -- the name of padding layer in conv layers: reflect | replicate | zero
+         """
+         assert(n_blocks >= 0)
+         super(ResnetGenerator, self).__init__()
+         if type(norm_layer) == functools.partial:
+             use_bias = norm_layer.func == nn.InstanceNorm2d
+         else:
+             use_bias = norm_layer == nn.InstanceNorm2d
+
+         model = [nn.ReflectionPad2d(3),
+                  nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
+                  norm_layer(ngf),
+                  nn.ReLU(True)]
+
+         n_downsampling = 2
+         for i in range(n_downsampling):  # add downsampling layers
+             mult = 2 ** i
+             model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
+                       norm_layer(ngf * mult * 2),
+                       nn.ReLU(True)]
+
+         mult = 2 ** n_downsampling
+         for i in range(n_blocks):  # add ResNet blocks
+             model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
+
+         for i in range(n_downsampling):  # add upsampling layers
+             mult = 2 ** (n_downsampling - i)
+             model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
+                                          kernel_size=3, stride=2,
+                                          padding=1, output_padding=1,
+                                          bias=use_bias),
+                       norm_layer(int(ngf * mult / 2)),
+                       nn.ReLU(True)]
+         model += [nn.ReflectionPad2d(3)]
+         model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
+         model += [nn.Tanh()]
+
+         self.model = nn.Sequential(*model)
+
+     def forward(self, input):
+         """Standard forward"""
+         return self.model(input)
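Since the two stride-2 downsampling convs are mirrored by two stride-2 ConvTranspose2d layers (output_padding=1), the generator preserves spatial size, which is why test_opt.txt can feed it scale_width-resized images. A quick sanity check (illustrative):

    import torch
    from models.networks import ResnetGenerator

    netG = ResnetGenerator(3, 3, ngf=64, n_blocks=9)   # resnet_9blocks, as in test_opt.txt
    x = torch.randn(1, 3, 512, 512)
    assert netG(x).shape == x.shape                    # Tanh output in [-1, 1], same H x W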
+
+
+ class ResnetBlock(nn.Module):
+     """Define a Resnet block"""
+
+     def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
+         """Initialize the Resnet block
+
+         A resnet block is a conv block with skip connections.
+         We construct a conv block with the build_conv_block function,
+         and implement skip connections in the <forward> function.
+         Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
+         """
+         super(ResnetBlock, self).__init__()
+         self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
+
+     def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
+         """Construct a convolutional block.
+
+         Parameters:
+             dim (int)           -- the number of channels in the conv layer.
+             padding_type (str)  -- the name of padding layer: reflect | replicate | zero
+             norm_layer          -- normalization layer
+             use_dropout (bool)  -- if use dropout layers.
+             use_bias (bool)     -- if the conv layer uses bias or not
+
+         Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
+         """
+         conv_block = []
+         p = 0
+         if padding_type == 'reflect':
+             conv_block += [nn.ReflectionPad2d(1)]
+         elif padding_type == 'replicate':
+             conv_block += [nn.ReplicationPad2d(1)]
+         elif padding_type == 'zero':
+             p = 1
+         else:
+             raise NotImplementedError('padding [%s] is not implemented' % padding_type)
+
+         conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
+         if use_dropout:
+             conv_block += [nn.Dropout(0.5)]
+
+         p = 0
+         if padding_type == 'reflect':
+             conv_block += [nn.ReflectionPad2d(1)]
+         elif padding_type == 'replicate':
+             conv_block += [nn.ReplicationPad2d(1)]
+         elif padding_type == 'zero':
+             p = 1
+         else:
+             raise NotImplementedError('padding [%s] is not implemented' % padding_type)
+         conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
+
+         return nn.Sequential(*conv_block)
+
+     def forward(self, x):
+         """Forward function (with skip connections)"""
+         out = x + self.conv_block(x)  # add skip connections
+         return out
+
+
+ class UnetGenerator(nn.Module):
+     """Create a Unet-based generator"""
+
+     def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
+         """Construct a Unet generator
+         Parameters:
+             input_nc (int)  -- the number of channels in input images
+             output_nc (int) -- the number of channels in output images
+             num_downs (int) -- the number of downsamplings in UNet. For example, if |num_downs| == 7,
+                                an image of size 128x128 will become of size 1x1 at the bottleneck
+             ngf (int)       -- the number of filters in the last conv layer
+             norm_layer      -- normalization layer
+
+         We construct the U-Net from the innermost layer to the outermost layer.
+         It is a recursive process.
+         """
+         super(UnetGenerator, self).__init__()
+         # construct unet structure
+         unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True)  # add the innermost layer
+         for i in range(num_downs - 5):  # add intermediate layers with ngf * 8 filters
+             unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
+         # gradually reduce the number of filters from ngf * 8 to ngf
+         unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
+         unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
+         unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
+         self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer)  # add the outermost layer
+
+     def forward(self, input):
+         """Standard forward"""
+         return self.model(input)
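Each UnetSkipConnectionBlock halves the spatial size on the way down and doubles it on the way up, so an input of side 2**num_downs reaches a 1x1 bottleneck. A sketch of that arithmetic (illustrative):

    import torch
    from models.networks import UnetGenerator

    netG = UnetGenerator(3, 3, num_downs=7)     # 7 downsamplings
    x = torch.randn(1, 3, 128, 128)             # 128 == 2 ** 7
    assert netG(x).shape == (1, 3, 128, 128)    # skip connections restore full resolution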
+
+
+ class UnetSkipConnectionBlock(nn.Module):
+     """Defines the Unet submodule with skip connection.
+         X -------------------identity----------------------
+         |-- downsampling -- |submodule| -- upsampling --|
+     """
+
+     def __init__(self, outer_nc, inner_nc, input_nc=None,
+                  submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
+         """Construct a Unet submodule with skip connections.
+
+         Parameters:
+             outer_nc (int)      -- the number of filters in the outer conv layer
+             inner_nc (int)      -- the number of filters in the inner conv layer
+             input_nc (int)      -- the number of channels in input images/features
+             submodule (UnetSkipConnectionBlock) -- previously defined submodules
+             outermost (bool)    -- if this module is the outermost module
+             innermost (bool)    -- if this module is the innermost module
+             norm_layer          -- normalization layer
+             use_dropout (bool)  -- if use dropout layers.
+         """
+         super(UnetSkipConnectionBlock, self).__init__()
+         self.outermost = outermost
+         if type(norm_layer) == functools.partial:
+             use_bias = norm_layer.func == nn.InstanceNorm2d
+         else:
+             use_bias = norm_layer == nn.InstanceNorm2d
+         if input_nc is None:
+             input_nc = outer_nc
+         downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
+                              stride=2, padding=1, bias=use_bias)
+         downrelu = nn.LeakyReLU(0.2, True)
+         downnorm = norm_layer(inner_nc)
+         uprelu = nn.ReLU(True)
+         upnorm = norm_layer(outer_nc)
+
+         if outermost:
+             upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
+                                         kernel_size=4, stride=2,
+                                         padding=1)
+             down = [downconv]
+             up = [uprelu, upconv, nn.Tanh()]
+             model = down + [submodule] + up
+         elif innermost:
+             upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
+                                         kernel_size=4, stride=2,
+                                         padding=1, bias=use_bias)
+             down = [downrelu, downconv]
+             up = [uprelu, upconv, upnorm]
+             model = down + up
+         else:
+             upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
+                                         kernel_size=4, stride=2,
+                                         padding=1, bias=use_bias)
+             down = [downrelu, downconv, downnorm]
+             up = [uprelu, upconv, upnorm]
+
+             if use_dropout:
+                 model = down + [submodule] + up + [nn.Dropout(0.5)]
+             else:
+                 model = down + [submodule] + up
+
+         self.model = nn.Sequential(*model)
+
+     def forward(self, x):
+         if self.outermost:
+             return self.model(x)
+         else:   # add skip connections
+             return torch.cat([x, self.model(x)], 1)
+
+
+ class NLayerDiscriminator(nn.Module):
+     """Defines a PatchGAN discriminator"""
+
+     def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):
+         """Construct a PatchGAN discriminator
+
+         Parameters:
+             input_nc (int)  -- the number of channels in input images
+             ndf (int)       -- the number of filters in the last conv layer
+             n_layers (int)  -- the number of conv layers in the discriminator
+             norm_layer      -- normalization layer
+         """
+         super(NLayerDiscriminator, self).__init__()
+         if type(norm_layer) == functools.partial:  # no need to use bias as BatchNorm2d has affine parameters
+             use_bias = norm_layer.func == nn.InstanceNorm2d
+         else:
+             use_bias = norm_layer == nn.InstanceNorm2d
+
+         kw = 4
+         padw = 1
+         sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
+         nf_mult = 1
+         nf_mult_prev = 1
+         for n in range(1, n_layers):  # gradually increase the number of filters
+             nf_mult_prev = nf_mult
+             nf_mult = min(2 ** n, 8)
+             sequence += [
+                 nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
+                 norm_layer(ndf * nf_mult),
+                 nn.LeakyReLU(0.2, True)
+             ]
+
+         nf_mult_prev = nf_mult
+         nf_mult = min(2 ** n_layers, 8)
+         sequence += [
+             nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
+             norm_layer(ndf * nf_mult),
+             nn.LeakyReLU(0.2, True)
+         ]
+
+         sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]  # output 1 channel prediction map
+         self.model = nn.Sequential(*sequence)
+
+     def forward(self, input):
+         """Standard forward."""
+         return self.model(input)
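To make the 70×70 claim concrete: with 4×4 kernels, three stride-2 convs, and two stride-1 convs, each output unit's receptive field works out to 70×70 input pixels, and the network emits a map of patch verdicts rather than a single scalar. An illustrative check:

    import torch
    from models.networks import NLayerDiscriminator

    netD = NLayerDiscriminator(3, ndf=64, n_layers=3)
    x = torch.randn(1, 3, 256, 256)
    print(netD(x).shape)   # torch.Size([1, 1, 30, 30]) -- one logit per overlapping 70x70 patch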
+
+
+ class PixelDiscriminator(nn.Module):
+     """Defines a 1x1 PatchGAN discriminator (pixelGAN)"""
+
+     def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):
+         """Construct a 1x1 PatchGAN discriminator
+
+         Parameters:
+             input_nc (int)  -- the number of channels in input images
+             ndf (int)       -- the number of filters in the last conv layer
+             norm_layer      -- normalization layer
+         """
+         super(PixelDiscriminator, self).__init__()
+         if type(norm_layer) == functools.partial:  # no need to use bias as BatchNorm2d has affine parameters
+             use_bias = norm_layer.func == nn.InstanceNorm2d
+         else:
+             use_bias = norm_layer == nn.InstanceNorm2d
+
+         self.net = [
+             nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
+             nn.LeakyReLU(0.2, True),
+             nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
+             norm_layer(ndf * 2),
+             nn.LeakyReLU(0.2, True),
+             nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]
+
+         self.net = nn.Sequential(*self.net)
+
+     def forward(self, input):
+         """Standard forward."""
+         return self.net(input)
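With every convolution 1×1 and stride 1, the "patch" collapses to a single pixel: the prediction map keeps the input's spatial size, one real/fake logit per pixel. An illustrative check:

    import torch
    from models.networks import PixelDiscriminator

    netD = PixelDiscriminator(3)
    x = torch.randn(1, 3, 256, 256)
    print(netD(x).shape)   # torch.Size([1, 1, 256, 256])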
models/pixera_CYCLEGAN/latest_net_G.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a840b5a9e29483a2caa7335714b51184ca43f6572c586e85033a787d33c77896
+ size 45530709
models/pixera_CYCLEGAN/test_opt.txt ADDED
@@ -0,0 +1,42 @@
+ ----------------- Options ---------------
+ aspect_ratio: 1.0
+ batch_size: 1
+ checkpoints_dir: ./models
+ crop_size: 512
+ dataroot: None
+ dataset_mode: single
+ direction: AtoB
+ display_winsize: 256
+ epoch: latest
+ eval: False
+ gpu_ids: -1
+ init_gain: 0.02
+ init_type: normal
+ input_nc: 3
+ isTrain: False                         [default: None]
+ load_iter: 0                           [default: 0]
+ load_size: 512
+ max_dataset_size: inf
+ model: test
+ model_suffix:
+ n_layers_D: 3
+ name: pixera_CYCLEGAN
+ ndf: 64
+ netD: basic
+ netG: resnet_9blocks
+ ngf: 64
+ no_dropout: False
+ no_flip: False
+ norm: instance
+ num_test: 50
+ num_threads: 4
+ output_nc: 3
+ phase: test
+ preprocess: scale_width
+ results_dir: ./results/
+ serial_batches: False
+ suffix:
+ use_wandb: False
+ verbose: False
+ wandb_project_name: CycleGAN-and-pix2pix
+ ----------------- End -------------------
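A dump like this is written out when the test script parses its flags; a command along these lines should reproduce it (the flags come straight from the dump above, but the dataroot path is illustrative and must point at a folder of input images):

    python test.py --dataroot ./imgs --name pixera_CYCLEGAN --model test \
        --checkpoints_dir ./models --preprocess scale_width \
        --load_size 512 --crop_size 512 --gpu_ids -1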
models/test_model.py ADDED
@@ -0,0 +1,69 @@
+ from .base_model import BaseModel
+ from . import networks
+
+
+ class TestModel(BaseModel):
+     """ This TestModel can be used to generate CycleGAN results for only one direction.
+     This model will automatically set '--dataset_mode single', which only loads the images from one collection.
+
+     See the test instructions for more details.
+     """
+     @staticmethod
+     def modify_commandline_options(parser, is_train=True):
+         """Add new dataset-specific options, and rewrite default values for existing options.
+
+         Parameters:
+             parser          -- original option parser
+             is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
+
+         Returns:
+             the modified parser.
+
+         The model can only be used during test time. It requires '--dataset_mode single'.
+         You need to specify the network using the option '--model_suffix'.
+         """
+         assert not is_train, 'TestModel cannot be used during training time'
+         parser.set_defaults(dataset_mode='single')
+         parser.add_argument('--model_suffix', type=str, default='', help='In checkpoints_dir, [epoch]_net_G[model_suffix].pth will be loaded as the generator.')
+
+         return parser
+
+     def __init__(self, opt):
+         """Initialize the TestModel class.
+
+         Parameters:
+             opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
+         """
+         assert(not opt.isTrain)
+         BaseModel.__init__(self, opt)
+         # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
+         self.loss_names = []
+         # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
+         self.visual_names = ['real', 'fake']
+         # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>
+         self.model_names = ['G' + opt.model_suffix]  # only the generator is needed.
+         self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG,
+                                       opt.norm, not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
+
+         # assigns the model to self.netG_[suffix] so that it can be loaded
+         # please see <BaseModel.load_networks>
+         setattr(self, 'netG' + opt.model_suffix, self.netG)  # store netG in self.
+
+     def set_input(self, input):
+         """Unpack input data from the dataloader and perform necessary pre-processing steps.
+
+         Parameters:
+             input: a dictionary that contains the data itself and its metadata information.
+
+         We need to use the 'single_dataset' dataset mode. It only loads images from one domain.
+         """
+         self.real = input['A'].to(self.device)
+         self.image_paths = input['A_paths']
+
+     def forward(self):
+         """Run forward pass."""
+         self.fake = self.netG(self.real)  # G(real)
+
+     def optimize_parameters(self):
+         """No optimization for test model."""
+         pass
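A sketch of the single-direction test flow this model enables, mirroring the upstream CycleGAN test script; create_model and create_dataset come from the models and data packages in this commit, while TestOptions lives in options/test_options.py (only its bytecode appears in this truncated 50-file view):

    from options.test_options import TestOptions
    from data import create_dataset
    from models import create_model

    opt = TestOptions().parse()      # --model test implies --dataset_mode single
    dataset = create_dataset(opt)
    model = create_model(opt)
    model.setup(opt)                 # loads latest_net_G.pth from <checkpoints_dir>/<name>
    for data in dataset:
        model.set_input(data)        # data['A'] image tensor, data['A_paths'] source path
        model.test()                 # forward() wrapped in torch.no_grad()
        visuals = model.get_current_visuals()   # OrderedDict with 'real' and 'fake'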
options/__init__.py ADDED
@@ -0,0 +1 @@
+ """This package includes option modules: training options, test options, and basic options (used in both training and test)."""
options/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (333 Bytes)
options/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (311 Bytes)
options/__pycache__/base_options.cpython-38.pyc ADDED
Binary file (6.81 kB)
options/__pycache__/base_options.cpython-39.pyc ADDED
Binary file (6.83 kB)
options/__pycache__/test_options.cpython-38.pyc ADDED
Binary file (1.19 kB)
options/__pycache__/test_options.cpython-39.pyc ADDED
Binary file (1.17 kB)