David Piscasio committed on
Commit 32da7be
1 Parent(s): aaa41cb

Added options folder

options/__init__.py ADDED
@@ -0,0 +1 @@
+ """This package options includes option modules: training options, test options, and basic options (used in both training and test)."""
options/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (351 Bytes).
 
options/__pycache__/base_options.cpython-38.pyc ADDED
Binary file (6.73 kB).
 
options/__pycache__/test_options.cpython-38.pyc ADDED
Binary file (1.21 kB).
 
options/base_options.py ADDED
@@ -0,0 +1,137 @@
+ import argparse
+ import os
+ from util import util
+ import torch
+ import models
+ import data
+
+
+ class BaseOptions():
+     """This class defines options used during both training and test time.
+
+     It also implements several helper functions such as parsing, printing, and saving the options.
+     It also gathers additional options defined in <modify_commandline_options> functions in both dataset class and model class.
+     """
+
+     def __init__(self):
+         """Reset the class; indicates the class hasn't been initialized"""
+         self.initialized = False
+
+     def initialize(self, parser):
+         """Define the common options that are used in both training and test."""
+         # basic parameters
+         parser.add_argument('--dataroot', required=False, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
+         parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
+         parser.add_argument('--use_wandb', action='store_true', help='use wandb')
+         parser.add_argument('--gpu_ids', type=str, default='-1', help='gpu ids: e.g. 0  0,1,2, 0,2. use -1 for CPU')
+         parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
+         # model parameters
+         parser.add_argument('--model', type=str, default='cycle_gan', help='chooses which model to use. [cycle_gan | pix2pix | test | colorization]')
+         parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')
+         parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale')
+         parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
+         parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
+         parser.add_argument('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
+         parser.add_argument('--netG', type=str, default='resnet_9blocks', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')
+         parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
+         parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]')
+         parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')
+         parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
+         parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
+         # dataset parameters
+         parser.add_argument('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
+         parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA')
+         parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
+         parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data')
+         parser.add_argument('--batch_size', type=int, default=1, help='input batch size')
+         parser.add_argument('--load_size', type=int, default=286, help='scale images to this size')
+         parser.add_argument('--crop_size', type=int, default=256, help='then crop to this size')
+         parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
+         parser.add_argument('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')
+         parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
+         parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')
+         # additional parameters
+         parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
+         parser.add_argument('--load_iter', type=int, default='0', help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
+         parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
+         parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')
+         self.initialized = True
+         return parser
+
+     def gather_options(self):
+         """Initialize our parser with basic options (only once).
+         Add additional model-specific and dataset-specific options.
+         These options are defined in the <modify_commandline_options> function
+         in model and dataset classes.
+         """
+         if not self.initialized:  # check if it has been initialized
+             parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+             parser = self.initialize(parser)
+
+         # get the basic options
+         opt, _ = parser.parse_known_args()
+
+         # modify model-related parser options
+         model_name = opt.model
+         model_option_setter = models.get_option_setter(model_name)
+         parser = model_option_setter(parser, self.isTrain)
+         opt, _ = parser.parse_known_args()  # parse again with new defaults
+
+         # modify dataset-related parser options
+         dataset_name = opt.dataset_mode
+         dataset_option_setter = data.get_option_setter(dataset_name)
+         parser = dataset_option_setter(parser, self.isTrain)
+
+         # save and return the parser
+         self.parser = parser
+         return parser.parse_args()
+
+     def print_options(self, opt):
+         """Print and save options
+
+         It will print both current options and default values (if different).
+         It will save options into a text file / [checkpoints_dir] / opt.txt
+         """
+         message = ''
+         message += '----------------- Options ---------------\n'
+         for k, v in sorted(vars(opt).items()):
+             comment = ''
+             default = self.parser.get_default(k)
+             if v != default:
+                 comment = '\t[default: %s]' % str(default)
+             message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
+         message += '----------------- End -------------------'
+         print(message)
+
+         # save to the disk
+         expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
+         util.mkdirs(expr_dir)
+         file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))
+         with open(file_name, 'wt') as opt_file:
+             opt_file.write(message)
+             opt_file.write('\n')
+
+     def parse(self):
+         """Parse our options, create checkpoints directory suffix, and set up gpu device."""
+         opt = self.gather_options()
+         opt.isTrain = self.isTrain   # train or test
+
+         # process opt.suffix
+         if opt.suffix:
+             suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''
+             opt.name = opt.name + suffix
+
+         self.print_options(opt)
+
+         # set gpu ids
+         str_ids = opt.gpu_ids.split(',')
+         opt.gpu_ids = []
+         for str_id in str_ids:
+             id = int(str_id)
+             if id >= 0:
+                 opt.gpu_ids.append(id)
+         if len(opt.gpu_ids) > 0:
+             torch.cuda.set_device(opt.gpu_ids[0])
+
+         self.opt = opt
+         return self.opt
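Note that BaseOptions never sets self.isTrain, even though gather_options() and parse() rely on it, so the class is only usable through the TestOptions and TrainOptions subclasses added below. One detail worth calling out is how parse() builds the experiment name: the --suffix string is expanded with str.format over the parsed options before being appended to opt.name. A minimal, self-contained sketch of that expansion (the values below are just the defaults defined in this file, not output produced by this commit):

# Illustrative only: mimics the suffix expansion inside BaseOptions.parse().
defaults = {'name': 'experiment_name', 'model': 'cycle_gan', 'netG': 'resnet_9blocks', 'load_size': 286}
suffix = '{model}_{netG}_size{load_size}'.format(**defaults)   # string that would be passed via --suffix
print(defaults['name'] + '_' + suffix)                         # experiment_name_cycle_gan_resnet_9blocks_size286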
options/test_options.py ADDED
@@ -0,0 +1,23 @@
+ from .base_options import BaseOptions
+
+
+ class TestOptions(BaseOptions):
+     """This class includes test options.
+
+     It also includes shared options defined in BaseOptions.
+     """
+
+     def initialize(self, parser):
+         parser = BaseOptions.initialize(self, parser)  # define shared options
+         parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')
+         parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
+         parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')
+         # Dropout and Batchnorm have different behavior during training and test.
+         parser.add_argument('--eval', action='store_true', help='use eval mode during test time.')
+         parser.add_argument('--num_test', type=int, default=50, help='how many test images to run')
+         # rewrite default values
+         parser.set_defaults(model='test')
+         # To avoid cropping, the load_size should be the same as crop_size
+         parser.set_defaults(load_size=parser.get_default('crop_size'))
+         self.isTrain = False
+         return parser
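A typical driver would instantiate this subclass and call parse(). No such script is part of this commit, and the models and data packages imported by base_options.py must provide get_option_setter for parse() to succeed, so the following is only a sketch:

# Hypothetical test-time entry point (not included in this commit).
from options.test_options import TestOptions

if __name__ == '__main__':
    opt = TestOptions().parse()                          # isTrain is False; model defaults to 'test'
    print(opt.results_dir, opt.num_test, opt.load_size)  # './results/' 50 256 (load_size pinned to crop_size)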
options/train_options.py ADDED
@@ -0,0 +1,40 @@
+ from .base_options import BaseOptions
+
+
+ class TrainOptions(BaseOptions):
+     """This class includes training options.
+
+     It also includes shared options defined in BaseOptions.
+     """
+
+     def initialize(self, parser):
+         parser = BaseOptions.initialize(self, parser)
+         # visdom and HTML visualization parameters
+         parser.add_argument('--display_freq', type=int, default=400, help='frequency of showing training results on screen')
+         parser.add_argument('--display_ncols', type=int, default=4, help='if positive, display all images in a single visdom web panel with certain number of images per row.')
+         parser.add_argument('--display_id', type=int, default=1, help='window id of the web display')
+         parser.add_argument('--display_server', type=str, default="http://localhost", help='visdom server of the web display')
+         parser.add_argument('--display_env', type=str, default='main', help='visdom display environment name (default is "main")')
+         parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display')
+         parser.add_argument('--update_html_freq', type=int, default=1000, help='frequency of saving training results to html')
+         parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console')
+         parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
+         # network saving and loading parameters
+         parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results')
+         parser.add_argument('--save_epoch_freq', type=int, default=5, help='frequency of saving checkpoints at the end of epochs')
+         parser.add_argument('--save_by_iter', action='store_true', help='whether to save the model by iteration')
+         parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
+         parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>, ...')
+         parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
+         # training parameters
+         parser.add_argument('--n_epochs', type=int, default=100, help='number of epochs with the initial learning rate')
+         parser.add_argument('--n_epochs_decay', type=int, default=100, help='number of epochs to linearly decay learning rate to zero')
+         parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
+         parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
+         parser.add_argument('--gan_mode', type=str, default='lsgan', help='the type of GAN objective. [vanilla | lsgan | wgangp]. vanilla GAN loss is the cross-entropy objective used in the original GAN paper.')
+         parser.add_argument('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images')
+         parser.add_argument('--lr_policy', type=str, default='linear', help='learning rate policy. [linear | step | plateau | cosine]')
+         parser.add_argument('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations')
+
+         self.isTrain = True
+         return parser
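The schedule implied by these defaults runs opt.n_epochs epochs at the initial learning rate followed by opt.n_epochs_decay epochs of linear decay, i.e. 200 epochs in total. A hypothetical training entry point (again not part of this commit, and dependent on the models and data packages) would consume the options like this:

# Hypothetical train-time entry point (not included in this commit).
from options.train_options import TrainOptions

if __name__ == '__main__':
    opt = TrainOptions().parse()                         # isTrain is True; phase defaults to 'train'
    total_epochs = opt.n_epochs + opt.n_epochs_decay     # 100 + 100 = 200 with the defaults above
    print(opt.name, opt.lr, total_epochs)                # 'experiment_name' 0.0002 200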