from options.options import TrainingOptions
from models.pix2pix_model import Pix2PixModel
from data.aligned_dataset import AlignedDataset
import time,datetime

# class CustomDatasetDataLoader():
#     """Wrapper class of Dataset class that performs multi-threaded data loading"""
#
#     def __init__(self, opt):
#         """Initialize this class
#
#         Step 1: create a dataset instance given the name [dataset_mode]
#         Step 2: create a multi-threaded data loader.
#         """
#         self.opt = opt
#         dataset_class = find_dataset_using_name(opt.dataset_mode)
#         self.dataset = dataset_class(opt)
#         print("dataset [%s] was created" % type(self.dataset).__name__)
#         self.dataloader = torch.utils.data.DataLoader(
#             self.dataset,
#             batch_size=opt.batch_size,
#             shuffle=not opt.serial_batches,
#             num_workers=int(opt.num_threads))
#
#     def load_data(self):
#         return self
#
#     def __len__(self):
#         """Return the number of data in the dataset"""
#         return min(len(self.dataset), self.opt.max_dataset_size)
#
#     def __iter__(self):
#         """Return a batch of data"""
#         for i, data in enumerate(self.dataloader):
#             if i * self.opt.batch_size >= self.opt.max_dataset_size:
#                 break
#             yield data

# ---------------------------------------------------------------------------
# pix2pix training loop (flat script).
#
# Builds the options, dataset, and model, then runs the standard GAN training
# schedule: one learning-rate step per epoch, one optimizer step per batch,
# with a periodic loss report.
# ---------------------------------------------------------------------------
train_opts = TrainingOptions()
opt = train_opts.opt  # single alias used everywhere below for consistency

dataset = AlignedDataset(opt)        # paired (A, B) image dataset
model = Pix2PixModel(opt, True)      # True -> training mode (builds netD as well as netG)
model.setup(opt)                     # register schedulers / load or print networks

total_iters = 0  # total number of training iterations across all epochs

# Total epoch count is derived from the options so the loop bound agrees with
# the "epoch %d / %d" progress report below (the original hard-coded 999).
n_total_epochs = opt.n_epochs + opt.n_epochs_decay

for epoch in range(1, n_total_epochs + 1):
    epoch_start_time = time.time()  # timer for the entire epoch
    epoch_iter = 0                  # iterations within the current epoch
    model.update_learning_rate()    # step LR schedulers at the start of every epoch

    for data in dataset:  # inner loop within one epoch
        model.set_input(data)        # unpack data from dataset and apply preprocessing
        model.optimize_parameters()  # compute losses, gradients, update network weights

        # Count one step per loop iteration. The original added 500 here, which
        # made the `% 500` guard below always true (losses printed every step).
        # NOTE(review): if each step consumes a batch, opt.batch_size may be the
        # intended increment — confirm against AlignedDataset's batching.
        total_iters += 1
        epoch_iter += 1

        if total_iters % 500 == 0:  # periodic loss report every 500 iterations
            print(model.loss_D_fake, model.loss_D_real, model.loss_G, model.loss_G_L1)

    print('End of epoch %d / %d \t Time Taken: %d sec' % (
        epoch, n_total_epochs, time.time() - epoch_start_time))