# from controllers.test_controller import TestController
from .base_controller import BaseController
from utils.visualizer import Visualizer
import time
from data import create_dataset
from models import create_model
from controllers import create_controller
import copy
import os
import shutil

class TrainController(BaseController):
    """Drive the training schedule for a model.

    Per epoch: run the training loop (batch iteration, periodic checkpointing,
    loss logging), then optionally a validation pass that saves "best"
    checkpoints whenever the model reports an improvement. When resuming
    (``--continue_train``), the first epoch skips training and only validates,
    so the restored weights are scored before any further updates.
    """

    @staticmethod
    def modify_commandline_options(parser, flagdict=None):
        """Add training-specific command-line options and set their defaults.

        Parameters:
            parser   -- an argparse-compatible option parser to extend
            flagdict -- unused here; kept for signature symmetry with other
                        controllers' option setters

        Returns:
            the same parser, so calls can be chained.
        """
        parser.set_defaults(no_dropout=True)  # default CycleGAN did not use dropout
        # network saving and loading parameters
        parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results')
        parser.add_argument('--save_epoch_freq', type=int, default=10, help='frequency of saving checkpoints at the end of epochs')
        parser.add_argument('--save_by_iter', action='store_true', help='whether saves model by iteration')
        parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
        parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>, ...')
        parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
        parser.add_argument('--batch_size', type=int, default=32, help='input batch size')
        # training parameters
        parser.add_argument('--n_epochs', type=int, default=100, help='number of epochs with the initial learning rate')
        parser.add_argument('--n_epochs_decay', type=int, default=100, help='number of epochs to linearly decay learning rate to zero')
        parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
        parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
        parser.add_argument('--gan_mode', type=str, default='lsgan', help='the type of GAN objective. [vanilla| lsgan | wgangp]. vanilla GAN loss is the cross-entropy objective used in the original GAN paper.')
        parser.add_argument('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images')
        parser.add_argument('--lr_policy', type=str, default='linear', help='learning rate policy. [linear | step | plateau | cosine]')
        parser.add_argument('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations')
        parser.add_argument('--_suffix', type=str, default="", help='the suffix of dataset')
        parser.add_argument('--val', action='store_true', help='The switch of validation')
        parser.add_argument('--val_param', type=str, default='', help='The parameters of validation')
        parser.add_argument('--val_ratio', type=float, default=0.8, help='The ratio of validation')

        return parser

    def __init__(self, opt):
        """Build dataset, model and visualizer, and snapshot the model/dataset
        source files into the run's checkpoint directory for reproducibility.

        Parameters:
            opt -- parsed options; must provide at least checkpoints_dir, name,
                   version, model, dataset_mode and continue_train.
        """
        BaseController.__init__(self, opt)

        self.opt = opt
        self.dataset = create_dataset(opt)  # create a dataset given opt.dataset_mode and other options
        self.dataset_size = len(self.dataset)  # number of images in the dataset
        # When resuming, the first epoch is validation-only (see excute);
        # the flag is cleared at the end of that first epoch.
        self.continue_init = bool(self.opt.continue_train)
        print('The number of training images = %d' % self.dataset_size)

        self.model = create_model(self.opt)      # create a model given opt.model and other options
        self.model.setup(self.opt)               # regular setup: load and print networks; create schedulers
        self.visualizer = Visualizer(opt)        # create a visualizer that display/save images and plots
        run_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name + '_v' + self.opt.version)
        self.log_name = os.path.join(run_dir, 'loss_log.txt')
        self.code_dir = os.path.join(run_dir, 'code')
        model_filename = opt.model + "_model" + ".py"
        data_filename = opt.dataset_mode + "_dataset" + ".py"
        # makedirs(exist_ok=True) also creates missing parents and avoids the
        # exists/mkdir race that os.path.exists + os.mkdir had.
        os.makedirs(self.code_dir, exist_ok=True)
        # Copy the exact model/dataset source used for this run next to the
        # checkpoints, so old runs can be reproduced after the code changes.
        shutil.copy(os.path.join("models", model_filename), os.path.join(self.code_dir, model_filename))
        shutil.copy(os.path.join("data", data_filename), os.path.join(self.code_dir, data_filename))

    def excute(self):
        """Run the full training schedule.

        Name kept as-is (sic, 'execute') because external callers invoke it
        under this spelling.
        """
        total_iters = 0  # total number of training iterations across all epochs

        # outer loop over epochs; models are saved by <epoch_count>, <epoch_count>+<save_latest_freq>, ...
        for epoch in range(self.opt.epoch_count, self.opt.n_epochs + self.opt.n_epochs_decay + 1):
            epoch_start_time = time.time()  # timer for the entire epoch
            iter_data_time = time.time()    # timer for data loading per iteration
            # Defined up front so the post-loop timing math below cannot hit a
            # NameError when the training loop runs zero iterations.
            iter_start_time = time.time()
            t_data = 0.0
            epoch_iter = 0  # training iterations in the current epoch, reset every epoch

            if not self.continue_init:
                self.model._train()
                self.visualizer.reset()            # make sure results go to HTML at least once per epoch
                self.model.update_learning_rate()  # update learning rates at the beginning of every epoch

                if self.opt.val:
                    self.dataset.set_train_val()
                for i, data in enumerate(self.dataset):  # inner loop within one epoch
                    iter_start_time = time.time()  # timer for computation per iteration
                    if total_iters % self.opt.print_freq == 0:
                        t_data = iter_start_time - iter_data_time

                    total_iters += self.opt.batch_size
                    epoch_iter += self.opt.batch_size

                    data['isTrain'] = True
                    self.model.set_input(data)         # unpack data from self.dataset and apply preprocessing
                    self.model.optimize_parameters()   # calculate loss functions, get gradients, update network weights
                    self.model.iter_end()

                    if total_iters % self.opt.save_latest_freq == 0:  # cache the latest model every <save_latest_freq> iterations
                        print('saving the latest model (epoch %d, total_iters %d)\n' % (epoch, total_iters))
                        save_suffix = 'iter_%d' % total_iters if self.opt.save_by_iter else 'latest'
                        self.model.save_networks(save_suffix)

                    iter_data_time = time.time()

                losses = self.model.get_current_losses()
                t_comp = (time.time() - iter_start_time) / self.opt.batch_size
                self.visualizer.print_current_losses(epoch, epoch_iter, losses, t_comp, t_data, mode='Training')

                if epoch % self.opt.save_epoch_freq == 0:  # cache the model every <save_epoch_freq> epochs
                    print('saving the model at the end of epoch %d, iters %d\n' % (epoch, total_iters))
                    self.model.save_networks(epoch)

                self.model.epoch_end()

            if self.opt.val:
                self._validate(epoch, total_iters, iter_data_time)

            if self.continue_init:
                # The resume-init epoch is done; train normally from now on.
                self.continue_init = False

            with open(self.log_name, "a") as log_file:
                log_file.write('End of epoch %d / %d \t Time Taken: %d sec\n' % (epoch, self.opt.n_epochs + self.opt.n_epochs_decay, time.time() - epoch_start_time))
            print('End of epoch %d / %d \t Time Taken: %d sec\n' % (epoch, self.opt.n_epochs + self.opt.n_epochs_decay, time.time() - epoch_start_time))

    def _validate(self, epoch, total_iters, iter_data_time):
        """Run one validation pass and save 'best' checkpoints on improvement.

        Best-checkpoint saving is skipped on the resume-init epoch so a freshly
        restored model does not immediately overwrite its own 'best' files.
        """
        self.model._eval()
        # NOTE(review): assumes set_train_val() at the top of the next epoch
        # restores the dataset to training mode — confirm in the dataset class.
        self.dataset.set_mode('val')
        i = 0  # defensive default so the summary below survives an empty val set
        for i, data in enumerate(self.dataset):
            data['isTrain'] = False
            self.model.set_input(data)
            self.model.test()
            self.model.iter_end()
        self.model.epoch_end()
        losses = self.model.get_current_losses()
        iter_start_time = time.time()  # timer for computation per iteration
        t_data = iter_start_time - iter_data_time
        # t_comp is ~0 by construction here; kept for log-format compatibility.
        t_comp = (time.time() - iter_start_time) / self.opt.batch_size

        self.visualizer.print_current_losses(epoch, i, losses, t_comp, t_data, mode="Validating")
        if not self.continue_init:
            for valparam in self.model.get_validation_parameter():
                if self.model.val_save(valparam):
                    self._log_val_save(epoch, total_iters, valparam)
                    self.model.save_networks('best' if valparam is None else valparam + '_best')
                    self.model.set_model_name()

    def _log_val_save(self, epoch, total_iters, valparam):
        """Append a 'best model saved' record (message + model-name list) to the
        loss log and echo it to stdout. valparam=None means the unnamed metric."""
        if valparam is not None:
            msg = 'Validation : saving the model at the end of epoch %d, iters %d of parameter %s\n' % (epoch, total_iters, valparam)
        else:
            msg = 'Validation : saving the model at the end of epoch %d, iters %d\n' % (epoch, total_iters)
        with open(self.log_name, "a") as log_file:
            log_file.write(msg)
            log_file.write('[')
            for model in self.model.model_names:
                log_file.write(model + ', ')
            log_file.write(']\n')
        print(msg)
        print(self.model.model_names)