from make_dataset import make_dataset
from augment_data import augment_data
from model import Model
import argparse
import sys
import numpy as np
import os
import tensorflow as tf

def train(args, data):
    """Train the segmentation model with periodic evaluation and early stopping.

    Args:
        args: Parsed CLI namespace; reads n_epochs, batch_size,
            early_stopping_max_checks and load_checkpoint (and is passed
            through to Model.predict / Model.data_provider / Model.save).
        data: Mapping (e.g. an np.load NpzFile) with keys 'X_train',
            'y_train', 'X_test', 'y_test' holding 512x512 single-channel
            image arrays.
    """
    height = 512
    width = 512

    X = tf.placeholder(tf.float32, shape=(None, height, width, 1), name="X")
    Y = tf.placeholder(tf.float32, shape=(None, height, width, 1), name="Y")

    training_op, loss = Model.predict(X, Y, args)

    sess = tf.Session()

    n_epochs = args.n_epochs
    batch_size = args.batch_size
    best_loss = np.infty
    max_checks_without_progress = args.early_stopping_max_checks
    checks_without_progress = 0

    X_train = data['X_train']
    Y_train = data['y_train']
    X_test = data['X_test']
    Y_test = data['y_test']

    # Initialize variables FIRST, then restore a checkpoint if requested.
    # (Running the initializer after Model.load would clobber the restored
    # weights, which is what the original ordering did.)
    sess.run(tf.global_variables_initializer())
    if args.load_checkpoint is not None:
        Model.load(args.load_checkpoint)

    print('Initializing training...')

    for epoch in range(n_epochs):
        X_train, Y_train = Model.unison_shuffled_copies(X_train, Y_train)
        for iteration in range(X_train.shape[0] // batch_size):
            X_batch, Y_batch = Model.data_provider(args, X_train, Y_train, iteration)
            sess.run(training_op, feed_dict={X: X_batch, Y: Y_batch})

        # Evaluate once per 10 epochs, AFTER the batch loop. The original
        # nested this inside the batch loop, re-running the full evaluation
        # on every single batch of every 10th epoch.
        if epoch % 10 == 0:
            loss_train = []
            loss_val = []
            # Validation loss comes from the held-out test split; the
            # original evaluated the training data twice, so loss_val
            # always equaled loss_train and early stopping was meaningless.
            # NOTE(review): data_provider is assumed to take a batch index
            # (as in the training loop above) — confirm against Model.
            for j in range(X_test.shape[0] // batch_size):
                X_val_batch, Y_val_batch = Model.data_provider(args, X_test, Y_test, j)
                loss_val.append(
                    sess.run(loss, feed_dict={X: X_val_batch, Y: Y_val_batch}))
            for j in range(X_train.shape[0] // batch_size):
                X_train_batch, Y_train_batch = Model.data_provider(args, X_train, Y_train, j)
                loss_train.append(
                    sess.run(loss, feed_dict={X: X_train_batch, Y: Y_train_batch}))
            loss_train = np.mean(loss_train)
            loss_val = np.mean(loss_val)
            print(epoch, "Train Loss:", loss_train, "Validation loss:", loss_val)

            if loss_val < best_loss:
                Model.save(epoch, sess, args)
                best_loss = loss_val
                checks_without_progress = 0
            else:
                checks_without_progress += 1
                if checks_without_progress > max_checks_without_progress:
                    print("Early stopping!")
                    Model.save(epoch, sess, args)
                    # Break the EPOCH loop: the original break only exited
                    # the inner batch loop, so training never stopped early.
                    break
    sess.close()


def main(argv):
    """Parse CLI flags and dispatch download / augmentation / training / inference.

    Args:
        argv: Process argument vector (kept for the entry-point signature;
            argparse reads sys.argv itself).
    """
    parser = argparse.ArgumentParser()

    parser.add_argument('--download_data', action='store_true', default=False,
                        help='Turn on to download data to disk.')
    parser.add_argument('--augment_data', action='store_true', default=False,
                        help='Turn on to augment raw data.')

    parser.add_argument('--raw_image_directory', default='../data/raw/',
                        help='Directory for downloaded images')
    parser.add_argument('--augmented_image_directory', default='../data/processed/',
                        help='Augmented image directory')
    parser.add_argument('--augmented_image_filename', default='augmented_images',
                        help='Augmented images filename')

    parser.add_argument('--batch_size', type=int, default=1,
                        help='Batch size for training')
    parser.add_argument('--n_epochs', type=int, default=1000,
                        help='Number of training epochs.')

    parser.add_argument('--saved_model_directory', default='../models/',
                        help='Directory for saving trained models')

    parser.add_argument('--learning_rate', type=float, default=0.001,
                        help='Optimizer learning rate')
    parser.add_argument('--early_stopping_max_checks', type=int, default=20,
                        help='Max checks without improvement for early stopping')
    parser.add_argument('--keep_prob', type=float, default=0.8,
                        help='Keep probability for dropout layers.')

    parser.add_argument('--train', action='store_true', default=False,
                        help='Set to True to train network')
    parser.add_argument('--infer', action='store_true', default=False,
                        help='Set to True to conduct inference on Test images. Trained model must be loaded.')
    parser.add_argument('--load_checkpoint', type=str, default=None,
                        help='Load saved checkpoint, arg=checkpoint_name')

    args = parser.parse_args()

    # Make sure all working directories exist before any stage runs.
    os.makedirs(args.raw_image_directory, exist_ok=True)
    os.makedirs(args.augmented_image_directory, exist_ok=True)
    os.makedirs(args.saved_model_directory, exist_ok=True)

    if args.download_data:
        make_dataset(args)

    if args.augment_data:
        augment_data(args)

    data = np.load(os.path.join(args.augmented_image_directory, args.augmented_image_filename + '.npz'))

    if args.train:
        train(args, data)

    # Only complain about a missing checkpoint when inference was actually
    # requested; the original printed the warning on every non-inference run.
    if args.infer:
        if args.load_checkpoint is not None:
            # NOTE(review): original called `model.infer()` on an undefined
            # lowercase name (NameError). Use the class-level call style
            # used everywhere else in this file; confirm Model.infer's
            # signature (it may need args/sess).
            Model.infer()
        else:
            print('Trained model needs to be loaded for inference.')

# Script entry point: run the CLI driver only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main(sys.argv)

