"""
Training script to train a model on only SNLI data. MultiNLI data is loaded into the embeddings enabling us to test the model on MultiNLI data.
"""

import tensorflow as tf
import os
import importlib
import random
from ocnli.mnli_code.util import logger
import ocnli.mnli_code.util.parameters as params
from ocnli.mnli_code.util.data_processing import *
from ocnli.mnli_code.util.evaluate import *

# Load the fixed hyperparameter settings and set up file logging.
FIXED_PARAMETERS = params.load_parameters()
modname = FIXED_PARAMETERS["model_name"]
logpath = os.path.join(FIXED_PARAMETERS["log_path"], modname) + ".log"
logger = logger.Logger(logpath)

# Name of the model architecture to train (resolved as a module under ocnli.mnli_code.models).
model = FIXED_PARAMETERS["model_type"]

# Dynamically import the requested model module and grab its MyModel class.
module = importlib.import_module(".".join(['ocnli.mnli_code.models', model])) 
MyModel = getattr(module, 'MyModel')

# Logging parameter settings at each launch of training script
# This will help ensure nothing goes awry in reloading a model and we consistently use the same hyperparameter settings. 
logger.Log("FIXED_PARAMETERS\n %s" % FIXED_PARAMETERS)

######################### LOAD DATA #############################

logger.Log("Loading data")
# SNLI is the only corpus actually loaded; MultiNLI slots below stay empty so
# downstream helpers can be handed a uniform list of dataset splits.
training_snli = load_nli_data(FIXED_PARAMETERS["training_snli"], snli=True, partial_input=FIXED_PARAMETERS["partial_input"])
dev_snli = load_nli_data(FIXED_PARAMETERS["dev_snli"], snli=True, partial_input=FIXED_PARAMETERS["partial_input"])

# The test set is optional: fall back to an empty list if it cannot be loaded.
try:
    test_snli = load_nli_data(FIXED_PARAMETERS["test_snli"], snli=True, partial_input=FIXED_PARAMETERS["partial_input"])
except Exception:
    test_snli = []
    logger.Log('WARNING: Missing test set...')

## empty MultiNLI placeholders (this script trains/evaluates on SNLI only)
training_mnli = []
dev_matched = []
dev_mismatched = []
test_matched = []
test_mismatched = []

ORIG_TEST_SCORE = 0.0

# Use an explicitly supplied dictionary path if given, otherwise derive one
# from the log path and model name.
dictpath = os.path.join(FIXED_PARAMETERS["log_path"], modname) + ".p" \
  if not FIXED_PARAMETERS["dictionary"] else FIXED_PARAMETERS["dictionary"]

logger.Log("Building dictionary, dictionary=%s" % dictpath)

if not os.path.isfile(dictpath):
    # Build the vocabulary from all available SNLI splits and persist it for
    # reuse.  `with` blocks close the file handles deterministically (the
    # original passed bare open() calls to pickle and leaked them).
    word_indices = build_dictionary([training_snli, dev_snli, test_snli])
    with open(dictpath, "wb") as dict_out:
        pickle.dump(word_indices, dict_out)
    with open(os.path.join(FIXED_PARAMETERS["log_path"], "vocab.json"), 'w') as vocab_out:
        vocab_out.write(json.dumps(word_indices))
else:
    logger.Log("Loading dictionary from %s" % (dictpath))
    with open(dictpath, "rb") as dict_in:
        word_indices = pickle.load(dict_in)

# Convert every split (including the empty MultiNLI placeholders) to padded
# index sequences in place.  Logged once here (previously logged three times).
logger.Log("Padding and indexifying sentences")
sentences_to_padded_index_sequences(word_indices, [training_snli,
                                                       training_mnli,
                                                       dev_matched,
                                                       dev_mismatched,
                                                       dev_snli,
                                                       test_snli,
                                                       test_matched,
                                                       test_mismatched])

logger.Log("Loading embeddings")
loaded_embeddings = loadEmbedding_rand(FIXED_PARAMETERS["embedding_data_path"], word_indices)

class modelClassifier:
    """Trainer/evaluator wrapper around a single NLI model.

    Builds the model graph, an Adam training op, and a tf.train.Saver, and
    exposes train/restore/classify helpers.  Relies on module-level globals:
    FIXED_PARAMETERS, logger, loaded_embeddings, MyModel, model, modname,
    test_snli, dev_snli.
    """

    def __init__(self, seq_length):
        ## Hyperparameters (all sourced from FIXED_PARAMETERS)
        self.learning_rate = FIXED_PARAMETERS["learning_rate"]
        self.display_epoch_freq = 1    # log epoch statistics every epoch
        self.display_step_freq = 50    # evaluate dev/train accuracy every 50 steps
        self.embedding_dim = FIXED_PARAMETERS["word_embedding_dim"]
        self.dim = FIXED_PARAMETERS["hidden_embedding_dim"]
        self.batch_size = FIXED_PARAMETERS["batch_size"]
        self.emb_train = FIXED_PARAMETERS["emb_train"]
        self.keep_rate = FIXED_PARAMETERS["keep_rate"]
        self.sequence_length = FIXED_PARAMETERS["seq_length"]
        self.alpha = FIXED_PARAMETERS["alpha"]

        logger.Log("Building model from %s.py" %(model))
        self.model = MyModel(seq_length=self.sequence_length, emb_dim=self.embedding_dim,  hidden_dim=self.dim, embeddings=loaded_embeddings, emb_train=self.emb_train)

        # Perform gradient descent with Adam
        self.optimizer = tf.train.AdamOptimizer(self.learning_rate, beta1=0.9, beta2=0.999).minimize(self.model.total_cost)

        # Boolean stating that training has not been completed
        self.completed = False

        # tf bookkeeping: variable initializer, session placeholder, saver
        logger.Log("Initializing variables")
        self.init = tf.global_variables_initializer()
        self.sess = None
        self.saver = tf.train.Saver()
        logger.Log("sess=%s" % self.sess)

    def get_minibatch(self, dataset, start_index, end_index):
        """Slice dataset[start_index:end_index] into model-ready arrays.

        Returns (premise_vectors, hypothesis_vectors, labels, genres).
        """
        indices = range(start_index, end_index)
        premise_vectors = np.vstack([dataset[i]['sentence1_binary_parse_index_sequence'] for i in indices])
        hypothesis_vectors = np.vstack([dataset[i]['sentence2_binary_parse_index_sequence'] for i in indices])
        genres = [dataset[i]['genre'] for i in indices]
        labels = [dataset[i]['label'] for i in indices]
        return premise_vectors, hypothesis_vectors, labels, genres

    def train(self, train_mnli, train_snli, dev_mat, dev_mismat, dev_snli):
        """Train on SNLI with periodic dev evaluation and early stopping.

        The mnli/matched/mismatched arguments are accepted for interface
        compatibility but ignored (this script trains on SNLI only).

        Returns (best_dev_snli_accuracy, orig_test_score), where the latter is
        the restored warm-start checkpoint's SNLI test accuracy (0.0 if no
        checkpoint was restored).
        """
        self.sess = tf.Session()
        self.sess.run(self.init)

        self.step = 1
        self.epoch = 0
        self.best_dev_snli = 0.
        self.best_strain_acc = 0.
        self.last_train_acc = [.001, .001, .001, .001, .001]
        self.best_step = 0

        # BUG FIX: initialize here so early-stopping/checkpoint bookkeeping is
        # defined even when an epoch ends before the first 50-step evaluation
        # has fired (previously an UnboundLocalError for small datasets).
        dev_acc_snli = 0.
        strain_acc = 0.

        # Checkpoint path for periodic saves during this run.
        ckpt_file = os.path.join(FIXED_PARAMETERS["ckpt_path"], modname) + ".ckpt"

        ### Optionally warm-start from an arbitrary previously trained model.
        restored_model = FIXED_PARAMETERS["continue_training"] + ".ckpt" if ".ckpt" not in FIXED_PARAMETERS["continue_training"] else \
          FIXED_PARAMETERS["continue_training"]
        logger.Log("restored_model=%s" % restored_model)
        orig_test_score = 0.0

        if os.path.isfile(restored_model + ".meta"):
            if os.path.isfile(restored_model + "_best.meta"):
                self.saver.restore(self.sess, (restored_model + "_best"))
                test_out = evaluate_classifier(self.classify, test_snli, FIXED_PARAMETERS["batch_size"])[0]
                logger.Log("Model restored=%s" % restored_model + "_best")
                logger.Log("Acc on SNLI test-set: %s" % (test_out))
                orig_test_score = test_out
                # BUG FIX: previously logged the module constant
                # ORIG_TEST_SCORE (always 0.0) instead of the measured score.
                logger.Log("Acc on SNLI test-set: %f" % orig_test_score)

        training_data = train_snli

        # BUG FIX: return a pair here as well -- the caller unpacks two values,
        # so the old bare `return 0.0` raised a TypeError in no-train mode.
        if FIXED_PARAMETERS["no_train"]:
            return 0.0, orig_test_score

        ### Training cycle
        logger.Log("Training...")

        while True:
            random.shuffle(training_data)
            avg_cost = 0.
            total_batch = int(len(training_data) / self.batch_size)

            # Loop over all batches in epoch
            for i in range(total_batch):
                # Assemble a minibatch of the next B examples
                minibatch_premise_vectors, minibatch_hypothesis_vectors, minibatch_labels, minibatch_genres = self.get_minibatch(
                    training_data, self.batch_size * i, self.batch_size * (i + 1))

                # Run the optimizer to take a gradient step, and also fetch the
                # value of the cost function for logging
                feed_dict = {self.model.premise_x: minibatch_premise_vectors,
                                self.model.hypothesis_x: minibatch_hypothesis_vectors,
                                self.model.y: minibatch_labels,
                                self.model.keep_rate_ph: self.keep_rate}
                _, c = self.sess.run([self.optimizer, self.model.total_cost], feed_dict)

                # Since a single epoch can take ages for larger models (ESIM),
                # print accuracy every 50 steps.
                if self.step % self.display_step_freq == 0:
                    dev_acc_snli, dev_cost_snli = evaluate_classifier(self.classify, dev_snli, self.batch_size)
                    strain_acc, strain_cost = evaluate_classifier(self.classify, train_snli[0:5000], self.batch_size)

                    # Matched/mismatched columns are logged as 0.0 because
                    # MultiNLI dev sets are not used in this script.
                    logger.Log("Step: %i\t Dev-matched acc: %f\t Dev-mismatched acc: %f\t Dev-SNLI acc: %f\t SNLI train acc: %f" %(self.step, 0.0, 0.0, dev_acc_snli, strain_acc))
                    logger.Log("Step: %i\t Dev-matched cost: %f\t Dev-mismatched cost: %f\t Dev-SNLI cost: %f\t SNLI train cost: %f" %(self.step, 0.0, 0.0, dev_cost_snli, strain_cost))

                if self.step % 500 == 0:
                    self.saver.save(self.sess, ckpt_file)
                    # Checkpoint as "best" on a >0.04% relative dev improvement.
                    # BUG FIX: guard against division by zero before any
                    # evaluation has produced a non-zero dev accuracy.
                    if dev_acc_snli > 0:
                        best_test = 100 * (1 - self.best_dev_snli / dev_acc_snli)
                        if best_test > 0.04:
                            self.saver.save(self.sess, ckpt_file + "_best")
                            self.best_dev_snli = dev_acc_snli
                            self.best_strain_acc = strain_acc
                            self.best_step = self.step
                            logger.Log("Checkpointing with new best SNLI-dev accuracy: %f" %(self.best_dev_snli))

                self.step += 1

                # Compute average loss
                avg_cost += c / (total_batch * self.batch_size)

            # Display some statistics about the epoch
            if self.epoch % self.display_epoch_freq == 0:
                logger.Log("Epoch: %i\t Avg. Cost: %f" %(self.epoch+1, avg_cost))

            self.epoch += 1
            self.last_train_acc[(self.epoch % 5) - 1] = strain_acc

            # Early stopping: stop once training accuracy plateaus over the
            # last 5 epochs, or once 30k steps have passed since the best step.
            progress = 1000 * (sum(self.last_train_acc)/(5 * min(self.last_train_acc)) - 1)

            if (progress < 0.1) or (self.step > self.best_step + 30000):
                logger.Log("Best snli-dev accuracy: %s" % str(self.best_dev_snli))
                self.completed = True
                break

        ## return best dev performance
        return self.best_dev_snli, orig_test_score

    def restore(self, best=True):
        """Restore the best (default) or the most recent checkpoint."""
        # BUG FIX: honour the `best` flag; the original hard-coded `if True:`
        # so the most-recent-checkpoint branch was unreachable.
        if best:
            path = os.path.join(FIXED_PARAMETERS["ckpt_path"], modname) + ".ckpt_best"
        else:
            path = os.path.join(FIXED_PARAMETERS["ckpt_path"], modname) + ".ckpt"

        try:
            self.sess = tf.Session()
            self.sess.run(self.init)
            self.saver.restore(self.sess, path)
            logger.Log("Model restored from file: %s" % path)
        except ValueError:
            logger.Log('WARNING: No best checkpoint found, using last one..')

    def restore_model(self):
        """Restore the warm-start checkpoint and log SNLI dev/test accuracy."""
        self.sess = tf.Session()
        self.sess.run(self.init)

        restored_model = FIXED_PARAMETERS["continue_training"] + ".ckpt" if ".ckpt" not in FIXED_PARAMETERS["continue_training"] else \
          FIXED_PARAMETERS["continue_training"]

        self.saver.restore(self.sess, (restored_model + "_best"))

        test_out = evaluate_classifier(self.classify, test_snli, FIXED_PARAMETERS["batch_size"])[0]
        logger.Log("Model restored=%s" % restored_model + "_best")
        logger.Log("Acc on SNLI test-set: %s" % (test_out))

        test_out, total = evaluate_full(self.classify_out,
                                       test_snli,
                                       FIXED_PARAMETERS["batch_size"],
                                       name='test', ofile=None)

        best_dev, dtotal = evaluate_full(self.classify_out,
                                       dev_snli,
                                       FIXED_PARAMETERS["batch_size"],
                                       name='dev', ofile=None)

        logger.Log("Best dev score: %s (tested on %d examples)" % (best_dev, dtotal))
        logger.Log("Acc on SNLI test-set: %s (tested on %d examples)" % (test_out, total))

    def classify(self, examples):
        """Classify a list of examples in batches of self.batch_size.

        Returns (genres, predicted_label_indices, cost-of-last-batch).
        Examples beyond the last full batch are dropped.
        """
        total_batch = int(len(examples) / self.batch_size)
        logits = np.empty(3)   # dummy first row, stripped before argmax
        genres = []
        for i in range(total_batch):
            minibatch_premise_vectors, minibatch_hypothesis_vectors, minibatch_labels, minibatch_genres = self.get_minibatch(
                examples, self.batch_size * i, self.batch_size * (i + 1))
            feed_dict = {self.model.premise_x: minibatch_premise_vectors,
                                self.model.hypothesis_x: minibatch_hypothesis_vectors,
                                self.model.y: minibatch_labels,
                                self.model.keep_rate_ph: 1.0}
            genres += minibatch_genres
            logit, cost = self.sess.run([self.model.logits, self.model.total_cost], feed_dict)
            logits = np.vstack([logits, logit])

        return genres, np.argmax(logits[1:], axis=1), cost

    def classify_dev(self, examples):
        """Classify a list of examples one at a time (batch size 1)."""
        batch_size = 1
        total_batch = int(len(examples) / batch_size)
        logits = np.empty(3)   # dummy first row, stripped before argmax
        genres = []
        for i in range(total_batch):
            # BUG FIX: minibatch offsets previously used self.batch_size while
            # total_batch assumed a batch size of 1, so the wrong (and
            # overlapping) examples were read.  Use the local batch_size.
            minibatch_premise_vectors, minibatch_hypothesis_vectors, minibatch_labels, minibatch_genres = self.get_minibatch(
                examples, batch_size * i, batch_size * (i + 1))
            feed_dict = {self.model.premise_x: minibatch_premise_vectors,
                                self.model.hypothesis_x: minibatch_hypothesis_vectors,
                                self.model.y: minibatch_labels,
                                self.model.keep_rate_ph: 1.0}
            genres += minibatch_genres
            logit, cost = self.sess.run([self.model.logits, self.model.total_cost], feed_dict)
            logits = np.vstack([logits, logit])

        return genres, np.argmax(logits[1:], axis=1), cost

    def classify_out(self, examples):
        """Classify one example at a time and return the raw logits.

        Returns (logits, predicted_label_indices, cost-of-last-batch).
        """
        batch_size = 1
        total_batch = int(len(examples) / batch_size)
        logits = np.empty(3)   # dummy first row, stripped before return

        for i in range(total_batch):
            minibatch_premise_vectors, minibatch_hypothesis_vectors, minibatch_labels, minibatch_genres = self.get_minibatch(
                examples, batch_size * i, batch_size * (i + 1))

            feed_dict = {self.model.premise_x: minibatch_premise_vectors,
                                self.model.hypothesis_x: minibatch_hypothesis_vectors,
                                self.model.y: minibatch_labels,
                                self.model.keep_rate_ph: 1.0}
            logit, cost = self.sess.run([self.model.logits, self.model.total_cost], feed_dict)
            logits = np.vstack([logits, logit])

        return logits[1:], np.argmax(logits[1:], axis=1), cost

classifier = modelClassifier(FIXED_PARAMETERS["seq_length"])

"""
Either train the model and then run it on the test-sets, or
load the best checkpoint and get accuracy on the test set. Default setting is to train the model.
"""

test = params.train_or_test()

# While test-set isn't released, use dev-sets for testing
test_matched = dev_matched
test_mismatched = dev_mismatched


if not test:
    logger.Log('Random seed=%d' % FIXED_PARAMETERS["random_seed"])
    random.seed(FIXED_PARAMETERS["random_seed"])

    ## train
    best_dev, orig_test_score = classifier.train(training_mnli,
                                                 training_snli,
                                                 dev_matched,
                                                 dev_mismatched,
                                                 dev_snli)

    ## restore best model before final evaluation
    if not FIXED_PARAMETERS["no_train"]: classifier.restore()

    ### evaluate on the test set if one was provided
    if test_snli:
        test_out, total = evaluate_full(classifier.classify_out,
                                        test_snli,
                                        1,
                                        name='test', ofile=None)
    else:
        # BUG FIX: `total` must be an int -- it is formatted with %d below
        # (a "-1" string raised a TypeError when the test set was missing).
        test_out = "-1"
        total = -1

    best_dev, dtotal = evaluate_full(classifier.classify_out,
                                     dev_snli,
                                     1,
                                     name='dev', ofile=None)

    logger.Log("Best dev score: %s (tested on %d examples)" % (best_dev, dtotal))
    logger.Log("Acc on SNLI test-set: %s (tested on %d examples)" % (test_out, total))

    ### dump final metrics as json
    with open(os.path.join(FIXED_PARAMETERS["wdir"], "metrics.json"), 'w') as metrics:
        outgoing = {
          "evaluation_accuracy" : test_out,
          "best_dev_accuracy"   : best_dev,
        }
        metrics.write(json.dumps(outgoing, indent=4))
        metrics.write("\n")

    ### optionally remove checkpoints to reclaim disk space
    if FIXED_PARAMETERS["remove_models"]:
        for out_file in os.listdir(FIXED_PARAMETERS["ckpt_path"]):
            if ".ckpt" in out_file:
                logger.Log("removing file: %s" % out_file)
                os.remove(os.path.join(FIXED_PARAMETERS["ckpt_path"], out_file))

else:
    # Test-only mode: restore the best checkpoint and evaluate on SNLI test.
    results = evaluate_final(classifier.restore, classifier.classify, [test_snli], FIXED_PARAMETERS["batch_size"])
    logger.Log("Acc on SNLI test set: %s" %(results[0]))