# language model
import numpy
import theano
import theano.tensor as T
from models import LookupTable, LogisticRegression, GRU, LSTM, Dropout, HiddenLayer, Parser
from algorithm import adadelta
from stream import DStream
import argparse
import logging
import os
import pprint
import numpy as np
import math
import heapq as pq
import cPickle as pkl
import sys
from fuel.datasets import TextFile
from fuel.streams import DataStream
from fuel.transformers import Batch
from fuel.schemes import ConstantScheme

# Module-level logger; handlers/level are expected to be configured elsewhere
# (e.g. by the process entry point) — TODO confirm where basicConfig happens.
logger = logging.getLogger(__name__)

# Get the arguments.
# --proto names a zero-argument config-builder function looked up by name on
# the `configurations` module inside the __main__ block below.
# NOTE(review): this `parser` (an ArgumentParser) is later shadowed by the
# model object `parser = Parser(**configuration)` in __main__; args are
# already parsed here, so the shadowing is harmless but confusing.
parser = argparse.ArgumentParser()
parser.add_argument("--proto",  default="get_config_lm_gru",
                     help="Prototype config to use for config")
args = parser.parse_args()


def evaluate(test_fn, word_stream, pos_stream, nterm_stream, op_stream):
    """Run ``test_fn`` over one full epoch of the four parallel streams.

    Parameters
    ----------
    test_fn : callable
        Compiled evaluation function taking ``(word, pos, nterm, op, use_noise)``
        and returning a ``(y, y_pred)`` pair per batch.
    word_stream, pos_stream, nterm_stream, op_stream :
        Objects exposing ``get_epoch_iterator()`` yielding tuples whose first
        element is the batch matrix for that data type.

    Returns
    -------
    (list, list)
        Per-batch gold labels and per-batch predictions, in epoch order.
    """
    # Fixed: removed dead locals `sums` and `case` that were never used.
    y_res = []
    y_pred_res = []
    batches = zip(word_stream.get_epoch_iterator(),
                  pos_stream.get_epoch_iterator(),
                  nterm_stream.get_epoch_iterator(),
                  op_stream.get_epoch_iterator())
    for word, pos, nterm, op in batches:
        # op is transposed before the call (matching the training loop); the
        # trailing 0 is use_noise — presumably disables dropout at eval time,
        # TODO confirm against Parser.apply.
        y, y_pred = test_fn(word[0], pos[0], nterm[0], op[0].T, 0)
        y_res.append(y)
        y_pred_res.append(y_pred)
    return y_res, y_pred_res

def error(y, y_pred):
    """Return the fraction of positions where ``y`` and ``y_pred`` agree.

    NOTE(review): despite the name, this computes the *match* (accuracy)
    rate, not an error rate — ``np.sum(y_it == y_pred_it)`` counts equal
    entries. The name is kept unchanged for interface compatibility with
    the callers in the training loop.

    Parameters
    ----------
    y, y_pred : iterables of array-like
        Parallel per-batch label arrays of equal length.

    Returns
    -------
    float
        matches / total positions, or 0.0 when the input is empty
        (fixed: previously raised ZeroDivisionError on empty input).
    """
    matches = 0.0
    total = 0.0
    for y_it, y_pred_it in zip(y, y_pred):
        matches += np.sum(y_it == y_pred_it)
        total += len(y_it)
    return matches / total if total else 0.0

if __name__=='__main__':
    # Resolve the config-builder function named by --proto and call it to get
    # a plain dict of model/training options.
    import configurations
    configuration = getattr(configurations, args.proto)()
    logger.info("\nModel options:\n{}".format(pprint.pformat(configuration)))

    # Symbolic inputs for the Theano graph.
    # use_noise: int flag — presumably toggles dropout noise inside the model;
    # TODO confirm against Parser.apply.
    use_noise = T.iscalar()

    word = T.lmatrix()

    pos = T.lmatrix()

    nterm = T.lmatrix()

    op = T.lmatrix()

    # NOTE(review): shadows the module-level argparse `parser`; harmless here
    # because args were parsed at import time.
    parser = Parser(**configuration)

    # Builds the graph; populates parser.cost, parser.y, parser.y_pred, etc.
    parser.apply(word, pos, nterm, op, use_noise)

    # Either resume from a saved checkpoint or persist the fresh initial
    # parameters so a checkpoint file always exists.
    if configuration['reload']:
        parser.load()
    else:
        parser.save()

    # cost normalized by word.shape[0] — presumably the sequence-length /
    # time dimension of the batch; TODO confirm the layout DStream emits.
    cost_mean = parser.cost/word.shape[0]
    cost_sum = parser.cost
    y = parser.y
    y_pred = parser.y_pred
    softmax_y = parser.softmax_y

    # L1/L2 regularization with hard-coded 1e-6 weights.
    params = parser.params
    params_regular = parser.L1 * 1e-6 + parser.L2 * 1e-6

    # Gradients of regularized mean cost; adadelta produces the update rules.
    grads = T.grad(cost_mean + params_regular, params)
    updates = adadelta(params, grads)

    # One DStream per (data type, split) pair.
    word_train = DStream(datatype='word', uses = 'train', config=configuration)
    word_dev = DStream(datatype='word', uses = 'dev', config=configuration)
    word_test = DStream(datatype='word', uses = 'test', config=configuration)

    pos_train = DStream(datatype='pos', uses = 'train', config=configuration)
    pos_dev = DStream(datatype='pos', uses = 'dev', config=configuration)
    pos_test = DStream(datatype='pos', uses = 'test', config=configuration)

    op_train = DStream(datatype='op', uses = 'train', config=configuration)
    op_dev = DStream(datatype='op', uses = 'dev', config=configuration)
    op_test = DStream(datatype='op', uses = 'test', config=configuration)

    nterm_train = DStream(datatype='nterm', uses = 'train', config=configuration)
    nterm_dev = DStream(datatype='nterm', uses = 'dev', config=configuration)
    nterm_test = DStream(datatype='nterm', uses = 'test', config=configuration)

    # Compiled functions: train_fn applies the adadelta updates; test_fn only
    # reads out gold labels and predictions.
    train_fn = theano.function([word, pos, nterm, op, use_noise], [cost_mean], updates=updates)

    test_fn = theano.function([word, pos, nterm, op, use_noise], [y, y_pred])

    # Best-so-far bookkeeping. NOTE(review): valid_ppl_best and test_ppl_best
    # are never reassigned below (the updates are commented out), so the final
    # log line always reports -1 for both.
    iters = 0
    valid_ppl_best = -1
    test_ppl_best = -1
    epoch_best = -1
    iters_best = -1
    fscore_best = -1
    max_epochs = configuration['finish_after']
    #decoder


    # Baseline evaluation before any training step.
    import decoder
    y, y_pred = evaluate(test_fn, word_dev, pos_dev, nterm_dev, op_dev)
    valid_ppl = error(y, y_pred)
    y, y_pred = evaluate(test_fn, word_test, pos_test, nterm_test, op_test)
    test_ppl = error(y, y_pred)
    # NOTE(review): redundant import — os is already imported at the top of
    # the file.
    import os
    try:
        os.mkdir('./res')
    except OSError:
        pass
    # NOTE(review): decoder.eval is called with one argument here but with two
    # at the in-training checkpoint below — confirm the signature allows both.
    fscore = decoder.eval('./res/' + str(iters))

    logger.info('Before training, valid_score %.4f test_score %.4f f_score %.4f' %(valid_ppl, test_ppl, fscore))


    lastmodel = ""
    for epoch in range(max_epochs):
        #for data, mask in ds.get_epoch_iterator():
        for word_item, pos_item, nterm_item, op_item in zip(word_train.get_epoch_iterator(), pos_train.get_epoch_iterator(), nterm_train.get_epoch_iterator(), op_train.get_epoch_iterator()):
            # NOTE(review): use_noise is passed as 0 during training as well —
            # presumably this disables dropout noise everywhere; confirm intent.
            _ = train_fn(word_item[0], pos_item[0], nterm_item[0], op_item[0].T, 0)
            iters += 1
            # Periodic checkpoint: evaluate on dev and test, save the model
            # tagged with the update count, and run the external decoder.
            if iters % configuration['save_freq'] == 0:
                y, y_pred = evaluate(test_fn, word_dev, pos_dev, nterm_dev, op_dev)
                valid_ppl = error(y, y_pred)
                y, y_pred = evaluate(test_fn, word_test, pos_test, nterm_test, op_test)
                test_ppl = error(y, y_pred)
                parser.save(str(iters))
                fscore = decoder.eval('./res/' + str(iters), str(iters))

                # Keep only the most recent iteration-tagged checkpoint.
                # NOTE(review): assumes parser.save(tag) writes to
                # tag + configuration['saveto'] — confirm against Parser.save.
                if lastmodel != "":
                    os.remove(lastmodel)
                lastmodel = str(iters) + configuration['saveto']

                logger.info('epoch %d \t updates %d valid_ppl %.4f test_ppl %.4f f_score %.4f' %(epoch, iters, valid_ppl, test_ppl, fscore))
                # NOTE(review): fscore == -1 (decoder failure sentinel?) still
                # overwrites fscore_best and saves the model — confirm this is
                # intentional rather than a bug.
                if fscore == -1 or fscore > fscore_best:
                    fscore_best = fscore
                    #valid_ppl_best = valid_ppl
                    #test_ppl_best = test_ppl
                    epoch_best = epoch
                    iters_best = iters
                    parser.save()
    # See NOTE above: valid_ppl_best/test_ppl_best are still their initial -1.
    logger.info('final result: epoch %d \t updates %d valid_ppl %.4f test_ppl %.4f'
                            %(epoch_best, iters_best, valid_ppl_best, test_ppl_best))
