from py._path.svnwc import cache

__author__ = 'Administrator'
import lasagne
from lasagne.updates import *
from lasagne.layers import *
import numpy
import theano
import theano.tensor as T
from lasagne.regularization import regularize_layer_params_weighted, l2, l1
from Sentence import Sentence, Datasets
from time import *
from progressbar import *
from Optimizers import *
from sklearn.metrics import *
from sklearn.cross_validation import *
# Placed after the star imports on purpose: `from time import *` binds the
# name `time` to the time() function, which breaks the `time.sleep(...)`
# calls below; importing the module last makes `time` the module again.
import time


def build_model(input_var=None,
                input_avg=None,
                vector_length=300,
                num_classes=10,
                windows=None,
                num_filters=25,
                lamada=1e-6,
                num_units=10,
                k=2):
    """Build a multi-window CNN with k-max pooling for sentence classification.

    The network convolves a (1, 1, seqlen, vector_length) stack of word
    embeddings with one Conv2DLayer per window size, keeps the k largest
    activations of every filter, concatenates them with the sentence-average
    embedding, and feeds the result through a tanh hidden layer into a
    softmax output layer.

    :param input_var: symbolic 4D input, shape (1, 1, seqlen, vector_length)
    :param input_avg: symbolic (1, vector_length) average-embedding matrix
    :param vector_length: word-embedding dimensionality
    :param num_classes: number of softmax output units
    :param windows: convolution window heights; defaults to [1, 2, 3, 4, 5]
    :param num_filters: filters per window size
    :param lamada: per-layer regularization weight ("lambda" misspelled;
        name kept for caller compatibility)
    :param num_units: hidden dense-layer size
    :param k: number of activations kept per filter by the k-max pooling
    :return: (output layer, list of trainable parameters,
        symbolic regularization loss)
    """
    if windows is None:
        windows = [1, 2, 3, 4, 5]

    # layer -> weight mapping consumed by regularize_layer_params_weighted
    layers_l2_penalty = dict()
    l_inp = InputLayer(
        (1, 1, None, vector_length), input_var=input_var
    )
    # Symbolic references to the input variable's shape; seqlen is used
    # below to reshape the convolution output (batch size is fixed at 1).
    batchsize, _, seqlen, _ = l_inp.input_var.shape
    conv_layers = list()
    parameters = list()

    # input layer for the pre-computed sentence-average embedding
    l_inp_avg = InputLayer(
        shape=(1, vector_length),
        input_var=input_avg,
    )
    # one full-width (filter spans the whole embedding) conv per window height
    for window in windows:
        conv_layer = lasagne.layers.Conv2DLayer(
            incoming=l_inp,
            num_filters=num_filters,
            filter_size=(window, vector_length),
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.GlorotUniform()
        )
        # collect the conv weights for training and regularization
        parameters += conv_layer.get_params()
        layers_l2_penalty[conv_layer] = lamada
        # flatten the conv output to (num_filters, positions) for pooling
        reshape_layer = lasagne.layers.ReshapeLayer(conv_layer, shape=(num_filters, seqlen - window + 1))
        # NOTE(review): the conv stack is evaluated symbolically here, at
        # graph-build time, so the k-max pooling can be written with raw
        # Theano ops and re-entered through fresh InputLayers below.
        predicted = lasagne.layers.get_output(reshape_layer, inputs=input_var)
        # argsort is ascending along each filter row, so after the
        # transpose, max_index[-i] holds every filter's i-th largest position
        max_index = T.argsort(predicted).T

        # k-max pooling: keep the k strongest activations of every filter
        for i in xrange(1, k + 1):
            input_to_new = predicted[T.arange(num_filters), max_index[-i]].reshape((1, num_filters))
            new_input_layer = lasagne.layers.InputLayer(shape=(1, num_filters), input_var=input_to_new)
            conv_layers.append(new_input_layer)

    # append the average-embedding input and concatenate everything into one
    # (1, len(windows) * k * num_filters + vector_length) feature row
    conv_layers.append(l_inp_avg)
    conv_combine = ConcatLayer(conv_layers, axis=1)

    print "conv output shape is -----> ", conv_combine.output_shape
    l_hidden_1 = lasagne.layers.DenseLayer(
        incoming=conv_combine,
        num_units=num_units,
        nonlinearity=lasagne.nonlinearities.tanh,
        W=lasagne.init.GlorotUniform(),
    )
    # register hidden-layer weights for training and regularization
    layers_l2_penalty[l_hidden_1] = lamada
    parameters += l_hidden_1.get_params()

    l_out = lasagne.layers.DenseLayer(
        incoming=l_hidden_1,
        num_units=num_classes,
        nonlinearity=lasagne.nonlinearities.softmax,
        W=lasagne.init.GlorotUniform(),
    )
    # register output-layer weights for training and regularization
    layers_l2_penalty[l_out] = lamada
    parameters += l_out.get_params()

    # NOTE: both l2 and l1 penalties are applied with the same weight dict,
    # so the effective penalty per layer is lamada * (l2 + l1)
    layers_penalty_loss = regularize_layer_params_weighted(layers_l2_penalty, l2) + \
                          regularize_layer_params_weighted(layers_l2_penalty, l1)

    print "output shape is ----->", l_out.output_shape
    print "parameters is -----> ", parameters
    return l_out, parameters, layers_penalty_loss


def main(
        vector_length=300,
        iteration_times=50,
        num_units=10,
        num_filters=10,
):
    train_path = ".//data//trec_train.txt"
    test_path = ".//data//trec_test.txt"

    datasets = Datasets()
    train = datasets.get_data_from_file(padding=3, filename=train_path)
    test_data = datasets.get_data_from_file(padding=3, filename=test_path)
    train_data, val_data = train_test_split(train, test_size=0.15)

    # define the inputs
    input_x = T.tensor4('input')
    input_avg = T.matrix('input_avg')
    input_y = T.ivector('output')

    print "build the output layer"
    output_layer, all_params, layers_penalty_loss = build_model(
        input_var=input_x,
        input_avg=input_avg,
        vector_length=vector_length,
        num_filters=num_filters,
        num_units=num_units
    )

    # predicted = lasagne.layers.get_output(output_layer, inputs=input_x)
    train_predicted = lasagne.layers.get_output(output_layer, deterministic=False)
    # calculate the train loss with penalty loss
    loss_train = lasagne.objectives.categorical_crossentropy(train_predicted, input_y).mean() \
                 + layers_penalty_loss

    # get the predicted label of test model---only in train model
    test_pred = T.argmax(
        lasagne.layers.get_output(output_layer, deterministic=True), axis=1
    )

    # update the parameters with adadelta method
    updates = sgd_updates_adadelta(all_params, loss_train)
    print "build the train model"
    train_model = theano.function(
        inputs=[input_x, input_avg, input_y],
        outputs=loss_train,
        updates=updates
    )
    print "build the test model"
    test_model = theano.function(
        inputs=[input_x, input_avg, input_y],
        outputs=[test_pred, input_y],
    )

    openfile_rep = open(
        "max_3_pooling_result//" + "hidden=" + str(num_units) + "_" + "filters=" + str(num_filters) + "_rep_result.txt",
        'w')
    openfile_acc = open(
        "max_3_pooling_result//" + "hidden=" + str(num_units) + "_" + "filters=" + str(num_filters) + "_acc_result.txt",
        'w')
    for it in xrange(iteration_times):
        # train the model
        train_the_model(
            train_data=train_data,
            train_model=train_model,
            it=it,
            datasets=datasets,
            vector_length=vector_length,
        )
        # validation the model
        rep, acc = val_or_test_the_model(
            val_or_test_data=val_data,
            test_model=test_model,
            datasets=datasets,
            it=it,
            vector_length=vector_length,
            type=str('----validation----')
        )
        openfile_rep.write("validation in it " + str(it + 1) + "\n")
        openfile_rep.write(str(rep) + "\n")
        openfile_acc.write("validation in it " + str(it + 1) + "\n")
        openfile_acc.write(str(acc) + "\n")
        # test the model
        rep, acc = val_or_test_the_model(
            val_or_test_data=test_data,
            test_model=test_model,
            datasets=datasets,
            it=it,
            vector_length=vector_length,
            type=str('----test----')
        )
        openfile_rep.write("test in it " + str(it + 1) + "\n")
        openfile_rep.write(str(rep) + "\n")
        openfile_rep.write("**************************************************" + "\n")
        openfile_acc.write("test in it " + str(it + 1) + "\n")
        openfile_acc.write(str(acc) + "\n")
        openfile_acc.write("**************************************************" + "\n")
    openfile_rep.close()
    openfile_acc.close()


def train_the_model(train_data, train_model, datasets, vector_length, it):
    time.sleep(0.5)
    train_index = 0
    perm = numpy.random.permutation(len(train_data))
    print "train the data, and It is in iterator time ", it + 1
    time.sleep(0.5)
    widgets = [Bar('>'), ' ', ETA(), ' ', ReverseBar('<')]
    pbar = ProgressBar(widgets=widgets, maxval=len(train_data)).start()
    for index in xrange(len(train_data)):
        sentence = train_data[perm[index]]
        sentence_length = len(sentence.words_idx)
        input_to_all = datasets.preprocessing.WORDS[sentence.words_idx]
        input_word_embedding = input_to_all.reshape(
            (1, 1, sentence_length, vector_length))
        # remember to take the average of the data
        input_word_average = input_to_all.sum(axis=0).reshape(1, vector_length) / (len(sentence.words_idx))
        train_model(input_word_embedding, input_word_average, sentence.label)
        train_index += 1
        pbar.update(train_index)
    pbar.finish()


def val_or_test_the_model(val_or_test_data, test_model, datasets, it, vector_length, type=str()):
    predicted_result = list()
    labeled_result = list()
    val_or_test_index = 0
    time.sleep(0.5)
    print type + " the model, and It is in iterator time ", it + 1
    time.sleep(0.5)
    widgets = [Bar('>'), ' ', ETA(), ' ', ReverseBar('<')]
    pbar = ProgressBar(widgets=widgets, maxval=len(val_or_test_data)).start()
    for sentence in val_or_test_data:
        sentence_length = len(sentence.words_idx)
        input_to_all = datasets.preprocessing.WORDS[sentence.words_idx]
        input_word_embedding = input_to_all.reshape(
            (1, 1, sentence_length, vector_length))
        input_word_average = input_to_all.sum(axis=0).reshape(1, vector_length) / (len(sentence.words_idx))
        predicted, labeled = test_model(input_word_embedding, input_word_average, sentence.label)
        predicted_result.append(predicted[0])
        labeled_result.append(labeled[0])
        val_or_test_index += 1
        pbar.update(val_or_test_index)
    pbar.finish()
    time.sleep(0.5)
    report = classification_report(numpy.array(labeled_result),
                                   numpy.array(predicted_result),
    )
    acc = accuracy_score(numpy.array(labeled_result), numpy.array(predicted_result))
    print report
    print type, "score is ", acc
    time.sleep(0.5)
    return report, acc


if __name__ == '__main__':

    # Hyper-parameter grid: every hidden-layer size is paired with every
    # filter count (150 appears twice, so that setting runs twice).
    vector_length = 300
    hidden_unit_options = [20]
    filter_count_options = [150, 150]

    for hidden_units in hidden_unit_options:
        for filter_count in filter_count_options:
            main(
                vector_length=vector_length,
                iteration_times=30,
                num_filters=filter_count,
                num_units=hidden_units,
            )