'''Grid-searches an LSTM classifier on pulsar-candidate sub-integration data.

Sub-integration arrays (18 x 64) are read from Caffe LMDB databases; a small
LSTM is trained for every combination of output dimension, epoch count, batch
size, learning rate and momentum, and precision/recall/F-score on the test
set are appended to a result file.

Notes:

- RNNs are tricky. The choice of batch size is important, and the choice of
loss and optimizer is critical; some configurations won't converge.

- LSTM loss-decrease patterns during training can be quite different
from what you see with CNNs/MLPs/etc.
'''


import lmdb
import caffe
import random
import numpy as np
np.random.seed(1337)  # for reproducibility

import keras
from keras.preprocessing import sequence
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Embedding
from keras.layers import LSTM, SimpleRNN, GRU
from keras.datasets import imdb


def load_data(lmdb_path):
    """Read (feature, label) pairs from a Caffe LMDB and return them shuffled.

    Each datum's raw bytes are decoded as float64 and reshaped to
    (channels, height, width); the results are packed into a fixed
    (num, 18, 64) array, so every datum is assumed to hold an 18 x 64
    sub-integration (single channel) — TODO confirm against the LMDB writer.

    Parameters
    ----------
    lmdb_path : str
        Path to the LMDB directory produced by Caffe.

    Returns
    -------
    (numpy.ndarray, numpy.ndarray)
        Features of shape (num, 18, 64) and labels of shape (num,),
        shuffled in unison.
    """
    img_lmdb = lmdb.open(lmdb_path)
    try:
        txn = img_lmdb.begin()
        cursor = txn.cursor()
        datum = caffe.proto.caffe_pb2.Datum()
        features = []
        labels = []
        for key, value in cursor:
            datum.ParseFromString(value)
            # np.frombuffer replaces the deprecated np.fromstring.
            flat_x = np.frombuffer(datum.data, dtype=np.float64)
            x = flat_x.reshape(datum.channels, datum.height, datum.width)
            features.append(x)
            labels.append(datum.label)
    finally:
        # Close the LMDB environment even if a datum fails to parse.
        img_lmdb.close()

    num = len(features)
    res_features = np.zeros((num, 18, 64))
    res_labels = np.zeros(num)
    # Shuffle features and labels in unison via a shared permutation.
    index = list(range(num))
    random.shuffle(index)
    for i, j in enumerate(index):
        res_features[i, :, :] = features[j]
        res_labels[i] = labels[j]
    print(num)
    return res_features, res_labels

def assessment(predict_label, ground_truth):
    """Compute precision and recall for binary (0/1) predictions.

    Parameters
    ----------
    predict_label : array-like
        Predicted labels; shape (n,) or (n, 1) — `predict_classes`
        returns the latter, so both are flattened before counting.
    ground_truth : array-like
        True labels, shape (n,) or (n, 1).

    Returns
    -------
    (float, float)
        (precision, recall); a small epsilon in each denominator guards
        against division by zero when there are no positives.
    """
    pred = np.asarray(predict_label).ravel()
    truth = np.asarray(ground_truth).ravel()
    # Vectorized counts replace the per-element Python loop.
    pre_p = int(np.count_nonzero(pred == 1))    # predicted positives
    real_p = int(np.count_nonzero(truth == 1))  # actual positives
    tp = int(np.count_nonzero((pred == 1) & (truth == 1)))  # true positives
    print(tp, pre_p, real_p)
    return tp / (pre_p + 1e-6), tp / (real_p + 1e-6)


def main():
    """Grid-search SGD hyper-parameters for an LSTM pulsar classifier.

    Loads train/test sub-integration data from LMDB, then for every
    combination of (output dim, epochs, batch size, learning rate,
    momentum) builds and trains a fresh LSTM, evaluates precision,
    recall and F-score on the test set, and appends the results to a
    text file.
    """
    from itertools import product

    train_lmdb_path = "/usr/data/MedlatTrainingData/lmdb/subints_train_lmdb"
    train_features, train_labels = load_data(train_lmdb_path)
    print(train_features.shape, train_labels.shape)

    test_lmdb_path = "/usr/data/MedlatTrainingData/lmdb/subints_test_lmdb"
    test_features, test_labels = load_data(test_lmdb_path)
    print(test_features.shape, test_labels.shape)

    # Hyper-parameter grid.
    out_dims = [32, 64, 128, 256]
    nb_epochs = [1, 5, 10, 30, 50, 100]
    batch_sizes = [32, 64, 128, 256]
    lrs = [0.1, 0.01, 0.001, 0.0001]
    momentums = [0.9, 0.99]
    # Cartesian product replaces five nested index loops; each entry is
    # kept as a list to match the original structure of `paras`.
    paras = [list(combo)
             for combo in product(out_dims, nb_epochs, batch_sizes,
                                  lrs, momentums)]
    print(len(paras), paras)

    for para in paras:
        out_dim, nb_epoch, batch_size, lr, momentum = para

        print('Build model...')
        model = Sequential()
        # Input: 18 time steps of 64-dim vectors (one row per step).
        model.add(LSTM(out_dim, input_dim=64, input_length=18,
                       dropout_W=0.2, dropout_U=0.2, consume_less="mem"))
        model.add(Dense(1))
        model.add(Activation('sigmoid'))

        # Plain SGD (no decay, no Nesterov); lr/momentum come from the grid.
        optimizer_ins = keras.optimizers.SGD(lr=lr, momentum=momentum,
                                             decay=0.00, nesterov=False)
        model.compile(loss='binary_crossentropy',
                      optimizer=optimizer_ins,
                      metrics=['accuracy'])

        model.summary()

        print('Train...')
        model.fit(train_features, train_labels, batch_size=batch_size,
                  nb_epoch=nb_epoch,
                  validation_data=(test_features, test_labels))
        predict_labels = model.predict_classes(test_features)
        precision, recall = assessment(predict_labels, test_labels)
        fscore = 2 * precision * recall / (precision + recall + 1e-6)
        print("classification result: ", precision, recall, fscore)
        # Append this configuration's scores to the running result log.
        with open("/home/dataology/workspace/caffe/CaffeProject/pulsar"
                  "/keras_project/htru1_subints/tmp/result.txt", "a") as f:
            f.write("out_dim:%d nb_epoch:%d batch_size:%d lr:%f momentum:%f\n" %
                    (out_dim, nb_epoch, batch_size, lr, momentum))
            f.write("   precision:%f recall:%f fscore:%f\n" %
                    (precision, recall, fscore))

if __name__ == "__main__":
    # Run the full hyper-parameter grid search when executed as a script.
    main()