import warnings
warnings.filterwarnings("ignore")

import tensorflow as tf
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Embedding, LSTM, concatenate, Bidirectional
from tensorflow.keras import Input
from keras.callbacks import ModelCheckpoint
from keras.models import load_model
from sklearn.metrics import roc_auc_score

# Path where ModelCheckpoint writes the best weights seen during training.
model_path = '../../user_data/model_data/weights.best.hdf5'
# Training data: tab-separated, no header; each row is two queries (space-separated
# integer token ids) and a 0/1 match label. NaNs become the string "0" so the
# per-cell split/int parse below still succeeds.
df_train = pd.read_table("../../tcdata/oppo_breeno_round1_data/gaiic_track3_round1_train_20210228.tsv", names=['q1', 'q2', 'label']).fillna("0")
# parameters
Vocab_size = 30000          # Embedding input_dim — assumes token ids < 30000; TODO confirm against data
Embedding_out_size = 32     # embedding vector size per token
LSTM_units = 32             # units per LSTM direction (Bidirectional doubles the output)
Epochs = 20
Batch_size = 128
Learning_rate = 0.1         # SGD step size (used with momentum=0.9, nesterov)
Dense_units = 32            # hidden units in the classification head

def toList(x):
    """Parse a space-separated string of integer tokens into a list of ints."""
    return [int(token) for token in x.split(" ")]


def preprocessing():
    """Tokenize the query columns, split train/valid, and pad to fixed length.

    Mutates the module-level ``df_train`` in place: the 'q1'/'q2' columns are
    converted from space-separated strings to lists of ints.

    Returns:
        Tuple of (train_seq1, train_seq2, train_labels,
                  valid_seq1, valid_seq2, valid_labels) where each sequence
        array has shape (n, 100) of int token ids, zero-padded at the end,
        and labels are numpy arrays.
    """
    # Series.map applies toList element-wise; the original's
    # list(map(toList, df['q1'].tolist()[:])) made a redundant list copy.
    df_train['q1'] = df_train['q1'].map(toList)
    df_train['q2'] = df_train['q2'].map(toList)
    label = df_train['label']

    df_total = pd.concat([df_train['q1'], df_train['q2']], axis=1)
    max_length = 100  # every query is truncated/padded to 100 tokens

    X_train, X_valid, y_train, y_valid = train_test_split(
        df_total, label, test_size=0.2, random_state=2020)

    def _pad(column):
        # 'post' padding keeps real tokens at the front, zeros at the tail.
        return pad_sequences(column, maxlen=max_length, padding='post')

    train_sequence1 = _pad(X_train['q1'])
    train_sequence2 = _pad(X_train['q2'])
    valid_sequence1 = _pad(X_valid['q1'])
    valid_sequence2 = _pad(X_valid['q2'])
    return (train_sequence1, train_sequence2, np.asarray(y_train),
            valid_sequence1, valid_sequence2, np.asarray(y_valid))


def train(train_sequence1, train_sequence2, train_label, valid_sequence1, valid_sequence2, valid_label):
    """Build, train, and evaluate a two-tower BiLSTM query matcher.

    Each query is encoded by its own Embedding + Bidirectional LSTM tower
    (separate weights); the two encodings are concatenated and fed to a
    small sigmoid head that predicts whether the pair matches.

    Args:
        train_sequence1, train_sequence2: int arrays (n_train, max_len) of padded token ids.
        train_label: 0/1 labels aligned with the training pairs.
        valid_sequence1, valid_sequence2, valid_label: held-out pairs and labels.

    Returns:
        float: ROC-AUC of the trained model's predictions on the validation set.
    """
    # Tower 1.
    text_input1 = Input(shape=(None, ), dtype='int32')
    embedding1 = Embedding(Vocab_size, Embedding_out_size)(text_input1)
    encoded_text1 = Bidirectional(LSTM(LSTM_units, recurrent_dropout=0.2), trainable=True)(embedding1)

    # Tower 2 — intentionally does NOT share weights with tower 1.
    text_input2 = Input(shape=(None, ), dtype='int32')
    embedding2 = Embedding(Vocab_size, Embedding_out_size)(text_input2)
    encoded_text2 = Bidirectional(LSTM(LSTM_units, recurrent_dropout=0.2), trainable=True)(embedding2)

    concatenated = concatenate([encoded_text1, encoded_text2], axis=-1)
    output = Dense(Dense_units, activation='relu')(concatenated)
    output = Dense(1, activation='sigmoid')(output)

    # Use tf.keras' own ModelCheckpoint: the file previously mixed the
    # standalone `keras` package with `tensorflow.keras`, which breaks when
    # the two versions disagree. Monitor *validation* accuracy instead of
    # training accuracy so the saved weights are the least-overfit ones
    # (the validation pair is now passed to fit() below).
    checkpoint = tf.keras.callbacks.ModelCheckpoint(
        model_path, monitor='val_accuracy', verbose=1, save_weights_only=True,
        save_best_only=True, mode='max', save_freq='epoch')
    callbacks_list = [checkpoint]

    model = Model([text_input1, text_input2], output)
    # `lr` is deprecated in tf.keras optimizers; the supported kwarg is
    # `learning_rate`. Metrics are conventionally a list.
    model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=Learning_rate,
                                                    momentum=0.9, nesterov=True),
                  loss='binary_crossentropy', metrics=['accuracy'])

    model.fit([train_sequence1, train_sequence2], train_label,
              epochs=Epochs, batch_size=Batch_size,
              validation_data=([valid_sequence1, valid_sequence2], valid_label),
              callbacks=callbacks_list)

    valid_pred = model.predict([valid_sequence1, valid_sequence2])
    return roc_auc_score(np.array(valid_label, dtype=np.float64),
                         np.array(valid_pred, dtype=np.float64))


def run():
    """Entry point: preprocess the data, train the model, and print validation AUC."""
    splits = preprocessing()
    vl_op = train(*splits)
    print('valid set AUC score:', vl_op)


# Run the full pipeline only when executed as a script (not on import).
if __name__ == '__main__':
    run()


