# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'

# %%
import os
import keras.backend as K

from data import DATA_SET_DIR
from elmo.lm_generator import LMDataGenerator
from elmo.model import ELMo
from tqdm import tqdm

import numpy as np

import tensorflow as tf
from tensorflow import keras

import warnings
warnings.filterwarnings("ignore")

import joblib

# TF1-style session setup: allow the GPU memory allocator to grow on demand
# instead of reserving the whole device up front.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config = config)
# NOTE(review): `keras` here is tensorflow.keras (imported above), while the
# rest of the file uses the standalone `keras.backend as K` — confirm both
# backends actually share this session.
keras.backend.set_session(sess)
# %%
def printnow(s):
    """Print *s* beneath a banner line carrying the current timestamp.

    The banner is 40 '=' chars, a month-day-H:M:S timestamp, then 40 more
    '=' chars, so log output can be located by time at a glance.
    """
    import datetime  # local import keeps the module import block untouched
    stamp = datetime.datetime.now().strftime("%m-%d-%H:%M:%S")
    print('=' * 40 + stamp + '=' * 40)
    print(s)


# %%
debug = False

# In debug mode keep only the first 1000 rows of each array; slicing with
# None (`a[:None]`) returns the full array, so a single loader covers both
# the debug and the full-data case and removes the duplicated branches.
_row_limit = 1000 if debug else None


def _load_ids(name):
    """Load a pre-dumped id array from ./data/datasets, truncated in debug mode."""
    return joblib.load('./data/datasets/{}.jl.z'.format(name))[:_row_limit]


train_np_for_ward_ids = _load_ids('train_np_for_ward_ids')
train_np_back_ward_ids = _load_ids('train_np_back_ward_ids')
train_np_cur_ids = _load_ids('train_np_cur_ids')

test_np_for_ward_ids = _load_ids('test_np_for_ward_ids')
test_np_back_ward_ids = _load_ids('test_np_back_ward_ids')
test_np_cur_ids = _load_ids('test_np_cur_ids')


# %%
from sklearn.model_selection import train_test_split


# %%
# Shared split options: fixed seed keeps the three arrays row-aligned
# across the three independent train_test_split calls.
_split_opts = {'test_size': 0.2, 'random_state': 547}

train_for, valid_for = train_test_split(train_np_for_ward_ids, **_split_opts)
train_back, valid_back = train_test_split(train_np_back_ward_ids, **_split_opts)
train_cur, valid_cur = train_test_split(train_np_cur_ids, **_split_opts)

# Forward/backward target arrays get a trailing singleton axis.
train_for = train_for[:, :, np.newaxis]
valid_for = valid_for[:, :, np.newaxis]
train_back = train_back[:, :, np.newaxis]
valid_back = valid_back[:, :, np.newaxis]

# Each dataset is [[current_ids, forward_ids, backward_ids], []] — the empty
# second element is the (unused) target slot expected downstream.
train = [[train_cur, train_for, train_back], []]
valid = [[valid_cur, valid_for, valid_back], []]

test_np_for_ward_ids = test_np_for_ward_ids[:, :, np.newaxis]
test_np_back_ward_ids = test_np_back_ward_ids[:, :, np.newaxis]
test = [[test_np_cur_ids, test_np_for_ward_ids, test_np_back_ward_ids], []]


print(K.tensorflow_backend._get_available_gpus())


# %%

# Hyper-parameters and data-pipeline settings for the ELMo model.
parameters = {
    'multi_processing': True,
    'n_threads': os.cpu_count(),
    # Use the cuDNN-accelerated LSTM only when a GPU is visible.
    # (idiom fix: was `True if len(...) else False`)
    'cuDNN': len(K.tensorflow_backend._get_available_gpus()) > 0,
    'train_dataset': 'txt/advertiser_id.train.tokens',
    'valid_dataset': 'txt/advertiser_id.valid.tokens',
    'test_dataset': 'txt/advertiser_id.test.tokens',
    'vocab': 'txt/advertiser_id.vocab',
    'vocab_size': 54837,
    # Number of classes sampled for the sampled-softmax loss.
    'num_sampled': 1000,
    'charset_size': 262,
    'sentence_maxlen': 100,
    'token_maxlen': 50,
    'token_encoding': 'word',  # word-level ids (cnn_filters only used for 'char')
    'epochs': 10,
    'patience': 2,  # early-stopping patience in epochs
    'batch_size': 64,
    'clip_value': 1,   # gradient clipping
    'cell_clip': 5,    # LSTM cell-state clipping
    'proj_clip': 5,    # LSTM projection clipping
    'lr': 0.2,
    'shuffle': True,
    'n_lstm_layers': 2,
    'n_highway_layers': 2,
    # (kernel width, number of filters) pairs for the char-CNN encoder.
    'cnn_filters': [[1, 32],
                    [2, 32],
                    [3, 64],
                    [4, 128],
                    [5, 256],
                    [6, 512],
                    [7, 512]
                    ],
    'lstm_units_size': 400,
    'hidden_units_size': 200,
    'char_embedding_size': 16,
    'dropout_rate': 0.1,
    'word_dropout_rate': 0.05,
    # Tie input embedding and output softmax weights.
    'weight_tying': True,
}

# Set-up Generators
# train_generator = LMDataGenerator(os.path.join(DATA_SET_DIR, parameters['train_dataset']),
#                                   os.path.join(DATA_SET_DIR, parameters['vocab']),
#                                   sentence_maxlen=parameters['sentence_maxlen'],
#                                   token_maxlen=parameters['token_maxlen'],
#                                   batch_size=parameters['batch_size'],
#                                   shuffle=parameters['shuffle'],
#                                   token_encoding=parameters['token_encoding'])

# val_generator = LMDataGenerator(os.path.join(DATA_SET_DIR, parameters['valid_dataset']),
#                                 os.path.join(DATA_SET_DIR, parameters['vocab']),
#                                 sentence_maxlen=parameters['sentence_maxlen'],
#                                 token_maxlen=parameters['token_maxlen'],
#                                 batch_size=parameters['batch_size'],
#                                 shuffle=parameters['shuffle'],
#                                 token_encoding=parameters['token_encoding'])

# test_generator = LMDataGenerator(os.path.join(DATA_SET_DIR, parameters['test_dataset']),
#                                 os.path.join(DATA_SET_DIR, parameters['vocab']),
#                                 sentence_maxlen=parameters['sentence_maxlen'],
#                                 token_maxlen=parameters['token_maxlen'],
#                                 batch_size=parameters['batch_size'],
#                                 shuffle=parameters['shuffle'],
#                                 token_encoding=parameters['token_encoding'])


# Compile ELMo
print('compile')
elmo_model = ELMo(parameters)
elmo_model.compile_elmo(print_summary=True)

# Train ELMo
print('train')
elmo_model.train(train_data=train, valid_data=valid)

# Persist ELMo Bidirectional Language Model in disk
print('save')
elmo_model.save(sampled_softmax=False)


# %%
# Evaluate the trained LM on the test inputs (test[0] is [cur, fwd, bwd]).
print('evaluate')
elmo_model.evaluate(test[0])


# %%
# Wrap the trained weights into a reusable multi-output ELMo encoder,
# save it, then reload it for inference.
elmo_model.wrap_multi_elmo_encoder(print_summary=True, save=True)
elmo_model.load_elmo_encoder()


# %%
# Extract mean-pooled word-level embeddings for the test set and dump them.
# NOTE(review): output shape/type depends on ELMo.get_outputs — not visible here.
embs = elmo_model.get_outputs(test[0], output_type='words', state='mean')

joblib.dump(embs, './data/advertiser_id.jl.z')