# -*- coding: utf-8 -*-


from keras.layers import Input
from keras.layers import Embedding
from keras.layers import SpatialDropout1D
from keras.layers import Conv1D
from keras.layers import MaxPooling1D
from keras.layers import LSTM
from keras.layers import Dense
from keras.layers import Dropout
from keras.models import Model
from keras.layers.merge import concatenate
from keras.layers import Bidirectional
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.utils import np_utils

import os
import pandas as pd
from sklearn.model_selection import train_test_split
from lstm_model import make_tokenizer, pad_sequence, make_embedding_matrix
from util.attention import AttentionWithContext


def cnn_lstm_leak_model(app_tknz, app_embed_dim, app_embed_matrix, app_max_len,
                         brand_tknz, brand_embed_dim, brand_embed_matrix, brand_max_len,
                         leak_dim,
                         num_class):
    """Build and compile a three-input classifier.

    Branches:
      * app:   trainable pretrained embeddings -> BiLSTM -> attention
      * brand: frozen pretrained embeddings    -> LSTM   -> attention
      * leak:  dense sigmoid projection of hand-crafted leak features
    The three branch outputs are concatenated and fed to a softmax head.

    NOTE(review): despite the "cnn" in the name, the convolutional layers
    were removed from this architecture; the name is kept for caller
    compatibility.

    Args:
        app_tknz: fitted Keras Tokenizer for app sequences; its word_index
            sizes the app embedding layer.
        app_embed_dim: dimensionality of the app embedding vectors.
        app_embed_matrix: pretrained app embedding weights,
            shape (len(app_tknz.word_index) + 1, app_embed_dim).
        app_max_len: padded length of app input sequences.
        brand_tknz: fitted Keras Tokenizer for brand sequences.
        brand_embed_dim: dimensionality of the brand embedding vectors.
        brand_embed_matrix: pretrained brand embedding weights (frozen).
        brand_max_len: padded length of brand input sequences.
        leak_dim: number of leak-feature columns.
        num_class: number of target classes for the softmax output.

    Returns:
        A compiled Keras Model taking [app, brand, leak] inputs and
        producing a (num_class,) softmax distribution, compiled with
        categorical cross-entropy and Adam.
    """
    # App branch: embeddings stay trainable (no trainable=False) so they
    # can be fine-tuned for this task.
    app_input = Input(shape=(app_max_len,))
    app_part = Embedding(len(app_tknz.word_index)+1, app_embed_dim, weights=[app_embed_matrix])(app_input)
    app_part = SpatialDropout1D(0.3)(app_part)
    app_part = Bidirectional(LSTM(50, return_sequences=True))(app_part)
    app_part = AttentionWithContext()(app_part)
    app_part = Dropout(0.2)(app_part)

    # Brand branch: small vocabulary, so the pretrained embeddings are
    # frozen and a small LSTM is used.
    brand_input = Input(shape=(brand_max_len,))
    brand_part = Embedding(len(brand_tknz.word_index)+1, brand_embed_dim, weights=[brand_embed_matrix], trainable=False)(brand_input)
    brand_part = SpatialDropout1D(0.2)(brand_part)
    brand_part = LSTM(5, return_sequences=True)(brand_part)
    brand_part = AttentionWithContext()(brand_part)
    brand_part = Dropout(0.2)(brand_part)

    # Leak branch: squash the standardized leak features into 6 units.
    leak_input = Input(shape=(leak_dim,))
    leak_part = Dense(6, kernel_initializer='glorot_uniform', activation='sigmoid')(leak_input)

    merged = concatenate([app_part, brand_part, leak_part])
    preds = Dense(num_class, activation='softmax')(merged)

    model = Model(inputs=[app_input, brand_input, leak_input], outputs=preds)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model


if __name__ == '__main__':
    # --- App branch: tokenizer + pretrained 50d CBOW embeddings ---
    appMaxLen = 174  # 174
    appEmbedDim = 50
    appTknz = make_tokenizer(os.path.join("data", "deviceid_packages_cooked.txt"),
                             os.path.join("cache", "app.tokenizer"))
    appEmbeddings = make_embedding_matrix(appTknz, appEmbedDim, os.path.join("cache", "app_cbow_50d.vec"),
                                          os.path.join("cache", "app"))

    # --- Brand branch: tokenizer + pretrained 5d CBOW embeddings ---
    brandMaxLen = 10
    brandEmbedDim = 5
    brandTknz = make_tokenizer(os.path.join("data", "deviceid_brand_cooked.txt"),
                               os.path.join("cache", "brand.tokenizer"))
    brandEmbeddings = make_embedding_matrix(brandTknz, brandEmbedDim, os.path.join("cache", "brand_cbow_5d.vec"),
                                            os.path.join("cache", "brand"))

    # Main training table: one row per device.
    names = ["app", "brand", "label", "gender", "age"]
    df = pd.read_csv(os.path.join("data", "app_brand.train"), sep=",", names=names)
    # print(...) form works under both Python 2 and Python 3.
    print(df.shape)

    # Leak features: standardize, and persist mean/std so the identical
    # scaling can be re-applied at inference time.
    leak_df = pd.read_csv(os.path.join("data", "app_gender.train"), sep=",")
    leak_df.mean().to_pickle(os.path.join("cache", "age_mean.pkl"))
    leak_df.std().to_pickle(os.path.join("cache", "age_std.pkl"))
    leak_df = (leak_df - leak_df.mean()) / leak_df.std()
    leak_names = leak_df.columns.values
    leakDim = leak_df.shape[1]
    print(leak_df.shape)

    # NOTE(review): positional concat assumes df and leak_df rows are in the
    # same device order — verify against the data-preparation pipeline.
    df = pd.concat([df, leak_df], axis=1)
    print(df.shape)

    # One-hot encode the class labels for categorical cross-entropy.
    label = df.pop("label")
    y = np_utils.to_categorical(label)

    # Stratified 90/10 split keeps class proportions in the validation set.
    train_X, valid_X, train_y, valid_y = train_test_split(df, y, random_state=42, test_size=0.1, stratify=y)

    train_input = [pad_sequence(train_X["app"], appTknz, appMaxLen),
                   pad_sequence(train_X["brand"], brandTknz, brandMaxLen),
                   train_X[leak_names]]
    valid_input = [pad_sequence(valid_X["app"], appTknz, appMaxLen),
                   pad_sequence(valid_X["brand"], brandTknz, brandMaxLen),
                   valid_X[leak_names]]

    cnnLstm = cnn_lstm_leak_model(
        appTknz, appEmbedDim, appEmbeddings, appMaxLen,
        brandTknz, brandEmbedDim, brandEmbeddings, brandMaxLen,
        leakDim,
        num_class=22
    )

    # Ensure the checkpoint directory exists — ModelCheckpoint does not
    # create it and would otherwise fail on the first save.
    if not os.path.isdir("model"):
        os.makedirs("model")

    earlyStop = EarlyStopping(monitor="val_loss", patience=3, verbose=0)
    checkPoint = ModelCheckpoint("model/cnn_lstm_attention.h5", monitor="val_loss", save_best_only=True, verbose=0)

    cnnLstm.fit(train_input, train_y, validation_data=(valid_input, valid_y), epochs=15, batch_size=64,
                callbacks=[earlyStop, checkPoint])