#!/usr/bin/env python    
# -*- coding: utf-8 -*- 

################################################################################
#
# Copyright (c) 2017. All Rights Reserved
#
################################################################################
"""
该模块对quora 重复文档进行识别，采用lstm+attention模型；

Authors: Fan Tao (fantao@mail.ustc.edu.cn)
Date:    2017/04/04 11:34:00
"""

# from gensim.models import Word2Vec
from keras.callbacks import ModelCheckpoint
from keras.layers import Dense, Dropout, Flatten, Input, Reshape, Dot, Add
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM
from keras.models import Model
from keras.utils import plot_model
import numpy as np
import os
import json

import data_process
import word2vec_pretrain
import gcc_omcp_utils as utils

EMBED_DIM = 64
HIDDEN_DIM = 50
BATCH_SIZE = 128
NBR_EPOCHS = 10

MODEL_DIR = utils.BASE_DIR


def model(in_file, val_file):
    """Train and evaluate an LSTM+attention model for duplicate-question detection.

    Args:
        in_file:  path to the training data file (quora duplicate format).
        val_file: path to the test data file.

    Side effects: writes the vocabulary, max sequence length, a model plot,
    weight checkpoints, test predictions, the architecture JSON and the
    final model under ``MODEL_DIR``.
    """
    # ---- Data preprocessing ----
    print("data process...")
    # ques_pairs: tuples of (question text, intent, is-duplicate label, id).
    ques_pairs = data_process.parse_quora_dul_data(in_file)
    # word2idx: dict mapping word -> index, ordered by descending frequency.
    word2idx = data_process.build_vocab(ques_pairs)
    # Persist the vocabulary so inference can rebuild the same indexing.
    with open(os.path.join(MODEL_DIR, "word2idx.json"), "w", encoding="utf-8") as jsonfile:
        json.dump(word2idx, jsonfile, ensure_ascii=False, indent=2)
    vocab_size = len(word2idx) + 1  # +1: index 0 is reserved for padding
    seq_maxlen = data_process.get_seq_maxlen(ques_pairs)
    # Persist the padding length for inference.
    with open(os.path.join(MODEL_DIR, "seq_maxlen.data"), "w", encoding="utf-8") as datafile:
        datafile.write(str(seq_maxlen))
    print("seq_maxlen=" + str(seq_maxlen))
    # x_ques1train / x_ques2train: integer-encoded question pairs;
    # ytrain / pidstrain: label array and id array.
    x_ques1train, x_ques2train, ytrain, pidstrain = data_process.vectorize_ques_pair(ques_pairs, word2idx, seq_maxlen)
    # Same vectorization pipeline for the test data.
    ques_pairstest = data_process.parse_quora_dul_data(val_file)
    x_ques1test, x_ques2test, ytest, pidstest = data_process.vectorize_ques_pair_zz(ques_pairstest, word2idx, seq_maxlen)

    print(x_ques1train.shape, x_ques1test.shape, x_ques2train.shape, x_ques2test.shape, ytrain.shape, ytest.shape,
          pidstrain.shape, pidstest.shape)

    # ---- Initial embedding weights from a gensim word2vec model ----
    w2v_embedding_model = word2vec_pretrain.train_word2vec(ques_pairs,
                                                           num_features=EMBED_DIM,
                                                           min_word_count=1,
                                                           context=5)

    embedding_weights = np.zeros((vocab_size, EMBED_DIM))
    for word, index in word2idx.items():
        if word in w2v_embedding_model:
            embedding_weights[index, :] = w2v_embedding_model[word]
        else:
            # Out-of-vocabulary word: small random init instead of zeros.
            print(word + " not exist")
            embedding_weights[index, :] = np.random.uniform(-0.25, 0.25, w2v_embedding_model.vector_size)

    # ---- Build the model: two shared-shape LSTM branches + attention ----
    print("Building model...")
    input_layer_one = Input(shape=(seq_maxlen,), dtype='float32', name='input_layer_one')
    input_one = Embedding(output_dim=EMBED_DIM, input_dim=vocab_size, input_length=seq_maxlen,
                          weights=[embedding_weights])(input_layer_one)
    input_one = LSTM(HIDDEN_DIM, return_sequences=True)(input_one)
    input_one = Dropout(0.3)(input_one)

    input_layer_two = Input(shape=(seq_maxlen,), dtype='float32', name='input_layer_two')
    input_two = Embedding(output_dim=EMBED_DIM, input_dim=vocab_size, input_length=seq_maxlen,
                          weights=[embedding_weights])(input_layer_two)
    input_two = LSTM(HIDDEN_DIM, return_sequences=True)(input_two)
    input_two = Dropout(0.3)(input_two)

    # Attention: interaction matrix of the two sequences, projected back to
    # (seq_maxlen, HIDDEN_DIM) and added to branch one.
    attn_layer = Dot(axes=[1, 1])([input_one, input_two])
    attn_layer = Flatten()(attn_layer)
    attn_layer = Dense((seq_maxlen * HIDDEN_DIM))(attn_layer)
    attn_layer = Reshape((seq_maxlen, HIDDEN_DIM))(attn_layer)

    main_out = Add()([input_one, attn_layer])
    main_out = Flatten()(main_out)
    main_out = Dense(2, activation="softmax")(main_out)

    model = Model(inputs=[input_layer_one, input_layer_two], outputs=[main_out])

    # Configure the learning process: optimizer, loss and metric list.
    model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
    plot_model(model, to_file=os.path.join(MODEL_DIR, "modle.png"))

    print("Training...")
    # verbose=1: print checkpoint saves (e.g. "Epoch 00001: saving model to ...").
    # save_best_only=True: only persist when the monitored value improves.
    checkpoint = ModelCheckpoint(filepath=os.path.join(MODEL_DIR, "quora_dul_best_lstm_atten.hdf5"), verbose=1,
                                 save_best_only=True)
    # fit: batch_size = samples per gradient update, epochs = training rounds,
    # validation_split = fraction of the training data held out for validation,
    # verbose=2 = one line per epoch.
    model.fit([x_ques1train, x_ques2train], ytrain, batch_size=BATCH_SIZE, epochs=NBR_EPOCHS, validation_split=0.2,
              verbose=2, callbacks=[checkpoint])

    # ---- Predict on the test set and persist the predictions ----
    print("predict...")
    y_test_pred = model.predict([x_ques1test, x_ques2test], batch_size=BATCH_SIZE)
    data_process.pred_save(os.path.join(MODEL_DIR, "y_test.pred"), y_test_pred, ytest, pidstest)

    print("Evaluation...")
    # Evaluate the final (last-epoch) weights.
    loss, acc = model.evaluate([x_ques1test, x_ques2test], ytest, batch_size=BATCH_SIZE)
    print("Test1 loss/accuracy final model = %.4f, %.4f" % (loss, acc))

    model.save_weights(os.path.join(MODEL_DIR, "quora_dul_lstm_atten-final.hdf5"))
    with open(os.path.join(MODEL_DIR, "quora_dul_lstm.json"), "w") as fjson:
        fjson.write(model.to_json())

    # BUG FIX: this previously reloaded the *final* weights saved just above,
    # so "Test2 ... best model" merely repeated Test1. Load the best
    # checkpoint written by ModelCheckpoint instead. As a consequence the
    # model saved below now holds the best-checkpoint weights.
    model.load_weights(filepath=os.path.join(MODEL_DIR, "quora_dul_best_lstm_atten.hdf5"))
    loss, acc = model.evaluate([x_ques1test, x_ques2test], ytest, batch_size=BATCH_SIZE)
    print("Test2 loss/accuracy best model = %.4f, %.4f" % (loss, acc))

    print("save modle.......")
    # BUG FIX: use os.path.join (as everywhere else in this file) instead of
    # raw string concatenation, which dropped the path separator.
    model.save(os.path.join(MODEL_DIR, 'gcc-omcp-model.h5'))


if __name__ == '__main__':
    # Train/evaluate on the 95511 dataset stored under MODEL_DIR.
    train_file = os.path.join(MODEL_DIR, "95511_training.txt")
    test_file = os.path.join(MODEL_DIR, "95511_test.txt")
    model(train_file, test_file)
    # Alternative dataset:
    # model(os.path.join(MODEL_DIR, "PH_sample"), os.path.join(MODEL_DIR, "PH_sample"))
