# coding: utf-8

import tensorflow as tf
import numpy as np

import os
import sys
import h5py

sys.path.append('../bilm_elmo_with_word2vec/')
from bilm import TokenBatcher, BidirectionalLanguageModel, weight_layers, dump_token_embeddings




class TextCNN(object):
    """Text-classification CNN (Kim-style) on top of pre-trained ELMo embeddings.

    TF1 static graph: ELMo biLM embedding -> one conv + max-pool branch per
    filter size -> concat -> flatten -> dropout -> dense softmax classifier
    with an L2 penalty on the output layer only.
    """
    def __init__(self, config):  # three tensors are fed at run time
        # Feed placeholders: token ids (batch, time), one-hot labels, dropout keep-prob.
        self.input_x = tf.placeholder(tf.int32, [None, None], name='input_x')
        self.input_y = tf.placeholder(tf.float32, [None, config.num_classes], name='input_y')
        self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
        l2_loss = tf.constant(0.0)  # L2 accumulator; only the output layer adds to it below

        """CNN模型"""
        with tf.variable_scope(''):
            print("使用预训练的 ELMo 词向量...")

            prefix = '../150wLAIYE_train_ELMo_word_data/'  # relative path to the pre-trained ELMo artifacts
            vocab_file = prefix + '/laiye_travel_word_300d_5min_5window_word2vec_vocab'  # vocabulary file (not used below; kept for reference)
            options_file = prefix + '/options.json'  
            weight_file = prefix + '/weights.hdf5'
            token_embedding_file = prefix + '/final_laiye_vocab_embedding.hdf5'

            # Build the biLM graph (token-id inputs, pre-computed token embeddings).
            bilm = BidirectionalLanguageModel(
                    options_file,
                    weight_file,
                    use_character_inputs=False,
                    embedding_weight_file=token_embedding_file)

            context_embeddings_op = bilm(self.input_x)
            
            # Learned scalar mix over the biLM layers (the "ELMo" combination).
            elmo_context_input = weight_layers('input', context_embeddings_op, l2_coef=0.0)
            self.embedded_chars = elmo_context_input['weighted_op']  # plays the role of tf.nn.embedding_lookup()
            # print('shape after loading the ELMo embeddings:', self.embedded_chars.shape)
            self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)  # add a channel dim for conv2d

        pooled_outputs = []
        for i, filter_size in enumerate(config.filter_sizes):  # one branch per n-gram filter size
            with tf.variable_scope("Conv-Maxpool-%s" % filter_size):
                # W = tf.get_variable(name='W1', shape=[filter_size, config.embedding_dim, 1, config.num_filters])
                # b = tf.get_variable(name='b1', shape=[config.num_filters])
                # NOTE(review): filter width 2*embedding_dim assumes the ELMo output dim is
                # twice the base word-vector dim (fw + bw halves) — confirm against options.json.
                filter_shape = [filter_size, 2*config.embedding_dim, 1, config.num_filters]
                W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W1")  # truncated-normal (Gaussian) init
                b = tf.Variable(tf.constant(0.1, shape=[config.num_filters]), name="b1")  # small constant init

                conv = tf.nn.conv2d(
                    self.embedded_chars_expanded,
                    W,
                    strides=[1, 1, 1, 1],  # stride 1 in every dimension
                    padding="VALID",
                    name="conv")

                h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")  # ReLU activation
                # print(h.shape)
                # Max-pool over the whole valid time axis -> one scalar per filter.
                # NOTE(review): ksize uses config.seq_length, so batches fed to input_x must be
                # padded/truncated to exactly seq_length tokens despite the [None, None] placeholder — confirm.
                pooled = tf.nn.max_pool(
                    h,
                    ksize=[1, config.seq_length - filter_size + 1, 1, 1],  # pooling window size
                    strides=[1, 1, 1, 1],   # stride
                    padding='VALID',
                    name="max-pool")
                pooled_outputs.append(pooled)

        num_filters_total = config.num_filters * len(config.filter_sizes)  # e.g. 128 x 3
        self.h_pool = tf.concat(pooled_outputs, 3)  # concat branch outputs along the channel axis
        self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])  # flatten to feed the dense layer

        with tf.variable_scope("dropout"):  # pipeline: conv max-pool dense dropout relu dense softmax argmax loss 
            self.h_drop = tf.nn.dropout(self.h_pool_flat, self.keep_prob)

        with tf.variable_scope("output"):
            # W2 = tf.get_variable("W2", shape=[num_filters_total, config.num_classes])
            # b2 = tf.get_variable("b2", shape=[config.num_classes])
            W2 = tf.get_variable(name="W2", shape=[num_filters_total, config.num_classes],
                                 initializer=tf.contrib.layers.xavier_initializer())  # Xavier/Glorot init
            b2 = tf.Variable(tf.constant(0.1, shape=[config.num_classes]), name="b2")

            # Only the output-layer weights/bias contribute to the L2 penalty.
            l2_loss += tf.nn.l2_loss(W2)
            l2_loss += tf.nn.l2_loss(b2)

            self.scores = tf.nn.xw_plus_b(self.h_drop, W2, b2, name="scores")  # unnormalized class logits
            self.probability = tf.nn.softmax(self.scores)  # normalized class probabilities

            self.predictions = tf.argmax(self.scores, 1, name="predictions")  # index of the highest-scoring class

        with tf.variable_scope("loss"):
            # Mean softmax cross-entropy against one-hot labels, plus weighted L2 penalty.
            losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)
            self.loss = tf.reduce_mean(losses) + config.l2_reg_lambda * l2_loss

        with tf.variable_scope("accuracy"):
            correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))  # bool per example
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")  # cast bool -> float, then mean


class BiLSTM_Att(object):
    """Stacked bidirectional LSTM with additive attention over ELMo embeddings.

    TF1 static graph: ELMo biLM embedding -> multi-layer BiLSTM ->
    additive attention pooling over time -> dropout -> dense softmax
    classifier (no L2 penalty in the loss).
    """
    def __init__(self, config):
        # Feed placeholders: token ids (batch, time), one-hot labels, dropout keep-prob.
        self.input_x = tf.placeholder(tf.int32, [None, None], name='input_x')
        self.input_y = tf.placeholder(tf.float32, [None, config.num_classes], name='input_y')
        self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')

        # self.x_length = tf.reduce_sum(tf.sign(self.input_x), 1)
        # self.x_length = tf.cast(self.x_length, tf.int32)

        # with tf.name_scope('word_embedding'):
        #     if not config.is_pre_train:
        #         print("使用随机的词向量...")
        #         # self.W2V = tf.get_variable(name='W2V', shape=[config.vocab_size, config.embedding_dim])
        #         self.W2V = tf.Variable(tf.random_uniform([config.vocab_size, config.embedding_dim], -1.0, 1.0), name="W2V")
        #     else:
        #         print("使用预训练的词向量...")
        #         word_embedding = np.load(config.word2vec_npy)  # 加载词向量npy文件
        #         self.W2V = tf.get_variable(name='W2V', initializer=word_embedding, trainable=config.is_trainable)
        #     embedding_inputs = tf.nn.embedding_lookup(self.W2V, self.input_x)

        with tf.variable_scope(''):
            print("使用预训练的 ELMo 词向量...")

            prefix = '../6kwQA_train_ELMo_word_data/'  # relative path to the pre-trained ELMo artifacts
            vocab_file = prefix + '/QA_clean_all_title_seg_txt_300d_min5_word2vec_vocab'  # vocabulary file (not used below; kept for reference)
            options_file = prefix + '/options.json'  
            weight_file = prefix + '/weights.hdf5'
            token_embedding_file = prefix + '/final_6kwQA_vocab_embedding.hdf5'

            # Build the biLM graph (token-id inputs, pre-computed token embeddings).
            bilm = BidirectionalLanguageModel(
                    options_file,
                    weight_file,
                    use_character_inputs=False,
                    embedding_weight_file=token_embedding_file)

            context_embeddings_op = bilm(self.input_x)
             
            # Learned scalar mix over the biLM layers (the "ELMo" combination).
            elmo_context_input = weight_layers('input', context_embeddings_op, l2_coef=0.0)
            embedding_inputs = elmo_context_input['weighted_op']  # plays the role of tf.nn.embedding_lookup()
            
        with tf.name_scope('BiLSTM'):
            def unit_lstm():  # tf.contrib.rnn == tf.nn.rnn_cell
                # One LSTM layer with output dropout tied to the keep_prob placeholder.
                lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(config.rnn_hidden_size)  # tf.nn.rnn_cell.LSTMCell
                lstm_cell = tf.nn.rnn_cell.DropoutWrapper(lstm_cell, output_keep_prob=self.keep_prob)
                return lstm_cell

            lstm_cell_fw = tf.nn.rnn_cell.MultiRNNCell([unit_lstm() for _ in range(config.rnn_num_layers)])
            lstm_cell_bw = tf.nn.rnn_cell.MultiRNNCell([unit_lstm() for _ in range(config.rnn_num_layers)])

            # Batch size is taken from input_y's leading dimension.
            initial_state_fw = lstm_cell_fw.zero_state(tf.shape(self.input_y)[0], tf.float32)
            initial_state_bw = lstm_cell_bw.zero_state(tf.shape(self.input_y)[0], tf.float32)

            (output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(
                lstm_cell_fw, lstm_cell_bw, embedding_inputs, initial_state_fw=initial_state_fw, initial_state_bw=initial_state_bw, dtype=tf.float32)
            bilstm_output = tf.concat([output_fw, output_bw], 2)  # concat fw/bw on the feature axis -> 2*rnn_hidden_size


        with tf.variable_scope('Attention'):
            # Additive attention: score_t = u . tanh(W h_t + b), alpha = softmax(scores).
            att_w = tf.get_variable(shape=[2*config.rnn_hidden_size, config.attention_size], name='att_w')
            att_b = tf.get_variable(shape=[config.attention_size], name='att_b')
            att_u = tf.get_variable(shape=[config.attention_size, 1], name='att_u')

            logits = tf.tanh(tf.nn.xw_plus_b(tf.reshape(bilstm_output, [-1, 2*config.rnn_hidden_size]), att_w, att_b))
            logits = tf.matmul(logits, att_u)
            # NOTE(review): this reshape assumes every batch is padded to exactly
            # config.seq_length tokens despite the [None, None] placeholder — confirm.
            alpha = tf.nn.softmax(tf.reshape(logits, [-1, config.seq_length]), 1)
            att_output = tf.reduce_sum(tf.multiply(bilstm_output, tf.reshape(alpha, [-1, config.seq_length, 1])), 1)

        with tf.variable_scope("dropout"):
            self.h_drop = tf.nn.dropout(att_output, self.keep_prob)  # dropout on the attention-pooled vector

        with tf.variable_scope("output"):
            self.scores = tf.layers.dense(self.h_drop, config.num_classes, name="scores")  # unnormalized class logits
            self.probability = tf.nn.softmax(self.scores)  # normalized class probabilities
            self.predictions = tf.argmax(self.scores, 1, name="predictions")  # index of the highest-scoring class

        with tf.variable_scope("loss"):
            # Mean softmax cross-entropy against one-hot labels (no L2 term here).
            losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)
            self.loss = tf.reduce_mean(losses)

        with tf.variable_scope("accuracy"):
            correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))  # bool per example
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")  # cast bool -> float, then mean


class LSTM_MFCNN(object):  # newer version; intended to account for the actual sentence length
    """Stacked LSTM followed by a multi-filter CNN, over ELMo embeddings.

    TF1 static graph: ELMo biLM embedding -> multi-layer (unidirectional)
    LSTM -> one conv + max-pool branch per filter size over the LSTM
    outputs -> concat -> flatten -> dropout -> dense softmax classifier
    with an L2 penalty on the output layer only.
    """
    def __init__(self, config):
        
        # Feed placeholders: token ids (batch, time), one-hot labels, dropout keep-prob.
        self.input_x = tf.placeholder(tf.int32, [None, None], name='input_x')
        self.input_y = tf.placeholder(tf.float32, [None, config.num_classes], name='input_y')
        self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')

        # self.x_length = tf.reduce_sum(tf.sign(self.input_x), 1)
        # self.x_length = tf.cast(self.x_length, tf.int32)

        l2_loss = tf.constant(0.0)  # L2 accumulator; only the output layer adds to it below

        # with tf.variable_scope('Word_embedding'):
        #     if not config.is_pre_train:
        #         print("使用随机的词向量...")
        #         # self.W2V = tf.get_variable(name='W2V', shape=[config.vocab_size, config.embedding_dim])
        #         self.W2V = tf.Variable(tf.random_uniform([config.vocab_size, config.embedding_dim], -1.0, 1.0),
        #                                name="W2V")
        #     else:
        #         print("使用预训练的词向量...")
        #         word_embedding = np.load(config.word2vec_npy)  # 加载词向量npy文件
        #         self.W2V = tf.get_variable(name='W2V', initializer=word_embedding, trainable=True)

        #     self.embedded_chars = tf.nn.embedding_lookup(self.W2V, self.input_x)
            # self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)  # 加一个维度

        with tf.variable_scope(''):
            print("使用预训练的 ELMo 词向量...")

            prefix = '../6kwQA_train_ELMo_word_data/'  # relative path to the pre-trained ELMo artifacts
            vocab_file = prefix + '/QA_clean_all_title_seg_txt_300d_min5_word2vec_vocab'  # vocabulary file (not used below; kept for reference)
            options_file = prefix + '/options.json'  
            weight_file = prefix + '/weights.hdf5'
            token_embedding_file = prefix + '/final_6kwQA_vocab_embedding.hdf5'

            # Build the biLM graph (token-id inputs, pre-computed token embeddings).
            bilm = BidirectionalLanguageModel(
                    options_file,
                    weight_file,
                    use_character_inputs=False,
                    embedding_weight_file=token_embedding_file)

            context_embeddings_op = bilm(self.input_x)
             
            # Learned scalar mix over the biLM layers (the "ELMo" combination).
            elmo_context_input = weight_layers('input', context_embeddings_op, l2_coef=0.0)
            self.embedded_chars = elmo_context_input['weighted_op']  # plays the role of tf.nn.embedding_lookup()


        with tf.variable_scope('LSTM'):
            def unit_lstm():  # tf.contrib.rnn == tf.nn.rnn_cell
                # One LSTM layer with output dropout tied to the keep_prob placeholder.
                lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(config.rnn_hidden_size)
                # lstm_cell = tf.nn.rnn_cell.LSTMCell(config.rnn_hidden_size)
                lstm_cell = tf.nn.rnn_cell.DropoutWrapper(lstm_cell, output_keep_prob=self.keep_prob)
                return lstm_cell
            
            rnn_cell = tf.nn.rnn_cell.MultiRNNCell([unit_lstm() for _ in range(config.rnn_num_layers)])
            # Batch size is taken from input_y's leading dimension.
            init_state = rnn_cell.zero_state(tf.shape(self.input_y)[0], tf.float32)
            rnn_outputs, rnn_state = tf.nn.dynamic_rnn(rnn_cell, self.embedded_chars, initial_state=init_state, dtype=tf.float32)

        # Add a channel dim so the LSTM output sequence can be fed to conv2d.
        self.embedded_chars_expanded = tf.expand_dims(rnn_outputs, -1)

        pooled_outputs = []
        for i, filter_size in enumerate(config.filter_sizes):  # one branch per n-gram filter size
            with tf.variable_scope("Conv-Maxpool-%s" % filter_size):
                # Filter width = rnn_hidden_size, matching the LSTM output feature dim.
                W = tf.get_variable(name='W1', shape=[filter_size, config.rnn_hidden_size, 1, config.num_filters])
                b = tf.get_variable(name='b1', shape=[config.num_filters])
                # filter_shape = [filter_size, config.rnn_hidden_size, 1, config.num_filters]
                # W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W1")  # Gaussian init
                # b = tf.Variable(tf.constant(0.1, shape=[config.num_filters]), name="b1")  # constant init

                conv = tf.nn.conv2d(
                    self.embedded_chars_expanded,
                    W,
                    strides=[1, 1, 1, 1],  # stride 1 in every dimension
                    padding="VALID",
                    name="conv")
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")  # ReLU activation
        
                # Max-pool over the whole valid time axis -> one scalar per filter.
                # NOTE(review): ksize uses config.seq_length, so batches fed to input_x must be
                # padded/truncated to exactly seq_length tokens despite the [None, None] placeholder — confirm.
                pooled = tf.nn.max_pool(
                    h,
                    ksize=[1, config.seq_length - filter_size + 1, 1, 1],  # pooling window size
                    strides=[1, 1, 1, 1],   # stride
                    padding='VALID',
                    name="max-pool")
                pooled_outputs.append(pooled)

        num_filters_total = config.num_filters * len(config.filter_sizes)  # e.g. 128 x 3
        self.h_pool = tf.concat(pooled_outputs, 3)  # concat branch outputs along the channel axis
        self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])  # flatten to feed the dense layer

        with tf.variable_scope("dropout"):
            self.h_drop = tf.nn.dropout(self.h_pool_flat, self.keep_prob)

        with tf.variable_scope("output"):
            W2 = tf.get_variable(name="W2", shape=[num_filters_total, config.num_classes],
                                 initializer=tf.contrib.layers.xavier_initializer())  # Xavier/Glorot init
            b2 = tf.get_variable(name="b2", shape=[config.num_classes])

            # Only the output-layer weights/bias contribute to the L2 penalty.
            l2_loss += tf.nn.l2_loss(W2)
            l2_loss += tf.nn.l2_loss(b2)

            self.scores = tf.nn.xw_plus_b(self.h_drop, W2, b2, name="scores")  # unnormalized class logits
            self.probability = tf.nn.softmax(self.scores)  # normalized class probabilities

            self.predictions = tf.argmax(self.scores, 1, name="predictions")  # index of the highest-scoring class

        with tf.variable_scope("loss"):
            # Mean softmax cross-entropy against one-hot labels, plus weighted L2 penalty.
            losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)
            self.loss = tf.reduce_mean(losses) + config.l2_reg_lambda * l2_loss

        with tf.variable_scope("accuracy"):
            correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))  # bool per example
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")  # cast bool -> float, then mean

