import gensim
import os
from gensim.models.word2vec import Word2Vec, PathLineSentences
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
import tensorflow as tf
import matplotlib.pyplot as plt
import json

# Named Entity Recognition (NER) — hands-on training script


class OneHot(object):
    """Thin wrapper around sklearn's LabelEncoder/OneHotEncoder.

    Converts string labels to integer codes or dense one-hot rows, and maps
    one-hot rows back to the original labels.
    """

    def __init__(self):
        self.__label_encoder = LabelEncoder()
        self.__onehot_encoder = OneHotEncoder()

    def encode(self, target_list):
        """Fit on *target_list* and return a dense one-hot matrix for it.

        BUG FIX: the original assigned the fit_transform result back onto the
        encoder attribute, replacing the OneHotEncoder instance with a sparse
        matrix and breaking every subsequent call.
        """
        integer_encoded = self.__label_encoder.fit_transform(np.array(target_list))
        integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
        onehot_encoded = self.__onehot_encoder.fit_transform(integer_encoded)
        return onehot_encoded.toarray()

    def encode_label(self, target_list):
        """Fit on *target_list* and return its integer-encoded labels."""
        return self.__label_encoder.fit_transform(np.array(target_list))

    def decode(self, encoder_list):
        """Map one-hot rows back to the original string labels (row-wise argmax)."""
        return self.__label_encoder.inverse_transform([np.argmax(np.array(encoder_list), axis=1)])


def read_file_to_corpus(folder):
    """Read every file under *folder* and return one token list per line.

    Each line is whitespace-split; the result is a list of lists of tokens.
    """
    corpus = []
    for name in os.listdir(folder):
        path = os.path.join(folder, name)
        with open(path, encoding="utf-8") as handle:
            corpus.extend(line.split() for line in handle)
    return corpus



def get_vec_model(model_path):
    """Load a previously trained gensim Word2Vec model from *model_path*."""
    return gensim.models.Word2Vec.load(model_path)


def get_train_list(source_folder, target_folder):
    """Collect aligned (tokens, labels) line pairs from matching file pairs.

    For each file in *source_folder*, the label file in *target_folder* is
    derived by replacing the filename prefix with "targetH_". Only line
    pairs with the same token count are kept.

    Returns (source_string, target_string): parallel lists of token lists.
    """
    source_string = []
    target_string = []
    for filename in os.listdir(source_folder):
        target_file_name = "targetH_" + "_".join(filename.split("_")[1:])
        target_path = os.path.join(target_folder, target_file_name)
        if not os.path.exists(target_path):
            continue
        with open(os.path.join(source_folder, filename), encoding="utf-8") as source:
            with open(target_path, encoding="utf-8") as target:
                # BUG FIX: the original nested `for target_line in target`
                # inside the source-line loop, which exhausted the target
                # file after the first source line and paired that first
                # line against every target line. Pair positionally instead.
                for source_line, target_line in zip(source, target):
                    s_tokens = source_line.split()
                    t_tokens = target_line.split()
                    if len(s_tokens) == len(t_tokens):
                        source_string.append(s_tokens)
                        target_string.append(t_tokens)
    return source_string, target_string

def get_train_feature(source_string, vec_model, max_sequence=1000):
    """Flatten each sentence into one long row vector of concatenated word vectors.

    Words beyond *max_sequence* are dropped; unknown words contribute a zero
    vector; short sentences are zero-padded on the right so that every row
    has length max_sequence * embedding_dim.

    Returns an np.matrix of shape (n_sentences, max_sequence * embedding_dim).
    """
    vocab = set(vec_model.wv.index2word)
    dim = vec_model.trainables.layer1_size
    rows = []
    for sentence in source_string:
        parts = []
        for word in sentence[:max_sequence]:
            if word in vocab:
                parts.append(vec_model[word])
            else:
                parts.append(np.zeros(dim, dtype='float32'))
        # right-pad short sentences with a flat block of zeros
        missing = max_sequence - len(sentence)
        if missing > 0:
            parts.append(np.zeros(dim * missing, dtype='float32'))
        row = np.concatenate(parts) if parts else np.zeros(0, dtype='float32')
        rows.append(row)
    return np.matrix(rows)

# Custom variant: convert the training data into a 3-D tensor, ready for the model
def get_train_feature_my(source_string, vec_model, max_sequence=1000):
    """Convert sentences into a 3-D tensor (n_sentences, max_sequence, dim).

    Words beyond *max_sequence* are dropped; unknown words and right-padding
    positions get the zero vector.

    BUG FIX: the original mixed `list.append` of per-word vectors with
    `np.append` of a flat zero block for the padding, so padded rows were
    flattened 1-D arrays while full-length rows stayed lists of vectors —
    the advertised 3-D tensor was never produced and `np.matrix` could not
    stack the inconsistent rows. Rows are now built uniformly as lists of
    per-word vectors and stacked into a proper 3-D float32 array.
    """
    vocab = set(vec_model.wv.index2word)
    dim = vec_model.trainables.layer1_size
    zero_vec = np.zeros(dim, dtype='float32')
    tensor = []
    for sentence in source_string:
        row = [vec_model[word] if word in vocab else zero_vec
               for word in sentence[:max_sequence]]
        # pad short sentences with per-word zero vectors
        row.extend([zero_vec] * (max_sequence - len(row)))
        tensor.append(row)
    return np.array(tensor, dtype='float32')

# Numericalize the label strings
def get_target_label(target_string, max_sequence=1000):
    """Integer-encode label rows, normalizing each to *max_sequence* entries.

    Short rows are padded in place with the "O" (outside) tag; long rows are
    truncated. Note the input lists are mutated, matching the original
    behavior.

    Removed dead code: the original checked `curr_lab is None` after calling
    `extend` on it (impossible) and `target_string[i] is None` after `len()`
    had already succeeded on it (would have raised first).

    Returns (target_vector, onehot_model): an int array of shape
    (n_rows, max_sequence) and the fitted encoder for later decoding.
    """
    onehot_model = OneHot()
    for i, labels in enumerate(target_string):
        if len(labels) < max_sequence:
            # pad in place with the "outside" tag
            labels.extend(["O"] * (max_sequence - len(labels)))
        else:
            target_string[i] = labels[:max_sequence]
    # flatten all rows into one column so the encoder sees every label
    flat_list = [label for row in target_string for label in row]
    # integer encoding (not a true one-hot) of the label strings
    target_vector = onehot_model.encode_label(flat_list)
    return target_vector.reshape(-1, max_sequence), onehot_model

def get_train_list(source_folder, target_folder, path):
    """Collect aligned (tokens, labels) line pairs for files listed under *path*.

    For each filename found in *path*, the label file name is derived by
    replacing the prefix (up to the first "_") with "targetH_". The source
    file is opened from *source_folder*, the label file from *target_folder*,
    lines are paired positionally, and only pairs with equal token counts
    are kept.
    """
    sources, targets = [], []
    for filename in os.listdir(path):
        suffix = "_".join(filename.split("_")[1:])
        target_path = os.path.join(target_folder, "targetH_" + suffix)
        if not os.path.exists(target_path):
            continue
        with open(os.path.join(source_folder, filename), 'r', encoding="utf-8") as src:
            with open(target_path, 'r', encoding="utf-8") as tgt:
                for src_line, tgt_line in zip(src.readlines(), tgt.readlines()):
                    src_tokens = src_line.split()
                    tgt_tokens = tgt_line.split()
                    if len(src_tokens) == len(tgt_tokens):
                        sources.append(src_tokens)
                        targets.append(tgt_tokens)
    print('源数据读取完毕，共' + str(len(sources)) + '行')
    return sources, targets

def get_train_list_my(source_folder, target_folder):
    """Read aligned source/target files (file paths, despite the names).

    Lines are paired positionally; only pairs whose token counts match are
    kept. Also tracks and prints the longest source line seen.
    """
    longest = 0
    sources, targets = [], []
    with open(source_folder, 'r', encoding="utf-8") as src:
        with open(target_folder, 'r', encoding="utf-8") as tgt:
            for src_line, tgt_line in zip(src.readlines(), tgt.readlines()):
                src_tokens = src_line.split()
                tgt_tokens = tgt_line.split()
                longest = max(longest, len(src_tokens))
                if len(src_tokens) == len(tgt_tokens):
                    sources.append(src_tokens)
                    targets.append(tgt_tokens)
    print('源数据读取完毕，共' + str(len(sources)) + '行','最长行',longest)
    return sources, targets

def get_train_list_my1(source_folder, target_folder):
    """Read only the source file into token lists; labels stay empty.

    The target file is still opened (and therefore must exist) but is not
    read — the label side of the pipeline is deliberately disabled here.
    """
    sources = []
    targets = []
    with open(source_folder, 'r', encoding="utf-8") as src:
        with open(target_folder, 'r', encoding="utf-8") as tgt:
            sources.extend(line.split() for line in src.readlines())
    print('源数据读取完毕，共' + str(len(sources)) + '行')
    return sources, targets

def get_vec_from_corpus(corpus, config, size=128, min_count=1):
    """Train a Word2Vec model on *corpus* and save it to the configured path.

    corpus: iterable of token lists, e.g. [["cat", "say", "meow"], ...]

    NOTE: the embedding size is always taken from
    config['model']['embedding_size']; the *size* parameter is kept only for
    signature compatibility.
    BUG FIX: *min_count* was previously ignored (hard-coded to 1). Its
    default is now 1 so all existing call patterns behave identically, but
    an explicitly passed value is finally honored.
    """
    size = config['model']['embedding_size']
    save_path = config['model']['w2v_save_dir']
    vec_model = gensim.models.Word2Vec(corpus, size=size, min_count=min_count)
    vec_model.save(save_path)
    return vec_model

def lstm_crf(X):
    """Build a BiLSTM + linear projection producing per-token tag scores.

    X: flat feature batch of shape [batch_size, seq_length * embedding_size];
       it is reshaped internally to [batch_size, seq_length, embedding_size].
    Returns unary scores of shape [batch_size, seq_length, output_size],
    consumed by the CRF layer in train().

    NOTE(review): reads the module-level `configs` dict instead of taking it
    as a parameter — `configs` must exist before this is called (it is set
    in the __main__ block).
    """
    embedding_size = configs['model']['embedding_size']
    # number of hidden units per LSTM cell
    unit_num = configs['model']['unit_num']
    dropout_rate = None  # unused — dropout is never applied in this graph
    # output layer size; corresponds to the number of distinct tags
    output_size = configs['model']['output_size']
    #
    batch_size = configs['training']['batch_size']
    seq_length = configs['model']['sequence_length']
    lr = configs['model']['lr']  # unused here; the optimizer is built in train()

    cell_forward = tf.nn.rnn_cell.BasicLSTMCell(unit_num)
    cell_backward = tf.nn.rnn_cell.BasicLSTMCell(unit_num)
    # Restore the time dimension:
    # [batch, seq_len * emb] -> [batch, seq_len, emb]
    input_bi_lstm = tf.reshape(X, [batch_size, seq_length, embedding_size])
    # Run the sequence through forward and backward LSTMs; bi_outputs is a
    # (fw, bw) pair of [batch, seq_len, unit_num] tensors, bi_state the
    # final cell states.
    bi_outputs, bi_state = tf.nn.bidirectional_dynamic_rnn(cell_forward,
                                    cell_backward, input_bi_lstm, dtype=tf.float32)

    # Concatenate forward/backward hidden states per timestep
    # -> [batch, seq_len, 2 * unit_num]
    bi_output = tf.concat(bi_outputs, axis=2)

    # Linear projection from BiLSTM output to tag scores
    W = tf.get_variable("projection_w", [2 * unit_num, output_size])
    b = tf.get_variable("projection_b", [output_size])
    x_reshape = tf.reshape(bi_output, [-1, 2 * unit_num])
    projection = tf.matmul(x_reshape, W) + b
    outputs = tf.reshape(projection, [batch_size, seq_length, output_size])
    return outputs

def train(feature, target, configs):
    """Train the BiLSTM-CRF tagger and periodically checkpoint it.

    feature: 2-D matrix [n_rows, seq_length * embedding_size] of inputs.
    target:  2-D int array [n_rows, seq_length] of integer-encoded labels.
    configs: dict loaded from config/config.json.
    Returns the list of recorded losses, one entry per epoch.
    """
    # hyper-parameters
    embedding_size = configs['model']['embedding_size']
    unit_num = configs['model']['unit_num']
    batch_size = configs['training']['batch_size']
    seq_length = configs['model']['sequence_length']
    lr = configs['model']['lr']
    iter_num = configs['training']['epochs']
    save_path = configs['model']['save_dir']

    # Input placeholder: one flat row per sentence, [batch, seq_len * emb]
    X = tf.placeholder(tf.float32, shape=[batch_size, seq_length*embedding_size])
    # Label placeholder: [batch, seq_len]
    Y = tf.placeholder(tf.int32, shape=[batch_size, seq_length])
    # Build the BiLSTM + projection graph; pred holds per-token tag scores
    pred = lstm_crf(X)

    real_y = tf.reshape(Y, [batch_size, seq_length])
    # CRF log-likelihood; every sequence uses the full (padded) length
    log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(pred, real_y, tf.convert_to_tensor(batch_size * [seq_length], dtype=tf.int32))
    sess = tf.Session()
    # Add a training op to tune the parameters.
    # Loss is the mean negative log-likelihood over the batch
    loss = tf.reduce_mean(-log_likelihood)
    train_op = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss)
    saver = tf.train.Saver(tf.global_variables(), max_to_keep=15)
    sess.run(tf.global_variables_initializer())
    total_loss = []

    for i in range(iter_num):
        # NOTE(review): the `-1` drops the final partial/full batch; the last
        # usable batch's loss is the only one recorded per epoch, and `loss_`
        # would be unbound if the dataset has fewer than 2*batch_size rows.
        for step in range(int(feature.shape[0]/batch_size)-1):
            f_x = feature[step * batch_size:(step + 1) * batch_size]
            f_y = target[step * batch_size:(step + 1) * batch_size]
            tf_unary_scores, tf_transition_params, _, loss_ = sess.run([pred, transition_params, train_op, loss],
                                                                       feed_dict={X: f_x,
                                                                                  Y: f_y
                                                                                  })
        total_loss.append(loss_)
        print("迭代第：" + str(i) + "次， Loss为：" + str(loss_))
        if i % 100 == 0:
            # checkpoint every 100 epochs (and at epoch 0)
            print("保存模型：", saver.save(sess, save_path, global_step=i))
    return total_loss

def loss_plt(total_loss):
    """Plot the recorded training-loss curve (one point per epoch)."""
    plt.figure()
    epochs = list(range(len(total_loss)))
    plt.plot(epochs, total_loss, color='b')
    plt.show()

if __name__ == '__main__':
    # NOTE(review): the config file handle is never closed — consider `with`
    configs = json.load(open('config/config.json', 'r'))

    # Load training tokens and their label sequences
    source_string, target_string = get_train_list_my("data_pro/source_sub.txt", "data_pro/target_sub.txt")
    # Train word vectors on the corpus
    vec_model = get_vec_from_corpus(source_string, configs,min_count=1)

    # Alternative: load a previously trained model instead of retraining
    # vec_model = gensim.models.Word2Vec.load('data/ner_word2vec_model')
    #
    # Integer-encode the labels: a 2-D table, one row of tag codes per sentence
    max_sequence = configs['model']['sequence_length']
    target_vector, onehot_model = get_target_label(target_string, max_sequence=max_sequence)

    # Feature matrix: one row per sentence, word vectors concatenated
    # horizontally; during training each X input is sliced by embedding size
    feature = get_train_feature(source_string, vec_model, max_sequence=max_sequence)
    # Clear any stale TF1 graph before building the training graph
    tf.reset_default_graph()
    total_loss = train(feature, target_vector,configs)

    # Visualize the loss curve
    loss_plt(total_loss)
