# 作者 :南雨
# 时间 : 2022/6/28 9:12
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing.text import Tokenizer
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from gensim.models import word2vec
from tensorflow.keras.preprocessing.sequence import pad_sequences
import numpy as np
from dzj.trec_qa.trec_constant import Trec_constant
from dzj.trec_qa.trec_processing import result

# Load the tokenized corpus (word_list) and the labelled dataset (all_data)
# produced by the project's preprocessing step.
# NOTE(review): module-level side effect — result() runs at import time;
# presumably all_data is a DataFrame with 'text' and 'labels' columns (see
# usage below) — confirm against trec_processing.
word_list, all_data = result()


def build_word2Model(word_list):
    """Train a Word2Vec model over the tokenized sentences.

    :param word_list: iterable of tokenized sentences (lists of tokens)
    :return: the trained gensim Word2Vec model
    """
    # All hyper-parameters come from the project-wide Trec_constant config.
    model = word2vec.Word2Vec(
        word_list,
        vector_size=Trec_constant.vector_size,
        window=Trec_constant.window_size,
        min_count=Trec_constant.min_word_count,
        workers=Trec_constant.nums_workers,
    )
    return model


def build_dict_vector():
    """Build the vocabulary lookups and the embedding matrix.

    Trains a fresh Word2Vec model on the module-level ``word_list`` and
    derives three structures from its vocabulary.

    :return: tuple of
        - dict_word: {word: index}, with "OOV" reserved at index 0
        - dict_word_vector: {word: embedding vector}
        - embeddings_matrix: ndarray of shape (vocab_size + 1, vector_size);
          row 0 stays all-zero as the OOV vector
    """
    # NOTE(review): this retrains Word2Vec on every call — consider loading a
    # saved model (see the commented-out load below) if called repeatedly.
    # w2v_model = word2vec.Word2Vec.load(model_path)
    w2v_model = build_word2Model(word_list)

    # Only the vocabulary words are needed, not their indices/counts.
    vocab_list = list(w2v_model.wv.key_to_index)
    dict_word = {"OOV": 0}   # plain dictionary: word -> index (0 = OOV)
    dict_word_vector = {}    # word -> embedding vector
    # Shape: (vocabulary size + 1 for OOV row, embedding dimensionality).
    embeddings_matrix = np.zeros((len(vocab_list) + 1, Trec_constant.vector_size))

    # Rows 1..n hold the vocabulary vectors; row 0 remains zero for OOV.
    for idx, word in enumerate(vocab_list, start=1):
        vector = w2v_model.wv[word]  # single lookup instead of two per word
        dict_word[word] = idx
        dict_word_vector[word] = vector
        embeddings_matrix[idx] = vector

    return dict_word, dict_word_vector, embeddings_matrix


# def get_model_matrix_vector():
#     model_path = Constant.w2v_model_path
#     build_word2Model(word_list)
#     dict_word, dict_word_vector, embeddings_matrix = build_dict_vector(model_path)
#     print("词向量构建成功.....")
#     return embeddings_matrix


def get_tokenizer():
    """Fit a Keras Tokenizer on the corpus and return it with the embedding matrix.

    :return: (text_preprocesser, embeddings_matrix) where the tokenizer has
        been fitted on all_data['text'] and embeddings_matrix is the Word2Vec
        matrix built by build_dict_vector().
    """
    # Fix: the previous extra build_word2Model(word_list) call here trained a
    # whole Word2Vec model and discarded it — build_dict_vector() already
    # trains its own model internally.
    embeddings_matrix = build_dict_vector()[2]
    vocab_size = len(embeddings_matrix)  # vocabulary size incl. the OOV row

    # Reserve an explicit OOV token so unseen words map to "<UNK>".
    text_preprocesser = Tokenizer(num_words=vocab_size, oov_token="<UNK>")
    text_preprocesser.fit_on_texts(all_data['text'])
    return text_preprocesser, embeddings_matrix


def get_train_test_val():
    """Build padded sequences and one-hot labels, then split train/val/test.

    Splits 25% off as the test set, then 30% of the remainder as validation
    (roughly 52.5% / 22.5% / 25% train/val/test), both with a fixed
    random_state for reproducibility.

    :return: train_X, test_X, val_X, train_y, test_y, val_y, embeddings_matrix
    """
    text_preprocesser, embeddings_matrix = get_tokenizer()
    data_sequence = text_preprocesser.texts_to_sequences(all_data['text'])
    # Pad/truncate every sequence to max_len at the tail ('post').
    data_x = pad_sequences(data_sequence, maxlen=Trec_constant.max_len,
                           padding='post', truncating='post')

    # Fix: collect the labels directly instead of an index-by-index
    # .iloc append loop (O(n) positional lookups re-implementing list()).
    labels = list(all_data['labels'])
    le = LabelEncoder()
    data_labels = le.fit_transform(labels)
    # One-hot encode against the full label set from the project constants.
    data_y = to_categorical(data_labels, num_classes=len(Trec_constant.labels))

    # First carve off the 25% test split, then split the rest 70/30 train/val.
    train_XX, test_X, train_yy, test_y = train_test_split(
        data_x, data_y, test_size=0.25, random_state=120)
    train_X, val_X, train_y, val_y = train_test_split(
        train_XX, train_yy, test_size=0.3, random_state=120)

    return train_X, test_X, val_X, train_y, test_y, val_y, embeddings_matrix


# if __name__ == '__main__':
#     train_X, test_X, val_X, train_y, test_y, val_y, embeddings_matrix = get_train_test_val()
#     print(embeddings_matrix[1])
