# 作者 :南雨
# 时间 : 2022/6/28 9:41
import pandas as pd
import pyprind
import tensorflow as tf
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import LabelBinarizer, LabelEncoder
from gensim.models import word2vec
from tensorflow.keras.preprocessing.sequence import pad_sequences
import numpy as np
from dzj.med_qa.med_constant import Med_constant


def get_data():
    """Load the train, test and validation splits from their CSV files.

    :return: (train, test, valid) pandas DataFrames
    """
    paths = (
        Med_constant.train_save_path,
        Med_constant.test_save_path,
        Med_constant.valid_save_path,
    )
    train, test, valid = (pd.read_csv(p) for p in paths)
    return train, test, valid


def get_all_list():
    """Collect every question text across all data splits.

    :return: list of "question" strings from train, test and valid combined
    """
    combined = pd.concat(get_data(), axis=0)
    return combined["question"].tolist()


def build_word2Model():
    """Train a Word2Vec model over all question texts.

    NOTE(review): each corpus entry is a raw string, so gensim iterates it
    character by character (character-level embeddings) — this matches the
    char-level lookup in text_to_sequence; confirm it is intentional.

    :return: the trained gensim Word2Vec model
    """
    corpus = get_all_list()
    model = word2vec.Word2Vec(
        corpus,
        vector_size=Med_constant.vector_size,
        window=Med_constant.window_size,
        min_count=Med_constant.min_word_count,
        workers=Med_constant.nums_workers,
    )
    # Persisting the model is currently disabled:
    # model.save(Med_constant.w2v_model_path)
    return model


def build_dict_vector():
    """Build the word index, word-vector dict and embedding matrix.

    Trains a fresh Word2Vec model and derives three structures:
      - dict_word: word -> integer index, with "OOV" reserved at index 0
      - dict_word_vector: word -> embedding vector
      - embeddings_matrix: array of shape (vocab_size + 1, vector_size)
        whose row i holds the vector of the word with index i; row 0
        stays all-zeros for the OOV token

    :return: (dict_word, dict_word_vector, embeddings_matrix)
    """
    # NOTE(review): this retrains Word2Vec on every call; consider loading a
    # saved model instead (see the commented-out save in build_word2Model).
    w2v_model = build_word2Model()
    vocab_list = list(w2v_model.wv.key_to_index)

    dict_word = {"OOV": 0}  # index 0 is reserved for out-of-vocabulary
    dict_word_vector = {}
    embeddings_matrix = np.zeros((len(vocab_list) + 1, Med_constant.vector_size))

    # enumerate from 1 so the OOV token keeps row/index 0;
    # hoist the wv[word] lookup so it is done once per word
    for idx, word in enumerate(vocab_list, start=1):
        vector = w2v_model.wv[word]
        dict_word[word] = idx
        dict_word_vector[word] = vector
        embeddings_matrix[idx] = vector

    return dict_word, dict_word_vector, embeddings_matrix


def text_to_sequence(data, dict_word):
    """Map each question's characters to their dictionary indices.

    :param data: DataFrame with a "question" column of strings
    :param dict_word: mapping char -> integer index ("OOV" -> 0)
    :return: list of index lists, one per question
    """
    total_list = []
    pbr = pyprind.ProgBar(len(data))
    for line_text in data["question"]:
        # dict.get avoids the double lookup of `in dict.keys()` then `[]`;
        # unknown characters map to the OOV index 0
        total_list.append([dict_word.get(word, 0) for word in line_text])
        pbr.update()
    return total_list


def label_to_onehot(data_df):
    """One-hot encode the label_id column of a data split.

    :param data_df: DataFrame with a "label_id" column
    :return: numpy array of shape (len(data_df), num_classes)
    """
    # .tolist() replaces the manual range/iloc loop — one vectorized call
    labels = data_df['label_id'].tolist()
    le = LabelEncoder()
    data_labels = le.fit_transform(labels)
    # NOTE(review): LabelEncoder assigns indices by sorted label order; this
    # must agree with the ordering implied by Med_constant.category_dict —
    # confirm against where predictions are decoded.
    data_y = to_categorical(data_labels, num_classes=len(Med_constant.category_dict))
    return data_y


def get_list():
    """Build padded input sequences and one-hot labels for all three splits.

    :return: (train_X, train_y, test_X, test_y, valid_X, valid_y)
    """
    dict_word = build_dict_vector()[0]
    splits = get_data()  # (train, test, valid)

    def _to_x(df):
        # pad/truncate at the end so every sample has length max_len
        seqs = text_to_sequence(df, dict_word)
        return pad_sequences(seqs, maxlen=Med_constant.max_len,
                             padding='post', truncating='post')

    train_X, test_X, valid_X = (_to_x(df) for df in splits)
    train_y, test_y, valid_y = (label_to_onehot(df) for df in splits)

    return train_X, train_y, test_X, test_y, valid_X, valid_y


def get_test():
    """Prepare only the test split: padded inputs, one-hot labels, word dict.

    :return: (test_X, test_y, dict_word)
    """
    _, test_data, _ = get_data()
    dict_word = build_dict_vector()[0]
    sequences = text_to_sequence(test_data, dict_word)
    test_X = pad_sequences(sequences,
                           maxlen=Med_constant.max_len,
                           padding='post',
                           truncating='post')
    test_y = label_to_onehot(test_data)
    return test_X, test_y, dict_word

