import numpy as np
from jy.trec_qa.trec_constant import Trec_Const
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.utils import to_categorical
from jy.trec_qa.trec_processing import process
from sklearn.model_selection import train_test_split

# Module-level dataframe produced by the project's preprocessing step.
# Downstream code reads the 'question' and 'label' columns (see uses below).
data_df = process()


def build_label_id_dict():
    """Return a mapping from each distinct label to a stable integer id.

    Ids follow sorted label order, so the mapping is deterministic for a
    given set of labels.
    """
    return {label: idx for idx, label in enumerate(sorted(set(data_df['label'])))}


def fun(i):
    """Map a raw label value to its integer id.

    Bug fixed: the original rebuilt the whole label->id dict on every call,
    which made mapping all rows via ``.apply`` O(n^2).  The dict is now
    built once and cached on the function object.

    Raises:
        KeyError: if ``i`` is not a label present in ``data_df``.
    """
    cache = getattr(fun, "_label_id_cache", None)
    if cache is None:
        cache = build_label_id_dict()
        fun._label_id_cache = cache
    return cache[i]


# Replace each string label in the dataframe with its integer id in place.
data_df['label'] = data_df['label'].apply(fun)


def get_text():
    """Return every question in the dataset as a plain Python list."""
    return list(data_df["question"])


def get_label():
    """Return the integer label ids as a list, in dataframe order.

    The original append loop re-implemented ``list()`` by hand; iterating
    the column directly yields the same elements in the same order.
    """
    return list(data_df['label'])


def get_tokenizer():
    """Build a Keras Tokenizer (vocab capped at MAX_NB_WORDS) fitted on all questions."""
    tok = Tokenizer(num_words=Trec_Const.MAX_NB_WORDS)
    tok.fit_on_texts(get_text())
    return tok


def get_train_val():
    """Tokenize, pad, one-hot encode and split the data.

    Returns:
        Tuple ``(x_train, y_train, x_val, y_val, x_test, y_test)`` where
        the validation set is the shuffled tail of size
        ``VALIDATION_SPLIT * n`` and the test set is carved out of the
        remaining training portion (20%, fixed random_state).

    Bugs fixed:
        * ``x_val``/``y_val`` previously sliced ``[:-n]`` — the *training*
          portion — so validation silently duplicated the training data;
          they now take the held-out tail ``[split_at:]``.
        * When the split rounded to 0 samples, ``[:-0]`` produced an empty
          training set; an explicit split index handles that edge cleanly.
    """
    tokenizer = get_tokenizer()
    sequences = tokenizer.texts_to_sequences(get_text())
    data = pad_sequences(sequences, maxlen=Trec_Const.MAX_SEQUENCE_LENGTH)
    # One-hot encode the integer label ids.
    labels = to_categorical(np.asarray(get_label()))

    # Shuffle samples and labels with the same permutation.
    indices = np.arange(data.shape[0])
    np.random.shuffle(indices)
    data = data[indices]
    labels = labels[indices]

    nb_validation_samples = int(Trec_Const.VALIDATION_SPLIT * data.shape[0])
    split_at = data.shape[0] - nb_validation_samples
    x_train = data[:split_at]
    y_train = labels[:split_at]
    x_val = data[split_at:]
    y_val = labels[split_at:]
    x_train, x_test, y_train, y_test = train_test_split(
        x_train, y_train, test_size=0.2, random_state=120)
    return x_train, y_train, x_val, y_val, x_test, y_test


def build_matrix():
    """Build an embedding matrix aligned with the tokenizer's word index.

    Row ``i`` holds the pretrained vector for the word whose tokenizer
    index is ``i``; words absent from the pretrained file keep a random
    initialization.

    Returns:
        np.ndarray of shape ``(len(word_index) + 1, EMBEDDING_DIM)``.

    Bug fixed: the vector file was opened without a context manager, so
    the handle leaked if parsing raised; ``with open(...)`` guarantees it
    is closed.
    """
    word_index = get_tokenizer().word_index
    # Parse the pretrained vector file, one "<word> <v1> <v2> ..." per line.
    embeddings_index = {}
    with open(Trec_Const.vector_path, encoding='utf-8') as f:
        for line in f:
            values = line.split()
            embeddings_index[values[0]] = np.asarray(values[1:], dtype='float32')
    # Random init so out-of-vocabulary words still get non-zero vectors.
    embedding_matrix = np.random.random((len(word_index) + 1, Trec_Const.EMBEDDING_DIM))
    for word, i in word_index.items():
        vector = embeddings_index.get(word)
        if vector is not None:
            embedding_matrix[i] = vector
    return embedding_matrix
