""" input data preprocess.
"""
import gensim
import numpy as np
from gensim.models.keyedvectors import KeyedVectors

data_prefix = '../data/nlu_data/'
word2vec_path = data_prefix+'wiki.en.vec'
training_data_path = data_prefix + 'train_shuffle.txt'
test_data_path = data_prefix + 'test.txt'

seen_intent = ['music', 'search', 'movie', 'weather', 'restaurant']
unseen_intent = ['playlist', 'book']


def load_w2v(file_name):
    """Load a pre-trained word2vec model from a text-format vector file.

    Args:
        file_name: path to the word-vector file (plain-text, non-binary).

    Returns:
        A gensim KeyedVectors instance holding the embeddings.
    """
    return KeyedVectors.load_word2vec_format(file_name, binary=False)

def process_label(input_intents: 'list[str]', w2v: 'KeyedVectors'):
    """Map intent labels to class ids and label embedding vectors.

    Each label may consist of several space-separated words; its vector
    is the element-wise sum of the vectors of its in-vocabulary words.
    Out-of-vocabulary words are reported and skipped (the original code
    warned but then crashed with a KeyError in get_vector).

    Args:
        input_intents: intent label strings, e.g. ['music', 'book'].
        w2v: word-vector model exposing `key_to_index` and `get_vector`.

    Returns:
        (class_dict, label_vec): dict mapping label string -> class id,
        and an array with one summed label vector per intent.
    """
    class_dict = {}
    label_vec = []
    for class_id, line in enumerate(input_intents):
        label = line.split(' ')
        # keep only in-vocabulary words; warn about the rest instead of
        # letting get_vector() below raise KeyError
        known = []
        for w in label:
            if w in w2v.key_to_index:
                known.append(w)
            else:
                print("label not in w2v dict", w)
        # compute label vec as the sum of its word vectors
        label_sum = np.sum([w2v.get_vector(w) for w in known], axis=0)
        label_vec.append(label_sum)
        # store class names => index
        class_dict[' '.join(label)] = class_id
    return class_dict, np.array(label_vec)

def load_vec(file_path, w2v: 'KeyedVectors', class_dict: dict, in_max_len: int = 0):
    """Read a tab-separated "label<TAB>sentence" file into index arrays.

    Args:
        file_path: path of the data file; one example per line, formatted
            as "<label words>\\t<question words>".
        w2v: word-vector model exposing `key_to_index`.
        class_dict: mapping from label string to class id; lines whose
            label is absent are reported and skipped.
        in_max_len: maximum sequence length (0 means unlimited: the
            longest sentence in the file defines the padded length).

    Returns:
        (x_padding, input_y, s_len, max_len): padded word-id matrix,
        class-id array, per-sentence lengths (never exceeding max_len),
        and the padded length actually used.
    """
    input_x = []  # input sentence word ids
    input_y = []  # input label ids
    s_len = []    # input sentence lengths
    max_len = 0
    with open(file_path, encoding='utf-8') as lines:
        for line in lines:
            arr = line.strip().split('\t')
            if len(arr) < 2:
                # malformed line without a tab separator: skip instead of
                # crashing with IndexError on arr[1]
                continue
            label = arr[0].split(' ')
            question = arr[1].split(' ')
            cname = ' '.join(label)
            if cname not in class_dict:
                print('{} is unknown label'.format(cname))
                continue
            # translate words into vocabulary indexes, dropping OOV words
            x_arr = [w2v.key_to_index[w] for w in question if w in w2v.key_to_index]
            s_l = len(x_arr)
            if s_l <= 1:
                # skip empty / single-word sentences
                continue
            if in_max_len == 0 and s_l > max_len:
                max_len = s_l
            input_x.append(np.array(x_arr))
            input_y.append(class_dict[cname])
            s_len.append(s_l)
    # add paddings (word id 0) up to max_len
    max_len = max(in_max_len, max_len)
    x_padding = []
    for i in range(len(input_x)):
        if max_len < s_len[i]:
            # sentence longer than the requested length: truncate, and
            # clamp the stored length so it never exceeds max_len
            x_padding.append(input_x[i][0:max_len])
            s_len[i] = max_len
            continue
        tmp = np.append(input_x[i], np.zeros((max_len - s_len[i],), dtype=np.int64))
        x_padding.append(tmp)

    x_padding = np.array(x_padding)
    input_y = np.array(input_y)
    s_len = np.array(s_len)
    return x_padding, input_y, s_len, max_len


def get_label_1hot(data):
    """Build a one-hot matrix for the training labels in data['y_tr'].

    Args:
        data: dict with key 'y_tr' holding a 1-D array of class ids.

    Returns:
        Float array of shape (sample_num, class_num) with a single 1.0
        per row, at the column of that sample's class. Columns follow
        the sorted order of the distinct label values.
    """
    y_base = data['y_tr']
    sample_num = y_base.shape[0]
    # use the actual distinct label values; the original overwrote them
    # with range(class_num), which is only correct for contiguous 0..n-1 ids
    labels = np.unique(y_base)
    class_num = labels.shape[0]
    # np.float was removed in NumPy 1.20+; builtin float (float64) is equivalent
    ind = np.zeros((sample_num, class_num), dtype=float)
    for i, lab in enumerate(labels):
        ind[y_base == lab, i] = 1
    return ind


def read_datasets():
    """Load word vectors plus the train/test datasets into one dict.

    Returns:
        dict with: 'embedding' (row-normalized word embeddings),
        'x_tr'/'y_tr'/'s_len' (training word ids, class ids, lengths),
        'x_te'/'y_te'/'u_len' (test counterparts), 'sc_vec'/'sc_dict'
        and 'uc_vec'/'uc_dict' (seen/unseen label vectors and id maps),
        'max_len' (padded sequence length), and 's_label' (one-hot
        training labels).
    """
    print("------------------read datasets begin-------------------")
    data = {}

    # load word2vec model
    print("------------------load word2vec begin-------------------")
    w2v: gensim.models.KeyedVectors = load_w2v(word2vec_path)
    print("------------------load word2vec end---------------------")

    # store only the row-normalized embedding matrix (the original
    # assigned the raw matrix first and immediately overwrote it)
    data['embedding'] = norm_matrix(w2v.vectors)

    # norm_m = w2v.get_normed_vectors()
    # TODO: once the model is wired up, compare l1-norm vs l2-norm vectors

    # pre-process seen and unseen labels
    sc_dict, sc_vec = process_label(seen_intent, w2v)
    uc_dict, uc_vec = process_label(unseen_intent, w2v)

    # translate the datasets into word-id arrays; the training pass
    # determines max_len, which the test pass then reuses for padding
    x_tr, y_tr, s_len, max_len = load_vec(
            training_data_path, w2v, sc_dict, 0)
    x_te, y_te, u_len, max_len = load_vec(
            test_data_path, w2v, uc_dict, max_len)

    # training data and labels
    data['x_tr'] = x_tr
    data['y_tr'] = y_tr
    data['s_len'] = s_len  # per-sentence lengths
    data['sc_vec'] = sc_vec  # seen-label vectors
    data['sc_dict'] = sc_dict

    # test data and labels
    data['x_te'] = x_te
    data['y_te'] = y_te
    data['u_len'] = u_len
    data['uc_vec'] = uc_vec
    data['uc_dict'] = uc_dict

    data['max_len'] = max_len

    # one-hot training labels (not strictly needed as pytorch input)
    data['s_label'] = get_label_1hot(data)
    print("------------------read datasets end---------------------")
    return data

def norm_matrix(matrix):
      """ normalize matrix by column
	input : numpy array, dtype = float32
	output : normalized numpy array, dtype = float32
      """
      # check dtype of the input matrix
      np.testing.assert_equal(type(matrix).__name__, 'ndarray')
      np.testing.assert_equal(matrix.dtype, np.float32)
      row_sums = matrix.sum(axis=1)
      # replace zero denominator
      row_sums[row_sums == 0] = 1
      norm_matrix = matrix / row_sums[:, np.newaxis]
      return norm_matrix