# coding=utf-8

# import tensorflow as tf
import numpy as np

def load_data(fpath1, fpath2, maxlen1, maxlen2):
    '''Reads parallel source/target files, dropping pairs that are too long.

    fpath1: source file path. string.
    fpath2: target file path. string.
    maxlen1: source sentence maximum length (including </s>). scalar.
    maxlen2: target sentence maximum length (including </s>). scalar.

    Returns
    src_sents: list of source sentences (stripped)
    tgt_sents: list of target sentences (stripped)
    '''
    src_sents, tgt_sents = [], []
    with open(fpath1, 'r', encoding='utf-8') as src_f, open(fpath2, 'r', encoding='utf-8') as tgt_f:
        for src_line, tgt_line in zip(src_f, tgt_f):
            # +1 accounts for the </s> token appended later by encode()
            too_long = (len(src_line.split()) + 1 > maxlen1
                        or len(tgt_line.split()) + 1 > maxlen2)
            if too_long:
                continue
            src_sents.append(src_line.strip())
            tgt_sents.append(tgt_line.strip())
    return src_sents, tgt_sents

def load_vocab(vocab_fpath):
    '''Loads vocabulary file and returns idx<->token maps.

    vocab_fpath: string. vocabulary file path. One token per line; only the
        first whitespace-separated field on each line is used.
    Note that these are reserved
    0: <pad>, 1: <unk>, 2: <s>, 3: </s>

    Returns
    two dictionaries: token2idx and idx2token.
    '''
    # Use a context manager so the file handle is closed deterministically
    # (the original called open(...).read() and leaked the handle to the GC).
    with open(vocab_fpath, 'r', encoding='utf-8') as f:
        vocab = [line.split()[0] for line in f.read().splitlines()]
    token2idx = {token: idx for idx, token in enumerate(vocab)}
    idx2token = dict(enumerate(vocab))
    return token2idx, idx2token

def encode(inp, type, dict):
    '''Converts a sentence string to a list of token ids. Used for `generator_fn`.

    inp: sentence string (whitespace-tokenized text).
    type: "x" (source side) or "y" (target side)
    dict: token2idx dictionary

    Returns
    list of token ids; tokens missing from the vocabulary map to the <unk> id.
    '''
    pieces = inp.split()
    if type == "x":
        # source side: append the end-of-sentence marker only
        pieces = pieces + ["</s>"]
    else:
        # target side: wrap with begin- and end-of-sentence markers
        pieces = ["<s>"] + pieces + ["</s>"]
    unk_id = dict["<unk>"]
    return [dict.get(tok, unk_id) for tok in pieces]

def padding_list(lst, padding_value=0):
    '''Pads every inner list in-place to the length of the longest one.

    lst: list of lists; each inner list is mutated in place.
    padding_value: fill value for the tail. Defaults to 0 (the <pad> id).

    Returns
    lst, with all inner lists padded to equal length. An empty outer list
    is returned unchanged (the original raised ValueError from max([])).
    '''
    if not lst:
        return lst
    max_len = max(len(item) for item in lst)
    for item in lst:
        # += extends the existing list object, preserving in-place semantics
        item += [padding_value] * (max_len - len(item))
    return lst

def get_full_data(fpath1, fpath2, maxlen1, maxlen2, vocab_fpath, shuffle=False, limit=200):
    '''Loads, encodes and pads a full parallel corpus in one pass.

    fpath1: source file path. string.
    fpath2: target file path. string.
    maxlen1: source sentence maximum length. scalar.
    maxlen2: target sentence maximum length. scalar.
    vocab_fpath: vocabulary file path. string.
    shuffle: if True, shuffle sentence pairs in unison. The original accepted
        this flag but silently ignored it; it is now honored. Default False
        preserves the original behavior.
    limit: keep at most this many sentence pairs; pass None to keep all.
        Defaults to 200, preserving the original hard-coded debug truncation.

    Returns
    (x_lst, x_seqlen_lst, sent1_lst): padded encoder inputs, their true
        lengths, and the raw source sentences.
    (decoder_input_lst, y_lst, y_seqlen_lst, sent2_lst): padded decoder
        inputs, padded targets, target lengths, raw target sentences.
    token2idx, idx2token: vocabulary maps.
    '''
    import random

    sents1, sents2 = load_data(fpath1, fpath2, maxlen1, maxlen2)
    if limit is not None:
        # NOTE(review): the original always truncated to 200 for debugging;
        # kept as the default so existing callers see identical output.
        sents1, sents2 = sents1[:limit], sents2[:limit]
    token2idx, idx2token = load_vocab(vocab_fpath)

    if shuffle:
        # Shuffle source and target together so pairs stay aligned.
        pairs = list(zip(sents1, sents2))
        random.shuffle(pairs)
        sents1 = [p[0] for p in pairs]
        sents2 = [p[1] for p in pairs]

    x_lst, x_seqlen_lst, sent1_lst = [], [], []
    decoder_input_lst, y_lst, y_seqlen_lst, sent2_lst = [], [], [], []
    for sent1, sent2 in zip(sents1, sents2):
        x = encode(sent1, "x", token2idx)
        y = encode(sent2, "y", token2idx)
        # Teacher forcing: decoder input is <s> + tokens, target is tokens + </s>
        decoder_input, y = y[:-1], y[1:]

        x_lst.append(x)
        x_seqlen_lst.append(len(x))
        sent1_lst.append(sent1)

        decoder_input_lst.append(decoder_input)
        y_lst.append(y)
        y_seqlen_lst.append(len(y))
        sent2_lst.append(sent2)

    # Pad ragged id sequences with the <pad> id (0) so batches are rectangular.
    padding_list(x_lst, 0)
    padding_list(decoder_input_lst, 0)
    padding_list(y_lst, 0)

    return (x_lst, x_seqlen_lst, sent1_lst), (decoder_input_lst, y_lst, y_seqlen_lst, sent2_lst), token2idx, idx2token

if __name__ == "__main__":
    fpath1 = "../transformer/iwslt2016/segmented/train.de.bpe"
    fpath2 = "../transformer/iwslt2016/segmented/train.en.bpe"
    vocab_fpath = "../transformer/iwslt2016/segmented/bpe.vocab"
    maxlen1 = 100
    maxlen2 = 100
    xs_lst, ys_lst = get_full_data(fpath1, fpath2, maxlen1, maxlen2, vocab_fpath)
    print("done")