# coding=utf-8
"""
参考 https://zhuanlan.zhihu.com/p/64881836 改造 Transformer_tf2.py
"""
import time
import datetime
import numpy as np
import tensorflow as tf
from data_load import load_data, get_full_data, load_vocab
tf.enable_eager_execution()


# --- Data: IWSLT 2016 German->English, BPE-segmented ---
fpath1 = "../transformer/iwslt2016/segmented/train.de.bpe"  # source (German) training file
fpath2 = "../transformer/iwslt2016/segmented/train.en.bpe"  # target (English) training file
vocab_fpath = "../transformer/iwslt2016/segmented/bpe.vocab"  # shared BPE vocabulary
maxlen1 = 100  # max source sentence length in tokens
maxlen2 = 100  # max target sentence length in tokens
vocab_size = 32000
# --- Model dimensions (trailing comments show the "Attention Is All You Need" defaults) ---
d_ff = 2048  # feed-forward inner size (paper: 2048)
d_model = 512  # model/embedding width (paper: 512)
num_blocks = 6 # number of encoder/decoder layers (paper: 6)

num_heads = 8  # attention heads per multi-head attention block
dropout_rate = 0.3

# --- Training schedule ---
total_epoch = 1000
init_lr = 0.0003
batch_size = 25 # was 50

num_units = d_model  # alias: width of the shared embedding matrix

def noam_scheme(init_lr, global_step, warmup_steps=4000.):
    """Noam learning-rate schedule (Vaswani et al., 2017, eq. 3).

    The rate ramps up linearly for the first ``warmup_steps`` steps until it
    reaches ``init_lr``, then decays with the inverse square root of the step.

    init_lr: initial learning rate. scalar.
    global_step: scalar step counter.
    warmup_steps: scalar length of the warmup phase.
    """
    current = tf.cast(global_step + 1, dtype=tf.float32)
    warmup_term = current * warmup_steps ** -1.5   # linear ramp-up
    decay_term = current ** -0.5                   # inverse-sqrt decay
    return init_lr * warmup_steps ** 0.5 * tf.minimum(warmup_term, decay_term)

# def ln(inputs, epsilon=1e-8, scope="ln"):
#     '''Applies layer normalization. See https://arxiv.org/abs/1607.06450.
#     inputs: A tensor with 2 or more dimensions, where the first dimension has `batch_size`.
#     epsilon: A floating number. A very small number for preventing ZeroDivision Error.
#     scope: Optional scope for `variable_scope`.
#
#     Returns:
#       A tensor with the same shape and data dtype as `inputs`.
#     '''
#     with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
#         inputs_shape = inputs.get_shape()
#         params_shape = inputs_shape[-1:]
#
#         mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True)
#         beta = tf.get_variable("beta", params_shape, initializer=tf.zeros_initializer())
#         gamma = tf.get_variable("gamma", params_shape, initializer=tf.ones_initializer())
#         normalized = (inputs - mean) / ((variance + epsilon) ** (.5))
#         outputs = gamma * normalized + beta
#
#     return outputs

def label_smoothing(inputs, epsilon=0.1):
    """Apply label smoothing to one-hot targets (Szegedy et al., 2016).

    inputs: tensor whose last axis is the class/vocabulary axis.
    epsilon: smoothing mass; each class receives at least epsilon / V.
    """
    num_classes = inputs.get_shape().as_list()[-1]
    smoothed = inputs * (1 - epsilon) + epsilon / num_classes
    return smoothed

def mask(inputs, key_masks=None, type=None):
    """Masks paddings on keys, or future positions, in attention logits.

    inputs: 3d tensor of attention logits. (h*N, T_q, T_k)
    key_masks: 3d tensor. (N, 1, T_k)
    type: string. "key" | "future"

    Raises:
      ValueError: if `type` is not one of the supported mask kinds.

    e.g.,
    >> inputs = tf.zeros([2, 2, 3], dtype=tf.float32)
    >> key_masks = tf.constant([[0., 0., 1.],
                                [0., 1., 1.]])
    >> mask(inputs, key_masks=key_masks, type="key")
    array([[[ 0.0000000e+00,  0.0000000e+00, -4.2949673e+09],
        [ 0.0000000e+00,  0.0000000e+00, -4.2949673e+09]],

       [[ 0.0000000e+00, -4.2949673e+09, -4.2949673e+09],
        [ 0.0000000e+00, -4.2949673e+09, -4.2949673e+09]],

       [[ 0.0000000e+00,  0.0000000e+00, -4.2949673e+09],
        [ 0.0000000e+00,  0.0000000e+00, -4.2949673e+09]],

       [[ 0.0000000e+00, -4.2949673e+09, -4.2949673e+09],
        [ 0.0000000e+00, -4.2949673e+09, -4.2949673e+09]]], dtype=float32)
    """
    # A very large negative number: applied before softmax so that masked
    # positions get ~0 attention weight.
    padding_num = -2 ** 32 + 1
    if type in ("k", "key", "keys"):
        # key_masks is 1 at padded token positions and 0 at real tokens, so
        # adding key_masks * padding_num pushes padded keys' logits to -inf-ish.
        key_masks = tf.to_float(key_masks)
        # Repeat the per-sample mask once per attention head: (N, T_k) -> (h*N, T_k).
        key_masks = tf.tile(key_masks, [tf.shape(inputs)[0] // tf.shape(key_masks)[0], 1])
        key_masks = tf.expand_dims(key_masks, 1)  # (h*N, 1, T_k), broadcasts over T_q
        outputs = inputs + key_masks * padding_num  # (h*N, T_q, T_k)
    elif type in ("f", "future", "right"):
        # Lower-triangular matrix: position q may only attend to keys <= q.
        diag_vals = tf.ones_like(inputs[0, :, :])  # (T_q, T_k)
        tril = tf.linalg.LinearOperatorLowerTriangular(diag_vals).to_dense()  # (T_q, T_k)
        future_masks = tf.tile(tf.expand_dims(tril, 0), [tf.shape(inputs)[0], 1, 1])  # (N, T_q, T_k)

        paddings = tf.ones_like(future_masks) * padding_num
        outputs = tf.where(tf.equal(future_masks, 0), paddings, inputs)
    else:
        # Previously this branch only printed a warning and then crashed with
        # UnboundLocalError on `return outputs`; fail fast instead.
        raise ValueError("Check if you entered type correctly! Got: %r" % (type,))

    return outputs


def scaled_dot_product_attention(Q, K, V, key_masks,
                                 causality=False, dropout_rate=0.,
                                 training=True,
                                 scope="scaled_dot_product_attention"):
    """Scaled dot-product attention: softmax(Q K^T / sqrt(d_k)) V.

    Q: (h*N, T_q, d_k); K, V: (h*N, T_k, d_k); key_masks: (N, T_k).
    causality: when True, also blinds each position to future positions.
    scope: unused; kept for signature compatibility.
    Returns the context vectors, shape (h*N, T_q, d_v).
    """
    d_k = Q.get_shape().as_list()[-1]

    # Step 1: similarity of every query with every key via dot product.
    scores = tf.matmul(Q, tf.transpose(K, [0, 2, 1]))  # (h*N, T_q, T_k)
    # Scale by sqrt(d_k) to keep softmax inputs in a reasonable range.
    scores /= d_k ** 0.5

    # Masking happens on the combined Q.K logits, so separate query-side and
    # key-side masks are not needed.
    scores = mask(scores, key_masks=key_masks, type="key")
    if causality:
        # Decoder self-attention: forbid attending to future positions.
        scores = mask(scores, type="future")

    # Step 2: normalize the logits into attention weights.
    weights = tf.nn.softmax(scores)
    weights = tf.layers.dropout(weights, rate=dropout_rate, training=training)

    # Step 3: weighted sum over the values.
    return tf.matmul(weights, V)  # (h*N, T_q, d_v)

def positional_encoding(inputs,
                        maxlen,
                        masking=True,
                        scope="positional_encoding"):
    '''Sinusoidal positional encoding, section 3.5 of "Attention Is All You Need".

    inputs: 3d tensor (N, T, E); only its shape and zero pattern are used.
    maxlen: scalar. Must be >= T.
    masking: Boolean. If True, positions where `inputs` equals 0 keep the
        input value (padded slots get no positional signal).
    scope: unused; kept for signature compatibility.

    returns
    3d float32 tensor with the same shape as inputs holding the position
    signal, which the caller adds to the embeddings.
    '''
    depth = inputs.get_shape().as_list()[-1]  # static embedding width E
    batch, length = tf.shape(inputs)[0], tf.shape(inputs)[1]  # dynamic N, T

    # One row of indices [0, 1, ..., T-1] per sample: shape (N, T).
    position_ind = tf.tile(tf.expand_dims(tf.range(length), 0), [batch, 1])

    # Angle table (maxlen, E): pos / 10000^(2i/E). Even feature columns then
    # get sin, odd columns cos. The trig identities
    # sin(a+b) = sin a cos b + cos a sin b and cos(a+b) = cos a cos b - sin a sin b
    # let PE(pos+k) be written as a linear combination of PE(pos) and PE(k),
    # which is what makes relative offsets expressible.
    angle_table = np.array([
        [pos / np.power(10000, (i - i % 2) / depth) for i in range(depth)]
        for pos in range(maxlen)])  # (maxlen, E)

    angle_table[:, 0::2] = np.sin(angle_table[:, 0::2])  # dim 2i
    angle_table[:, 1::2] = np.cos(angle_table[:, 1::2])  # dim 2i+1
    angle_table = tf.convert_to_tensor(angle_table, tf.float32)  # (maxlen, E)

    # Gather one E-dim code per position -> (N, T, E).
    outputs = tf.nn.embedding_lookup(angle_table, position_ind)

    if masking:
        # Wherever the input element is 0, keep that (zero) value instead of
        # the positional code.
        outputs = tf.where(tf.equal(inputs, 0), inputs, outputs)

    return tf.to_float(outputs)

def ff(d_model, d_ff):
    """Position-wise feed-forward block: Dense(d_ff, relu) -> Dense(d_model)."""
    hidden = tf.keras.layers.Dense(d_ff, name="denseHidden", activation='relu')
    output = tf.keras.layers.Dense(d_model, name="denseOutput")
    return tf.keras.Sequential([hidden, output])

def ln():
    """Layer normalization over the last axis.

    Only epsilon differs from the Keras defaults; the original spelled out
    every default argument (center/scale True, zeros/ones initializers, no
    regularizers or constraints, trainable) explicitly.
    """
    return tf.keras.layers.LayerNormalization(axis=-1, epsilon=1e-8)
# class LayerNormalization(tf.keras.layers.Layer):
#     def __init__(self, epsilon=1e-8):
#         self.epsilon = epsilon
#         self.beta = self.add_weight(name="beta", shape=input_shape[-1:], initializer=tf.zeros_initializer(),
#                                     trainable=True)
#         self.gamma = self.add_weight(name="gamma", shape=input_shape[-1:], initializer=tf.ones_initializer(),
#                                      trainable=True)
#         super(LayerNormalization, self).__init__()
#
#     def build(self, input_shape):
#
#         super(LayerNormalization, self).build(input_shape)
#
#     def __call__(self, inputs):
#         # inputs_shape = inputs.get_shape()
#         # params_shape = inputs_shape[-1:]
#
#         mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True)
#         # beta = tf.get_variable("beta", params_shape, initializer=tf.zeros_initializer())
#         # gamma = tf.get_variable("gamma", params_shape, initializer=tf.ones_initializer())
#         normalized = (inputs - mean) / ((variance + self.epsilon) ** (.5))
#         outputs = self.gamma * normalized + self.beta
#         return outputs

class MultiHeadAttention(tf.keras.layers.Layer):
    """Multi-head attention: project q/k/v, split into heads, apply scaled
    dot-product attention, merge heads, add residual, layer-normalize.

    NOTE(review): overriding __call__ instead of Layer.call bypasses Keras's
    call machinery; kept as-is to stay consistent with the rest of this file.
    """

    def __init__(self, d_model, num_heads):
        super(MultiHeadAttention, self).__init__()
        self.num_heads = num_heads
        self.d_model = d_model

        # Linear projections producing the query/key/value representations.
        self.Q = tf.keras.layers.Dense(self.d_model, name="denseQ")
        self.K = tf.keras.layers.Dense(self.d_model, name="denseK")
        self.V = tf.keras.layers.Dense(self.d_model, name="denseV")

        self.lnn = ln()

    def __call__(self, q, k, v, key_masks, causality=False, training=True, dropout_rate=0):
        """q/k/v: (N, T, d_model). Returns (N, T_q, d_model)."""
        q = self.Q(q)  # (batch_size, seq_len, d_model)
        k = self.K(k)
        v = self.V(v)

        # Split the feature axis into heads and stack on the batch axis:
        # (N, T, d_model) -> (h*N, T, d_model/h). No attention computed yet.
        q_ = tf.concat(tf.split(q, self.num_heads, axis=2), axis=0)
        k_ = tf.concat(tf.split(k, self.num_heads, axis=2), axis=0)
        v_ = tf.concat(tf.split(v, self.num_heads, axis=2), axis=0)

        # Scaled dot-product attention per head.
        outputs = scaled_dot_product_attention(
            q_, k_, v_, key_masks, causality, dropout_rate, training)
        # BUG FIX: this line previously read the module-level global
        # `num_heads`, silently ignoring the constructor argument; use the
        # instance's configured head count instead.
        outputs = tf.concat(tf.split(outputs, self.num_heads, axis=0), axis=2)

        # Residual connection (arguably belongs in the encoder/decoder layer),
        # then layer normalization.
        outputs += q
        outputs = self.lnn(outputs)
        return outputs

class EncoderLayer(tf.keras.layers.Layer):
    """One encoder block: multi-head self-attention followed by a
    position-wise feed-forward network. (`dropout_rate` is accepted for
    interface compatibility but not used here.)"""

    def __init__(self, d_model, n_heads, d_ff, dropout_rate=0.1):
        super(EncoderLayer, self).__init__()
        self.mha = MultiHeadAttention(d_model, n_heads)  # self-attention
        self.ffn = ff(d_model, d_ff)

    def __call__(self, inputs, src_mask):
        # Self-attention: queries, keys and values all come from `inputs`.
        attended = self.mha(inputs, inputs, inputs, src_mask)
        # TODO(review): a norm layer between the sub-blocks is missing, and
        # the residual connection currently lives inside MultiHeadAttention —
        # consider moving it out to this level.
        return self.ffn(attended)

class Encoder(tf.keras.layers.Layer):
    """Transformer encoder stack: embedding + positional encoding + dropout,
    then `n_layers` EncoderLayer blocks. Reads the module-level `embeddings`
    variable and the `d_model`/`maxlen1` constants."""

    def __init__(self, n_layers, d_model, n_heads, d_ff, dropout_rate):
        super(Encoder, self).__init__()
        self.d_model = d_model
        self.n_layers = n_layers
        self.encode_layers = [EncoderLayer(d_model, n_heads, d_ff, dropout_rate)
                             for _ in range(n_layers)]
        self.dropout = tf.keras.layers.Dropout(dropout_rate)

    def __call__(self, xs, training=True):
        tokens, seqlens, sents1 = xs
        # True at <pad> positions (token id 0). Shape (N, T1).
        src_masks = tf.math.equal(tokens, 0)

        # Token embedding, scaled by sqrt(d_model) as in the original paper
        # (the lookup is differentiable, so `embeddings` is on the tape).
        enc = tf.nn.embedding_lookup(embeddings, tokens)  # (N, T1, d_model)
        enc = enc * (d_model ** 0.5)

        enc += positional_encoding(enc, maxlen1)
        enc = self.dropout(enc, training=training)
        for layer in self.encode_layers:
            enc = layer(enc, src_masks)

        return enc, src_masks, sents1

class DecoderLayer(tf.keras.layers.Layer):
    """One decoder block: causal self-attention, encoder-decoder attention,
    then a position-wise feed-forward network. (`drop_rate` is accepted for
    interface compatibility but not used here.)"""

    def __init__(self, d_model, num_heads, d_ff, drop_rate=0.1):
        super(DecoderLayer, self).__init__()
        self.mha1 = MultiHeadAttention(d_model, num_heads)  # masked self-attention
        self.mha2 = MultiHeadAttention(d_model, num_heads)  # attends over encoder memory
        self.ffn = ff(d_model, d_ff)

    def __call__(self, decoder_inputs, memory, src_masks, tgt_masks):
        # Causal self-attention over the target sequence.
        hidden = self.mha1(decoder_inputs, decoder_inputs, decoder_inputs,
                           tgt_masks, causality=True)
        # Vanilla attention over the encoder output.
        hidden = self.mha2(hidden, memory, memory, src_masks)
        # TODO(review): a norm layer is missing here, and the residual
        # connection lives inside MultiHeadAttention — consider moving it out.
        return self.ffn(hidden)

class Decoder(tf.keras.layers.Layer):
    """Transformer decoder stack: embedding + positional encoding + dropout,
    `n_layers` DecoderLayer blocks, then an output projection that reuses
    (ties) the module-level `embeddings` matrix."""
    def __init__(self, n_layers, d_model, n_heads, d_ff, dropout_rate):
        super(Decoder, self).__init__()
        self.d_model = d_model
        self.n_layers = n_layers
        self.decoder_layers = [DecoderLayer(d_model, n_heads, d_ff, dropout_rate)
                               for _ in range(n_layers)]

        self.dropout = tf.keras.layers.Dropout(dropout_rate)

    def __call__(self, ys, memory, src_masks, training=True):
        decoder_inputs, y, seqlens, sents2 = ys
        tgt_masks = tf.math.equal(decoder_inputs, 0)  # (N, T2); True at <pad> positions

        # embedding: lookup into the shared module-level matrix
        dec = tf.nn.embedding_lookup(embeddings, decoder_inputs)  # (N, T2, d_model)
        dec = dec * (d_model ** 0.5)  # scale by sqrt(d_model), as in the paper

        dec += positional_encoding(dec, maxlen2)
        dec = self.dropout(dec, training=training)

        for i in range(self.n_layers):
            dec = self.decoder_layers[i](dec, memory, src_masks, tgt_masks)

        # Output projection via the transposed embedding matrix (weight
        # tying); einsum contracts away the d_model axis.
        weights = tf.transpose(embeddings)
        logits = tf.einsum('ntd,dk->ntk', dec, weights)  # (N, T2, vocab_size)
        # argmax over the vocab axis picks the predicted token id per position.
        y_hat = tf.to_int32(tf.argmax(logits, axis=-1))

        return logits, y_hat, y, sents2

def eval(xs, ys, encoder, decoder):
    """Greedy autoregressive decoding for one batch.

    Replaces the teacher-forced decoder inputs with a single <s> per sample,
    then repeatedly feeds the decoder's own predictions back in, for up to
    `maxlen2` steps. Returns the predicted sentence (tf.string scalar) for
    the FIRST sample only. Reads the module-level `token2idx`/`idx2token`/
    `maxlen2`. NOTE(review): shadows the builtin `eval`; kept to avoid
    breaking the existing call site.
    """
    def convert_idx_to_token_tensor(inputs, idx2token):
        # Map a 1-D tensor of token ids to one space-joined string tensor.
        def my_func(inputs):
            return " ".join(idx2token[elem] for elem in inputs)
        return tf.py_func(my_func, [inputs], tf.string)

    decoder_inputs, y, y_seqlen, sents2 = ys
    # xs[0] has shape (N, T1); start every target sequence with <s> -> (N, 1).
    decoder_inputs = tf.ones((tf.shape(xs[0])[0], 1), tf.int32) * token2idx["<s>"]
    ys = (decoder_inputs, y, y_seqlen, sents2)  # y itself stays the reference sentence
    memory, src_masks, sents1 = encoder(xs, False)  # encode once, reuse each step
    print("\n%s, 111... " % datetime.datetime.now().isoformat(), end='')
    for _ in range(maxlen2):
        logits, y_hat, y, sents2 = decoder(ys, memory, src_masks, False)
        # Stop early once the prediction sums to <pad> (all-padding output).
        if tf.reduce_sum(y_hat, 1) == token2idx["<pad>"]: break

        # Append the latest predictions and decode again.
        _decoder_inputs = tf.concat((decoder_inputs, y_hat), 1)
        ys = (_decoder_inputs, y, y_seqlen, sents2)
    print("\n%s, 222... " % datetime.datetime.now().isoformat(), end='')
    pred = convert_idx_to_token_tensor(y_hat[0], idx2token)
    return pred

print("reading data...")
xs, ys, token2idx, idx2token = get_full_data(fpath1, fpath2, maxlen1, maxlen2, vocab_fpath)
print("done reading data...")

# Shared source/target embedding matrix, shape (vocab_size, num_units) = (32000, 512).
embeddings = tf.get_variable('weight_mat',
                                   dtype=tf.float32,
                                   shape=(vocab_size, num_units),
                                   initializer=tf.contrib.layers.xavier_initializer())
# NOTE(review): the training loop later rebinds `embeddings` to a plain tensor
# (zero pad row), after which the tape may no longer see it as a variable.

batch_num = int(((len(xs[0]) - 1)/batch_size) + 1)  # ceil(num_samples / batch_size)
epoch_start_tick = time.time()
stopwatch_tick = time.time()
encoder = Encoder(num_blocks, d_model, num_heads, d_ff, dropout_rate)
decoder = Decoder(num_blocks, d_model, num_heads, d_ff, dropout_rate)

# Restore the latest checkpoint (encoder, decoder and embeddings), if any.
checkpoint_path = './checkpoint/train'
ckpt = tf.train.Checkpoint(encoder=encoder, decoder=decoder, embeddings=embeddings)
ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=1)
if ckpt_manager.latest_checkpoint:
    ckpt.restore(ckpt_manager.latest_checkpoint)
    print('last checkpoit restore')

for epoch in range(0, total_epoch):
    tf.reset_default_graph()  # NOTE(review): likely a no-op/invalid under eager execution — confirm
    lr = noam_scheme(init_lr, epoch)  # Noam schedule keyed on the epoch (not a per-batch step)
    optimizer = tf.train.AdamOptimizer(lr)
    for batch_index in range(batch_num):
        # Slice the next mini-batch out of the full in-memory dataset.
        start_index = batch_index * batch_size
        end_index = min((batch_index + 1) * batch_size, len(xs[0]))
        xs_batch = (tf.convert_to_tensor(np.array(xs[0][start_index:end_index])), tf.convert_to_tensor(np.array(xs[1][start_index:end_index])), tf.convert_to_tensor(np.array(xs[2][start_index:end_index])))
        ys_batch = (tf.convert_to_tensor(np.array(ys[0][start_index:end_index])), tf.convert_to_tensor(np.array(ys[1][start_index:end_index])), tf.convert_to_tensor(np.array(ys[2][start_index:end_index])),
              tf.convert_to_tensor(np.array(ys[3][start_index:end_index])))

        with tf.GradientTape() as tape:
            # Zero out row 0 (the <pad> embedding).
            # NOTE(review): this rebinds the module-level `embeddings` from a
            # Variable to a plain tensor on the first pass, so the tape may no
            # longer track it as a trainable variable afterwards — confirm.
            embeddings = tf.concat((tf.zeros(shape=[1, num_units]),
                                   embeddings[1:, :]), 0)
            memory, src_masks, sents1 = encoder(xs_batch, training=True)
            logits, y_hat, y, sents2 = decoder(ys_batch, memory, src_masks, training=True)

            # Smoothed one-hot targets, then per-token cross entropy.
            y_ = label_smoothing(tf.one_hot(y, depth=vocab_size))
            ce = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=y_)
            nonpadding = tf.to_float(tf.not_equal(y, token2idx["<pad>"]))  # 1.0 at real tokens, 0.0 at <pad>
            # loss = tf.reduce_sum(ce)
            # Mean cross entropy over real tokens only: the numerator sums ce
            # at non-padding positions, the denominator counts those positions,
            # and 1e-7 guards against division by zero for an all-pad batch.
            loss = tf.reduce_sum(ce * nonpadding) / (tf.reduce_sum(nonpadding) + 1e-7)
            print('.', end='')

        # encoder_var_list = encoder.trainable_variables
        # decoder_var_list = decoder.trainable_variables

        # Backprop through everything the tape recorded, then apply updates.
        var_list = tape.watched_variables()
        gradients = tape.gradient(loss, var_list)
        optimizer.apply_gradients(zip(gradients, var_list))

    print(epoch, end='')
    if epoch % 2 == 0:
        # Report wall-clock time every two epochs.
        print("took %ss" % (time.time() - stopwatch_tick))
        stopwatch_tick = time.time()

    if epoch != 0 and epoch % 50 == 0:

        # Save a checkpoint, then greedily decode the first training sentence
        # as a quick qualitative eval.
        ckpt_manager.save()
        print("\n%s, eval start... " % datetime.datetime.now().isoformat(), end='')
        xs_eval_batch = (tf.convert_to_tensor(np.array(xs[0][0:1])),
                         tf.convert_to_tensor(np.array(xs[1][0:1])),
                         tf.convert_to_tensor(np.array(xs[2][0:1])))
        ys_eval_batch = (tf.convert_to_tensor(np.array(ys[0][0:1])),
                         tf.convert_to_tensor(np.array(ys[1][0:1])),
                         tf.convert_to_tensor(np.array(ys[2][0:1])),
                         tf.convert_to_tensor(np.array(ys[3][0:1])))
        pred = eval(xs_eval_batch, ys_eval_batch, encoder, decoder)
        pred_sent = "".join(str(pred.numpy(), encoding="utf8"))
        pred_sent = pred_sent.split("</s>")[0].strip()
        pred_sent = pred_sent.replace("▁", " ")  # remove bpe symbols
        print("\n%s, eval sent: %s" % (datetime.datetime.now().isoformat(), pred_sent))
        # TODO(review): accuracy below is a placeholder (always 0.0); the
        # commented-out code sketches a per-batch accuracy computation.
        # hypotheses = y_hat.numpy().tolist()
        # _hypotheses = []
        # for h in hypotheses:
        #     sent = "".join(idx2token[idx] for idx in h)
        #     sent = sent.split("</s>")[0].strip()
        #     sent = sent.replace("▁", " ")  # remove bpe symbols
        #     _hypotheses.append(sent.strip())
        # print(_hypotheses[0])
        # print(_hypotheses[1])
        #
        # ys_batch_target = [str(x, encoding="utf8") for x in ys_batch[3].numpy().tolist()]
        # correct_predictions = tf.equal(_hypotheses, ys_batch_target)
        # accuracy = tf.reduce_mean(input_tensor=tf.cast(correct_predictions, "float"), name="accuracy")
        print("take %ss finish %s, epoch: %s, batch: %s, Train loss is : %f, Train accuracy is : %f" % (
            (time.time() - epoch_start_tick), datetime.datetime.now().isoformat(),
            epoch, batch_index, loss, 0.))
        epoch_start_tick = time.time()