# coding=utf-8
import time
import datetime
import numpy as np
import tensorflow as tf
from data_load import load_data, get_full_data, load_vocab
tf.enable_eager_execution()


# --- Data paths: BPE-segmented IWSLT2016 German->English training corpus ---
fpath1 = "../transformer/iwslt2016/segmented/train.de.bpe"
fpath2 = "../transformer/iwslt2016/segmented/train.en.bpe"
vocab_fpath = "../transformer/iwslt2016/segmented/bpe.vocab"
# --- Model hyperparameters (mirror the base Transformer configuration) ---
maxlen1 = 100  # maximum source sentence length
maxlen2 = 100  # maximum target sentence length
vocab_size = 32000
d_ff = 2048    # inner dimension of the position-wise feed-forward layers
d_model = 512  # model / embedding dimension
num_blocks = 6 # number of stacked encoder and decoder blocks

num_heads = 8
dropout_rate = 0.3

# xs, ys = xs_lst[0], ys_lst[0]
# xs (100, 3)
# ys (100, 4)
# aaa = np.array(xs_lst)
# xs, ys = tf.convert_to_tensor(np.array(xs_lst)), tf.convert_to_tensor(np.array(ys_lst))

num_units = d_model
# Shared token-embedding matrix; encode()/decode() look tokens up in it and
# decode() also reuses its transpose as the output projection (weight tying).
# embeddings (32000, 512)
embeddings = tf.get_variable('weight_mat',
                                   dtype=tf.float32,
                                   shape=(vocab_size, num_units),
                                   initializer=tf.contrib.layers.xavier_initializer())

# ff_dense_layer1 = tf.layers.Dense(num_units[0], name="ff_dense_layer1", activation=tf.nn.relu)
# ff_dense_layer2 = tf.layers.Dense(num_units[1], name="ff_dense_layer2")
#
# mul_attention_dense_layerQ = tf.layers.Dense(d_model, name="mul_attention_dense_layerQ", use_bias=True)
# mul_attention_dense_layerK = tf.layers.Dense(d_model, name="mul_attention_dense_layerK", use_bias=True)
# mul_attention_dense_layerV = tf.layers.Dense(d_model, name="mul_attention_dense_layerV", use_bias=True)

def ff(inputs, num_units, scope="positionwise_feedforward"):
    '''Position-wise feed-forward net. See section 3.3 of "Attention Is All You Need".

    inputs: A 3d tensor with shape of [N, T, C].
    num_units: A list of two integers [inner_dim, output_dim]; output_dim must
        equal C so the residual connection below is shape-compatible.
    scope: Optional scope for `variable_scope`.

    Returns:
      A 3d tensor with the same shape and dtype as inputs.
    '''
    # Fix: the scope argument was previously ignored; scope the layers so the
    # kernels get well-defined names and AUTO_REUSE sharing in graph mode.
    # NOTE(review): under eager execution tf.layers.dense still creates fresh
    # variables on every call — confirm weights actually persist across steps.
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        # Inner layer with ReLU non-linearity.
        outputs = tf.layers.dense(inputs, num_units[0], activation=tf.nn.relu, name="ff1")
        # Outer projection back to the model dimension.
        outputs = tf.layers.dense(outputs, num_units[1], name="ff2")
        # Residual connection (layer normalization intentionally omitted here,
        # matching the rest of this file).
        outputs += inputs

    return outputs

def mask(inputs, key_masks=None, type=None):
    """Masks paddings on keys or queries to inputs
    inputs: 3d tensor. (h*N, T_q, T_k)
    key_masks: 3d tensor. (N, 1, T_k)
    type: string. "key" | "future"

    Raises:
      ValueError: if `type` is not one of the recognized mask kinds.

    e.g.,
    >> inputs = tf.zeros([2, 2, 3], dtype=tf.float32)
    >> key_masks = tf.constant([[0., 0., 1.],
                                [0., 1., 1.]])
    >> mask(inputs, key_masks=key_masks, type="key")
    array([[[ 0.0000000e+00,  0.0000000e+00, -4.2949673e+09],
        [ 0.0000000e+00,  0.0000000e+00, -4.2949673e+09]],

       [[ 0.0000000e+00, -4.2949673e+09, -4.2949673e+09],
        [ 0.0000000e+00, -4.2949673e+09, -4.2949673e+09]],

       [[ 0.0000000e+00,  0.0000000e+00, -4.2949673e+09],
        [ 0.0000000e+00,  0.0000000e+00, -4.2949673e+09]],

       [[ 0.0000000e+00, -4.2949673e+09, -4.2949673e+09],
        [ 0.0000000e+00, -4.2949673e+09, -4.2949673e+09]]], dtype=float32)
    """
    # A very large negative number, added before the softmax so masked
    # positions get (near-)zero attention weight.
    padding_num = -2 ** 32 + 1
    if type in ("k", "key", "keys"):
        # key_masks marks padding positions (1 = padding). Tile it from N rows
        # up to h*N rows so it lines up with the head-stacked inputs.
        key_masks = tf.to_float(key_masks)
        key_masks = tf.tile(key_masks, [tf.shape(inputs)[0] // tf.shape(key_masks)[0], 1]) # (h*N, seqlen)
        # Broadcasting (h*N, 1, seqlen) against (h*N, T_q, seqlen) pushes every
        # padded key column to padding_num so padding never wins the softmax.
        key_masks = tf.expand_dims(key_masks, 1)  # (h*N, 1, seqlen)
        outputs = inputs + key_masks * padding_num  # (h*N, T_q, T_k)
    elif type in ("f", "future", "right"):
        # Lower-triangular matrix: position t may only attend to positions <= t.
        diag_vals = tf.ones_like(inputs[0, :, :])  # (T_q, T_k)
        tril = tf.linalg.LinearOperatorLowerTriangular(diag_vals).to_dense()  # (T_q, T_k)
        future_masks = tf.tile(tf.expand_dims(tril, 0), [tf.shape(inputs)[0], 1, 1])  # (N, T_q, T_k)

        paddings = tf.ones_like(future_masks) * padding_num
        outputs = tf.where(tf.equal(future_masks, 0), paddings, inputs)
    else:
        # Fix: previously this branch only printed a warning and then fell
        # through to `return outputs`, raising UnboundLocalError. Fail loudly.
        raise ValueError("Check if you entered type correctly!")

    return outputs

def label_smoothing(inputs, epsilon=0.1):
    '''Apply label smoothing to a (one-hot) probability tensor.

    inputs: tensor whose last dimension indexes the classes.
    epsilon: smoothing rate; mass moved toward the uniform distribution.

    Returns a tensor of the same shape with each target softened:
    (1 - epsilon) * inputs + epsilon / num_classes.
    '''
    num_classes = inputs.get_shape().as_list()[-1]
    smoothed = (1 - epsilon) * inputs
    return smoothed + (epsilon / num_classes)

def scaled_dot_product_attention(Q, K, V, key_masks,
                                 causality=False, dropout_rate=0.,
                                 training=True,
                                 scope="scaled_dot_product_attention"):
    '''Scaled dot-product attention. See section 3.2.1.

    Q: packed queries, 3d tensor (N, T_q, d_k).
    K: packed keys, 3d tensor (N, T_k, d_k).
    V: packed values, 3d tensor (N, T_k, d_v).
    key_masks: 2d tensor (N, key_seqlen) marking padded key positions.
    causality: if True, future positions are masked (decoder self-attention).
    dropout_rate / training: dropout applied to the attention weights.
    scope: Optional scope for `variable_scope`.

    Returns:
      3d tensor (N, T_q, d_v) of context vectors.
    '''
    # Fix: the scope argument was previously ignored.
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        d_k = Q.get_shape().as_list()[-1]
        # Step 1: similarity of every query with every key via dot product.
        outputs = tf.matmul(Q, tf.transpose(K, [0, 2, 1]))  # (N, T_q, T_k)
        # Scale by sqrt(d_k) to keep the logits in a softmax-friendly range.
        outputs /= d_k ** 0.5
        # Key masking (Q and K are already multiplied, so a single key-side
        # mask on the score matrix suffices).
        outputs = mask(outputs, key_masks=key_masks, type="key")
        # Future blinding for causal (decoder) attention.
        if causality:
            outputs = mask(outputs, type="future")
        # Step 2: normalize the scores into attention weights.
        outputs = tf.nn.softmax(outputs)
        # Dropout on the attention weights.
        outputs = tf.layers.dropout(outputs, rate=dropout_rate, training=training)
        # Step 3: weighted sum over the values (context vectors).
        outputs = tf.matmul(outputs, V)  # (N, T_q, d_v)
    return outputs

def positional_encoding(inputs,
                        maxlen,
                        masking=True,
                        scope="positional_encoding"):
    '''Sinusoidal Positional_Encoding. See 3.5
    inputs: 3d tensor. (N, T, E)
    maxlen: scalar. Must be >= T
    masking: Boolean. If True, padding positions are set to zeros.
    scope: Optional scope for `variable_scope`.

    returns
    3d tensor that has the same shape as inputs.
    '''

    E = inputs.get_shape().as_list()[-1] # static E: 512
    N, T = tf.shape(inputs)[0], tf.shape(inputs)[1] # dynamic N: 100, T: 57
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        # Position indices: each row is simply [0, 1, 2, ..., T-1], one row per sample.
        position_ind = tf.tile(tf.expand_dims(tf.range(T), 0), [N, 1]) # (N, T)

        # First part of the PE function: sin and cos argument
        """
        ref: https://www.zhihu.com/question/279523792
        在偶数位置，使用正弦编码，在奇数位置，使用余弦编码。
        这个编码公式的意思就是：给定词语的位置pos，我们可以把它编码成dmodel维的向量！
        也就是说，位置编码的每一个维度对应正弦曲线，波长构成了从2π到10000∗2π的等比序列。
        上面的位置编码是绝对位置编码。但是词语的相对位置也非常重要。这就是论文为什么要使用三角函数的原因！
        正弦函数能够表达相对位置信息，主要数学依据是以下两个公式：
        sin(α+β)=sinαcosβ+cosαsinβ
        cos(α+β)=cosαcosβ−sinαsinβ
        上面的公式说明，对于词汇之间的位置偏移k，PE(pos+k)可以表示成PE(pos)和PE(k)的组合形式，这就是表达相对位置的能力！
        以上就是PEPE的所有秘密。
        """
        # (Summary of the reference above: even dims use sine, odd dims cosine;
        # wavelengths form a geometric series from 2*pi to 10000*2*pi, and the
        # sin/cos addition identities let PE(pos+k) be expressed from PE(pos)
        # and PE(k), giving the encoding its relative-position power.)
        # Step 1: absolute-position arguments, one row per position, one column per feature.
        position_enc = np.array([
            [pos / np.power(10000, (i-i%2)/E) for i in range(E)]  # (i - i%2) pairs dims 2i and 2i+1 on the same frequency
            for pos in range(maxlen)])  # (maxlen, E)

        # Second part, apply the cosine to even columns and sin to odds.
        # Step 2: sin on even dims, cos on odd dims encodes relative position.
        position_enc[:, 0::2] = np.sin(position_enc[:, 0::2])  # dim 2i
        position_enc[:, 1::2] = np.cos(position_enc[:, 1::2])  # dim 2i+1
        position_enc = tf.convert_to_tensor(position_enc, tf.float32) # (maxlen, E)

        # Lookup: each of the T positions in a sample maps to its E-dim encoding,
        # so the result has the same (N, T, E) shape as inputs.
        outputs = tf.nn.embedding_lookup(position_enc, position_ind)

        # Masks: wherever inputs is exactly 0 (padding), keep 0 instead of the encoding.
        # NOTE(review): this compares float embedding values elementwise to 0 — confirm
        # that padding rows are exactly zero at this point.
        if masking:
            outputs = tf.where(tf.equal(inputs, 0), inputs, outputs)

        # The returned tensor is independent of the *values* of inputs; it is a
        # freshly generated position-information tensor of the same shape, which
        # the caller adds onto its embeddings.
        return tf.to_float(outputs)

def multihead_attention(queries, keys, values, key_masks,
                        num_heads=8,
                        dropout_rate=0,
                        training=True,
                        causality=False,
                        scope="multihead_attention"):
    '''Multi-head attention. See section 3.2.2.

    queries: 3d tensor (N, T_q, d_model).
    keys:    3d tensor (N, T_k, d_model).
    values:  3d tensor (N, T_k, d_model).
    key_masks: 2d tensor (N, key_seqlen) marking padded key positions.
    num_heads: number of attention heads h; d_model must be divisible by h.
    causality: if True, apply future masking (decoder self-attention).
    scope: Optional scope for `variable_scope`.

    Returns:
      3d tensor (N, T_q, d_model).

    Procedure: linearly project Q/K/V, split each into h heads, run scaled
    dot-product attention per head, concatenate the heads, add the residual.
    '''
    d_model = queries.get_shape().as_list()[-1]
    # Fix: the scope argument was previously ignored, so the distinct scopes
    # decode() passes ("self_attention" / "vanilla_attention") had no effect
    # and the projection layers were created unscoped.
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        # Linear projections.
        Q = tf.layers.dense(queries, d_model, use_bias=True)  # (N, T_q, d_model)
        K = tf.layers.dense(keys, d_model, use_bias=True)  # (N, T_k, d_model)
        V = tf.layers.dense(values, d_model, use_bias=True)  # (N, T_k, d_model)

        # Split into h heads and stack them along the batch axis; no attention
        # math happens yet, just reshaping to (h*N, T, d_model/h).
        Q_ = tf.concat(tf.split(Q, num_heads, axis=2), axis=0)  # (h*N, T_q, d_model/h)
        K_ = tf.concat(tf.split(K, num_heads, axis=2), axis=0)  # (h*N, T_k, d_model/h)
        V_ = tf.concat(tf.split(V, num_heads, axis=2), axis=0)  # (h*N, T_k, d_model/h)

        # Per-head scaled dot-product attention.
        outputs = scaled_dot_product_attention(Q_, K_, V_, key_masks, causality, dropout_rate, training)

        # Concatenate the heads back: (N, T_q, d_model).
        outputs = tf.concat(tf.split(outputs, num_heads, axis=0), axis=2)

        # Residual connection (layer normalization intentionally omitted here,
        # matching the rest of this file).
        outputs += queries
    return outputs

def encode(xs, training=True):
    '''Transformer encoder.

    xs: tuple (x, seqlens, sents1) where x holds the padded source token ids.
    training: passed through to the dropout layers.

    Returns (memory, sents1, src_masks): the encoded source representations,
    the raw source sentences, and the source padding mask.
    '''
    x, seqlens, sents1 = xs
    # True at padded positions (token id 0).
    src_masks = tf.math.equal(x, 0)  # (N, T1)

    # embedding
    enc = tf.nn.embedding_lookup(embeddings, x)  # (N, T1, d_model)
    # NOTE(review): with d_model=512 this multiplies enc by ~22.6; presumably
    # the sqrt(d_model) embedding scaling from the paper — confirm intent.
    enc = enc * (d_model ** 0.5)

    enc += positional_encoding(enc, maxlen1)
    enc = tf.layers.dropout(enc, dropout_rate, training=training)

    ## Blocks: self-attention followed by position-wise feed-forward, repeated.
    for i in range(num_blocks):
        enc = multihead_attention(queries=enc,
                                  keys=enc,
                                  values=enc,
                                  key_masks=src_masks,
                                  num_heads=num_heads,
                                  dropout_rate=dropout_rate,
                                  training=training,
                                  causality=False)

        enc = ff(enc, num_units=[d_ff, d_model])

    memory = enc
    return memory, sents1, src_masks

def decode(ys, memory, src_masks, training=True):
    '''Transformer decoder.

    ys: tuple (decoder_inputs, y, seqlens, sents2); decoder_inputs are the
        shifted target token ids, y the gold targets used for the loss.
    memory: encoder output (N, T1, d_model).
    src_masks: source padding mask from encode().
    training: passed through to the dropout layers.

    Returns (logits, y_hat, y, sents2).
    '''
    decoder_inputs, y, seqlens, sents2 = ys
    # True at padded target positions (token id 0).
    tgt_masks = tf.math.equal(decoder_inputs, 0)  # (N, T2)

    # embedding
    dec = tf.nn.embedding_lookup(embeddings, decoder_inputs)  # (N, T2, d_model)
    dec = dec * (d_model ** 0.5)  # scale

    dec += positional_encoding(dec, maxlen2)
    dec = tf.layers.dropout(dec, dropout_rate, training=training)

    for i in range(num_blocks):
    # Self-attention sub-layer: Q, K and V all come from the decoder itself.
        dec = multihead_attention(queries=dec,
                                  keys=dec,
                                  values=dec,
                                  key_masks=tgt_masks,
                                  num_heads=num_heads,
                                  dropout_rate=dropout_rate,
                                  training=training,
                                  causality=True, # future mask: each position may only attend to earlier ones (same principle GPT exploits)
                                  scope="self_attention")
        # Vanilla attention
        # Encoder-decoder attention: Q is the decoder state, K/V come from the encoder memory.
        dec = multihead_attention(queries=dec,
                                  keys=memory,
                                  values=memory,
                                  key_masks=src_masks,
                                  num_heads=num_heads,
                                  dropout_rate=dropout_rate,
                                  training=training,
                                  causality=False,
                                  scope="vanilla_attention")
        dec = ff(dec, num_units=[d_ff, d_model])

    # Weight tying: reuse the transposed embedding matrix as output projection.
    weights = tf.transpose(embeddings)  # (d_model, vocab_size)
    """
    Multi-Head Attention的输出维度大小为（N，T2，d_model），而weights的维度大小为（d_model，vocab_size）。
    所以这里用了tf.einsum()函数，这里该函数的第一个参数：'ntd,dk->ntk’表示的意思是，->代表乘法操作，ntd,dk->ntk表示两个矩阵相乘后结果的维度为ntk。
    这样就实现了两个维度不同矩阵的乘法。
————————————————
版权声明：本文为CSDN博主「JermeryBesian」的原创文章，遵循 CC 4.0 BY-SA 版权协议，转载请附上原文出处链接及本声明。
原文链接：https://blog.csdn.net/Urbanears/article/details/98662838
    """
    # (The note above explains that einsum multiplies (N, T2, d_model) by
    # (d_model, vocab_size) to get (N, T2, vocab_size), contracting d_model.)
    logits = tf.einsum('ntd,dk->ntk', dec, weights)  # (N, T2, vocab_size)
    # argmax over the vocab axis picks the highest-scoring token id per position.
    y_hat = tf.to_int32(tf.argmax(logits, axis=-1))

    return logits, y_hat, y, sents2

# --- Training: mini-batch loop over the full dataset, eager-mode autodiff ---
xs, ys, token2idx = get_full_data(fpath1, fpath2, maxlen1, maxlen2, vocab_fpath)
optimizer = tf.train.AdamOptimizer(0.0001)

batch_size = 50
# Ceiling division: number of batches needed to cover len(xs[0]) samples.
batch_num = int(((len(xs[0]) - 1)/batch_size) + 1)
# NOTE(review): this tick is never reset, so the time printed below is
# cumulative since program start, not per-epoch — confirm that is intended.
epoch_start_tick = time.time()
for epoch in range(0, 1000):
    for batch_index in range(batch_num):
        # Slice out the current batch; the last batch may be shorter.
        start_index = batch_index * batch_size
        end_index = min((batch_index + 1) * batch_size, len(xs[0]))
        xs_batch = (tf.convert_to_tensor(np.array(xs[0][start_index:end_index])), tf.convert_to_tensor(np.array(xs[1][start_index:end_index])), tf.convert_to_tensor(np.array(xs[2][start_index:end_index])))
        ys_batch = (tf.convert_to_tensor(np.array(ys[0][start_index:end_index])), tf.convert_to_tensor(np.array(ys[1][start_index:end_index])), tf.convert_to_tensor(np.array(ys[2][start_index:end_index])),
              tf.convert_to_tensor(np.array(ys[3][start_index:end_index])))

        # NOTE(review): encode()/decode() create Dense layers on every call; in
        # eager mode those weights may be recreated each step rather than
        # trained across steps — verify variables persist between iterations.
        with tf.GradientTape() as tape:
            memory, sents1, src_masks = encode(xs_batch)
            logits, preds, y, sents2 = decode(ys_batch, memory, src_masks)

            # train scheme
            y_ = label_smoothing(tf.one_hot(y, depth=vocab_size))
            ce = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=y_)
            # Inverse of the padding mask: 1 at positions holding a real token.
            nonpadding = tf.to_float(tf.not_equal(y, token2idx["<pad>"]))  # 0: <pad>
            # loss = tf.reduce_sum(ce)
            # Mean cross-entropy over real (non-padding) tokens only:
            # the numerator sums ce where nonpadding == 1, the denominator
            # counts the real tokens, and 1e-7 guards against division by zero.
            loss = tf.reduce_sum(ce * nonpadding) / (tf.reduce_sum(nonpadding) + 1e-7)

        # All variables touched under the tape (embeddings + layer kernels)
        # receive gradients and an Adam update.
        var_list = tape.watched_variables()
        gradients = tape.gradient(loss, var_list)
        optimizer.apply_gradients(zip(gradients, var_list))

        # xs = (x[0][start_index:end_index], x[1][start_index:end_index], x[2][start_index:end_index] for x in xs)
        # ys = (y[0][start_index:end_index], y[1][start_index:end_index], y[2][start_index:end_index], y[3][start_index:end_index] for y in ys)

        # Progress marker, one dot per batch.
        print(".", end='')
    if epoch % 2 == 0:
        # Reports the loss of the *last* batch of the epoch; accuracy is not computed.
        print("take %ss finish %s, epoch: %s, batch: %s, Train loss is : %f, Train accuracy is : %s" % (
        (time.time() - epoch_start_tick), datetime.datetime.now().isoformat(),
        epoch, batch_index, loss, "unknown"))

# optimizer.minimize(loss)
# train, need add big loop for batch & epoch
# with tf.GradientTape() as tape:
#     memory, sents1, src_masks = encode(xs)
#     logits, preds, y, sents2 = decode(ys, memory, src_masks)
#
#     # train scheme
#     y_ = label_smoothing(tf.one_hot(y, depth=vocab_size))
#     ce = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=y_)
#     loss = tf.reduce_sum(ce)
#
# var_list = tape.watched_variables()
# gradients = tape.gradient(loss, var_list)
# optimizer.apply_gradients(zip(gradients, var_list))

# --- Eval: greedy autoregressive decoding, one token per iteration ---
# y is only used for the loss (gold reference); decoder_inputs is the real
# decoder input, and here it is generated by the model itself rather than fed
# from outside.
decoder_inputs, y, y_seqlen, sents2 = ys
# Start every sentence with the <s> token.
decoder_inputs = tf.ones((tf.shape(xs[0])[0], 1), tf.int32) * token2idx["<s>"]
ys = (decoder_inputs, y, y_seqlen, sents2)

memory, sents1, src_masks = encode(xs, False)
# Why decode word by word instead of feeding the whole target sentence:
# each next token's probability depends on the previously emitted token,
# making inference inherently sequential (like an RNN) — whereas training
# can run the full sequence at once, which is where the efficiency comes from.
for _ in range(maxlen2):
    logits, y_hat, y, sents2 = decode(ys, memory, src_masks, False)
    # Stop once the model emits only padding (end of sentence reached).
    # NOTE(review): tf.reduce_sum(y_hat, 1) is a per-sample tensor; comparing
    # it to a scalar and using it in `if` only works for batch size 1 in eager
    # mode — confirm this is run with a single sentence.
    if tf.reduce_sum(y_hat, 1) == token2idx["<pad>"]: break

    # Append the newly predicted tokens and feed them back in as decoder input.
    _decoder_inputs = tf.concat((decoder_inputs, y_hat), 1)
    ys = (_decoder_inputs, y, y_seqlen, sents2)
# x = tf.Variable(tf.zeros([20, 50]))  # mimic 20 sentences, with max length 50 words
# # x_t = tf.transpose(x)
# decoder_inputs = tf.ones((tf.shape(x)[0], 1), tf.int32) * 2 # self.token2idx["<s>"]  # mimic <s> stand for id=2
# print("done")