import numpy as np
from numpy.random import randn


"""
在做深度学习的理解的时候，将模型各个环节的输出的维度梳理清楚，
能够前后相接的正常运行，那么就会对模型的运行过程有清晰的理解
我们在此标记一下各个向量的维度：
原始输入:                           sentence       -> (10,) # 10个token 
经过embedding层:                    embedding      -> (10, 20) # embedding有20个维度
经过positional embedding层:         pos_embedding  -> (10, 20) # positional embedding和embedding保持相同的维度
embedding+positional embedding:    X              -> (10, 20)
转换矩阵:                            W_q,W_k,W_v:   -> (20, 15) 
对X转换后的向量                       Q,K,V          -> (10, 15) 
attention计算后再线性变换               Z            -> (10, 20) # 在此处恢复了最初embedding的维度
残差                                 X = X + Z      -> (10, 20)
add and layer normalization          X             -> (10, 20)
全连接层                              X             -> (10, 20) -> (10, 40) -> (10, 20)
该encoder结束，将X传入下一个encoder


目前来看，由于这几年的代码积累，已经可以写出自己想实现的大部分程序。
"""


def gen_kvq(s, W_q, W_k, W_v):
    """Project the input matrix with each weight matrix; return (q, k, v)."""
    return s.dot(W_q), s.dot(W_k), s.dot(W_v)


def softmax(X):
    """Numerically stable softmax along the last axis.

    Bug fix: the original normalized by the sum over the WHOLE array, so for
    a 2-D attention-score matrix the individual rows did not sum to 1.
    Normalizing along the last axis yields a proper per-row distribution,
    and subtracting the row max avoids overflow in ``exp``.  ``-inf`` mask
    values map to an exact zero weight (``exp(-inf) == 0``).
    """
    shifted = X - np.max(X, axis=-1, keepdims=True)
    e = np.exp(shifted)
    return e / np.sum(e, axis=-1, keepdims=True)


def get_embedding(sentence, dim):
    """Return a random (len(sentence), dim) embedding matrix, one row per token."""
    rows = [randn(dim) for _ in sentence]
    return np.array(rows)


def get_pos_embedding(vecs):
    """Sinusoidal positional encoding with the same shape as ``vecs``.

    PE(pos, 2i)   = sin(pos / 10000^(2i/dim))
    PE(pos, 2i+1) = cos(pos / 10000^(2i/dim))

    Bug fix: the original used ``10000^(k/dim)`` for every index k, so each
    odd dimension had a different frequency from its even partner; the
    sin/cos pair at dims 2i and 2i+1 must share the rate ``10000^(2i/dim)``.
    """
    length, dim = vecs.shape
    # pos[p, :] == p for every column, so division broadcasts per dimension.
    pos = np.tile(np.arange(length).reshape(-1, 1), (1, dim))
    print("pos.shape:", pos.shape)
    d_indice = np.arange(dim)
    d_mask_even = (d_indice % 2 == 0).astype(int)
    d_mask_odd = (d_indice % 2 == 1).astype(int)
    # Shared angle rate for each (sin, cos) pair: exponent 2*(k//2)/dim.
    i_list = np.power(10000.0, (2 * (d_indice // 2)) / dim)
    pe = np.sin(pos / i_list) * d_mask_even + np.cos(pos / i_list) * d_mask_odd
    print("pe.shape:", pe.shape)
    return pe


def gat_layer_norm(X):
    """Row-wise layer normalization: (X - mean) / sqrt(var + eps).

    Bug fix: the original divided by sqrt(sum(X**2) + eps) — the uncentered
    L2 norm of each row — rather than the standard deviation, so the output
    was neither zero-mean nor unit-variance.  (Name kept as-is, typo and
    all, to preserve the public interface; it appears unused in this file.)
    """
    eps = 1e-8
    avg = np.mean(X, axis=1, keepdims=True)
    var = np.mean(np.power(X - avg, 2), axis=1, keepdims=True)
    return (X - avg) / np.sqrt(var + eps)


class Linear:
    """Affine map ``y = X @ W + b`` with randomly initialized parameters."""

    def __init__(self, input_dim, output_dim):
        self.W = randn(input_dim, output_dim)
        self.b = randn(output_dim)

    def __call__(self, X):
        # Bias broadcasts across the row dimension of X.
        return np.matmul(X, self.W) + self.b


def relu(X):
    """Element-wise rectified linear unit: max(X, 0)."""
    return np.maximum(X, 0)


class Encoder:
    """Single Transformer encoder layer: self-attention and a position-wise
    feed-forward network, each followed by a residual connection and layer
    normalization."""

    def __init__(self, embedding_dim=20, qkv_dim=15):
        self.embedding_dim = embedding_dim
        self.qkv_dim = qkv_dim
        # Projections from the model dimension to the attention dimension.
        self.W_q = randn(embedding_dim, qkv_dim)
        self.W_k = randn(embedding_dim, qkv_dim)
        self.W_v = randn(embedding_dim, qkv_dim)
        # Projects the attention output back to the model dimension.
        self.W_z = randn(qkv_dim, embedding_dim)
        # Feed-forward network: expand to 2x, then contract.
        self.linear_a = Linear(embedding_dim, 2 * embedding_dim)
        self.linear_b = Linear(2 * embedding_dim, embedding_dim)
        self.eps = 1e-8

    def gen_kvq(self, s):
        """Project the input into query, key and value matrices."""
        Q = s.dot(self.W_q)
        K = s.dot(self.W_k)
        V = s.dot(self.W_v)
        return Q, K, V

    def attention(self, X):
        """Scaled dot-product self-attention followed by the output projection."""
        Q, K, V = self.gen_kvq(X)
        # Scale by sqrt(d_k) to keep logits in a softmax-friendly range.
        scores = Q.dot(K.T) / np.sqrt(self.qkv_dim)
        Z = softmax(scores).dot(V)
        # Map back to the model (embedding) dimension.
        return Z.dot(self.W_z)

    def add_and_norm(self, X, out):
        """Residual connection followed by layer normalization.

        Bug fix: the original divided by sqrt(sum(X**2) + eps) (an uncentered
        L2 norm), not the standard deviation; this now computes the standard
        per-row (X - mean) / sqrt(var + eps) layer norm.
        """
        X = X + out
        avg = np.mean(X, axis=1, keepdims=True)
        var = np.mean(np.power(X - avg, 2), axis=1, keepdims=True)
        return (X - avg) / np.sqrt(var + self.eps)

    def __call__(self, X):
        # Self-attention sub-layer with residual + norm.
        Z = self.attention(X)
        X = self.add_and_norm(X, Z)
        # Feed-forward sub-layer with residual + norm.
        X_a = relu(self.linear_a(X))
        X_b = self.linear_b(X_a)
        return self.add_and_norm(X, X_b)


class Encoders:
    """A stack of encoder layers applied in sequence."""

    def __init__(self, encoder_num, embedding_dim=20, qkv_dim=15):
        self.encoders = [Encoder(embedding_dim, qkv_dim) for _ in range(encoder_num)]

    def __call__(self, X):
        # Each layer consumes the previous layer's output.
        out = X
        for layer in self.encoders:
            out = layer(out)
        return out


class Decoder:
    """Single Transformer decoder layer: masked self-attention, cross-attention
    over the encoder outputs, and a feed-forward network, with residual + layer
    normalization around the self-attention and feed-forward sub-layers."""

    def __init__(self, embedding_dim=20, qkv_dim=20):
        # (Removed the duplicate re-assignments of embedding_dim/qkv_dim that
        # the original repeated at the end of __init__.)
        self.embedding_dim = embedding_dim
        self.qkv_dim = qkv_dim
        # Projections for masked self-attention.
        self.W_q = randn(embedding_dim, qkv_dim)
        self.W_k = randn(embedding_dim, qkv_dim)
        self.W_v = randn(embedding_dim, qkv_dim)
        # Projects the attention output back to the model dimension.
        self.W_z = randn(qkv_dim, embedding_dim)
        # Feed-forward network: expand to 2x, then contract.
        self.linear_a = Linear(embedding_dim, 2 * embedding_dim)
        self.linear_b = Linear(2 * embedding_dim, embedding_dim)
        self.eps = 1e-8

    def gen_kvq(self, s):
        """Project the input into query, key and value matrices."""
        Q = s.dot(self.W_q)
        K = s.dot(self.W_k)
        V = s.dot(self.W_v)
        return Q, K, V

    def get_masked(self, X):
        """Causal mask: entries above the diagonal become -inf so that the
        softmax assigns them zero weight.

        Bug fix: the original did np.tril(X) then X[X == 0] = -inf, which
        also clobbered genuine zero-valued scores inside the lower triangle
        (and mutated X in place).  Masking by position only hides future
        positions, and the input is left untouched.
        """
        keep = np.tril(np.ones(X.shape, dtype=bool))
        return np.where(keep, X, -np.inf)

    def attention_with_encoder(self, X, encoder_outputs):
        """Cross-attention: queries from the decoder state, keys and values
        from the encoder outputs.

        NOTE(review): no W_q/W_k/W_v projection is applied here, so this only
        works when qkv_dim == embedding_dim (as configured in main()).
        """
        Q = X
        K = encoder_outputs
        V = encoder_outputs
        scores = Q.dot(K.T) / np.sqrt(self.qkv_dim)
        Z = softmax(scores).dot(V)
        return Z.dot(self.W_z)

    def attention(self, X):
        """Masked scaled dot-product self-attention with output projection."""
        Q, K, V = self.gen_kvq(X)
        scores = Q.dot(K.T) / np.sqrt(self.qkv_dim)
        scores = self.get_masked(scores)
        Z = softmax(scores).dot(V)
        return Z.dot(self.W_z)

    def add_and_norm(self, X, out):
        """Residual connection followed by layer normalization.

        Bug fix: the original divided by sqrt(sum(X**2) + eps) (an uncentered
        L2 norm), not the standard deviation; this now computes the standard
        per-row (X - mean) / sqrt(var + eps) layer norm.
        """
        X = X + out
        avg = np.mean(X, axis=1, keepdims=True)
        var = np.mean(np.power(X - avg, 2), axis=1, keepdims=True)
        return (X - avg) / np.sqrt(var + self.eps)

    def __call__(self, encoder_inputs, decoder_inputs):
        X = decoder_inputs
        # Masked self-attention sub-layer with residual + norm.
        X = self.add_and_norm(X, self.attention(X))
        # Cross-attention over the encoder outputs (no residual here,
        # matching the original control flow).
        X = self.attention_with_encoder(X, encoder_inputs)
        # Feed-forward sub-layer with residual + norm.
        X_a = relu(self.linear_a(X))
        X_b = self.linear_b(X_a)
        return self.add_and_norm(X, X_b)


class Decoders:
    """A stack of decoder layers followed by a vocabulary projection."""

    def __init__(self, decoder_num, embedding_dim=20, qkv_dim=20, vocab_dim=100):
        self.decoders = [Decoder(embedding_dim, qkv_dim) for _ in range(decoder_num)]
        self.vocab_dim = vocab_dim
        # Final projection from the model dimension onto the vocabulary.
        self.linear = Linear(embedding_dim, vocab_dim)

    def __call__(self, encoder_inputs, decoder_inputs):
        state = decoder_inputs
        for layer in self.decoders:
            state = layer(encoder_inputs, state)
        # Vocabulary logits turned into probabilities.
        return softmax(self.linear(state))


def main():
    """Run one forward pass through the toy encoder-decoder Transformer."""
    embedding_dim = 20
    qkv_dim = 15
    vocab_dim = 40

    # ---- encoder side ----
    sentence = "I will go to a supermarket to buy some fruits".split()
    # Encoder input: token embeddings plus positional embeddings.
    embedding = get_embedding(sentence, embedding_dim)
    pos_embedding = get_pos_embedding(embedding)
    X = embedding + pos_embedding

    encoder_num = 6
    encoders = Encoders(encoder_num, embedding_dim, qkv_dim)
    encoder_outputs = encoders(X)
    print("r.shape:", encoder_outputs.shape)

    # ---- decoder side ----
    sentence_chn = "我 打算 去 超市 买 一些 水果".split()
    # Decoder input built the same way as the encoder input.
    decoder_embedding = get_embedding(sentence_chn, embedding_dim)
    decoder_pos_embedding = get_pos_embedding(decoder_embedding)
    decoder_inputs = decoder_embedding + decoder_pos_embedding

    decoder_num = 6
    # Cross-attention uses the encoder outputs directly as K and V, so the
    # decoder's qkv_dim must equal embedding_dim here.
    decoders = Decoders(decoder_num, embedding_dim, qkv_dim=embedding_dim, vocab_dim=vocab_dim)
    decoder_output = decoders(encoder_outputs, decoder_inputs)
    print("decoder_output.shape:", decoder_output.shape)
    print("final:", decoder_output)


# Run the demo forward pass only when executed as a script.
if __name__ == "__main__":
    main()





