import paddle
import numpy as np
from tqdm import tqdm


class FeedFroward(paddle.nn.Layer):
    """Two-layer position-wise feed-forward block with a cosine activation.

    Projects hidden_dim -> hidden_dim // 2 -> hidden_dim using bias-free
    linear layers.  (The class name keeps the original 'Froward' spelling
    for backward compatibility with existing callers.)
    """

    def __init__(self, hidden_dim):
        super(FeedFroward, self).__init__()
        bottleneck = hidden_dim // 2
        self.fc1 = paddle.nn.Linear(hidden_dim, bottleneck, bias_attr=False)
        self.fc2 = paddle.nn.Linear(bottleneck, hidden_dim, bias_attr=False)

    def forward(self, feed_x):
        # Original author's note: sin and cos behave the same here; silu
        # and tanh were the next-best activations tried.
        hidden = paddle.cos(self.fc1(feed_x))
        return self.fc2(hidden)


class QKS(paddle.nn.Layer):
    """Token-level model: embedding -> stack of ReLeft layers -> vocab logits.

    forward(x, l_state) returns (logits, state); only the final time step
    of the recurrent state is returned so the caller can carry it into the
    next chunk of the sequence.
    """

    def __init__(self, voc_size, hidden_dim, n_layers):
        super(QKS, self).__init__()

        self.emx = paddle.nn.Embedding(voc_size, hidden_dim)
        self.qlk_list = paddle.nn.LayerList(
            [ReLeft(hidden_dim) for _ in range(n_layers)])

        self.out_layer = paddle.nn.Linear(hidden_dim, voc_size)
        # Unused in forward(), but kept so existing checkpoints/state dicts
        # stay compatible.
        self.norm1 = paddle.nn.LayerNorm(hidden_dim)

    def forward(self, x, l_state):
        hidden = self.emx(x)

        for layer in self.qlk_list:
            hidden, l_state = layer(hidden, l_state)

        logits = self.out_layer(hidden)
        # Hand back only the last step of the state for the next chunk.
        return logits, l_state[:, :, -1:]


class ReLeft(paddle.nn.Layer):
    """Linear-attention-style layer carrying a recurrent key state.

    forward(x, l_state) -> (out, new_state): combines each position's query
    with a causal running sum of the keys, then applies a feed-forward
    block.  Assumes x is [batch, seq_len, hidden_dim] (QKS feeds it token
    embeddings of that shape).
    """

    def __init__(self, hidden_dim):
        super(ReLeft, self).__init__()

        self.emk = paddle.nn.Linear(hidden_dim, hidden_dim)  # key projection
        self.emq = paddle.nn.Linear(hidden_dim, hidden_dim)  # query projection
        # self.dr=paddle.nn.Dropout(0.2)
        self.qlk = FeedFroward(hidden_dim)

        # One shared LayerNorm, reused in l_forward and twice in forward.
        self.norm1 = paddle.nn.LayerNorm(hidden_dim)

    def forward(self, x, l_state):
        # Project the input into query and key spaces.

        q = self.emq(x)
        k = self.emk(x)

        seq_len = q.shape[1]

        # triu(ones).T is lower-triangular: position j contributes to
        # output position i only when j <= i (causal masking).
        mask = paddle.triu(paddle.ones([seq_len, seq_len])).T

        oll, l_state = self.l_forward(q, k, l_state, mask, seq_len)

        # Residual connection, then normalization.
        x = x + oll

        x = self.norm1(x)
        outl = self.qlk(x)
        # Original author's note: normalizing again keeps x stable.
        outl = self.norm1(outl)

        return outl, l_state

    def l_forward(self, q, k, l_state, mask, seq_len):
        """Causal prefix-sum core.

        q, k: [batch, seq_len, hidden].  l_state broadcasts against
        [batch, hidden, seq_len] -- zeros([1]) on the first call, the
        previous chunk's last-step state afterwards.
        """
        bsz = q.shape[0]
        # [b, s, h] -> [b, s, h, 1]; adding zeros([s]) broadcasts the last
        # axis to seq_len; transpose([0, 2, 1, 3]) yields [b, h, s, s].
        k = (k.reshape([bsz, seq_len, -1, 1]) + paddle.zeros([seq_len])).transpose([0, 2, 1, 3])
        # mask.T zeroes out future positions; summing over axis -2 gives a
        # causal running sum of keys, shape [b, h, s], offset by the
        # carried state.
        l_k = l_state + paddle.sum(k * mask.T, -2)
        l_state = l_k
        l_k = l_k.transpose([0, 2, 1])  # back to [b, s, h] to match q
        qlk = self.norm1(q + l_k)

        return qlk, l_state


def emheading_train_and_sample():
    """Train QKS on random token data, then run it once in eval mode.

    Demonstration driver: trains with truncated BPTT over 256-token chunks
    (gradients accumulate across chunks, and the optimizer steps once per
    epoch), then replays the sequence under eval() to exercise inference.
    """
    print("*" * 100)

    # net = ReNetAll(12935, 256, 8)
    net = QKS(12935, 128, 16)

    # net.eval()
    x = paddle.to_tensor([
        np.random.randint(1, 11240, 5120),
        np.random.randint(1, 11240, 5120),
    ], dtype='int64')

    # Simulated training.

    loss_f = paddle.nn.CrossEntropyLoss()

    opt = paddle.optimizer.Adam(parameters=net.parameters(), learning_rate=0.0003)
    bar = tqdm(range(1260))
    for epoch in bar:
        state_l = paddle.zeros([1])

        for i in range(0, x.shape[-1], 256):
            j = i + 256

            # Round-tripping the state through numpy detaches it from the
            # graph, so gradients do not flow across chunk boundaries
            # (truncated BPTT).
            outl, state_l = net(x[:, i + 1:j - 1], paddle.to_tensor(state_l.numpy()))
            # Next-token targets: inputs shifted one step to the right.
            loss = loss_f(outl, x[:, i + 2:j])

            bar.set_description("epoch---{}--loss--{:.5f}".format(epoch, loss.item()))

            loss.backward()
        # Single optimizer step per epoch over the accumulated gradients.
        opt.step()
        opt.clear_grad()
    # Decode / validate.
    net.eval()
    state_l = paddle.zeros([1])

    for i in range(0, x.shape[-1], 256):
        j = i + 256

        # Bug fix: QKS.forward takes (x, l_state) and returns two values;
        # the original passed three arguments and unpacked three results,
        # which raised a TypeError at runtime.
        outl, state_l = net(x[:, i:j - 1], paddle.to_tensor(state_l.numpy()))

# Run model training and prediction.
if __name__ == '__main__':
    emheading_train_and_sample()
