import paddle
import paddle.nn as nn


class FeedForward(nn.Layer):
    """Position-wise MLP: project down to hidden_dim // 2, apply GELU,
    then project back up to hidden_dim. Both linears are bias-free.
    """

    def __init__(self, hidden_dim):
        super(FeedForward, self).__init__()
        # Sub-layer names are part of the state-dict interface; keep them.
        self.fc_one = nn.Linear(hidden_dim, hidden_dim // 2, bias_attr=False)
        self.fc_two = nn.Linear(hidden_dim // 2, hidden_dim, bias_attr=False)
        self.gelu = nn.GELU()

    def forward(self, feed_x):
        # down-project -> GELU -> up-project, all in one expression chain
        bottleneck = self.gelu(self.fc_one(feed_x))
        return self.fc_two(bottleneck)


class Block(nn.Layer):
    """Mixing block used by CvFo.

    Expects input of shape [batch, seq, 2 * hidden_dim]; seq is assumed to be
    divisible by block_size (mask_data reshapes by block_size) — TODO confirm
    against callers.
    """

    def __init__(self, hidden_dim, block_size):
        super(Block, self).__init__()
        # Normalizes the 2*hidden_dim input before the three branches.
        self.layer_nor_x = nn.LayerNorm(2 * hidden_dim)

        # Three parallel down-projections from 2*hidden_dim to hidden_dim.
        self.layer_a = nn.Linear(2 * hidden_dim, hidden_dim, bias_attr=True)
        self.layer_b = nn.Linear(2 * hidden_dim, hidden_dim, bias_attr=True)
        self.layer_c = nn.Linear(2 * hidden_dim, hidden_dim, bias_attr=True)
        # NOTE(review): deliberate(?) double assignment — layer_abc and
        # pooled_layer are the SAME MaxPool1D module. With kernel_size=3 (and
        # default stride == kernel_size) it collapses the concat of the three
        # hidden_dim branches (3*hidden_dim) back down to hidden_dim.
        self.layer_abc = self.pooled_layer = nn.MaxPool1D(kernel_size=3)

        self.layer_nor_a = nn.LayerNorm(hidden_dim)
        self.layer_nor_b = nn.LayerNorm(hidden_dim)
        self.layer_nor_c = nn.LayerNorm(hidden_dim)
        self.mlp = FeedForward(hidden_dim)
        self.block_size = block_size

    def mask_data(self, a):
        # Causal-style mixing via upper-triangular matmuls.
        # NOTE(review): identical to OneBlock.mask_data — candidate for a
        # shared helper.
        # Upper-triangular ones matrix; oversized so slices below always fit.
        mask = paddle.triu(paddle.ones(
            [a.shape[1] // self.block_size + self.block_size, a.shape[1] // self.block_size + self.block_size]))
        # [batch, seq, dim] -> [batch, block_size, seq // block_size, dim]
        a = a.reshape([a.shape[0], self.block_size, -1, a.shape[-1]])

        # Right-multiplying by triu ones accumulates prefix-style sums along
        # the block axis (now the last axis after the transpose).
        a = a.transpose([0, 3, 2, 1]) @ mask[:self.block_size, :self.block_size]
        # Single row of the mask: a per-position reduction over the block axis.
        o = a @ mask[self.block_size - 1:self.block_size, :self.block_size].T
        # Strictly-upper slice: accumulate across the seq // block_size axis.
        o = o.squeeze(-1) @ mask[1:o.shape[2] + 1, :o.shape[2]]
        # Add the cross-chunk accumulation back onto every block position.
        a = a + o.unsqueeze(-1)
        a = a.transpose([0, 3, 2, 1])
        # Restore [batch, seq, dim].
        a = a.reshape([a.shape[0], -1, a.shape[-1]])
        return a

    def forward(self, x):
        x = self.layer_nor_x(x)

        # NOTE(review): mask_data has no parameters, so a, b and c start as
        # identical tensors; they only diverge at the linear layers below.
        a = self.mask_data(x)
        b = self.mask_data(x)
        c = self.mask_data(x)
        a = self.layer_a(a)
        b = self.layer_b(b)
        c = self.layer_c(c)

        a = self.layer_nor_a(a)
        b = self.layer_nor_b(b)
        c = self.layer_nor_c(c)

        # [batch, seq, 3*hidden_dim]; max-pool collapses it to hidden_dim.
        abc = paddle.concat([a, b, c], -1)

        # NOTE(review): layer_nor_a and layer_nor_b are re-applied here (norm
        # sharing with the branches above) — confirm this is intentional.
        x = self.layer_nor_a(self.layer_abc(abc))

        # Residual FeedForward sub-layer. Output is hidden_dim wide — half the
        # 2*hidden_dim input width; callers re-widen (see CvFo.q_star).
        x = self.layer_nor_b(self.mlp(x) + x)
        return x


class OneBlock(nn.Layer):
    """Standalone gated block: two stacked linears with a sigmoid gate,
    followed by the triangular mask_data mixing and a LayerNorm.

    NOTE(review): not referenced anywhere in this file — possibly used by
    other modules or kept for experiments; verify before removing.
    """

    def __init__(self, hidden_dim, block_size=8):
        super(OneBlock, self).__init__()
        self.layer0 = paddle.nn.Linear(hidden_dim, hidden_dim)
        self.layer1 = paddle.nn.Linear(hidden_dim, hidden_dim)
        self.layer2 = paddle.nn.Linear(2 * hidden_dim, hidden_dim)
        self.block_size = block_size
        self.layer_nor_x = nn.LayerNorm(hidden_dim)

    def forward(self, x):
        x0 = self.layer0(x)
        x1 = self.layer1(x0)
        # GLU-style gate: combine [x1, x0], gated by sigmoid(x0).
        x2 = self.layer2(paddle.concat([x1, x0], -1)) * paddle.nn.functional.sigmoid(x0)
        x2 = self.layer_nor_x(self.mask_data(x2))
        return x2

    def mask_data(self, a):
        # NOTE(review): verbatim copy of Block.mask_data — see the commentary
        # there; candidate for a shared module-level helper.
        # Oversized upper-triangular ones matrix; slices below always fit.
        mask = paddle.triu(paddle.ones(
            [a.shape[1] // self.block_size + self.block_size, a.shape[1] // self.block_size + self.block_size]))
        # [batch, seq, dim] -> [batch, block_size, seq // block_size, dim];
        # assumes seq is divisible by block_size — TODO confirm.
        a = a.reshape([a.shape[0], self.block_size, -1, a.shape[-1]])

        # Triangular matmul accumulates along the block axis.
        a = a.transpose([0, 3, 2, 1]) @ mask[:self.block_size, :self.block_size]
        # Per-position reduction over the block axis via a single mask row.
        o = a @ mask[self.block_size - 1:self.block_size, :self.block_size].T
        # Strictly-upper slice accumulates across chunks.
        o = o.squeeze(-1) @ mask[1:o.shape[2] + 1, :o.shape[2]]
        a = a + o.unsqueeze(-1)
        a = a.transpose([0, 3, 2, 1])
        # Restore [batch, seq, dim].
        a = a.reshape([a.shape[0], -1, a.shape[-1]])
        return a


class CvFo(nn.Layer):
    """Token model: an embedding, a stack of Block layers with a residual
    re-projection after each, and a sinusoidal "star"-conditioned decoder.

    forward() and sample() previously duplicated ~25 identical lines; the
    shared pipeline now lives in the private helpers _encode/_decode so the
    two public methods stay byte-for-byte behaviorally identical while the
    logic exists once.
    """

    def __init__(self, voc_size, hidden_dim, row_layers, block_size):
        super(CvFo, self).__init__()
        self.em = nn.Embedding(voc_size, hidden_dim)
        self.block = nn.LayerList([Block(hidden_dim, block_size) for _ in range(row_layers)])
        # Re-widens [block_out, residual] (3*hidden_dim) back to 2*hidden_dim.
        self.q_star = nn.LayerList(
            [nn.Linear(3 * hidden_dim, 2 * hidden_dim, bias_attr=False) for _ in range(row_layers)])
        self.out_layer = nn.Linear(2 * hidden_dim, voc_size, bias_attr=False)

        self.t_layer = FeedForward(2 * hidden_dim)
        # NOTE(review): s_layer is never used in this file; kept so the
        # parameter/state-dict layout is unchanged — confirm before removing.
        self.s_layer = FeedForward(2 * hidden_dim)

        self.star_label = paddle.nn.Linear(1, 2 * hidden_dim, bias_attr=False)
        self.to_s0 = FeedForward(2 * hidden_dim)
        self.to_s1 = FeedForward(2 * hidden_dim)
        self.to_s3 = FeedForward(2 * hidden_dim)

    def _encode(self, sx, t):
        """Embed both inputs, flatten channels into the feature axis, and run
        the Block stack with a residual re-projection after each layer.

        Both sx and t are expected rank-3 ([batch, channels, seq]) so the
        embedded tensor is rank-4 — TODO confirm against callers.
        """
        sx = self.em(sx)
        t = self.em(t)

        # [b, ch, seq, dim] -> [b, seq, dim * ch] (channels folded into features)
        sx = sx.transpose([0, 2, 3, 1]).reshape([sx.shape[0], sx.shape[2], -1])
        t = t.transpose([0, 2, 3, 1]).reshape([t.shape[0], t.shape[2], -1])
        q_star = sx  # residual: the original embedded sequence
        for block, star in zip(self.block, self.q_star):
            sx = block(sx)
            sx = star(paddle.concat([sx, q_star], -1))
        return sx, t

    def _decode(self, star, t):
        """Sinusoidal decoder: three stacked sin(t @ proj(star) + prev) steps,
        then the vocabulary projection."""
        t = self.t_layer(t)
        # Original (translated) notes:
        #   training feeds abc and t0,1,2 into the model
        #   star-ify: sx *a *b *c predict *C *C *C -> one loss
        #   constructed parameters:
        #     -- *a -- *b -- *c  * * *  loss
        #     t0,t1,t2   * * *   b c d  loss
        #     cat(*, t) - label - loss  trains the model
        sx1 = paddle.sin(t @ self.to_s0(star))
        sx1 = paddle.sin(t @ self.to_s1(star) + sx1)
        sx1 = paddle.sin(t @ self.to_s3(star) + sx1)
        return self.out_layer(sx1)

    def forward(self, sx, t):
        """Training path: returns (logits, encoded sx, star_label).

        star_label is the raw last-position encoding tiled along t's sequence
        axis (presumably a training target — verify against the loss code).
        """
        sx, t = self._encode(sx, t)
        star = sx[:, -1:]  # last position of the encoded sequence
        star_label = paddle.concat([star] * t.shape[1], 1)
        # [b, 1, 2h] -> [b, 2h, 1] -> Linear(1, 2h) -> [b, 2h, 2h]
        star = self.star_label(star.transpose([0, 2, 1]))
        out = self._decode(star, t)
        return out, sx, star_label

    def sample(self, sx, t):
        """Inference path: same pipeline as forward() but returns only the
        logits (no star_label)."""
        sx, t = self._encode(sx, t)
        star = sx[:, -1:]
        star = self.star_label(star.transpose([0, 2, 1]))
        return self._decode(star, t)


if __name__ == '__main__':
    # Smoke test. Original code called net(...) with a single argument even
    # though CvFo.forward requires (sx, t), and stashed t as an attribute
    # (net.t = ...) that forward never reads — it raised a TypeError.
    # Both inputs must be rank-3 ([batch, channels, seq]) because forward
    # transposes the rank-4 embedded tensor; seq must be divisible by
    # block_size (8) and channels must be 2 so that the flattened feature
    # width equals 2 * hidden_dim.
    net = CvFo(512, 412, 8, 8)
    sx = paddle.randint(0, 123, [2, 2, 90 * 8])
    t = paddle.randint(0, 123, [2, 2, 90 * 8])
    out, enc, star_label = net(sx, t)
    print(out.shape)
