import paddle
import paddle.nn as nn



class FeedForward(nn.Layer):
    """Position-wise two-layer MLP with a GELU bottleneck.

    Projects hidden_dim -> hidden_dim // 2 -> hidden_dim; both linear
    projections are bias-free, so the block preserves the feature size.
    """

    def __init__(self, hidden_dim):
        super(FeedForward, self).__init__()
        bottleneck = hidden_dim // 2
        self.fc_one = nn.Linear(hidden_dim, bottleneck, bias_attr=False)
        self.fc_two = nn.Linear(bottleneck, hidden_dim, bias_attr=False)
        self.gelu = nn.GELU()

    def forward(self, feed_x):
        """Return fc_two(GELU(fc_one(feed_x))); shape is unchanged."""
        squeezed = self.gelu(self.fc_one(feed_x))
        return self.fc_two(squeezed)


class Block(nn.Layer):
    """Mixer block: masked causal mixing -> three parallel projections ->
    max-pool fusion -> residual feed-forward.

    Expects input of shape [batch, seq, 2 * hidden_dim] with seq a
    multiple of block_size (required by mask_data's reshape); returns
    [batch, seq, hidden_dim].
    """

    def __init__(self, hidden_dim, block_size):
        super(Block, self).__init__()
        self.layer_nor_x = nn.LayerNorm(2 * hidden_dim)

        self.layer_a = nn.Linear(2 * hidden_dim, hidden_dim, bias_attr=True)
        self.layer_b = nn.Linear(2 * hidden_dim, hidden_dim, bias_attr=True)
        self.layer_c = nn.Linear(2 * hidden_dim, hidden_dim, bias_attr=True)
        # Pools the concatenated [a, b, c] features (3 * hidden_dim) back
        # down to hidden_dim along the last axis.
        self.layer_abc = nn.MaxPool1D(kernel_size=3)
        # Legacy alias: the original code bound both names to one layer.
        self.pooled_layer = self.layer_abc

        self.layer_nor_a = nn.LayerNorm(hidden_dim)
        self.layer_nor_b = nn.LayerNorm(hidden_dim)
        self.layer_nor_c = nn.LayerNorm(hidden_dim)
        self.mlp = FeedForward(hidden_dim)
        self.block_size = block_size

    def mask_data(self, a):
        """Triangular masked mixing of `a` ([batch, seq, dim]).

        Reshapes seq into [block_size, seq // block_size] groups, does a
        prefix-style accumulation within each group via the triangular
        mask, then adds the accumulated totals of earlier groups — a
        blocked causal accumulation over a strided regrouping of the
        sequence (NOTE(review): the reshape interleaves positions rather
        than taking contiguous chunks — confirm this is intended).
        Requires seq to be divisible by block_size.
        """
        # One triangular matrix large enough for both the intra-group
        # (block_size) and inter-group (seq // block_size) products.
        mask = paddle.triu(paddle.ones(
            [a.shape[1] // self.block_size + self.block_size, a.shape[1] // self.block_size + self.block_size]))
        # [batch, block_size, seq // block_size, dim]
        a = a.reshape([a.shape[0], self.block_size, -1, a.shape[-1]])

        # Prefix accumulation along the block_size axis.
        a = a.transpose([0, 3, 2, 1]) @ mask[:self.block_size, :self.block_size]
        # Last column of the mask selects each group's full total.
        o = a @ mask[self.block_size - 1:self.block_size, :self.block_size].T
        # Shifted triangular product: each group receives the totals of
        # strictly earlier groups only.
        o = o.squeeze(-1) @ mask[1:o.shape[2] + 1, :o.shape[2]]
        a = a + o.unsqueeze(-1)
        a = a.transpose([0, 3, 2, 1])
        # Restore [batch, seq, dim].
        a = a.reshape([a.shape[0], -1, a.shape[-1]])
        return a

    def forward(self, x):
        """Run the block; [B, S, 2H] -> [B, S, H]."""
        x = self.layer_nor_x(x)

        # BUG/PERF FIX: mask_data is deterministic, so the original's three
        # calls on the same x produced three identical tensors; compute once.
        masked = self.mask_data(x)
        a = self.layer_nor_a(self.layer_a(masked))
        b = self.layer_nor_b(self.layer_b(masked))
        c = self.layer_nor_c(self.layer_c(masked))

        abc = paddle.concat([a, b, c], -1)

        # NOTE(review): layer_nor_a and layer_nor_b are reused below on the
        # pooled/MLP outputs — presumably deliberate weight sharing; confirm.
        x = self.layer_nor_a(self.layer_abc(abc))

        x = self.layer_nor_b(self.mlp(x) + x)
        return x


class OneBlock(nn.Layer):
    """Gated linear unit followed by the triangular masked mixing.

    Input and output are [batch, seq, hidden_dim]; seq must be a
    multiple of block_size (required by mask_data's reshape).
    NOTE(review): mask_data duplicates Block.mask_data verbatim —
    consider extracting a shared helper.
    """
    def __init__(self, hidden_dim,block_size=8):
        super(OneBlock, self).__init__()
        self.layer0 = paddle.nn.Linear(hidden_dim, hidden_dim)
        self.layer1 = paddle.nn.Linear(hidden_dim, hidden_dim)
        self.layer2 = paddle.nn.Linear(2*hidden_dim, hidden_dim)
        self.block_size = block_size
        self.layer_nor_x = nn.LayerNorm(hidden_dim)

    def forward(self, x):
        """GLU-style gate over two stacked projections, then masked mixing."""
        x0 = self.layer0(x)
        x1 = self.layer1(x0)
        # Gate: project concat([x1, x0]) (2H -> H) and modulate by sigmoid(x0).
        x2 = self.layer2(paddle.concat([x1, x0], -1)) * paddle.nn.functional.sigmoid(x0)
        x2 = self.layer_nor_x(self.mask_data(x2))
        return x2
    def mask_data(self, a):
        """Triangular masked mixing of `a` ([batch, seq, dim]).

        Identical to Block.mask_data: prefix-style accumulation within
        block_size-strided groups plus the accumulated totals of earlier
        groups, via products with one upper-triangular matrix.
        Requires seq divisible by block_size.
        """
        # Triangular matrix sized for both the intra-group and inter-group uses.
        mask = paddle.triu(paddle.ones(
            [a.shape[1] // self.block_size + self.block_size, a.shape[1] // self.block_size + self.block_size]))
        # [batch, block_size, seq // block_size, dim]
        a = a.reshape([a.shape[0], self.block_size, -1, a.shape[-1]])

        # Prefix accumulation along the block_size axis.
        a = a.transpose([0, 3, 2, 1]) @ mask[:self.block_size, :self.block_size]
        # Last mask column selects each group's full total.
        o = a @ mask[self.block_size - 1:self.block_size, :self.block_size].T
        # Each group receives totals of strictly earlier groups only.
        o = o.squeeze(-1) @ mask[1:o.shape[2] + 1, :o.shape[2]]
        a = a + o.unsqueeze(-1)
        a = a.transpose([0, 3, 2, 1])
        # Restore [batch, seq, dim].
        a = a.reshape([a.shape[0], -1, a.shape[-1]])
        return a


class CvFo(nn.Layer):
    """Stack of `Block` mixers with a persistent skip back to the embedded input."""
    def __init__(self, voc_size, hidden_dim, row_layers, block_size):
        super(CvFo, self).__init__()
        self.em = nn.Embedding(voc_size, hidden_dim)
        # self.pos = nn.Linear(hidden_dim, hidden_dim, bias_attr=False)
        self.block = nn.LayerList([Block(hidden_dim, block_size) for _ in range(row_layers )])
        # Each stage concatenates the block output (H) with the initial
        # embedding q_star (2H) -> 3H, projected back to 2H.
        self.q_star = nn.LayerList(
            [nn.Linear(3 * hidden_dim, 2 * hidden_dim, bias_attr=False) for _ in range(row_layers )])

        self.out_layer = nn.Linear(2*hidden_dim, voc_size, bias_attr=False)

    def forward(self, sx):
        """Embed tokens, run the Block/skip stack, project to vocab logits.

        Assumes sx is a rank-3 integer tensor [batch, K, seq]: the transpose
        below uses a 4-axis perm, so the embedded input must be rank 4, and
        Block's LayerNorm(2 * hidden_dim) requires K == 2 — TODO confirm
        against callers. seq must be divisible by block_size.
        """
        sx = self.em(sx)
        # Fold axis 1 into the feature dim: [B, K, L, H] -> [B, L, H * K].
        sx =sx.transpose([0,2,3,1]).reshape([sx.shape[0],sx.shape[2],-1])


        # Keep the reshaped embedding as a persistent skip input.
        q_star = sx

        for block,star in zip(self.block, self.q_star):
            sx = block(sx)
            sx = star(paddle.concat([sx, q_star], -1))

        out = self.out_layer(sx)
        return out
class CvFoRank(nn.Layer):
    """Parallel bank of OneBlock branches fused by max-pooling.

    Each of the row_layers branches maps [B, S, H] -> [B, S, H]; their
    concatenation ([B, S, row_layers * H]) is max-pooled back to H,
    refined by a feed-forward layer, and projected to vocab logits.
    """

    def __init__(self, voc_size, hidden_dim, row_layers, block_size):
        super(CvFoRank, self).__init__()
        self.em = nn.Embedding(voc_size, hidden_dim)
        self.block = nn.LayerList([OneBlock(hidden_dim, block_size) for _ in range(row_layers)])
        self.out_layer1 = FeedForward(hidden_dim)
        # kernel == number of branches, so pooling undoes the concat width.
        self.pooled_layer = nn.MaxPool1D(kernel_size=row_layers)
        self.out_layer = nn.Linear(hidden_dim, voc_size, bias_attr=False)

    def forward(self, sx):
        """Embed integer tokens [B, S] and return vocab logits [B, S, voc]."""
        sx = self.em(sx)
        branch_outputs = [block(sx) for block in self.block]
        sx = self.pooled_layer(paddle.concat(branch_outputs, axis=-1))

        # BUG FIX: the original called out_layer1 and discarded its result,
        # leaving the constructed FeedForward layer unused; keep its output.
        sx = self.out_layer1(sx)

        out = self.out_layer(sx)
        return out

if __name__ == '__main__':
    net = CvFo(512, 412, 8, 8)
    # BUG FIX: CvFo.forward applies a 4-axis transpose to the embedding
    # output, so the token input must be rank 3 ([batch, 2, seq]); the
    # original 2-D input [2, 90 * 8] embeds to rank 3 and the transpose
    # perm would not match. With K=2, features become 2 * 412, matching
    # Block's LayerNorm(2 * hidden_dim), and 720 is divisible by block_size.
    net(paddle.randint(0, 123, [2, 2, 90 * 8]))
    # net = CvFoRank(512, 412, 8, 8)
    # net(paddle.randint(0, 123, [2, 90 * 8]))

# Currently a string is encoded as n digits, each mapped one-to-one to a value.
# But digits create a bug under addition: 3+3=6 and 2+4=6 give the same sum,
# so the model cannot tell the operand pairs apart.
# The general form of the collision is m+n=k together with (m+x)+(n-x)=k;
# just two positions already yield many such ambiguities per sum k, so the
# encoding must be chosen so this never occurs.
# Given a pair (m, n), we would have to check whether any (m+x, n-x) is
# already encoded and refuse to encode if so.
# That check would consume a lot of resources.
# Could the problem instead be eliminated up front?
# E.g. with voc_size=10: first encode the strings that must not collide,
# then assign the remaining, combinable values their digits.
# The same bug also appears with more than two digits.
# Using several independent encodings at once would solve it.