import torch


class FeedForward(torch.nn.Module):
    """Bottleneck MLP: hidden_dim -> hidden_dim // 2 -> hidden_dim.

    Both linear layers are bias-free; a GELU sits between them, so the
    block maps the zero vector to the zero vector.
    """

    def __init__(self, hidden_dim):
        super().__init__()
        half = hidden_dim // 2
        self.fc_one = torch.nn.Linear(hidden_dim, half, bias=False)
        self.fc_two = torch.nn.Linear(half, hidden_dim, bias=False)
        self.gre = torch.nn.GELU()

    def forward(self, feed_x):
        # down-project, gate with GELU, up-project back to hidden_dim
        return self.fc_two(self.gre(self.fc_one(feed_x)))


class Block(torch.nn.Module):
    def __init__(self, hidden_dim):
        super(Block, self).__init__()
        self.layer_nor_x = torch.nn.LayerNorm(hidden_dim)
        self.layer_nor_p = torch.nn.LayerNorm(hidden_dim)
        self.layer_a = torch.nn.Linear(2 * hidden_dim, hidden_dim, bias=True)
        self.layer_b = torch.nn.Linear(2 * hidden_dim, hidden_dim, bias=True)
        self.layer_c = torch.nn.Linear(2 * hidden_dim, hidden_dim, bias=True)
        self.layer_abc = torch.nn.Linear(2 * hidden_dim, hidden_dim, bias=True)


        self.layer_nor_a = torch.nn.LayerNorm(hidden_dim)
        self.layer_nor_b = torch.nn.LayerNorm(hidden_dim)
        self.layer_nor_c = torch.nn.LayerNorm(hidden_dim)
        self.mlp = FeedForward(hidden_dim)

    def forward(self, p, x):
        x = self.layer_nor_x(x)
        p = self.layer_nor_p(p)
        a = self.layer_a(torch.cat([x, p], -1))
        b = self.layer_b(torch.cat([x, p], -1))
        c = self.layer_c(torch.cat([x, p], -1))
        a = torch.cumsum(a, 1)
        b = torch.cumsum(b, 1)
        c = torch.cumsum(c, 1)
        a = self.layer_nor_a(a)
        b = self.layer_nor_b(b)
        c = self.layer_nor_c(c)
        #  加上max_pool1d效果好

        abc = torch.concat([a.view([x.shape[0], -1, 1]), b.view([x.shape[0], -1, 1]), c.view([x.shape[0], -1, 1])], -1)
        x = torch.concat([torch.nn.functional.max_pool1d(abc, 3).view(x.shape) ,x],-1)
        x = self.layer_nor_a(self.layer_abc(x))

        #  不加虽然前期低但是网上张
        x = self.layer_nor_b(self.mlp(x) + x)
        return x


class CvFo(torch.nn.Module):
    """Token model: embedding -> stack of Blocks -> q_star fusion -> vocab logits.

    Of `row_layers` total layers, two thirds (rounded up) are mixing Blocks
    and one third are bias-free fusion linears that re-inject the raw
    embeddings.  Output shape is (batch, seq, voc_size).
    """

    def __init__(self, voc_size, hidden_dim, row_layers):
        super(CvFo, self).__init__()
        # Submodule creation order preserved for RNG-reproducible init.
        self.em = torch.nn.Embedding(voc_size, hidden_dim)
        self.pos = torch.nn.Linear(hidden_dim, hidden_dim, bias=False)
        num_fusion = row_layers // 3
        self.block = torch.nn.ModuleList(
            Block(hidden_dim) for _ in range(row_layers - num_fusion)
        )
        self.q_star = torch.nn.ModuleList(
            torch.nn.Linear(2 * hidden_dim, hidden_dim, bias=False)
            for _ in range(num_fusion)
        )
        self.out_layer = torch.nn.Linear(hidden_dim, voc_size, bias=False)

    def forward(self, sx):
        sx = self.em(sx)
        skip = sx  # raw embeddings, re-injected by every q_star fusion layer
        # NOTE(review): named "pos" but this is a linear map of the token
        # embeddings, not a positional encoding — confirm intent.
        psx = self.pos(sx)
        for blk in self.block:
            sx = blk(psx, sx)
        for fuse in self.q_star:
            sx = fuse(torch.cat([sx, skip], -1))
        return self.out_layer(sx)


if __name__ == '__main__':
    # Smoke test: vocab 512, hidden 412, 8 layers; forward a batch of two
    # length-15 token sequences drawn from ids [0, 123).
    model = CvFo(512, 412, 8)
    model(torch.randint(0, 123, [2, 15]))