import torch


class FeedForward(torch.nn.Module):
    """Bottleneck MLP: hidden_dim -> hidden_dim // 2 -> hidden_dim with GELU.

    Both projections are bias-free linear layers.
    """

    def __init__(self, hidden_dim):
        super().__init__()
        self.fc_one = torch.nn.Linear(hidden_dim, hidden_dim // 2, bias=False)
        self.fc_two = torch.nn.Linear(hidden_dim // 2, hidden_dim, bias=False)
        self.gre = torch.nn.GELU()

    def forward(self, feed_x):
        # Project down, apply GELU, project back up.
        return self.fc_two(self.gre(self.fc_one(feed_x)))


class Block(torch.nn.Module):
    """Residual mixing block that combines the running state ``x`` with a
    projection ``p`` of the original input.

    Three parallel bias-free linear branches read the concatenated ``(x, p)``;
    each branch is cumulatively summed along dim 1 and layer-normalized, and
    the three results are fused with an elementwise max before a residual add
    and a pre-norm-style residual MLP.
    """

    def __init__(self, hidden_dim):
        super().__init__()
        self.layer_nor_x = torch.nn.LayerNorm(hidden_dim)
        self.layer_nor_p = torch.nn.LayerNorm(hidden_dim)
        self.layer_a = torch.nn.Linear(2 * hidden_dim, hidden_dim, bias=False)
        self.layer_b = torch.nn.Linear(2 * hidden_dim, hidden_dim, bias=False)
        self.layer_c = torch.nn.Linear(2 * hidden_dim, hidden_dim, bias=False)
        self.layer_nor_a = torch.nn.LayerNorm(hidden_dim)
        self.layer_nor_b = torch.nn.LayerNorm(hidden_dim)
        self.layer_nor_c = torch.nn.LayerNorm(hidden_dim)
        self.mlp = FeedForward(hidden_dim)

    def forward(self, p, x):
        # assumes p and x are (batch, seq, hidden_dim), so cumsum runs along
        # the sequence axis — TODO confirm with callers
        x = self.layer_nor_x(x)
        p = self.layer_nor_p(p)
        # Hoisted: all three branches share the same concatenated input
        # (original recomputed torch.cat three times).
        xp = torch.cat([x, p], -1)
        a = self.layer_nor_a(torch.cumsum(self.layer_a(xp), 1))
        b = self.layer_nor_b(torch.cumsum(self.layer_b(xp), 1))
        c = self.layer_nor_c(torch.cumsum(self.layer_c(xp), 1))
        # Lay the three branches out as a length-3 window per feature; a
        # kernel-3 max_pool1d over that window is an elementwise max of a/b/c.
        abc = torch.concat([a.view([x.shape[0], -1, 1]),
                            b.view([x.shape[0], -1, 1]),
                            c.view([x.shape[0], -1, 1])], -1)
        x = x + torch.nn.functional.max_pool1d(abc, 3).view(x.shape)
        # Residual feed-forward.
        x = self.mlp(x) + x
        return x


class CenterBlock(torch.nn.Module):
    """A stack of ``row_layers`` Blocks, each fed the same positional
    projection of the stack's original input alongside the evolving state."""

    def __init__(self, hidden_dim, row_layers):
        super().__init__()
        self.pos = torch.nn.Linear(hidden_dim, hidden_dim, bias=False)
        self.block = torch.nn.ModuleList([Block(hidden_dim) for _ in range(row_layers)])

    def forward(self, sx):
        # The projection is computed once from the incoming state and shared
        # by every layer in the stack.
        projected = self.pos(sx)
        for layer in self.block:
            sx = layer(projected, sx)
        return sx


class CvFo(torch.nn.Module):
    """Top-level model: token embedding -> one or more CenterBlock stacks ->
    vocab projection.

    ``basic`` selects the training phase:
      - "differentiation": load previously saved per-division weights, freeze
        the embedding, the output layer and every center but the last, so only
        the final fusion center trains.
      - "division": load a full checkpoint ("egg.torch") and freeze the
        embedding and output layer.
      - anything else: train everything from scratch.

    If ``center_row_ly`` is non-empty, one CenterBlock is built per entry
    (each entry is that center's layer count); otherwise ``center_model_num``
    identical centers of ``row_layers`` layers are built.
    """

    def __init__(self, voc_size, hidden_dim, row_layers, center_model_num, basic, center_model_list=(),
                 center_row_ly=()):
        super().__init__()
        self.em = torch.nn.Embedding(voc_size, hidden_dim)
        if center_row_ly:
            # Bug fix: the original iterated ``range(center_model_list)``,
            # which raises TypeError (range() on a tuple). The guard above
            # shows the intended iterable is ``center_row_ly``.
            self.center = torch.nn.ModuleList(
                [CenterBlock(hidden_dim, row_ly) for row_ly in center_row_ly])
        else:
            self.center = torch.nn.ModuleList(
                [CenterBlock(hidden_dim, row_layers) for _ in range(center_model_num)])
        self.out_layer = torch.nn.Linear(hidden_dim, voc_size, bias=False)
        self.basic = basic
        if basic == "differentiation":
            self.load_division(center_model_list)
            self.em.requires_grad_(False)
            self.out_layer.requires_grad_(False)
            # Freeze every center except the last (the trainable fusion stack).
            for i in range(len(self.center) - 1):
                self.center[i].requires_grad_(False)
        elif basic == "division":
            self.load_egg()
            self.em.requires_grad_(False)
            self.out_layer.requires_grad_(False)

    def forward(self, sx):
        """Map token ids of shape (batch, seq) to logits (batch, seq, voc_size)."""
        sx = self.em(sx)
        if len(self.center) != 1 and self.basic == "differentiation":
            # Run every frozen center, fuse their outputs with an elementwise
            # max (max_pool1d whose kernel equals the number of branches),
            # add the fusion residually, then run the trainable final center.
            sx_list = [center(sx).view([sx.shape[0], -1, 1]) for center in self.center[:-1]]
            differentiation = torch.concat(sx_list, -1)
            sx = sx + torch.nn.functional.max_pool1d(
                differentiation, len(self.center) - 1).view(sx.shape)
            sx = self.center[-1](sx)
        else:
            sx = self.center[0](sx)
        return self.out_layer(sx)

    def load_division(self, center_list=()):
        """Load frozen embedding/output weights plus each center's weights.

        NOTE(review): ``torch.load`` unpickles arbitrary data — only load
        checkpoints from trusted sources.
        """
        self.em.load_state_dict(torch.load("division_em.torch"))
        self.out_layer.load_state_dict(torch.load("division_out_layer.torch"))
        for i in range(len(self.center)):
            if center_list:
                # assumes len(center_list) >= len(self.center) — TODO confirm
                self.center[i].load_state_dict(torch.load(center_list[i]))
            else:
                self.center[i].load_state_dict(torch.load("division_{}.torch".format(i)))

    def load_egg(self):
        """Restore the whole model from the combined checkpoint."""
        self.load_state_dict(torch.load("egg.torch"))

    def save_egg(self):
        """Save the whole model as one combined checkpoint."""
        torch.save(self.state_dict(), "egg.torch")

    def save_division(self, name):
        """Save the embedding, output layer and the (single) center under ``name``."""
        torch.save(self.em.state_dict(), "division_em.torch")
        torch.save(self.out_layer.state_dict(), "division_out_layer.torch")
        torch.save(self.center[0].state_dict(), "division_{}.torch".format(name))

    def save_differentiation(self):
        """Save the first center's weights from a differentiation run."""
        torch.save(self.center[0].state_dict(), "differentiation.torch")


if __name__ == '__main__':
    # Smoke run: vocab 512, hidden 412, 8 rows per center, 2 centers,
    # "differentiation" mode.
    # NOTE(review): "differentiation" calls load_division(), which reads
    # "division_em.torch" etc. from the working directory — this raises
    # FileNotFoundError unless save_division() was run beforehand.
    net = CvFo(512, 412, 8, 2, "differentiation")
    net(torch.randint(0, 123, [2, 15]))
    # net.save_division(0)
    # net.save_division(1)
    # net.load_division()
#    Experiment plan
#  Step 1: train the basic model to obtain the embedding (em) and the out layer.
#    Step 2: with that base model, compute the loss over all data and split the data into several groups.
#    Step 3: build several models; freeze em and the out layer and train only the middle layers.
#   Step 4: build one model training only its middle layers; pool-fuse all of the middle layers above and feed the result into this model.
#   Step 5: run inference with that model.
#  Requirement: the network must achieve the above just by changing parameters, and the number of middle layers must be freely adjustable — added or removed at any time.