import torch


class Attention(torch.nn.Module):
    """Multi-head attention variant with sum-normalized (non-softmax) scores.

    ReLU-activated Q/K projections are combined into per-head scores, masked
    with an upper-triangular matrix, and normalized by the masked column sums
    instead of a softmax. The per-head outputs are GroupNorm-ed and reduced
    back to ``hidden_dim`` by max-pooling over heads, then refined by
    ``layers_num`` (Linear -> GroupNorm -> head-pool) stages.

    Args:
        hidden_dim: per-token feature size of the input.
        heads: number of attention heads (also GroupNorm channel count).
        layers_num: number of refinement stages after attention.
    """

    def __init__(self, hidden_dim, heads, layers_num=8):
        super(Attention, self).__init__()

        # Q/K/V project hidden_dim -> heads * hidden_dim (no bias).
        self.q = torch.nn.Linear(hidden_dim, heads * hidden_dim, bias=False)
        self.k = torch.nn.Linear(hidden_dim, heads * hidden_dim, bias=False)
        self.v = torch.nn.Linear(hidden_dim, heads * hidden_dim, bias=False)
        self.heads = heads
        # One group per head: normalizes each head's output independently.
        self.group_norm0 = torch.nn.GroupNorm(num_groups=heads, num_channels=heads)
        self.group_norm2 = torch.nn.ModuleList(
            [torch.nn.GroupNorm(num_groups=heads, num_channels=heads) for _ in range(layers_num)])

        self.feed_01 = torch.nn.ModuleList(
            [torch.nn.Linear(hidden_dim, heads * hidden_dim, bias=False) for _ in range(layers_num)])

    def forward(self, sx):
        """Map ``sx`` of shape (batch, seq, hidden_dim) to the same shape."""
        b, s, h = sx.shape
        q = torch.nn.functional.relu(self.q(sx))
        k = torch.nn.functional.relu(self.k(sx))
        v = self.v(sx)

        # (b, heads, s, s) raw scores per head.
        qk = q.view([b, s, self.heads, h]).permute([0, 2, 1, 3]) @ k.view([b, s, self.heads, h]).permute(
            [0, 2, 3, 1])
        # BUG FIX: create the mask on the input's device; the original
        # allocated it on CPU and crashed when the module ran on CUDA.
        # Multiplicative upper-triangular mask (zeroes, not -inf, since the
        # normalization below is by sum rather than softmax).
        mask = torch.triu(torch.ones([s, s], device=sx.device))
        qk_mask = qk * mask
        # Normalize by masked column sums; epsilon guards fully-zero columns.
        qk = qk_mask / (torch.sum(qk_mask, -2).unsqueeze(-2) + 1e-14)
        qkv = qk.permute([0, 1, 3, 2]) @ v.view([b, s, self.heads, h]).permute([0, 2, 1, 3])

        # Normalize heads, then collapse the head dimension by max-pooling.
        qkv = self.group_norm0(qkv)
        qkv = qkv.permute([0, 2, 3, 1])
        qkv = torch.nn.functional.max_pool1d(qkv.view([b, -1, self.heads]), self.heads).view([b, s, h])

        # Refinement stages: expand to heads, normalize, pool back down.
        for feed, group_norm in zip(self.feed_01, self.group_norm2):
            qkv = feed(qkv)
            qkv = qkv.reshape([b, s, self.heads, h]).permute([0, 2, 1, 3])
            qkv = group_norm(qkv)
            qkv = qkv.permute([0, 2, 3, 1])
            qkv = torch.nn.functional.max_pool1d(qkv.view([b, -1, self.heads]), self.heads).view([b, s, h])

        return qkv


class FeedForward(torch.nn.Module):
    """Position-wise feed-forward block: Linear -> GELU -> Linear.

    Projects down to ``hidden_dim // 2`` and back up to ``hidden_dim``;
    both projections are bias-free. Attribute names are part of the
    state_dict keys (used by save_lora/load_lora in this file), so they
    must stay stable.
    """

    def __init__(self, hidden_dim):
        super(FeedForward, self).__init__()
        # Bottleneck (down) projection, expansion (up) projection, activation.
        self.fc_one = torch.nn.Linear(hidden_dim, hidden_dim // 2, bias=False)
        self.fc_two = torch.nn.Linear(hidden_dim // 2, hidden_dim, bias=False)
        self.gre = torch.nn.GELU()

    def forward(self, feed_x):
        # Down-project, apply GELU, then up-project back to hidden_dim.
        return self.fc_two(self.gre(self.fc_one(feed_x)))


class CvFoBlock(torch.nn.Module):
    """Core block: cumulative-context gating, attention, and row-layer fusion.

    Builds a prefix-sum context from ``sx``, modulates it with projections of
    itself and of ``p_next``, runs it through :class:`Attention` with a
    residual, then refines it through ``row_layers`` parallel branches whose
    outputs are max-pooled back to ``hidden_dim``.

    Args:
        hidden_dim: per-token feature size.
        row_layers: number of refinement branches.
        heads: attention head count.
        group_num: forwarded to ``Attention`` as its ``layers_num``.
    """

    def __init__(self, hidden_dim, row_layers, heads, group_num):
        super(CvFoBlock, self).__init__()

        self.p_next_layer = torch.nn.ModuleList(
            [torch.nn.Linear(hidden_dim, hidden_dim, bias=False) for _ in range(row_layers)])
        self.p_ctx_next_layer_one = torch.nn.Linear(hidden_dim, hidden_dim, bias=False)
        self.p_ctx_next_mask_one = torch.nn.Linear(hidden_dim, hidden_dim, bias=False)
        self.p_ctx_next_mask_two = torch.nn.Linear(hidden_dim, hidden_dim, bias=False)
        self.p_ctx_next_mask_thr = torch.nn.Linear(hidden_dim, hidden_dim, bias=False)

        self.p_ctx_next_layer_three = torch.nn.ModuleList(
            [torch.nn.Linear(hidden_dim, hidden_dim, bias=False) for _ in range(row_layers)])

        self.layer_nor = torch.nn.LayerNorm(hidden_dim)
        self.feed = FeedForward(hidden_dim)
        self.attention = Attention(hidden_dim, heads, group_num)

    def forward(self, sx, p_next):
        """Fuse ``sx`` (batch, seq, hidden) with ``p_next`` context.

        ``p_next`` is summed over its dim 1, so any (*, n, hidden) tensor
        that broadcasts against the batch works (the caller passes the full
        embedding table with a leading batch dim of 1).
        """
        p_ctx_next = self.p_ctx_next_layer_one(sx)
        # Running prefix context along the sequence dimension.
        p_ctx_next_mask = torch.cumsum(p_ctx_next, 1)
        p_ctx_next_mask_one = self.p_ctx_next_mask_one(p_ctx_next_mask)
        p_ctx_next_mask_two = self.p_ctx_next_mask_two(p_ctx_next_mask)
        # BUG FIX: the original applied self.p_ctx_next_mask_two a second
        # time here and left self.p_ctx_next_mask_thr unused; route p_next
        # through its dedicated projection.
        p_ctx_next_mask_thr = self.p_ctx_next_mask_thr(p_next)
        # Exponent is 1 or 2 depending on sequence-length parity — looks
        # intentional but undocumented; TODO confirm with the author.
        p_ctx_next_mask = p_ctx_next_mask_one * p_ctx_next_mask_two ** (
                2 - p_ctx_next_mask_two.shape[1] % 2) * torch.sum(p_ctx_next_mask_thr, 1).unsqueeze(1) ** (2 - 1)
        # Attention with residual connection, then LayerNorm.
        p_ctx_next_mask = p_ctx_next_mask + self.attention(p_ctx_next_mask)
        p_ctx_next_mask = self.layer_nor(p_ctx_next_mask)
        # NOTE: multi-dimensional pooling could be applied here instead.
        p_ctx_next_mask_list = []

        # Sequentially refine through row_layers branches; p_next is rebound
        # to a projection of sx inside the loop (the argument is only used
        # above for the thr term).
        for p_ctx_next_layer, p_next_layer in zip(self.p_ctx_next_layer_three, self.p_next_layer):
            p_next = p_next_layer(sx)
            p_ctx_next_mask = torch.sin(p_ctx_next_layer(p_ctx_next_mask) + p_next)
            p_ctx_next_mask = p_ctx_next_mask + self.feed(p_ctx_next_mask)
            p_ctx_next_mask = self.layer_nor(p_ctx_next_mask)
            p_ctx_next_mask_list.append(p_ctx_next_mask)
        # Pool with kernel == len(list) over the concatenated feature dim;
        # this pools adjacent positions of the concat, not strictly
        # across branches — TODO confirm intent.
        p_ctx_next_mask = torch.nn.functional.max_pool1d(torch.concat(p_ctx_next_mask_list, -1),
                                                         len(p_ctx_next_mask_list))
        return p_ctx_next_mask


class CvFo(torch.nn.Module):
    """Token-level language model: embedding -> CvFoBlock -> FFN -> logits.

    A separate ``lora`` FeedForward adapter sits before the output layer and
    can be saved/loaded independently of the rest of the model.

    Args:
        voc_size: vocabulary size (embedding rows and output logits).
        hidden_dim: embedding / hidden feature size.
        row_layers, heads, group_num: forwarded to :class:`CvFoBlock`.
    """

    def __init__(self, voc_size, hidden_dim, row_layers, heads, group_num):
        super(CvFo, self).__init__()

        self.em = torch.nn.Embedding(voc_size, hidden_dim)
        self.cv = CvFoBlock(hidden_dim, row_layers, heads, group_num)
        self.feed = FeedForward(hidden_dim)

        # Adapter saved/loaded separately via save_lora/load_lora.
        self.lora = FeedForward(hidden_dim)
        self.out_layer = torch.nn.Linear(hidden_dim, voc_size, bias=False)
        self.layer_nor = torch.nn.LayerNorm(hidden_dim)
        # BUG FIX: register the full-vocabulary id row as a buffer so it
        # follows .to(device)/.cuda(); a plain tensor attribute stays on CPU.
        # persistent=False keeps it out of the state_dict, so existing
        # checkpoints remain loadable.
        self.register_buffer(
            'p_next', torch.arange(voc_size, dtype=torch.int32).reshape([1, -1]), persistent=False)

    def forward(self, sx):
        """Map integer token ids (batch, seq) to logits (batch, seq, voc_size)."""
        sx = self.em(sx)
        # Embed every vocabulary id as auxiliary context for the block.
        p_next = self.em(self.p_next)
        sx = sx + self.cv(sx, p_next)
        sx = self.layer_nor(sx)
        sx = sx + self.feed(sx)
        sx = self.layer_nor(sx)
        sx = sx + self.lora(sx)
        out = self.out_layer(sx)
        return out

    def load_lora(self, lora_name):
        """Load the lora adapter weights saved by :meth:`save_lora`.

        BUG FIX: the original called ``load_dict``, which does not exist on
        ``torch.nn.Module``; the correct API is ``load_state_dict``.
        """
        self.lora.load_state_dict(torch.load(lora_name))

    def save_lora(self, lora_name):
        """Persist only the lora adapter's state_dict to ``lora_name``."""
        torch.save(self.lora.state_dict(), lora_name)


if __name__ == '__main__':
    # Smoke test: vocab=512, hidden=412, 8 row layers, 8 heads, 8 groups.
    model = CvFo(512, 412, 8, 8, 8)
    # One forward pass on a random (batch=2, seq=15) token batch.
    dummy_tokens = torch.randint(0, 123, [2, 15])
    model(dummy_tokens)
