import torch
import numpy as np

# Shared sum-reduced MSE criterion; used by TransformerAutoEncoder.loss below.
mse = torch.nn.MSELoss(reduction='sum')


class TransformerEncoder(torch.nn.Module):
    """One post-norm transformer encoder layer.

    Self-attention followed by a two-layer position-wise feed-forward block,
    each sublayer wrapped in a residual connection plus LayerNorm.

    NOTE(review): torch.nn.MultiheadAttention defaults to batch_first=False,
    so inputs are presumably (seq, batch, embed_dim) — confirm against callers.
    """

    def __init__(self, embed_dim, num_heads, dropout, feedforward_dim):
        super().__init__()
        self.attn = torch.nn.MultiheadAttention(embed_dim, num_heads, dropout=dropout)
        self.linear_1 = torch.nn.Linear(embed_dim, feedforward_dim)
        self.linear_2 = torch.nn.Linear(feedforward_dim, embed_dim)
        self.layernorm_1 = torch.nn.LayerNorm(embed_dim)
        self.layernorm_2 = torch.nn.LayerNorm(embed_dim)

    def forward(self, x_in):
        """Apply both sublayers; output has the same shape as ``x_in``."""
        # Self-attention sublayer: residual add, then normalize.
        attended, _ = self.attn(x_in, x_in, x_in)
        normed = self.layernorm_1(x_in + attended)
        # Feed-forward sublayer: expand -> ReLU -> project, residual, normalize.
        expanded = torch.nn.functional.relu(self.linear_1(normed))
        return self.layernorm_2(normed + self.linear_2(expanded))


class TransformerAutoEncoder(torch.nn.Module):
    """Transformer autoencoder for fixed-width (tabular) inputs.

    A linear "excite" layer lifts each sample to ``hidden_size`` features,
    which are reshaped into ``num_subspaces`` tokens of width ``embed_dim``,
    passed through three stacked TransformerEncoder layers, recombined, and
    finally projected back to ``num_inputs`` features as a reconstruction.

    Args:
        num_inputs: number of input features per sample.
        hidden_size: width of the excited representation; must equal
            ``embed_dim * num_subspaces``.
        num_subspaces: number of subspace tokens each sample is split into.
        embed_dim: embedding width of each subspace token.
        num_heads: attention heads per encoder layer.
        dropout: attention dropout probability.
        feedforward_dim: hidden width of each encoder's feed-forward block.
        debug_limit: number of initial forward passes that print shape info.
    """

    def __init__(
            self,
            num_inputs,
            hidden_size=1024,
            num_subspaces=8,
            embed_dim=128,
            num_heads=8,
            dropout=0,
            feedforward_dim=512,
            debug_limit=1,
    ):
        super().__init__()
        # The hidden representation must factor exactly into subspace tokens.
        assert hidden_size == embed_dim * num_subspaces
        self.num_subspaces = num_subspaces
        self.num_heads = num_heads
        self.embed_dim = embed_dim

        self.excite = torch.nn.Linear(in_features=num_inputs, out_features=hidden_size)
        self.encoder_1 = TransformerEncoder(embed_dim, num_heads, dropout, feedforward_dim)
        self.encoder_2 = TransformerEncoder(embed_dim, num_heads, dropout, feedforward_dim)
        self.encoder_3 = TransformerEncoder(embed_dim, num_heads, dropout, feedforward_dim)

        self.reconstructor = torch.nn.Linear(in_features=hidden_size, out_features=num_inputs)
        self.debug_limit = debug_limit
        self.forward_cnt = 1

    def divide(self, x):
        """Reshape (batch, hidden_size) into (num_subspaces, batch, embed_dim)."""
        batch_size = x.shape[0]
        x = x.reshape((batch_size, self.num_subspaces, self.embed_dim)).permute((1, 0, 2))
        return x

    def combine(self, x):
        """Inverse of divide: (num_subspaces, batch, embed_dim) -> (batch, hidden_size)."""
        batch_size = x.shape[1]
        x = x.permute((1, 0, 2)).reshape((batch_size, -1))
        return x

    def forward(self, x):
        """Return ``(encoded, reconstruction)`` for a (batch, num_inputs) tensor."""
        # (batch, num_inputs) -> (batch, hidden_size)
        x = torch.nn.functional.relu(self.excite(x))
        # (batch, hidden_size) -> (num_subspaces, batch, embed_dim).
        # MultiheadAttention expects (sequence, batch, embed) layout, so the
        # subspaces play the role of sequence positions.
        x = self.divide(x)
        # BUG FIX: was `if self.debug_limit:` with an unconditional decrement,
        # so once the counter passed zero it went negative (truthy) and the
        # debug print resumed on every forward pass. Only print — and only
        # consume the budget — while the counter is strictly positive.
        if self.debug_limit > 0:
            print(f"\r{self.forward_cnt} x.shape: {x.shape}", end=' ', flush=True)
            self.debug_limit -= 1
        x1 = self.encoder_1(x)
        x2 = self.encoder_2(x1)
        x3 = self.encoder_3(x2)
        x = self.combine(x3)

        reconstruction = self.reconstructor(x)
        self.forward_cnt += 1
        return x, reconstruction

    def loss(self, x):
        """Sum-of-squares reconstruction loss of ``x`` against its decoding."""
        _, reconstruction = self(x)
        return mse(reconstruction, x)


def test_model():
    """Smoke test: build the autoencoder, run one loss computation, print it."""
    model = TransformerAutoEncoder(num_inputs=10)
    batch = torch.rand((128, 10))
    print(model.loss(batch))


if __name__ == '__main__':
    # Run the smoke test when executed as a script.
    test_model()
