import paddle

from tqdm import tqdm


class CvFoBlock(paddle.nn.Layer):
    """Next-token prediction block built on embedding differences.

    The block embeds a token sequence, takes first differences of
    consecutive embeddings, and forms causal prefix sums of the squared
    and cubed differences via an upper-triangular mask.  The ratio
    (sum of cubes) / (sum of squares) is added as a residual to the
    embedding, layer-normalized, and projected to vocabulary logits.

    Args:
        voc_size: vocabulary size (number of embedding rows / logits).
        hidden_dim: embedding / hidden dimension.
    """

    # Small constant added to the denominator to avoid division by zero.
    _EPS = 0.00000000001

    def __init__(self, voc_size, hidden_dim):
        super().__init__()
        self.em = paddle.nn.Embedding(voc_size, hidden_dim)
        # Projection back to vocabulary logits; bias deliberately disabled.
        self.fc = paddle.nn.Linear(hidden_dim, voc_size, bias_attr=False)
        self.layer_nor = paddle.nn.LayerNorm(hidden_dim)

    def _next_logits(self, sx, mask_from, res_from):
        """Shared core of forward/eval_forward.

        Args:
            sx: integer token ids, presumed shape [batch, seq] — TODO confirm.
            mask_from: first column of the triangular mask to keep
                (1 -> all output positions, -1 -> only the last one).
            res_from: first sequence position of the residual embedding
                slice (must line up with ``mask_from``'s column choice).

        Returns:
            Logits over the vocabulary for the selected positions.
        """
        emb = self.em(sx)
        # First differences of consecutive token embeddings.
        ab = emb[:, 1:] - emb[:, :-1]
        # Upper-triangular ones: column j accumulates differences 0..j
        # (a causal prefix-sum realized as a matmul).
        tri = paddle.triu(paddle.ones([ab.shape[1], ab.shape[1]]))[:, mask_from:]
        abt = ab.transpose([0, 2, 1])
        # Prefix sums of squared (denominator) and cubed (numerator) diffs.
        denom = (abt ** 2 @ tri).transpose([0, 2, 1]) + self._EPS
        numer = (abt ** 3 @ tri).transpose([0, 2, 1])
        hidden = self.layer_nor(numer / denom + emb[:, res_from:])
        return self.fc(hidden)

    def forward(self, sx):
        """Training pass: logits for every predictable position.

        For an input of length L the output covers positions 2..L-1,
        i.e. shape [batch, L - 2, voc_size].
        """
        return self._next_logits(sx, 1, 2)

    def eval_forward(self, sx):
        """Decoding pass: logits for the last position only ([batch, 1, voc_size])."""
        return self._next_logits(sx, -1, -1)


def eval_sample(eval_data, net, seed_len=5):
    """Greedy autoregressive decode and token-accuracy report.

    Seeds the model with the first ``seed_len`` ground-truth tokens, then
    repeatedly appends the argmax of ``net.eval_forward`` until the
    generated sequence matches ``eval_data`` in length.

    Args:
        eval_data: ground-truth token ids, presumed shape [batch, seq].
        net: a ``CvFoBlock`` (or anything exposing ``eval_forward``).
        seed_len: number of ground-truth tokens used as the prompt.

    Returns:
        Mean accuracy over the *generated* positions only.  The seed
        prefix is copied from ``eval_data`` and always matches, so
        including it (as the original code did) inflates the metric.
    """
    input_data = eval_data[:, :seed_len]
    for _ in tqdm(range(eval_data.shape[1] - seed_len)):
        next_token = paddle.argmax(net.eval_forward(input_data), -1)
        input_data = paddle.concat([input_data, next_token], -1)
    # Compare only positions the model actually generated.
    acc = (eval_data[:, seed_len:] == input_data[:, seed_len:]).numpy().mean()
    print(acc)
    return acc


def train_sample():
    """Overfit a CvFoBlock on one fixed random sequence, then decode it.

    A smoke-test training loop: 700 epochs of Adam on a single random
    batch of token ids, followed by a greedy-decoding evaluation on the
    first sequence of that same batch.
    """
    model = CvFoBlock(1211, 128)
    criterion = paddle.nn.CrossEntropyLoss()
    optimizer = paddle.optimizer.Adam(learning_rate=0.0001, parameters=model.parameters())
    progress = tqdm(range(700))
    # One fixed batch of random token ids in [1, 1210).
    data = paddle.randint(1, 1210, [2, 256])
    for epoch in progress:
        # Model output for input length L covers positions 2..L-1, so an
        # input of 255 tokens lines up with the 253 labels data[:, 3:].
        targets, inputs = data[:, 3:], data[:, :-1]
        logits = model(inputs)
        loss = criterion(logits, targets)
        acc = paddle.metric.accuracy(logits.reshape([-1, 1211]), targets.reshape([-1, 1]))
        progress.set_description("epoch___{}_loss___{:.5f}_acc__{:.5f}".format(epoch, loss.item(), acc.item()))
        optimizer.clear_grad()
        loss.backward()
        optimizer.step()
    eval_sample(data[:1], model)


# Script entry point: run the training + greedy-decoding demo.
if __name__ == '__main__':
    train_sample()
