import paddle
import numpy as np
import pandas as pd
from tqdm import tqdm


class FeedFroward(paddle.nn.Layer):
    """Position-wise feed-forward block with a sine activation.

    Projects from hidden_dim down to hidden_dim // 2, applies sin, and
    projects back up to hidden_dim. Both linear layers are bias-free.
    """

    def __init__(self, hidden_dim):
        super(FeedFroward, self).__init__()
        half = hidden_dim // 2
        self.fc1 = paddle.nn.Linear(hidden_dim, half, bias_attr=False)
        self.fc2 = paddle.nn.Linear(half, hidden_dim, bias_attr=False)

    def forward(self, feed_x):
        # NOTE (from original author): sin and cos behave the same here;
        # silu and tanh were the next-best activations tried.
        return self.fc2(paddle.sin(self.fc1(feed_x)))


class CVFroward(paddle.nn.Layer):
    """Causal 1-D convolution block: conv -> sin -> conv.

    Takes and returns tensors laid out as (batch, seq, hidden_dim)
    -- presumably; confirm against callers. Each convolution is made
    causal and length-preserving by prepending kernel_size - 1 zeros
    along the time axis.
    """

    def __init__(self, hidden_dim, k_size):
        super(CVFroward, self).__init__()
        half = hidden_dim // 2
        self.fc1 = paddle.nn.Conv1D(in_channels=hidden_dim, out_channels=half,
                                    bias_attr=False, kernel_size=k_size, stride=1)
        self.fc2 = paddle.nn.Conv1D(in_channels=half, out_channels=hidden_dim,
                                    bias_attr=False, kernel_size=k_size, stride=1)
        # Left padding that keeps the output length equal to the input length.
        self.pad = k_size - 1

    def _causal_pad(self, t):
        # Prepend zeros on the last (time) axis of a (batch, channels, seq) tensor.
        zeros = paddle.zeros(shape=[t.shape[0], t.shape[1], self.pad], dtype=t.dtype)
        return paddle.concat([zeros, t], -1)

    def forward(self, feed_x):
        # (batch, seq, dim) -> (batch, dim, seq) for Conv1D's channel-first layout.
        h = feed_x.transpose([0, 2, 1])
        h = self.fc1(self._causal_pad(h))
        # NOTE (from original author): sin and cos behave the same here;
        # silu and tanh were also tried.
        h = paddle.sin(h)
        h = self.fc2(self._causal_pad(h))
        # Restore (batch, seq, dim).
        return h.transpose([0, 2, 1])


class CVF(paddle.nn.Layer):
    """Stacked causal-convolution language model.

    Token ids -> embedding -> n_layers of (causal conv block + feed-forward
    block), each wrapped in residual adds and a phase-shifted sine, followed
    by a linear projection back to vocabulary logits.

    Args:
        voc_size: vocabulary size (embedding rows and output logits).
        hidden_dim: embedding / hidden width.
        n_layers: number of conv + feed-forward layer pairs.
        k_size: convolution kernel size for each CVFroward block.
    """

    def __init__(self, voc_size, hidden_dim, n_layers, k_size):
        super(CVF, self).__init__()

        self.emx = paddle.nn.Embedding(voc_size, hidden_dim)
        self.cv_list = paddle.nn.LayerList(
            [CVFroward(hidden_dim, k_size) for _ in range(n_layers)])
        self.feed_list = paddle.nn.LayerList(
            [FeedFroward(hidden_dim) for _ in range(n_layers)])

        self.out_layer = paddle.nn.Linear(hidden_dim, voc_size)

        # Per-layer sine phase offsets 2*pi / layer_index (1-based).
        # Hoisted from forward() so the list is not rebuilt on every call.
        self.pos_list = [2 * np.pi / i for i in range(1, n_layers + 1)]

    def forward(self, x):
        """Return vocabulary logits for token-id tensor `x`.

        `x` is presumably (batch, seq) int64 ids -- TODO confirm; output
        then has shape (batch, seq, voc_size).
        """
        ll = self.emx(x)
        for pos, cv, feed in zip(self.pos_list, self.cv_list, self.feed_list):
            # Out-of-place adds: the original used `ll += ...`, which in
            # Paddle dygraph is an in-place add_ that can corrupt autograd
            # history (and, on the first layer, mutates the aliased
            # embedding output).
            ll = ll + cv(ll)
            ll = paddle.sin(ll + pos)
            ll = ll + feed(ll)
        return self.out_layer(ll)


def emheading_train_and_sample():
    """Smoke-test training loop for the CVF model on random token data.

    Trains next-token prediction with cross-entropy over 256-token windows
    of two random sequences, then runs an evaluation pass in eval mode.
    No arguments; no return value; prints progress via tqdm.
    """
    print("*" * 100)

    net = CVF(1235, 128, 6, 8)

    # Two random token sequences of length 5120, ids in [1, 1140).
    x = paddle.to_tensor([
        np.random.randint(1, 1140, 5120),
        np.random.randint(1, 1140, 5120),
    ], dtype='int64')

    loss_f = paddle.nn.CrossEntropyLoss()
    opt = paddle.optimizer.Adam(parameters=net.parameters(), learning_rate=0.0003)

    bar = tqdm(range(1260))
    for epoch in bar:
        # Gradients accumulate across all windows of an epoch; a single
        # optimizer step is applied after the inner loop (as in the
        # original -- presumably deliberate gradient accumulation).
        for i in range(0, x.shape[-1], 256):
            j = i + 256

            outl = net(x[:, i + 1:j - 1])       # input tokens
            loss = loss_f(outl, x[:, i + 2:j])  # next-token targets

            bar.set_description("epoch---{}--loss--{:.5f}".format(epoch, loss.item()))

            loss.backward()
        opt.step()
        opt.clear_grad()

    # Evaluation pass. BUG FIX: the original called
    # net(inputs, targets, state) and unpacked three return values, but
    # CVF.forward accepts exactly one tensor and returns one (that call
    # signature belonged to the commented-out ReNetAll model), so this
    # section raised TypeError at runtime.
    net.eval()
    with paddle.no_grad():
        for i in range(0, x.shape[-1], 256):
            j = i + 256
            outl = net(x[:, i:j - 1])
            val_loss = loss_f(outl, x[:, i + 1:j])
            print("eval---{}--loss--{:.5f}".format(i, val_loss.item()))


# 进行模型训练和预测
# if __name__ == '__main__':
#     emheading_train_and_sample()