import paddle
import numpy as np
import pandas as pd
from tqdm import tqdm


class FeedFroward(paddle.nn.Layer):
    """Position-wise feed-forward block: Linear -> sin -> Dropout -> Linear.

    Projects hidden_dim down to hidden_dim // 2 and back, with no bias terms.
    NOTE(review): class name looks like a typo for "FeedForward"; kept
    unchanged because other code in this file references it.
    """

    def __init__(self, hidden_dim):
        super(FeedFroward, self).__init__()
        half = hidden_dim // 2
        # Bottleneck projections, both bias-free.
        self.fc1 = paddle.nn.Linear(hidden_dim, half, bias_attr=False)
        self.fc2 = paddle.nn.Linear(half, hidden_dim, bias_attr=False)
        self.dr = paddle.nn.Dropout(0.2)

    def forward(self, feed_x):
        # Original author's note: sin and cos behave alike here; silu and
        # tanh were the next best activations tried.
        hidden = paddle.sin(self.fc1(feed_x))
        return self.fc2(self.dr(hidden))


class CVFroward(paddle.nn.Layer):
    """Causal 1-D convolution block: pad -> Conv1D -> sin -> pad -> Dropout -> Conv1D.

    Input and output layout is (batch, seq, hidden); the convolutions work on
    (batch, channels, seq), hence the transposes at entry and exit. Left-padding
    each convolution by k_size - 1 zeros keeps it causal and preserves the
    sequence length.
    NOTE(review): name looks like a typo for "CVForward"; kept for callers.
    """

    def __init__(self, hidden_dim, k_size):
        super(CVFroward, self).__init__()
        half = hidden_dim // 2
        self.fc1 = paddle.nn.Conv1D(in_channels=hidden_dim, out_channels=half,
                                    bias_attr=False, kernel_size=k_size, stride=1)
        self.fc2 = paddle.nn.Conv1D(in_channels=half, out_channels=hidden_dim,
                                    bias_attr=False, kernel_size=k_size, stride=1)
        # Amount of left padding required to keep output length == input length.
        self.pad = k_size - 1
        self.dr = paddle.nn.Dropout(0.2)

    def _causal_pad(self, t):
        # Prepend zeros along the time axis (last axis in channel-first layout).
        zeros = paddle.zeros(shape=[t.shape[0], t.shape[1], self.pad], dtype=t.dtype)
        return paddle.concat([zeros, t], -1)

    def forward(self, feed_x):
        h = feed_x.transpose([0, 2, 1])                # (b, s, c) -> (b, c, s)
        h = paddle.sin(self.fc1(self._causal_pad(h)))  # same activation note as FeedFroward
        h = self.fc2(self.dr(self._causal_pad(h)))
        return h.transpose([0, 2, 1])                  # back to (b, s, c)


class CvfBlock(paddle.nn.Layer):
    """One model stage: a causal-conv sub-block followed by a feed-forward
    sub-block, each combined with its input via an augmented add."""

    def __init__(self, hidden_dim, k_size):
        super(CvfBlock, self).__init__()
        self.cvf = CVFroward(hidden_dim, k_size)
        self.feed = FeedFroward(hidden_dim)

    def forward(self, ll):
        # Residual-style adds. NOTE(review): `+=` on a paddle tensor may be
        # an in-place update that mutates the caller's tensor — confirm this
        # is intended (CVF.forward adds yet another residual on top).
        ll += self.cvf(ll)
        ll += self.feed(ll)
        return ll


class CVF(paddle.nn.Layer):
    """Token model: embedding -> stack of CvfBlocks (each wrapped in an extra
    residual add) -> sum-pool over the sequence axis -> vocab projection."""

    def __init__(self, voc_size, hidden_dim, n_layers, k_size):
        super(CVF, self).__init__()

        # Token embedding table: voc_size x hidden_dim.
        self.emx = paddle.nn.Embedding(voc_size, hidden_dim)
        self.cv_list = paddle.nn.LayerList(
            [CvfBlock(hidden_dim, k_size) for _ in range(n_layers)])

        # Final projection back to vocabulary logits.
        self.out_layer = paddle.nn.Linear(hidden_dim, voc_size)

    def forward(self, x):
        # x: int64 token ids, presumably (batch, seq) — TODO confirm.
        x = self.emx(x)
        for cv in self.cv_list:
            # NOTE(review): CvfBlock already applies residual adds internally,
            # so this `+=` stacks a second residual per block — confirm intended.
            x += cv(x)

        # paddle.sum over axis 1 collapses (batch, seq, hidden) to
        # (batch, hidden), so the output is (batch, voc_size): one logit
        # vector per sequence, not per position. NOTE(review): the training
        # driver below passes a per-position label slice to the loss — verify
        # the shapes actually match.
        x = self.out_layer(paddle.sum(x,1))



        return x
def sequence_clarity_paddle(x, short=2, long=10):
    """Score how "peaked" (clear) the predicted distributions in ``x`` are.

    Each distribution is softmaxed and sorted ascending along the last axis,
    then compared by cosine similarity to a reference profile: geometric
    weights ``short**k`` for ``k = 0..long-1`` normalized to sum to 1, left
    zero-padded to the vocabulary size. The mean similarity over all
    positions is returned; higher means more peaked.

    :param x: logits; last axis is the vocabulary (original doc: [b, s, vos_size])
    :param short: base of the geometric reference profile
    :param long: number of non-zero entries in the reference profile
    :return: scalar tensor in [-1, 1] (cosine similarity mean)
    """
    score_sort = paddle.sort(paddle.nn.functional.softmax(x, -1), -1)
    # Build the reference profile in float32 explicitly. The original used an
    # integer paddle.arange and Python's builtin sum() over a tensor, which is
    # slow and dtype-fragile (integer-division semantics vary across paddle
    # versions, and the float32 zeros below require a float32 profile anyway).
    powers = short ** paddle.arange(long, dtype='float32')
    basic_score = powers / paddle.sum(powers)
    # Left-pad with zeros up to the vocabulary size.
    basic_score = paddle.concat([paddle.zeros([score_sort.shape[-1] - long]), basic_score])
    # Broadcast the profile across the leading (batch, seq) dims of score_sort.
    basic_score = paddle.zeros([score_sort.shape[0], score_sort.shape[1], 1]) + basic_score
    # "ming_que_du" (明确度) = clarity/definiteness.
    ming_que_du = paddle.mean(paddle.nn.functional.cosine_similarity(score_sort, basic_score, -1))
    return ming_que_du


class SequenceClarityLoss(paddle.nn.Layer):
    """Cross-entropy loss down-weighted by the sequence-clarity score.

    The multiplier is ``1 - clarity``: the more peaked the predictions, the
    smaller the effective loss. Returns ``(weighted_loss, clarity)``.
    """

    def __init__(self):
        super(SequenceClarityLoss, self).__init__()
        self.lss = paddle.nn.CrossEntropyLoss()

    def forward(self, x, l):
        clarity = sequence_clarity_paddle(x)
        weighted = self.lss(x, l) * (1 - clarity)
        return weighted, clarity



def emheading_train_and_sample():
    """Smoke-train a small CVF model on random token data, then run it in
    eval mode over the same data.

    Demo/driver code only; prints progress via tqdm and returns nothing.
    """
    print("*" * 100)

    # voc_size=1235, hidden_dim=128, n_layers=6, k_size=8
    net = CVF(1235, 128, 6, 8)

    # Two random token sequences of length 5120 (ids drawn from [1, 1140)).
    x = paddle.to_tensor([
        np.random.randint(1, 1140, 5120),
        np.random.randint(1, 1140, 5120),
    ], dtype='int64')

    loss_f = SequenceClarityLoss()
    opt = paddle.optimizer.Adam(parameters=net.parameters(), learning_rate=0.0003)

    bar = tqdm(range(1260))
    for epoch in bar:
        # Slide over the sequences in chunks of 256 tokens; the input is the
        # chunk shifted one position left of the target (next-token setup).
        # NOTE(review): CVF sum-pools the sequence, so the logits are
        # (batch, voc_size) while the label slice is per-position — verify
        # these shapes actually match what CrossEntropyLoss expects.
        for i in range(0, x.shape[-1], 256):
            j = i + 256

            outl = net(x[:, i + 1:j - 1])
            loss, score = loss_f(outl, x[:, i + 2:j])

            bar.set_description("epoch---{}--loss--{:.5f}---mqd---{:.5f}".format(epoch, loss.item(), score.item()))

            loss.backward()
        # NOTE(review): step/clear_grad sit OUTSIDE the chunk loop, so
        # gradients accumulate across every chunk of an epoch before a single
        # update — confirm this is intended; otherwise move them inside.
        opt.step()
        opt.clear_grad()

    # Decode / sanity-check pass. CVF.forward takes a single token batch and
    # returns one tensor; the original three-argument call with a recurrent
    # state was left over from a different model (ReNetAll) and would raise
    # a TypeError here.
    net.eval()
    with paddle.no_grad():
        for i in range(0, x.shape[-1], 256):
            j = i + 256
            _ = net(x[:, i:j - 1])


# 进行模型训练和预测
# if __name__ == '__main__':
#     emheading_train_and_sample()


