import paddle
import numpy as np
import pandas as pd
from tqdm import tqdm


class FeedFroward(paddle.nn.Layer):
    """Two-layer bottleneck feed-forward block with a sine activation.

    Projects the input down to ``hidden_dim // 2``, applies ``sin``, then
    projects back up to ``hidden_dim`` with dropout in between. Neither
    linear layer uses a bias term.
    """

    def __init__(self, hidden_dim):
        super(FeedFroward, self).__init__()
        half = hidden_dim // 2
        # Down- and up-projection, both bias-free.
        self.fc1 = paddle.nn.Linear(hidden_dim, half, bias_attr=False)
        self.fc2 = paddle.nn.Linear(half, hidden_dim, bias_attr=False)
        self.dr = paddle.nn.Dropout(0.2)

    def forward(self, feed_x):
        # sin and cos behave the same here; silu and tanh are the next best.
        hidden = paddle.sin(self.fc1(feed_x))
        return self.fc2(self.dr(hidden))


class CVFroward(paddle.nn.Layer):
    """Causal 1-D convolutional feed-forward block.

    Same bottleneck shape as ``FeedFroward`` but built from Conv1D layers.
    Before each convolution the sequence is left-padded with ``k_size - 1``
    zeros, so the output length matches the input length and each position
    only sees current and earlier positions (causal padding).
    """

    def __init__(self, hidden_dim, k_size):
        super(CVFroward, self).__init__()
        self.fc1 = paddle.nn.Conv1D(in_channels=hidden_dim, out_channels=hidden_dim // 2, bias_attr=False,
                                    kernel_size=k_size, stride=1)
        self.fc2 = paddle.nn.Conv1D(in_channels=hidden_dim // 2, out_channels=hidden_dim, bias_attr=False,
                                    kernel_size=k_size, stride=1)
        # Amount of left padding needed to keep the sequence length constant.
        self.pad = k_size - 1
        self.dr = paddle.nn.Dropout(0.2)

    def _causal_pad(self, t):
        # Prepend `self.pad` zero frames along the time axis (last axis here).
        zeros = paddle.zeros(shape=[t.shape[0], t.shape[1], self.pad], dtype=t.dtype)
        return paddle.concat([zeros, t], -1)

    def forward(self, feed_x):
        # [batch, time, channels] -> [batch, channels, time] for Conv1D.
        feed_x = feed_x.transpose([0, 2, 1])
        feed_x = self.fc1(self._causal_pad(feed_x))
        # sin and cos behave the same here; silu and tanh are the next best.
        feed_x = paddle.sin(feed_x)
        feed_x = self.fc2(self.dr(self._causal_pad(feed_x)))
        # Back to [batch, time, channels].
        return feed_x.transpose([0, 2, 1])


class CvfBlock(paddle.nn.Layer):
    """Residual block: causal-conv feed-forward, then dense feed-forward.

    Each sub-layer is applied with a residual (skip) connection around it.
    """

    def __init__(self, hidden_dim, k_size):
        super(CvfBlock, self).__init__()
        self.cvf = CVFroward(hidden_dim, k_size)
        self.feed = FeedFroward(hidden_dim)

    def forward(self, ll):
        # Out-of-place addition instead of `ll += ...`: augmented assignment
        # on a paddle Tensor may update it in place, which would mutate the
        # caller's tensor. CVL.forward feeds the SAME input tensor to several
        # CvfBlocks, so an in-place residual would leak state between them.
        ll = ll + self.cvf(ll)
        ll = ll + self.feed(ll)
        return ll


class CVL(paddle.nn.Layer):
    """Bank of CvfBlocks with kernel sizes 3 .. k_max-1, fused by summation.

    Every block is run on the same input; the per-block outputs are stacked,
    group-normalised (one group per block), summed over the block axis, and
    finally layer-normalised over the feature dimension.
    """

    def __init__(self, hidden_dim, k_max):
        super(CVL, self).__init__()
        kernel_sizes = range(3, k_max)
        self.cvf = paddle.nn.LayerList([CvfBlock(hidden_dim, k) for k in kernel_sizes])
        self.layer_nor = paddle.nn.LayerNorm(hidden_dim)
        # One group per block: each branch is normalised independently.
        n_blocks = k_max - 3
        self.group_nor = paddle.nn.GroupNorm(num_channels=n_blocks, num_groups=n_blocks)

    def forward(self, x):
        branches = [block(x).unsqueeze(-1) for block in self.cvf]
        stacked = paddle.concat(branches, -1)
        # [batch, time, dim, blocks] -> [batch, blocks, time, dim] for GroupNorm.
        normed = self.group_nor(stacked.transpose([0, 3, 1, 2]))
        # Sum over the block axis, then LayerNorm over the feature axis.
        return self.layer_nor(paddle.sum(normed, 1))


class CVFLora(paddle.nn.Layer):
    """Parallel bank of FeedFroward branches, fused the same way as CVL.

    ``layers`` independent feed-forward branches are run on the same input,
    group-normalised (one group per branch), summed, and layer-normalised.
    """

    def __init__(self, hidden_dim, layers=2):
        super(CVFLora, self).__init__()
        self.lora = paddle.nn.LayerList([FeedFroward(hidden_dim) for _ in range(layers)])
        self.layer_nor = paddle.nn.LayerNorm(hidden_dim)
        self.group_nor = paddle.nn.GroupNorm(num_channels=layers, num_groups=layers)

    def forward(self, x):
        branches = [branch(x).unsqueeze(-1) for branch in self.lora]
        stacked = paddle.concat(branches, -1)
        # [batch, time, dim, layers] -> [batch, layers, time, dim] for GroupNorm.
        normed = self.group_nor(stacked.transpose([0, 3, 1, 2]))
        return self.layer_nor(paddle.sum(normed, 1))


class CVF(paddle.nn.Layer):
    """Token-level model: embedding -> (CVL + CVFLora) -> vocabulary logits."""

    def __init__(self, voc_size, hidden_dim, k_size_max):
        super(CVF, self).__init__()
        self.emx = paddle.nn.Embedding(voc_size, hidden_dim)
        self.cvl = CVL(hidden_dim, k_size_max)
        self.lora = CVFLora(hidden_dim, 5)
        self.out_layer = paddle.nn.Linear(hidden_dim, voc_size, bias_attr=False)

    def forward(self, x):
        # x holds integer token ids; look up their embeddings.
        emb = self.emx(x)
        # Two parallel encoders whose outputs are merged additively.
        features = self.cvl(emb) + self.lora(emb)
        return self.out_layer(features)


def sequence_clarity_paddle(x, short=2, long=10):
    """
    Score how "peaked" (clear) the model's output distributions are.

    Builds a reference distribution whose top ``long`` sorted probabilities
    follow a geometric profile with ratio ``short`` (all other positions
    zero), then returns the mean cosine similarity between the ascending
    sorted softmax of ``x`` and that reference. Values near 1 indicate
    confident, concentrated predictions.

    :param x: logits tensor; softmax is taken over the last axis.
              NOTE(review): assumes rank 3 ([batch, seq, vocab]) — the
              broadcast line below hard-codes two leading dims; confirm.
    :param short: geometric ratio of the reference profile.
    :param long: number of trailing (largest) positions given non-zero mass.
                 Assumed to be <= the last-axis size of ``x`` — TODO confirm.
    :return: scalar tensor — mean cosine similarity ("clarity" score).
    """
    # Sort each softmax distribution ascending so the largest probs are last.
    score_sort = paddle.sort(paddle.nn.functional.softmax(x, -1), -1)
    # Normalised geometric profile: short**k / sum(short**k), k = 0..long-1.
    # Note: python's builtin sum() over a paddle tensor reduces its first axis.
    basic_score = short ** (paddle.arange(long)) / sum(short ** paddle.arange(long))
    # Left-pad with zeros so the profile lines up with the ascending sort.
    basic_score = paddle.concat([paddle.zeros([score_sort.shape[-1] - long]), basic_score])
    # Broadcast the reference over the batch and sequence dimensions.
    basic_score = paddle.zeros([score_sort.shape[0], score_sort.shape[1], 1]) + basic_score
    ming_que_du = paddle.mean(paddle.nn.functional.cosine_similarity(score_sort, basic_score, -1))
    return ming_que_du


class SequenceClarityLoss(paddle.nn.Layer):
    """Cross-entropy loss scaled down by the sequence-clarity score.

    The multiplier ``(1 - clarity)`` shrinks the cross-entropy term as the
    predictions become more confident. ``forward`` returns both the weighted
    loss and the raw clarity score.
    """

    def __init__(self):
        super(SequenceClarityLoss, self).__init__()
        self.lss = paddle.nn.CrossEntropyLoss()

    def forward(self, x, losse):
        clarity = sequence_clarity_paddle(x)
        weighted = self.lss(x, losse) * (1 - clarity)
        return weighted, clarity


def emheading_train_and_sample():
    """Smoke-train a CVF model on random token data and pickle the loss curve.

    Builds a CVF model, feeds it two random integer sequences in 256-token
    windows, optimises with Adam against SequenceClarityLoss, and writes the
    recorded losses to "loss_list.pandas_pickle".
    """
    print("*" * 100)

    # net = ReNetAll(12935, 256, 8)
    # Vocab of 1 + 256*12 ids, hidden dim 128, conv kernel sizes up to 8.
    net = CVF(1 + 256 * 12, 128, 8)

    # net.eval()
    # Two random sequences of 5120 token ids drawn from [1, 1000).
    x = paddle.to_tensor([
        np.random.randint(1, 1000, 5120),
        np.random.randint(1, 1000, 5120),
    ], dtype='int64')
    # pos = paddle.arange(1000, 1 + 256 * 12, 8)

    # Simulated training loop.

    # loss_f = paddle.nn.CrossEntropyLoss()
    loss_f = SequenceClarityLoss()

    opt = paddle.optimizer.Adam(parameters=net.parameters(), learning_rate=0.0003)
    bar = tqdm(range(60))
    loss_list = []
    for epoch in bar:

        # Slide over the full sequence in 256-token windows.
        for i in range(0, x.shape[-1], 256):
            j = i + 256

            # Inputs are tokens [i+1, j-1); targets are the same span shifted by one.
            outl = net(x[:, i + 1:j - 1])
            # loss= loss_f(outl, x[:, i + 2:j])
            loss, score = loss_f(outl, x[:, i + 2:j])
            # score = ming_que_du_paddle(outl)
            # loss_score=ming_que_du_paddle_lossable(outl)
            loss_list.append(loss.item())

            bar.set_description("epoch---{}--loss--{:.5f}---mqd---{:.5f}".format(epoch, loss.item(), score.item()))

            loss.backward()
        # NOTE(review): step()/clear_grad() sit OUTSIDE the window loop, so
        # gradients accumulate over the whole epoch and only one optimiser
        # step is taken per epoch — confirm this accumulation is intentional.
        opt.step()
        opt.clear_grad()
    pd.to_pickle({"data": loss_list}, "loss_list.pandas_pickle")


# Perform model training and prediction.
if __name__ == '__main__':
    emheading_train_and_sample()
