import pandas as pd
from matplotlib import pyplot as plt
from tqdm import tqdm
import numpy as np
import paddle


class FeedForward(paddle.nn.Layer):
    """Position-wise feed-forward block: hidden_dim -> hidden_dim // 2 -> hidden_dim.

    Both projections are bias-free; a GELU sits between them.
    """

    def __init__(self, hidden_dim):
        super(FeedForward, self).__init__()
        bottleneck = hidden_dim // 2
        self.fc_one = paddle.nn.Linear(hidden_dim, bottleneck, bias_attr=False)
        self.fc_two = paddle.nn.Linear(bottleneck, hidden_dim, bias_attr=False)
        self.gre = paddle.nn.GELU()

    def forward(self, feed_x):
        # down-project, apply GELU, then project back up
        return self.fc_two(self.gre(self.fc_one(feed_x)))


class CvFoBlock(paddle.nn.Layer):
    """Token-mixing block built from masked causal prefix sums (no attention).

    forward() computes, for every sequence position j, the sum of all
    positions i <= j via an upper-triangular mask, gates the result through
    three learned projections, then sequentially combines it with per-branch
    projections of the raw input and max-pools the branch outputs back down
    to hidden_dim.

    Args:
        hidden_dim: feature width of input and output.
        row_layers: number of chained mixing branches whose outputs are pooled.
    """

    def __init__(self, hidden_dim, row_layers):
        super(CvFoBlock, self).__init__()

        # one bias-free input projection per branch, applied to the raw input
        self.p_next_layer = paddle.nn.LayerList(
            [paddle.nn.Linear(hidden_dim, hidden_dim, bias_attr=False) for _ in range(row_layers)])
        # projection applied before the prefix-sum mixing
        self.p_ctx_next_layer_one = paddle.nn.Linear(hidden_dim, hidden_dim, bias_attr=False)
        # three gating projections applied to the prefix-summed context
        self.p_ctx_next_mask_one = paddle.nn.Linear(hidden_dim, hidden_dim, bias_attr=False)
        self.p_ctx_next_mask_two = paddle.nn.Linear(hidden_dim, hidden_dim, bias_attr=False)
        self.p_ctx_next_mask_thr = paddle.nn.Linear(hidden_dim, hidden_dim, bias_attr=False)


        # one context projection per branch, paired element-wise with p_next_layer
        self.p_ctx_next_layer_three = paddle.nn.LayerList(
            [paddle.nn.Linear(hidden_dim, hidden_dim, bias_attr=False) for _ in range(row_layers)])

        self.layer_nor = paddle.nn.LayerNorm(hidden_dim, bias_attr=False)
        self.feed = FeedForward(hidden_dim)

    def forward(self, sx):
        # assumes sx is (batch, seq, hidden_dim) -- TODO confirm with callers

        p_ctx_next = self.p_ctx_next_layer_one(sx)

        # transpose to (batch, hidden, seq); the masked sums below run over seq
        p_ctx_next = p_ctx_next.transpose([0, 2, 1])
        if p_ctx_next.shape[-1] < 8:
            # Short sequences: one dense masked sum.  triu gives mask[i, j] = 1
            # for j >= i; the broadcast below builds element [..., i, j] = x[..., i],
            # so summing axis -2 yields at position j the sum over all i <= j
            # (a causal cumulative sum).
            mask = paddle.triu(paddle.ones([p_ctx_next.shape[-1], p_ctx_next.shape[-1]]))
            p_ctx_next_mask = (p_ctx_next.unsqueeze([-1]) + paddle.zeros([p_ctx_next.shape[-1]])) * mask.unsqueeze(
                0).unsqueeze(0)
            p_ctx_next_mask = paddle.sum(p_ctx_next_mask, axis=-2).transpose([0, 2, 1])
            # gate: (proj1 * proj2), scaled by the feature-sum of proj3
            p_ctx_next_mask_one=self.p_ctx_next_mask_one(p_ctx_next_mask)
            p_ctx_next_mask_two=self.p_ctx_next_mask_two(p_ctx_next_mask)
            p_ctx_next_mask_thr=self.p_ctx_next_mask_thr(p_ctx_next_mask)
            p_ctx_next_mask =p_ctx_next_mask_one*p_ctx_next_mask_two*paddle.sum(p_ctx_next_mask_thr,-1).unsqueeze([-1])

        else:
            # Long sequences: process in chunks of 8 positions, carrying the
            # running total of all earlier chunks so the sum stays causal.
            p_ctx_next_list = []
            for i in range(0, p_ctx_next.shape[-1], 8):
                j = i + 8
                p_ctx_next_one = p_ctx_next[:, :, i:j]
                if i == 0:
                    p_ctx_next_one_sum = 0
                else:
                    # total of everything before this chunk, broadcast over it
                    p_ctx_next_one_sum = paddle.sum(p_ctx_next[:, :, :i], -1).unsqueeze([-1]).transpose([0, 2, 1])

                mask = paddle.triu(paddle.ones([p_ctx_next_one.shape[-1], p_ctx_next_one.shape[-1]]))

                # same dense masked-cumsum construction as the short branch
                p_ctx_next_mask_one = (p_ctx_next_one.unsqueeze([-1]) + paddle.zeros(
                    [p_ctx_next_one.shape[-1]])) * mask.unsqueeze(
                    0).unsqueeze(0)

                p_ctx_next_mask_one = paddle.sum(p_ctx_next_mask_one, axis=-2).transpose([0, 2, 1])+ p_ctx_next_one_sum

                # NOTE(review): here proj2/proj3 are fed the *output* of proj1,
                # whereas the short-sequence branch feeds them the raw prefix
                # sum -- the two branches are inconsistent; confirm which is
                # intended before unifying.
                p_ctx_next_mask_one = self.p_ctx_next_mask_one(p_ctx_next_mask_one)
                p_ctx_next_mask_two = self.p_ctx_next_mask_two(p_ctx_next_mask_one)
                p_ctx_next_mask_thr = self.p_ctx_next_mask_thr(p_ctx_next_mask_one)
                p_ctx_next_mask_one = p_ctx_next_mask_one * p_ctx_next_mask_two * paddle.sum(p_ctx_next_mask_thr,
                                                                                         -1).unsqueeze([-1])

                p_ctx_next_list.append(p_ctx_next_mask_one)
            # reassemble chunks along the sequence axis -> (batch, seq, hidden)
            p_ctx_next_mask = paddle.concat(p_ctx_next_list, axis=1)

        # multi-dimensional pooling could be applied here
        p_ctx_next_mask_list = []

        # Branches run sequentially: each iteration rewrites p_ctx_next_mask,
        # so branch i+1 consumes branch i's output; every intermediate result
        # is kept, concatenated on the feature axis, and max-pooled back to
        # hidden_dim.
        for p_ctx_next_layer, p_next_layer in zip(self.p_ctx_next_layer_three, self.p_next_layer):
            p_next = p_next_layer(sx)
            p_ctx_next_mask = paddle.sin(p_ctx_next_layer(p_ctx_next_mask) + p_next)
            p_ctx_next_mask += self.feed(p_ctx_next_mask)
            p_ctx_next_mask = self.layer_nor(p_ctx_next_mask)
            p_ctx_next_mask_list.append(p_ctx_next_mask)
        p_ctx_next_mask = paddle.nn.functional.max_pool1d(paddle.concat(p_ctx_next_mask_list, -1),
                                                          len(p_ctx_next_mask_list))
        return p_ctx_next_mask


class CvFo(paddle.nn.Layer):
    """Full model: embedding -> CvFoBlock -> feed-forward -> vocabulary logits.

    NOTE: `col_layers` is accepted for signature compatibility but is not
    consumed anywhere in this module.
    """

    def __init__(self, voc_size, hidden_dim, row_layers, col_layers):
        super(CvFo, self).__init__()
        # token-id embedding table
        self.em = paddle.nn.Embedding(voc_size, hidden_dim)
        # single mixing block ("col_layers" unused here)
        self.cv = CvFoBlock(hidden_dim, row_layers)
        self.feed = FeedForward(hidden_dim)
        # bias-free output projection back to the vocabulary
        self.out_layer = paddle.nn.Linear(hidden_dim, voc_size, bias_attr=False)

    def forward(self, sx):
        """Map integer token ids to per-position vocabulary logits."""
        hidden = self.em(sx)
        hidden = self.cv(hidden)
        hidden = self.feed(hidden)
        return self.out_layer(hidden)


def gen_data2():
    """Build the couplet corpus and vocabulary, then pickle both to disk.

    Reads the paired input/output couplet files, joins each pair into one
    token sequence "<|aos|> ... <|bos|> ...", plots the length distribution,
    keeps sequences shorter than 27 tokens, pads every sequence to width 27
    with positional "<|p_i|>" placeholders, and writes the vocabulary and the
    id-encoded sequences to pandas pickles.
    """
    with open("fixed_couplets_in.txt", "r", encoding="utf-8") as f:
        in_lines = f.readlines()

    with open("fixed_couplets_out.txt", "r", encoding="utf-8") as f:
        out_lines = f.readlines()

    in_tokens = [line.strip().split() for line in tqdm(in_lines)]
    out_tokens = [line.strip().split() for line in tqdm(out_lines)]

    # join each (first-line, second-line) pair into a single token sequence
    samples = [["<|aos|>"] + a + ["<|bos|>"] + b for a, b in tqdm(zip(in_tokens, out_tokens))]

    # Length-distribution statistics: raw counts, then the cumulative share of
    # samples at or below each length.
    lengths = [len(s) for s in samples]
    unique_lengths = sorted(set(lengths))
    counts = [lengths.count(length) for length in unique_lengths]
    plt.plot(unique_lengths, counts)
    plt.show()
    total = sum(counts)
    cumulative_share = [
        sum(lengths.count(length) for length in unique_lengths if length <= bound) / total
        for bound in tqdm(unique_lengths)]
    plt.plot(unique_lengths, cumulative_share)
    plt.show()
    # Information content drops as sequences get longer, so cap the length to
    # reduce training load.  NOTE(review): the original comment mentioned a
    # cutoff of 1556, but the code cuts at 27 -- confirm which is intended.
    samples = [s for s in samples if len(s) < 27]

    # The network does no in-model data augmentation (unlike Transformer/RNN
    # pipelines), so pad every sample up front; training can then randomly
    # truncate the tail to the same effect.
    p_list = ["<|p_{}|>".format(i) for i in range(27)]
    padded = []
    for sample in tqdm(samples):
        padded.append(sample + p_list[len(sample):])

    # vocabulary: reserved pad token first, then all tokens in sorted order
    voc = ["<|pad|>"] + sorted(set(np.hstack(padded)))
    voc_dict = {token: idx for idx, token in enumerate(voc)}
    pd.to_pickle(voc, "voc_data.pandas_pickle")

    encoded = [[voc_dict[token] for token in row] for row in tqdm(padded)]

    pd.to_pickle(encoded, "train_data.pandas_pickle")


def train_data():
    """Train CvFo on the pickled couplet data, saving a checkpoint per epoch.

    Uses a shifted next-token objective: inputs are tokens [:-1] and targets
    are tokens [1:] of each padded row.
    """
    voc_id = pd.read_pickle("voc_data.pandas_pickle")
    net = CvFo(len(voc_id) + 1, 128, 2, 2)
    loss_func = paddle.nn.CrossEntropyLoss(ignore_index=-1)
    opt = paddle.optimizer.Adam(learning_rate=0.0001, parameters=net.parameters())
    bar = tqdm(range(1700))
    batch_size = 1200
    data_set = pd.read_pickle("train_data.pandas_pickle")

    # plt.ion()
    acc_list = []
    for epoch in bar:
        # reshuffle the whole corpus at the start of every epoch
        np.random.shuffle(data_set)
        for start in range(0, len(data_set), batch_size):
            end = start + batch_size

            batch = paddle.to_tensor(data_set[start:end]).astype("int64")

            # next-token setup: predict position t+1 from positions <= t
            label = batch[:, 1:]
            input_data = batch[:, :-1]

            out = net(input_data)
            loss = loss_func(out.reshape([-1, out.shape[-1]]), label.reshape([-1]))
            acc = paddle.metric.accuracy(out.reshape([-1, len(voc_id) + 1]), label.reshape([-1, 1]))
            acc_list.append(acc.item())
            # progress line: running-mean accuracy over all steps so far plus
            # the raw argmax accuracy of the current batch
            bar.set_description(
                "epoch___{}___step___{}_loss___{:.5f}_acc__{:.5f}__{:.5f}".format(
                    epoch, end, loss.item(), np.mean(acc_list),
                    (paddle.argmax(out, -1) == label).numpy().mean()))
            opt.clear_grad()
            loss.backward()
            opt.step()

        paddle.save(net.state_dict(), "model_{}.paddle".format(epoch))


def eval_data():
    """Sample a couplet completion from a trained checkpoint.

    Loads the vocabulary and the epoch-22 checkpoint, picks one random
    training sample, feeds the prefix up to (and including) "<|bos|>" as the
    prompt, and generates up to len(reference) tokens with top-k / top-p
    (nucleus) sampling at temperature ``t``, stopping early when a special
    "<|...|>" token is produced.

    Fixes versus the original:
      * sampling now uses the temperature-scaled probabilities (the original
        computed ``top_t_score`` and then ignored it, sampling uniformly);
      * the nucleus always keeps at least the best candidate, so the choice
        can never be made from an empty list;
      * the sampled token is appended to the running context instead of
        replacing it, so the model sees the full sequence it was trained on;
      * the bare ``except: pass`` that silently swallowed every error is gone;
      * the dead ``state`` variable is removed.
    """
    voc_id = pd.read_pickle("voc_data.pandas_pickle")
    net = CvFo(len(voc_id) + 1, 128, 2, 2)
    net.load_dict(paddle.load("model_22.paddle"))
    net.eval()

    data_set = pd.read_pickle("train_data.pandas_pickle")
    np.random.shuffle(data_set)
    # prompt = everything up to and including the "<|bos|>" separator
    bos_index = data_set[0].index(voc_id.index("<|bos|>")) + 1
    data = data_set[0][:bos_index]
    label = data_set[0][bos_index:]
    # print the reference continuation for visual comparison
    print("".join([voc_id[i] for i in label]))

    for _ in range(len(label)):
        data_in = paddle.to_tensor(data).astype("int64").reshape([1, -1])
        out = net(data_in)

        # logits for the last position only
        logs = out[:, -1:]
        k = 100
        top_k_log, top_k = paddle.topk(logs, k=k)
        top_k = top_k.reshape([-1])
        top_k_log = top_k_log.reshape([-1])
        top_k_score = paddle.nn.functional.softmax(logs, -1).reshape([-1])[top_k]

        # Nucleus (top-p) cut: topk is sorted descending, so the kept set is a
        # prefix of the candidates; keep at least the single best token so the
        # sampling below always has something to draw from.
        p = 0.9
        num_keep = max(int((paddle.cumsum(top_k_score) < p).sum()), 1)
        top_p = top_k[:num_keep]
        top_p_log = top_k_log[:num_keep]

        # Temperature-scaled distribution over the kept tokens; sample WITH
        # these probabilities (bug fix: the original drew uniformly).
        t = 0.3
        top_t_score = paddle.nn.functional.softmax(top_p_log / t)
        token_id = np.random.choice(top_p.numpy().tolist(), p=top_t_score.numpy())

        # bug fix: extend the context instead of replacing it
        data.append(token_id)
        print(voc_id[token_id], end="", flush=True)
        if "<|" in voc_id[token_id]:
            break


if __name__ == '__main__':
    # gen_data2()
    # train_data()
    # smoke-test a single block on random (batch=2, seq=36, dim=256) input
    block = CvFoBlock(256, 2)
    block(paddle.randn([2, 36, 256]))
    # eval_data()
