import json

import pandas as pd
from matplotlib import pyplot as plt
from tqdm import tqdm
import numpy as np
import paddle


# Design notes (translated): pooling is used here in place of MoE or attention.
# To keep enough feature expressiveness along the sequence (s) dimension,
# several pooled outputs can be produced and then pooled again.
# The pooling stages can be arranged horizontally ("row") or vertically ("col");
# both counts are fixed up front when the network is designed.
# Both horizontal and vertical pooling operate along the s dimension.

class CvFoMaxPool(paddle.nn.Layer):
    """Cross-view max-pooling layer.

    Projects the input through ``row_layers`` column linear maps (each
    widened by ``col_layers``), max-pools each projection back down to
    ``hidden_dim``, then max-pools all row outputs along the sequence
    dimension down to a single position per batch.
    """

    def __init__(self, hidden_dim, row_layers, col_layers):
        super(CvFoMaxPool, self).__init__()

        # BUG FIX: the linear sub-layers were previously held in a plain
        # Python list, so their parameters were invisible to
        # net.parameters() (never trained) and to state_dict() (never
        # saved).  LayerList registers them as proper sub-layers.
        self.col_list = paddle.nn.LayerList(
            [paddle.nn.Linear(col_layers * hidden_dim, hidden_dim * col_layers, bias_attr=False)
             for _ in range(row_layers)])
        self.col_layers = col_layers
        self.row_layers = row_layers

    def forward(self, x):
        # assumes x is (batch, seq, hidden) -- TODO confirm against caller
        b, s, h = x.shape
        one_list = []
        for one_col in self.col_list:
            # Widen the feature dim col_layers times, project, then
            # max-pool (kernel = col_layers) back down to hidden_dim.
            one = one_col(paddle.concat([x] * self.col_layers, -1))
            one = paddle.nn.functional.max_pool1d(one, self.col_layers)
            one_list.append(one)
        # Stack row outputs along the sequence axis and max-pool the whole
        # (s * row_layers) extent down to one position: (b, 1, hidden).
        out = paddle.nn.functional.max_pool1d(paddle.concat(one_list, 1).transpose([0, 2, 1]),
                                              s * self.row_layers).transpose([0, 2, 1])

        return out


class CvFoBlock(paddle.nn.Layer):
    """GRU grid block with max-pooling.

    Embeds token ids, runs ``col_layers * row_layers`` GRUs whose outputs
    are max-pooled in groups of ``row_layers``, adds a residual linear
    projection, and emits vocabulary logits.
    """

    def __init__(self, voc_size, hidden_dim, row_layers, col_layers):
        super(CvFoBlock, self).__init__()

        self.em = paddle.nn.Embedding(voc_size, hidden_dim)
        # BUG FIX: a plain Python list hides the GRUs from
        # net.parameters() and state_dict(); LayerList registers them so
        # they are actually trained and checkpointed.
        self.gru_row_list = paddle.nn.LayerList(
            [paddle.nn.GRU(hidden_dim, hidden_dim, num_layers=1)
             for _ in range(col_layers * row_layers)])
        self.out_llm = paddle.nn.Linear(hidden_dim, hidden_dim, bias_attr=False)

        self.out_layer = paddle.nn.Linear(hidden_dim, voc_size, bias_attr=False)
        self.row = row_layers
        self.layer_nor = paddle.nn.LayerNorm(hidden_dim)

    def forward(self, sx, st):
        """sx: int64 token ids (batch, seq); st: initial GRU state.

        Returns (state, logits); logits are (batch, seq, voc_size).
        """
        sx = self.em(sx)
        ost_list = []
        osx_list = []
        for gru_row in self.gru_row_list:
            if len(osx_list) < self.row:
                # Fewer than `row` pending outputs: feed the embedded
                # input directly (also right after a pool reset below).
                osx, ost = gru_row(sx, st)
                ost_list.append(ost.transpose([2, 1, 0]))
                osx_list.append(osx)
            else:
                # Otherwise consume the oldest pending output (FIFO).
                osx, ost = gru_row(osx_list.pop(0), st)
                ost_list.append(ost.transpose([2, 1, 0]))
                osx_list.append(osx)

            if len(ost_list) == self.row:
                # Pool the first `row` states together.  NOTE(review):
                # this fires only once because ost_list keeps growing
                # afterwards, and the pooled `ost` is never used since
                # the method returns the incoming `st` -- confirm intent.
                ost = paddle.nn.functional.max_pool1d(paddle.concat(ost_list, -1), self.row).transpose([2, 1, 0])
                osx_list = []

        # Max-pool the surviving `row` sequence outputs feature-wise back
        # to the embedded input's shape.
        sxo = paddle.nn.functional.max_pool1d(
            paddle.concat([i.unsqueeze(-1) for i in osx_list], -1).reshape([sx.shape[0], -1, self.row]), self.row)
        sx = sxo.reshape(sx.shape)

        # Residual linear refinement before the output head.
        sx += self.out_llm(sx)

        out = self.out_layer(self.layer_nor(sx))
        return st, out


def _pad_tokens(samples, p_list):
    """Split each string into characters, prepend the <|aos|> marker and
    right-pad with positional filler tokens up to len(p_list)."""
    padded = []
    for sample in tqdm(samples):
        tokens = ["<|aos|>"] + list(sample)
        tokens += p_list[len(tokens):]
        padded.append(tokens)
    return padded


def gen_data():
    """Build char-level datasets from {train,test,dev}_2.json (reading
    comprehension format) and pickle the id sequences plus vocabulary.

    Writes voc_data / train_data / dev_data / test_data pickles.
    """
    # json.load reads straight from the file object (was read() + loads).
    with open("train_2.json", "r", encoding="utf-8") as f:
        train_data = json.load(f)
    with open("test_2.json", "r", encoding="utf-8") as f:
        test_data = json.load(f)
    with open("dev_2.json", "r", encoding="utf-8") as f:
        dev_data = json.load(f)

    def flatten(data):
        # One flat string per question: content + question + choices + answer.
        return np.hstack([[i["Content"] + one["Question"] + "".join(one["Choices"]) + one["Answer"]
                           for one in i["Questions"]] for i in tqdm(data)])

    dev_data = flatten(dev_data)
    train_data = flatten(train_data)
    test_data = flatten(test_data)

    # Length-distribution analysis (previously inlined here as plotting
    # code) showed information density drops past length 1556, so longer
    # samples are discarded to reduce training cost.
    train_data = [i for i in train_data if len(i) < 1556]
    test_data = [i for i in test_data if len(i) < 1556]
    dev_data = [i for i in dev_data if len(i) < 1556]

    # Pad every sample to a fixed length with positional filler tokens so
    # training can randomly truncate suffixes: the data augmentation is
    # done outside the network, unlike Transformer/RNN-style setups.
    p_list = ["<|p_{}|>".format(i) for i in range(1560)]
    train_data_list = _pad_tokens(train_data, p_list)
    test_data_list = _pad_tokens(test_data, p_list)
    dev_data_list = _pad_tokens(dev_data, p_list)

    # Vocabulary: <|pad|> first (id 0), then every token seen in any split.
    voc = ["<|pad|>"] + sorted(
        set(np.hstack(test_data_list)) | set(np.hstack(dev_data_list)) | set(np.hstack(train_data_list)))
    voc_dict = {k: v for v, k in enumerate(voc)}
    pd.to_pickle(voc, "voc_data.pandas_pickle")
    test_data_list = [[voc_dict[j] for j in i] for i in tqdm(test_data_list)]
    train_data_list = [[voc_dict[j] for j in i] for i in tqdm(train_data_list)]
    dev_data_list = [[voc_dict[j] for j in i] for i in tqdm(dev_data_list)]

    pd.to_pickle(train_data_list, "train_data.pandas_pickle")
    pd.to_pickle(dev_data_list, "dev_data.pandas_pickle")
    pd.to_pickle(test_data_list, "test_data.pandas_pickle")


def gen_data1():
    """Build char-level datasets from JSON-lines summarization files
    (train.json / dev.json with "content" and "summary" fields) and
    pickle the id sequences plus vocabulary."""
    with open("train.json", "r", encoding="utf-8") as f:
        train_data = [json.loads(i) for i in tqdm(f.readlines())]

    with open("dev.json", "r", encoding="utf-8") as f:
        dev_data = [json.loads(i) for i in tqdm(f.readlines())]

    # <|aos|> starts the content; <|bos|> separates content from summary.
    dev_data = [["<|aos|>"] + list(i["content"]) + ["<|bos|>"] + list(i["summary"]) for i in tqdm(dev_data)]
    train_data = [["<|aos|>"] + list(i["content"]) + ["<|bos|>"] + list(i["summary"]) for i in tqdm(train_data)]

    # Length analysis showed diminishing information past 136 tokens, so
    # longer samples are dropped to reduce training cost.  (The old
    # comment mentioning 1556 referred to a different dataset.)
    train_data = [i for i in train_data if len(i) < 136]
    dev_data = [i for i in dev_data if len(i) < 136]

    # Pad to a fixed length with positional filler tokens; training later
    # truncates random suffixes (augmentation done outside the network).
    p_list = ["<|p_{}|>".format(i) for i in range(136)]
    # BUG FIX: build fresh padded lists instead of extending the source
    # sample lists in place with `one += ...`.
    train_data_list = [one + p_list[len(one):] for one in tqdm(train_data)]
    dev_data_list = [one + p_list[len(one):] for one in tqdm(dev_data)]

    # Vocabulary: <|pad|> first (id 0), then every token from both splits.
    voc = ["<|pad|>"] + sorted(set(np.hstack(dev_data_list)) | set(np.hstack(train_data_list)))
    voc_dict = {k: v for v, k in enumerate(voc)}
    pd.to_pickle(voc, "voc_data.pandas_pickle")

    train_data_list = [[voc_dict[j] for j in i] for i in tqdm(train_data_list)]
    dev_data_list = [[voc_dict[j] for j in i] for i in tqdm(dev_data_list)]

    pd.to_pickle(train_data_list, "train_data.pandas_pickle")
    pd.to_pickle(dev_data_list, "dev_data.pandas_pickle")


def gen_data2():
    """Build a couplet dataset: first line + <|bos|> + second line,
    length-plotted, length-filtered, padded, then pickled along with
    the vocabulary."""
    with open("fixed_couplets_in.txt", "r", encoding="utf-8") as f:
        first_lines = f.readlines()

    with open("fixed_couplets_out.txt", "r", encoding="utf-8") as f:
        second_lines = f.readlines()

    first_lines = [line.strip().split() for line in tqdm(first_lines)]
    second_lines = [line.strip().split() for line in tqdm(second_lines)]

    # <|aos|> starts the first line; <|bos|> separates the two lines.
    pairs = [["<|aos|>"] + up + ["<|bos|>"] + down
             for up, down in tqdm(zip(first_lines, second_lines))]

    # Plot the sample-length histogram and its cumulative ratio curve
    # (blocking; used to pick the length cutoff below).
    lengths = [len(p) for p in pairs]
    uniq = sorted(set(lengths))
    counts = [lengths.count(v) for v in uniq]
    plt.plot(uniq, counts)
    plt.show()
    cumulative = [
        sum([lengths.count(v) for v in uniq if v <= cap]) / sum(counts)
        for cap in tqdm(uniq)]
    plt.plot(uniq, cumulative)
    plt.show()

    # Keep only samples shorter than 27 tokens.
    pairs = [p for p in pairs if len(p) < 27]

    # Pad with positional filler tokens; training truncates random
    # suffixes later (augmentation happens outside the network).
    fillers = ["<|p_{}|>".format(i) for i in range(27)]
    padded = []
    for sample in tqdm(pairs):
        sample += fillers[len(sample):]
        padded.append(sample)

    # Vocabulary: <|pad|> first (id 0), then every token seen.
    vocab = ["<|pad|>"] + sorted(set(np.hstack(padded)))
    token_to_id = {tok: idx for idx, tok in enumerate(vocab)}
    pd.to_pickle(vocab, "voc_data.pandas_pickle")

    id_rows = [[token_to_id[tok] for tok in sample] for sample in tqdm(padded)]

    pd.to_pickle(id_rows, "train_data.pandas_pickle")


def train_data():
    """Train CvFoBlock on the pickled token dataset, saving one
    checkpoint per epoch.

    Expects voc_data / train_data pickles produced by one of the
    gen_data* functions.
    """
    voc_id = pd.read_pickle("voc_data.pandas_pickle")
    net = CvFoBlock(len(voc_id) + 1, 256, 5, 3)
    # ignore_index=-1: no label currently uses -1; kept for parity.
    loss_func = paddle.nn.CrossEntropyLoss(ignore_index=-1)
    opt = paddle.optimizer.Adam(learning_rate=0.0001, parameters=net.parameters())
    bar = tqdm(range(1700))
    batch_size = 1200
    data_set = pd.read_pickle("train_data.pandas_pickle")

    # Running accuracy over the whole run (grows across epochs).
    acc_list = []
    for epoch in bar:
        np.random.shuffle(data_set)
        for i in range(0, len(data_set), batch_size):
            j = i + batch_size

            data = paddle.to_tensor(data_set[i:j]).astype("int64")

            # NOTE(review): input/label look swapped relative to the usual
            # next-token objective (input = data[:, :-1], label =
            # data[:, 1:]); preserved as-is -- confirm intent.
            label = data[:, :-1]
            input_data = data[:, 1:]

            # Zero initial GRU state: (num_layers=1, batch, hidden=256).
            state = paddle.zeros([1, label.shape[0], 256])

            state, out = net(input_data, state)
            loss = loss_func(out, label)
            acc = paddle.metric.accuracy(out.reshape([-1, len(voc_id) + 1]), label.reshape([-1, 1]))
            acc_list.append(acc.item())
            bar.set_description(
                "epoch___{}___step___{}_loss___{:.5f}_acc__{:.5f}__{:.5f}".format(epoch, j, loss.item(),
                                                                                  np.mean(acc_list), (paddle.argmax(out,
                                                                                                                    -1) == label).numpy().mean()))
            opt.clear_grad()
            loss.backward()
            opt.step()
            # (removed a dead `np.mean(acc_list)` expression and the
            # unused `avg_list` accumulator)

        paddle.save(net.state_dict(), "model_{}.paddle".format(epoch))


def eval_data():
    """Evaluate a trained CvFoBlock on the dev pickle by next-token
    accuracy over growing prefixes of each batch."""
    voc_id = pd.read_pickle("voc_data.pandas_pickle")
    # NOTE(review): (2, 2) grid here vs (5, 3) in train_data(); a
    # checkpoint from train_data() will not match -- confirm config.
    net = CvFoBlock(len(voc_id) + 1, 256, 2, 2)
    loss_func = paddle.nn.CrossEntropyLoss()
    net.load_dict(paddle.load("model.paddle"))
    net.eval()

    bar = tqdm(range(1))
    batch_size = 20
    data_set = pd.read_pickle("dev_data.pandas_pickle")

    acc_list = []
    for epoch in bar:
        np.random.shuffle(data_set)
        for i in range(0, len(data_set), batch_size):
            j = i + batch_size

            data = paddle.to_tensor(data_set[i:j]).astype("int64")

            for data_len in range(3, len(data_set[0]) + 1):
                # BUG FIX: `data` itself was overwritten with the slice,
                # so every later iteration re-sliced the already-truncated
                # tensor and the prefix never grew past 3 tokens.
                window = data[:, :data_len]
                label = window[:, -1:]
                input_data = window[:, :-1]
                # BUG FIX: forward() requires (sx, st) and returns
                # (state, logits); the old `out = net(input_data)` call
                # both omitted the state and used the tuple as logits.
                state = paddle.zeros([1, input_data.shape[0], 256])
                _, out = net(input_data, state)
                # Score only the final position against the single-token
                # label (full-sequence logits would not match its shape).
                last = out[:, -1:]
                loss = loss_func(last, label)
                acc = paddle.metric.accuracy(last.reshape([-1, len(voc_id) + 1]), label.reshape([-1, 1]))
                acc_list.append(acc.item())
                bar.set_description(
                    "epoch___{}___step___{}_loss___{:.5f}_acc__{:.5f}".format(epoch, j, loss.item(), np.mean(acc_list)))
            acc_list = []


if __name__ == '__main__':
    # Run one of the dataset builders once to produce the pickles that
    # train_data() loads, then train:
    # gen_data2()
    # gen_data1()
    # gen_data()
    train_data()
