import pandas as pd
from matplotlib import pyplot as plt
from tqdm import tqdm
import numpy as np
import paddle


# Design note: pooling is used here in place of MoE or attention layers.
# To keep enough representational capacity along the sequence (s) dimension,
# several pooled projections of the input are produced and then pooled again.
# The pooling grid has both "row" and "column" layers, and the number of each
# is fixed up front (row_layers / col_layers in CvFoMaxPool below).
# Both the row-wise and column-wise pooling operate along the s dimension.

class CvFoMaxPool(paddle.nn.Layer):
    """Pool a (batch, seq, hidden) sequence down to (batch, 1, hidden).

    Each of the ``row_layers`` rows tiles the input ``col_layers`` times along
    the feature axis, projects it, and max-pools the feature axis back to
    ``hidden_dim``.  The row outputs are then concatenated along the sequence
    axis and max-pooled over it, leaving one vector per batch element.

    Args:
        hidden_dim: feature width of the input and output.
        row_layers: number of parallel linear+pool rows.
        col_layers: feature-tiling factor (and pooling kernel) per row.
        show_debug: when True (default, matching the original behaviour),
            render the pooled output with matplotlib on every forward pass.
    """

    def __init__(self, hidden_dim, row_layers, col_layers, show_debug=True):
        super(CvFoMaxPool, self).__init__()

        # BUG FIX: the original kept the sub-layers in a plain Python list,
        # so their parameters were never registered with the parent Layer and
        # the optimizer never updated them.  LayerList registers them.
        self.col_list = paddle.nn.LayerList(
            [paddle.nn.Linear(col_layers * hidden_dim, hidden_dim * col_layers, bias_attr=False)
             for _ in range(row_layers)])
        self.col_layers = col_layers
        self.row_layers = row_layers
        self.show_debug = show_debug

    def forward(self, x):
        b, s, h = x.shape
        one_list = []
        for one_col in self.col_list:
            # Tile the features col_layers times, project, then max-pool the
            # feature axis back down to hidden_dim.
            one = one_col(paddle.concat([x] * self.col_layers, -1))
            one = paddle.nn.functional.max_pool1d(one, self.col_layers)
            one_list.append(one)
        # Concatenate the row outputs along the sequence axis and max-pool
        # that axis away entirely, yielding shape (batch, 1, hidden_dim).
        out = paddle.nn.functional.max_pool1d(paddle.concat(one_list, 1).transpose([0, 2, 1]),
                                              s * self.row_layers).transpose([0, 2, 1])

        if self.show_debug:
            # Live visualization of the pooled features (debug aid).
            plt.imshow(out[:, 0].numpy())
            plt.pause(0.01)
            plt.clf()

        return out


class CvFoBlock(paddle.nn.Layer):
    """Embed a context sequence plus one extra token, pool the context with
    CvFoMaxPool, and emit two rows of vocabulary logits per sample (used by
    the trainer as the true/false flag and the next-number prediction).

    Args:
        voc_size: vocabulary size (embedding rows and output classes).
        hidden_dim: embedding / hidden width.
        voc_id: vocabulary list; used to map the digit characters '0'-'9'
            to their token ids for the digit-based positional table.
    """

    def __init__(self, voc_size, hidden_dim, voc_id):
        super(CvFoBlock, self).__init__()
        # Positional table: position p in [0, 1000) is represented by the
        # vocabulary ids of its three zero-padded decimal digits ("007" etc.),
        # so positional embeddings are sums of digit embeddings.
        self.pos = paddle.to_tensor(np.array([[voc_id.index(j) for j in str(i).zfill(3)] for i in range(1000)])).astype(
            "int64")

        # NOTE(review): padding_idx=-1 selects the LAST embedding row as the
        # padding slot — confirm that is the intended padding id.
        self.em = paddle.nn.Embedding(voc_size, hidden_dim, padding_idx=-1)
        self.start = paddle.nn.Linear(hidden_dim, hidden_dim, bias_attr=False)  # currently unused in forward
        self.end = paddle.nn.Linear(hidden_dim, hidden_dim, bias_attr=False)
        self.map = paddle.nn.Linear(hidden_dim, hidden_dim, bias_attr=False)
        self.llm = paddle.nn.Linear(hidden_dim * 2, hidden_dim, bias_attr=False)
        self.sx_pos = paddle.nn.Linear(hidden_dim * 2, hidden_dim, bias_attr=False)
        self.st_pos = paddle.nn.Linear(hidden_dim * 2, hidden_dim, bias_attr=False)
        self.end_flag = paddle.nn.Linear(hidden_dim * 2, hidden_dim, bias_attr=False)

        self.layer_nor = paddle.nn.LayerNorm(hidden_dim)  # currently unused in forward
        self.out_layer = paddle.nn.Linear(hidden_dim, voc_size, bias_attr=False)

        self.max_pool = CvFoMaxPool(hidden_dim, 3, 5)

    def forward(self, sx, st):
        # Digit positional embeddings for every sx position plus one extra
        # slot for st, broadcast across the batch.
        pos = paddle.sum(self.em(self.pos[:sx.shape[1] + 1]), 1).unsqueeze(0)
        pos = paddle.zeros([sx.shape[0], pos.shape[1], pos.shape[2]]) + pos
        sx = self.sx_pos(paddle.concat([self.em(sx), pos[:, :-1]], -1))
        sx = self.max_pool(sx)  # pooled context, (batch, 1, hidden)

        # BUG FIX: the original applied self.sx_pos here as well, leaving
        # self.st_pos defined but never used; st now gets its own projection,
        # matching the sx/sx_pos <-> st/st_pos naming scheme.
        start = self.st_pos(paddle.concat([self.em(st), pos[:, -1:]], -1))

        map_data = self.map(sx)

        end = self.end(sx)

        llm = self.llm(paddle.concat([start, map_data], -1))

        end_flag = self.end_flag(paddle.concat([llm, end], -1))

        # Two logit rows per sample (flag position and next-number position).
        out = self.out_layer(paddle.concat([end_flag, llm], 1))
        return out


def gen_data():
    """Generate the counting-task dataset and pickle it to disk.

    Each sample reads ``<|aos|> start <|bos|> target <|cos|> start..current``
    followed by a ``<|true|>``/``<|false|>`` flag and the next number; the
    flag is ``<|true|>`` exactly when that next number reaches the target.
    Every sample is right-padded with ``<|p_k|>`` tokens so the context
    (everything but the final 3 tokens) has length 107, i.e. 110 tokens total.

    Writes ``voc_data.pandas_pickle`` (vocabulary list, ``<|pad|>`` first)
    and ``train_data.pandas_pickle`` (token-id rows, ids starting at 1).
    """
    data_list = []
    for j in range(2, 100):
        for i in range(0, 100, j):
            jj = i + j  # counting target for this sample group
            for ii in range(i, jj):
                sample = ["<|aos|>", str(i), "<|bos|>", str(jj), "<|cos|>"]
                sample += [str(jjj) for jjj in range(i, ii + 1)]
                # BUG FIX: the original tested str(ii) == str(jj), which is
                # always False since ii < jj, so <|true|> was never emitted.
                # The flag marks whether the next number (ii + 1) reaches the
                # target jj.
                flag = "<|true|>" if ii + 1 == jj else "<|false|>"
                sample += [flag, str(ii + 1)]
                data_list.append(sample)
    # Pad the context portion up to 107 tokens, keeping the last 3 tokens
    # (last counted number, flag, next number) at the end.
    data_list = [i[:-3] + ["<|p_{}|>".format(j) for j in range(len(i[:-3]), 107)] + i[-3:] for i in data_list]
    # sorted() makes the vocabulary (and thus the id mapping) deterministic
    # across runs; the original set order varied with hash randomization.
    voc = ["<|pad|>"] + sorted(set(np.hstack(data_list).tolist()))
    pd.to_pickle(voc, "voc_data.pandas_pickle")
    # Token ids start at 1; id 0 is implicitly reserved.
    voc_dict = {k: v for v, k in enumerate(voc, 1)}
    pd.to_pickle([[voc_dict[j] for j in i] for i in data_list], "train_data.pandas_pickle")


def train_data():
    """Train CvFoBlock on the pickled counting dataset.

    Loads the vocabulary and token-id rows produced by ``gen_data()``, then
    runs 700 epochs of Adam with cross-entropy loss over the two target
    tokens (flag + next number), showing loss/accuracy in the tqdm bar.
    """
    vocab = pd.read_pickle("voc_data.pandas_pickle")
    num_classes = len(vocab) + 1
    model = CvFoBlock(num_classes, 128, vocab)
    criterion = paddle.nn.CrossEntropyLoss(ignore_index=-1)
    optimizer = paddle.optimizer.Adam(learning_rate=0.0001, parameters=model.parameters())
    samples = pd.read_pickle("train_data.pandas_pickle")
    np.random.shuffle(samples)
    batch_size = 10
    progress = tqdm(range(700))
    plt.ion()  # interactive mode for the live debug plots in CvFoMaxPool
    for epoch in progress:
        for begin in range(0, len(samples), batch_size):
            batch = paddle.to_tensor(samples[begin:begin + batch_size]).astype("int64")
            # Row layout: [...context..., st token, flag, next number].
            label = batch[:, -2:]
            st_token = batch[:, -3:-2]
            context = batch[:, :-3]
            logits = model(context, st_token)
            loss = criterion(logits, label)
            acc = paddle.metric.accuracy(logits.reshape([-1, num_classes]), label.reshape([-1, 1]))
            progress.set_description(
                "epoch___{}_loss___{:.5f}_acc__{:.5f}".format(epoch, loss.item(), acc.item()))
            optimizer.clear_grad()
            loss.backward()
            optimizer.step()


if __name__ == '__main__':
    # Run gen_data() once first to create the two pickle files, then train.
    # gen_data()
    train_data()