import pandas as pd
from matplotlib import pyplot as plt
from tqdm import tqdm
import numpy as np
import paddle


class FeedForward(paddle.nn.Layer):
    """Position-wise feed-forward block with a GELU bottleneck.

    Projects ``hidden_dim -> hidden_dim // 2 -> hidden_dim`` through two
    bias-free linear layers with a GELU activation in between.
    """

    def __init__(self, hidden_dim):
        super().__init__()
        half = hidden_dim // 2
        # Attribute names are kept stable: they define the state_dict keys
        # used by CvFo.save_lora()/load_lora().
        self.fc_one = paddle.nn.Linear(hidden_dim, half, bias_attr=False)
        self.fc_two = paddle.nn.Linear(half, hidden_dim, bias_attr=False)
        self.gre = paddle.nn.GELU()

    def forward(self, feed_x):
        """Down-project, apply GELU, then up-project ``feed_x``."""
        return self.fc_two(self.gre(self.fc_one(feed_x)))


class CvFoBlock(paddle.nn.Layer):
    """Sequence-mixing block built on causal (prefix-sum) feature mixing.

    Computes inclusive prefix sums of a projected input along the sequence
    axis (exactly for short sequences, window-by-window for long ones),
    modulates them with projections of a vocabulary context ``p_next``, then
    refines the result through ``row_layers`` sin-activated residual layers
    and max-pools the concatenated layer outputs back to ``hidden_dim``.
    """

    def __init__(self, hidden_dim, row_layers):
        super(CvFoBlock, self).__init__()

        # One projection of the raw input per "row layer"; paired one-to-one
        # with self.p_ctx_next_layer_three inside forward().
        self.p_next_layer = paddle.nn.LayerList(
            [paddle.nn.Linear(hidden_dim, hidden_dim, bias_attr=False) for _ in range(row_layers)])
        self.p_ctx_next_layer_one = paddle.nn.Linear(hidden_dim, hidden_dim, bias_attr=False)
        self.p_ctx_next_mask_one = paddle.nn.Linear(hidden_dim, hidden_dim, bias_attr=False)
        self.p_ctx_next_mask_two = paddle.nn.Linear(hidden_dim, hidden_dim, bias_attr=False)
        # NOTE(review): only used on the chunked (length >= 8) path below; the
        # short path applies p_ctx_next_mask_two in its place — see forward().
        self.p_ctx_next_mask_thr = paddle.nn.Linear(hidden_dim, hidden_dim, bias_attr=False)

        self.p_ctx_next_layer_three = paddle.nn.LayerList(
            [paddle.nn.Linear(hidden_dim, hidden_dim, bias_attr=False) for _ in range(row_layers)])

        self.layer_nor = paddle.nn.LayerNorm(hidden_dim, bias_attr=False)
        self.feed = FeedForward(hidden_dim)

    def forward(self, sx, p_next):
        # sx: assumed (batch, seq, hidden) — TODO confirm with callers.
        # p_next: summed over axis 1 below, presumably (batch, vocab, hidden)
        # vocabulary embeddings — verify against CvFo.forward().

        p_ctx_next = self.p_ctx_next_layer_one(sx)

        # (batch, seq, hidden) -> (batch, hidden, seq) so the prefix sums
        # below run along the last (sequence) axis.
        p_ctx_next = p_ctx_next.transpose([0, 2, 1])
        if p_ctx_next.shape[-1] < 8:

            # triu mask[i, j] = 1 for j >= i, so after broadcasting and the
            # axis=-2 sum, position j holds the inclusive sum of positions
            # 0..j — a causal cumulative sum along the sequence.
            mask = paddle.triu(paddle.ones([p_ctx_next.shape[-1], p_ctx_next.shape[-1]]))
            p_ctx_next_mask = (p_ctx_next.unsqueeze([-1]) + paddle.zeros([p_ctx_next.shape[-1]])) * mask.unsqueeze(
                0).unsqueeze(0)
            p_ctx_next_mask = paddle.sum(p_ctx_next_mask, axis=-2).transpose([0, 2, 1])
            p_ctx_next_mask_one = self.p_ctx_next_mask_one(p_ctx_next_mask)
            p_ctx_next_mask_two = self.p_ctx_next_mask_two(p_ctx_next_mask)
            # NOTE(review): applies p_ctx_next_mask_two a second time here,
            # while the chunked branch below uses self.p_ctx_next_mask_thr —
            # looks like a copy-paste slip; confirm which layer is intended.
            p_ctx_next_mask_thr = self.p_ctx_next_mask_two(p_next)
            # Exponent (2 - shape[1] % 2) is 2 when the axis-1 length is even,
            # 1 when odd; the trailing ** (2 - 1) is a no-op.
            p_ctx_next_mask = p_ctx_next_mask_one * p_ctx_next_mask_two ** (
                        2 - p_ctx_next_mask_two.shape[1] % 2) * paddle.sum(p_ctx_next_mask_thr,1).unsqueeze(1) ** (2 - 1)


        else:
            # Long sequences: process in windows of 8, carrying the running
            # sum of all earlier positions so prefix sums stay global.
            p_ctx_next_list = []
            for i in range(0, p_ctx_next.shape[-1], 8):
                j = i + 8
                p_ctx_next_one = p_ctx_next[:, :, i:j]
                if i == 0:
                    p_ctx_next_one_sum = 0
                else:

                    # Sum of everything before this window, broadcast over the
                    # window positions when added below.
                    p_ctx_next_one_sum = paddle.sum(p_ctx_next[:, :, :i], -1).unsqueeze([-1]).transpose([0, 2, 1])

                mask = paddle.triu(paddle.ones([p_ctx_next_one.shape[-1], p_ctx_next_one.shape[-1]]))

                # Same triu prefix-sum construction as the short branch,
                # applied inside the current window.
                p_ctx_next_mask_one = (p_ctx_next_one.unsqueeze([-1]) + paddle.zeros(
                    [p_ctx_next_one.shape[-1]])) * mask.unsqueeze(
                    0).unsqueeze(0)

                p_ctx_next_mask_one = paddle.sum(p_ctx_next_mask_one, axis=-2).transpose([0, 2, 1]) + p_ctx_next_one_sum

                p_ctx_next_mask_one = self.p_ctx_next_mask_one(p_ctx_next_mask_one)
                # NOTE(review): feeds the already-projected mask_one into
                # mask_two here, whereas the short branch projects the raw
                # prefix sums — asymmetric; confirm this is intended.
                p_ctx_next_mask_two = self.p_ctx_next_mask_two(p_ctx_next_mask_one)
                p_ctx_next_mask_thr = self.p_ctx_next_mask_thr(p_next)
                p_ctx_next_mask_one =  p_ctx_next_mask_one * p_ctx_next_mask_two ** (
                        2 - p_ctx_next_mask_two.shape[1] % 2) * paddle.sum(p_ctx_next_mask_thr,1).unsqueeze(1) ** (2 - 1)

                p_ctx_next_list.append(p_ctx_next_mask_one)
            # Re-assemble the processed windows along the sequence axis.
            p_ctx_next_mask = paddle.concat(p_ctx_next_list, axis=1)

        # Multi-dimensional pooling could be applied here.
        p_ctx_next_mask_list = []

        # Note: the loop rebinds p_next to a projection of sx, shadowing the
        # argument (which is no longer used past this point).
        for p_ctx_next_layer, p_next_layer in zip(self.p_ctx_next_layer_three, self.p_next_layer):
            p_next = p_next_layer(sx)
            p_ctx_next_mask = paddle.sin(p_ctx_next_layer(p_ctx_next_mask) + p_next)
            p_ctx_next_mask += self.feed(p_ctx_next_mask)
            p_ctx_next_mask = self.layer_nor(p_ctx_next_mask)
            p_ctx_next_mask_list.append(p_ctx_next_mask)
        # Concatenate per-layer outputs on the feature axis and max-pool with
        # window len(list), reducing back to hidden_dim channels.
        p_ctx_next_mask = paddle.nn.functional.max_pool1d(paddle.concat(p_ctx_next_mask_list, -1),
                                                          len(p_ctx_next_mask_list))
        return p_ctx_next_mask


class CvFo(paddle.nn.Layer):
    """Character-level language model: embedding -> CvFoBlock mixer ->
    feed-forward -> vocabulary projection, with an adapter ("lora") module
    that can be trained on top of frozen base weights.
    """

    def __init__(self, voc_size, hidden_dim, row_layers, lora=False):
        super(CvFo, self).__init__()

        self.em = paddle.nn.Embedding(voc_size, hidden_dim)
        self.cv = CvFoBlock(hidden_dim, row_layers)
        self.feed = FeedForward(hidden_dim)

        # Adapter module; always present so state_dicts stay compatible. When
        # lora=True it is the only computation done outside no_grad.
        self.lora = FeedForward(hidden_dim)
        self.lora_flag = lora
        self.out_layer = paddle.nn.Linear(hidden_dim, voc_size, bias_attr=False)
        # One LayerNorm instance, reused at both normalization sites below.
        self.layer_nor = paddle.nn.LayerNorm(hidden_dim, bias_attr=False)
        # Constant [1, voc_size] tensor of every vocabulary id; embedded each
        # forward pass as the "p_next" context for CvFoBlock.
        self.p_next = paddle.to_tensor(list(range(voc_size))).astype("int64").reshape([1, -1])

    def forward(self, sx):
        # sx: int64 token ids, presumably (batch, seq) — TODO confirm.
        if self.lora_flag:
            # Base network runs without recording gradients; only the adapter
            # addition below is outside no_grad.
            with paddle.no_grad():
                sx = self.em(sx)
                p_next = self.em(self.p_next)
                sx+= self.cv(sx, p_next)
                sx=self.layer_nor(sx)
                sx+= self.feed(sx)
                sx = self.layer_nor(sx)
            sx += self.lora(sx)

            # NOTE(review): the output projection also runs under no_grad, so
            # a loss computed on `out` may not backpropagate into self.lora at
            # all — verify this is intended for adapter training.
            with paddle.no_grad():
                out = self.out_layer(sx)
        else:


            sx = self.em(sx)
            p_next = self.em(self.p_next)
            sx += self.cv(sx, p_next)
            sx = self.layer_nor(sx)
            sx += self.feed(sx)
            sx = self.layer_nor(sx)
            # The adapter also contributes in the base configuration.
            sx += self.lora(sx)
            out = self.out_layer(sx)
        return out

    def load_lora(self, lora_name):
        """Load adapter-only weights previously written by save_lora()."""
        self.lora.load_dict(paddle.load(lora_name))

    def save_lora(self, lora_name):
        """Persist only the adapter sub-module's weights to ``lora_name``."""
        paddle.save(self.lora.state_dict(), lora_name)


class CvFo1(paddle.nn.Layer):
    """Baseline variant of CvFo that pools embeddings by summation only.

    Sums the token embeddings over the sequence axis, modulates them by the
    pooled embeddings of the whole vocabulary, and projects the result back
    to vocabulary logits.
    """

    def __init__(self, voc_size, hidden_dim, row_layers, lora=False):
        # ``row_layers`` and ``lora`` are accepted only for signature parity
        # with CvFo; this simplified model ignores them.
        super().__init__()
        self.em = paddle.nn.Embedding(voc_size, hidden_dim)
        self.p_next = paddle.to_tensor(list(range(voc_size))).astype("int64").reshape([1, -1])
        self.out_layer = paddle.nn.Linear(hidden_dim, voc_size, bias_attr=False)

    def forward(self, sx):
        embedded = self.em(sx)
        vocab_emb = self.em(self.p_next)
        # Exponent is 1 for odd sequence lengths, 2 for even ones.
        pooled = paddle.sum(embedded, 1) ** (2 - embedded.shape[1] % 2)
        pooled = pooled * paddle.sum(vocab_emb, 1) ** (2 - 1)
        return self.out_layer(pooled)


def gen_basic_data():
    """Build the character-level training corpus and vocabulary files.

    Reads the couplet question/answer files, joins each pair as
    ``<in> 。 <out> |_<id>_|`` (the id marker is split into single
    characters), slices the flattened token stream into overlapping windows
    of ``seq_len`` tokens written one-per-line to ``train_data_list.txt``,
    then pickles the vocabulary list to ``voc_data.pandas_pickle`` and the
    id-encoded windows to ``train_data.pandas_pickle``.
    """
    import ast  # local import: only this preprocessing step parses the lines

    seq_len = 32
    with open("fixed_couplets_in.txt", "r", encoding="utf-8") as f:
        train_data = f.readlines()

    # NOTE(review): despite the name, this file holds the answer halves of
    # the couplets, not a dev split.
    with open("fixed_couplets_out.txt", "r", encoding="utf-8") as f:
        dev_data = f.readlines()

    train_data = [i.strip().split() for i in tqdm(train_data)]
    dev_data = [i.strip().split() for i in tqdm(dev_data)]

    train_data_list = []
    data_id_index = 0
    for i, j in tqdm(zip(train_data, dev_data)):
        # One sample: question + "。" + answer + per-sample id marker chars.
        one = i + ["。"] + j + list("|_{}_|".format(data_id_index))
        data_id_index += 1
        train_data_list += one
    seq_len_count = 1
    # NOTE(review): append mode means re-running this function duplicates
    # windows in train_data_list.txt — confirm "w" was not intended.
    with open("train_data_list.txt", "a", encoding="utf-8") as f:

        voc = dict()
        for i in tqdm(range(0, len(train_data_list), seq_len)):
            if i > 0:
                # Each subsequent window is shifted back by one extra token so
                # consecutive windows overlap.
                j = i + seq_len
                one = train_data_list[i - seq_len_count:j - seq_len_count]
                seq_len_count += 1
            else:
                j = i + seq_len
                one = train_data_list[i:j]
            if len(one) == seq_len:
                f.write(str(one) + "\n")
            for k in one:
                voc[k] = ""  # dict preserves first-seen token order
    del train_data_list
    del train_data
    del dev_data

    # Index 0 is reserved for the padding token.
    voc = ["<|pad|>"] + list(voc.keys())
    voc_dict = {k: v for v, k in enumerate(voc)}
    pd.to_pickle(voc, "voc_data.pandas_pickle")

    with open("train_data_list.txt", "r", encoding="utf-8") as f:
        train_data = f.readlines()

    # ast.literal_eval replaces the original eval(): identical result for
    # these str(list)-formatted lines, without executing arbitrary code.
    train_data_list = [[voc_dict[j] for j in ast.literal_eval(i)] for i in tqdm(train_data)]
    pd.to_pickle(train_data_list, "train_data.pandas_pickle")


def train_data():
    """Train a CvFo model from scratch on the pickled couplet dataset.

    Uses a next-token objective (inputs shifted left, targets shifted
    right) and saves a full checkpoint after every epoch.
    """
    vocab = pd.read_pickle("voc_data.pandas_pickle")
    model = CvFo(len(vocab) + 1, 128, 2)
    criterion = paddle.nn.CrossEntropyLoss(ignore_index=-1)
    optimizer = paddle.optimizer.Adam(learning_rate=0.0001, parameters=model.parameters())
    progress = tqdm(range(1700))
    batch_size = 1200
    samples = pd.read_pickle("train_data.pandas_pickle")

    acc_history = []
    for epoch in progress:
        np.random.shuffle(samples)
        for start in range(0, len(samples), batch_size):
            stop = start + batch_size
            batch = paddle.to_tensor(samples[start:stop]).astype("int64")

            # Next-token objective: predict token t+1 from tokens 0..t.
            label = batch[:, 1:]
            out = model(batch[:, :-1])

            loss = criterion(out.reshape([-1, out.shape[-1]]), label.reshape([-1]))
            acc = paddle.metric.accuracy(out.reshape([-1, len(vocab) + 1]), label.reshape([-1, 1]))
            acc_history.append(acc.item())
            progress.set_description(
                "epoch___{}___step___{}_loss___{:.5f}_acc__{:.5f}__{:.5f}".format(
                    epoch, stop, loss.item(), np.mean(acc_history),
                    (paddle.argmax(out, -1) == label).numpy().mean()))

            optimizer.clear_grad()
            loss.backward()
            optimizer.step()

        paddle.save(model.state_dict(), "model_{}.paddle".format(epoch))


def train_data_lora(lora_one_name):
    """Fine-tune the adapter ("lora") branch of a pretrained CvFo model.

    Loads base weights from ``basic.paddle``, trains with ``lora=True``
    (base forward runs under no_grad inside the model), and saves only the
    adapter's state_dict after each epoch.

    Args:
        lora_one_name: suffix used for the saved adapter checkpoint name.
    """
    voc_id = pd.read_pickle("voc_data.pandas_pickle")
    net = CvFo(len(voc_id) + 1, 128, 2, True)
    net.load_dict(paddle.load("basic.paddle"))
    loss_func = paddle.nn.CrossEntropyLoss(ignore_index=-1)
    # NOTE(review): CvFo's lora branch applies out_layer under no_grad —
    # verify gradients actually reach net.lora through this loss.
    opt = paddle.optimizer.Adam(learning_rate=0.00001, parameters=net.parameters())
    bar = tqdm(range(1700))
    batch_size = 1200
    data_set = pd.read_pickle("train_data.pandas_pickle")

    acc_list = []
    for epoch in bar:
        np.random.shuffle(data_set)
        for i in range(0, len(data_set), batch_size):
            j = i + batch_size

            data = paddle.to_tensor(data_set[i:j]).astype("int64")

            # BUG FIX: was ``data[:, -1:]`` (last token only), which yields
            # batch labels against batch*(seq_len-1) flattened logits — a
            # CrossEntropyLoss shape mismatch. Use the same shift-by-one
            # next-token targets as train_data().
            label = data[:, 1:]
            input_data = data[:, :-1]

            out = net(input_data)
            loss = loss_func(out.reshape([-1, out.shape[-1]]), label.reshape([-1]))
            acc = paddle.metric.accuracy(out.reshape([-1, len(voc_id) + 1]), label.reshape([-1, 1]))
            acc_list.append(acc.item())
            bar.set_description(
                "epoch___{}___step___{}_loss___{:.5f}_acc__{:.5f}__{:.5f}".format(epoch, j, loss.item(),
                                                                                  np.mean(acc_list), (paddle.argmax(out,
                                                                                                                    -1) == label).numpy().mean()))
            opt.clear_grad()
            loss.backward()
            opt.step()

        # NOTE(review): the filename is constant across epochs, so each epoch
        # overwrites the previous adapter checkpoint — confirm intended.
        paddle.save(net.lora.state_dict(), "model_{}.paddle".format(lora_one_name))


def eval_data():
    """Sample a completion from a trained checkpoint and print it.

    Takes one shuffled training window, splits it at the "<|bos|>" token,
    prints the reference continuation, then autoregressively samples tokens
    with top-k / top-p filtering until a marker token or length limit.
    """
    voc_id = pd.read_pickle("voc_data.pandas_pickle")
    net = CvFo(len(voc_id) + 1, 128, 2)
    net.load_dict(paddle.load("model_22.paddle"))
    net.eval()

    data_set = pd.read_pickle("train_data.pandas_pickle")
    np.random.shuffle(data_set)
    # NOTE(review): gen_basic_data() never emits a literal "<|bos|>" token
    # (id markers are split into single characters), so this may raise
    # ValueError — previously hidden by the bare except below.
    bos_index = data_set[0].index(voc_id.index("<|bos|>")) + 1
    data = data_set[0][:bos_index]
    label = data_set[0][bos_index:]
    print("".join([voc_id[i] for i in label]))
    try:
        for _ in range(len(label)):
            data_in = paddle.to_tensor(data).astype("int64").reshape([1, -1])

            out = net(data_in)
            k = 100
            logs = out[:, -1:]
            top_k_log, top_k = paddle.topk(logs, k=k)
            top_k = top_k.reshape([-1])
            top_k_log = top_k_log.reshape([-1])
            top_k_score = paddle.nn.functional.softmax(logs, -1).reshape([-1])[top_k]
            # Nucleus (top-p) filtering: keep candidates while the cumulative
            # probability stays below p. Computed once instead of three times.
            p = 0.9
            nucleus = paddle.cumsum(top_k_score) < p
            top_p = top_k[nucleus]
            top_p_log = top_k_log[nucleus]
            t = 0.3
            # NOTE(review): this temperature softmax is never used — the
            # choice below is uniform over the nucleus; confirm whether
            # np.random.choice(..., p=top_t_score) was intended.
            top_t_score = paddle.nn.functional.softmax(top_p_log / t)
            token_id = np.random.choice(top_p.numpy().tolist())
            # NOTE(review): the context is *replaced* by the single sampled
            # token each step rather than appended — confirm intended.
            data = [token_id]
            print(voc_id[token_id], end="", flush=True)
            if "<|" in voc_id[token_id]:
                break
    except Exception:
        # Best-effort demo: stop printing on any runtime error (narrowed
        # from a bare except, which also swallowed KeyboardInterrupt).
        pass


if __name__ == '__main__':
    # Default entry point: full from-scratch training. The commented lines
    # are the other manual workflows (corpus preprocessing, a CvFoBlock
    # smoke test, and sampling-based evaluation).
    # gen_basic_data()
    train_data()
    # net = CvFoBlock(256, 2)
    # net(paddle.randn([2, 5, 256]), paddle.randn([2, 1000, 256]))
    # eval_data()
