import ast

import numpy as np
import paddle
import pandas as pd
from tqdm import tqdm


class Attention(paddle.nn.Layer):
    """Softmax-free multi-head self-attention block.

    Raw q·kᵀ scores are masked with a multiplicative 0/1 triangular mask
    and normalised by their masked sum (instead of softmax).  Per-head
    outputs are merged by an element-wise max across heads rather than
    by the usual concat + output projection.
    """

    def __init__(self, hidden_dim: int, heads: int, layers_num: int = 8):
        # hidden_dim: per-head feature width; q/k/v project to
        #             heads * hidden_dim and are reshaped to
        #             [batch, seq, heads, hidden_dim] in forward().
        # heads:      number of attention heads; also the GroupNorm channel
        #             count with one group per channel, i.e. per-head norm.
        # layers_num: number of Linear + GroupNorm + head-max refinement
        #             stages applied after the attention itself.
        super(Attention, self).__init__()

        self.q = paddle.nn.Linear(hidden_dim, heads * hidden_dim, bias_attr=False)
        self.k = paddle.nn.Linear(hidden_dim, heads * hidden_dim, bias_attr=False)
        self.v = paddle.nn.Linear(hidden_dim, heads * hidden_dim, bias_attr=False)
        self.heads = heads
        self.group_norm0 = paddle.nn.GroupNorm(num_groups=heads, num_channels=heads, bias_attr=False)
        self.group_norm2 = paddle.nn.LayerList(
            [paddle.nn.GroupNorm(num_groups=heads, num_channels=heads, bias_attr=False) for _ in range(layers_num)])

        self.feed_01 = paddle.nn.LayerList(
            [paddle.nn.Linear(hidden_dim, heads * hidden_dim, bias_attr=False) for _ in range(layers_num)])

    def forward(self, sx):
        # sx: [b, s, h]; h must equal the hidden_dim used at construction,
        # because the projections are reshaped to [b, s, heads, h] below.
        b, s, h = sx.shape
        q = paddle.nn.functional.relu(self.q(sx))
        k = paddle.nn.functional.relu(self.k(sx))
        v = self.v(sx)

        # [b, heads, s, h] @ [b, heads, h, s] -> raw scores [b, heads, s, s]
        qk = q.reshape([b, s, self.heads, h]).transpose([0, 2, 1, 3]) @ k.reshape([b, s, self.heads, h]).transpose(
            [0, 2, 3, 1])
        # Upper-triangular 0/1 mask (diagonal included): entry [i, j] survives
        # only for j >= i.  Masking is multiplicative; the commented line was
        # the conventional additive -inf variant.
        mask = paddle.triu(paddle.ones([s, s]))
        # mask[mask == 0] = -np.inf
        qk_mask = qk * mask
        # Normalise each key column by its sum over queries (epsilon guards
        # against an all-zero column).  This stands in for softmax.
        qk = qk_mask / (paddle.sum(qk_mask, -2).unsqueeze([-2]) + 0.00000000000001)
        # After the transpose, weight [t, i] is non-zero only for i <= t and
        # each row sums to 1, so output position t is a convex combination of
        # values at positions i <= t (causal mixing).
        qkv = qk.transpose([0, 1, 3, 2]) @ v.reshape([b, s, self.heads, h]).transpose([0, 2, 1, 3])

        # Per-head normalisation over [b, heads, s, h].
        qkv = self.group_norm0(qkv)
        qkv = qkv.transpose([0, 2, 3, 1])
        # Merge heads by element-wise max: [b, s*h, heads] -> [b, s, h].
        qkv = paddle.nn.functional.max_pool1d(qkv.reshape([b, -1, self.heads]), self.heads).reshape([b, s, h])

        # Refinement stages: re-project to heads*h, per-head GroupNorm,
        # then max-pool the head axis away again.
        for feed, group_norm in zip(self.feed_01, self.group_norm2):
            qkv = feed(qkv)
            qkv = qkv.reshape([b, s, self.heads, h]).transpose([0, 2, 1, 3])
            qkv = group_norm(qkv)
            qkv = qkv.transpose([0, 2, 3, 1])
            qkv = paddle.nn.functional.max_pool1d(qkv.reshape([b, -1, self.heads]), self.heads).reshape([b, s, h])

        # Dead code from an earlier single-stage variant, kept for reference:
        # qkv = self.feed_02(qkv)
        # qkv = qkv.reshape([b, s, self.heads, h]).transpose([0, 2, 1, 3])
        # qkv = self.group_norm2(qkv)
        # qkv = qkv.transpose([0, 2, 3, 1])
        # qkv = paddle.nn.functional.max_pool1d(qkv.reshape([b, -1, self.heads]), self.heads).reshape([b, s, h])

        return qkv


class FeedForward(paddle.nn.Layer):
    """Bias-free bottleneck MLP: hidden_dim -> hidden_dim // 2 -> hidden_dim."""

    def __init__(self, hidden_dim):
        super(FeedForward, self).__init__()
        half = hidden_dim // 2
        self.fc_one = paddle.nn.Linear(hidden_dim, half, bias_attr=False)
        self.fc_two = paddle.nn.Linear(half, hidden_dim, bias_attr=False)
        self.gre = paddle.nn.GELU()

    def forward(self, x):
        """Squeeze to half width, apply GELU, expand back to full width."""
        squeezed = self.gre(self.fc_one(x))
        return self.fc_two(squeezed)


class CvFoBlock(paddle.nn.Layer):
    """Core mixing block of the CvFo model.

    Builds a cumulative-sum context of the input sequence, gates it with a
    summary of the candidate-token embeddings, runs the result through the
    Attention block, then refines it with `row_layers` sin-gated stages
    whose outputs are merged by an element-wise max.
    """

    def __init__(self, hidden_dim, row_layers, heads, group_num):
        super(CvFoBlock, self).__init__()

        self.p_next_layer = paddle.nn.LayerList(
            [paddle.nn.Linear(hidden_dim, hidden_dim, bias_attr=False) for _ in range(row_layers)])
        self.p_ctx_next_layer_one = paddle.nn.Linear(hidden_dim, hidden_dim, bias_attr=False)
        self.p_ctx_next_mask_one = paddle.nn.Linear(hidden_dim, hidden_dim, bias_attr=False)
        self.p_ctx_next_mask_two = paddle.nn.Linear(hidden_dim, hidden_dim, bias_attr=False)
        self.p_ctx_next_mask_thr = paddle.nn.Linear(hidden_dim, hidden_dim, bias_attr=False)

        self.p_ctx_next_layer_three = paddle.nn.LayerList(
            [paddle.nn.Linear(hidden_dim, hidden_dim, bias_attr=False) for _ in range(row_layers)])

        self.layer_nor = paddle.nn.LayerNorm(hidden_dim, bias_attr=False)
        self.feed = FeedForward(hidden_dim)
        self.attention = Attention(hidden_dim, heads, group_num)

    def forward(self, sx, p_next):
        # sx:     [batch, seq, hidden] token embeddings
        # p_next: embeddings of every vocabulary token — presumably
        #         [1, voc_size, hidden]; TODO confirm against caller (CvFo)
        p_ctx_next = self.p_ctx_next_layer_one(sx)
        # running prefix context along the sequence axis
        p_ctx_next_mask = paddle.cumsum(p_ctx_next, 1)
        p_ctx_next_mask_one = self.p_ctx_next_mask_one(p_ctx_next_mask)
        p_ctx_next_mask_two = self.p_ctx_next_mask_two(p_ctx_next_mask)
        # BUG FIX: the original applied self.p_ctx_next_mask_two a second
        # time here, leaving p_ctx_next_mask_thr defined but never used;
        # route p_next through its own projection as the naming intends.
        p_ctx_next_mask_thr = self.p_ctx_next_mask_thr(p_next)
        # Gate: the first exponent is 1 for odd sequence lengths and 2 for
        # even ones; the trailing ** (2 - 1) is an identity kept as written.
        p_ctx_next_mask = p_ctx_next_mask_one * p_ctx_next_mask_two ** (
                2 - p_ctx_next_mask_two.shape[1] % 2) * paddle.sum(p_ctx_next_mask_thr, 1).unsqueeze(1) ** (2 - 1)
        p_ctx_next_mask = p_ctx_next_mask + self.attention(p_ctx_next_mask)
        p_ctx_next_mask = self.layer_nor(p_ctx_next_mask)
        # multi-dimensional pooling could be applied here
        p_ctx_next_mask_list = []

        for p_ctx_next_layer, p_next_layer in zip(self.p_ctx_next_layer_three, self.p_next_layer):
            p_next = p_next_layer(sx)
            p_ctx_next_mask = paddle.sin(p_ctx_next_layer(p_ctx_next_mask) + p_next)
            p_ctx_next_mask = p_ctx_next_mask + self.feed(p_ctx_next_mask)
            p_ctx_next_mask = self.layer_nor(p_ctx_next_mask)
            p_ctx_next_mask_list.append(p_ctx_next_mask)
        # merge the stage outputs by element-wise max across the
        # concatenated feature axis
        p_ctx_next_mask = paddle.nn.functional.max_pool1d(paddle.concat(p_ctx_next_mask_list, -1),
                                                          len(p_ctx_next_mask_list))
        return p_ctx_next_mask


class CvFo(paddle.nn.Layer):
    """Sequence model: embedding -> CvFoBlock -> FeedForward -> LoRA -> logits."""

    def __init__(self, voc_size, hidden_dim, row_layers, heads, group_num, lora=False):
        super(CvFo, self).__init__()

        self.em = paddle.nn.Embedding(voc_size, hidden_dim)
        self.cv = CvFoBlock(hidden_dim, row_layers, heads, group_num)
        self.feed = FeedForward(hidden_dim)

        self.lora = FeedForward(hidden_dim)
        self.lora_flag = lora
        self.out_layer = paddle.nn.Linear(hidden_dim, voc_size, bias_attr=False)
        self.layer_nor = paddle.nn.LayerNorm(hidden_dim, bias_attr=False)
        # fixed id row [0, 1, ..., voc_size - 1], embedded anew in forward()
        self.p_next = paddle.to_tensor(list(range(voc_size))).astype("int64").reshape([1, -1])

    def forward(self, sx):
        """Map token ids [batch, seq] to vocabulary logits [batch, seq, voc_size]."""
        tokens = self.em(sx)
        candidates = self.em(self.p_next)
        hidden = self.layer_nor(tokens + self.cv(tokens, candidates))
        hidden = self.layer_nor(hidden + self.feed(hidden))
        hidden = hidden + self.lora(hidden)
        return self.out_layer(hidden)

    def load_lora(self, lora_name):
        """Restore only the LoRA adapter weights from *lora_name*."""
        state = paddle.load(lora_name)
        self.lora.load_dict(state)

    def save_lora(self, lora_name):
        """Persist only the LoRA adapter weights to *lora_name*."""
        paddle.save(self.lora.state_dict(), lora_name)


def gen_basic_data():
    """Build the training corpus and vocabulary from the couplet files.

    Reads fixed_couplets_in.txt / fixed_couplets_out.txt, joins each
    input/output pair into one token stream ("in 。 out |_<id>_|"),
    slices the stream into overlapping seq_len-token windows written to
    train_data_list.txt, then pickles the vocabulary
    (voc_data.pandas_pickle) and the id-encoded windows
    (train_data.pandas_pickle).
    """
    seq_len = 32
    with open("fixed_couplets_in.txt", "r", encoding="utf-8") as f:
        train_data = f.readlines()

    with open("fixed_couplets_out.txt", "r", encoding="utf-8") as f:
        dev_data = f.readlines()

    train_data = [i.strip().split() for i in tqdm(train_data)]
    dev_data = [i.strip().split() for i in tqdm(dev_data)]

    train_data_list = []
    data_id_index = 0
    for i, j in tqdm(zip(train_data, dev_data)):
        # one sample: first line + "。" + second line + "|_<running id>_|"
        one = i + ["。"] + j + list("|_{}_|".format(data_id_index))
        data_id_index += 1
        train_data_list += one
    seq_len_count = 1
    # NOTE(review): "a" appends, so re-running this function duplicates
    # windows in train_data_list.txt — delete the file first (or use "w").
    with open("train_data_list.txt", "a", encoding="utf-8") as f:

        voc = dict()
        for i in tqdm(range(0, len(train_data_list), seq_len)):
            if i > 0:
                # later windows are shifted back by a growing offset so
                # consecutive windows overlap by one extra token each step
                j = i + seq_len
                one = train_data_list[i - seq_len_count:j - seq_len_count]
                seq_len_count += 1
            else:
                j = i + seq_len
                one = train_data_list[i:j]
            if len(one) == seq_len:
                f.write(str(one) + "\n")
            for k in one:
                voc[k] = ""  # dict keeps first-seen token order; value unused
    del train_data_list
    del train_data
    del dev_data

    voc = ["<|pad|>"] + list(voc.keys())
    voc_dict = {k: v for v, k in enumerate(voc)}
    pd.to_pickle(voc, "voc_data.pandas_pickle")

    with open("train_data_list.txt", "r", encoding="utf-8") as f:
        train_data = f.readlines()

    # BUG FIX: eval() executed arbitrary text from the data file;
    # ast.literal_eval parses the str(list) lines safely and identically.
    train_data_list = [[voc_dict[j] for j in ast.literal_eval(i)] for i in tqdm(train_data)]
    pd.to_pickle(train_data_list, "train_data.pandas_pickle")


def train_data_func():
    """Train the base CvFo model on the pickled dataset.

    Runs 1700 epochs of next-token prediction over train_data.pandas_pickle
    and writes a full checkpoint (model_<epoch>.paddle) after every epoch.
    """
    vocab = pd.read_pickle("voc_data.pandas_pickle")
    net = CvFo(len(vocab) + 1, 128, 5, 8, 2)
    criterion = paddle.nn.CrossEntropyLoss(ignore_index=-1)
    optimizer = paddle.optimizer.Adam(learning_rate=0.0001, parameters=net.parameters())
    epochs = tqdm(range(1700))
    batch_size = 1200
    samples = pd.read_pickle("train_data.pandas_pickle")

    acc_history = []
    for epoch in epochs:
        np.random.shuffle(samples)
        for start in range(0, len(samples), batch_size):
            end = start + batch_size

            batch = paddle.to_tensor(samples[start:end]).astype("int64")

            # next-token objective: inputs drop the last token, labels are
            # the same window shifted left by one
            label = batch[:, 1:]
            inputs = batch[:, :-1]

            logits = net(inputs)
            loss = criterion(logits.reshape([-1, logits.shape[-1]]), label.reshape([-1]))
            acc = paddle.metric.accuracy(logits.reshape([-1, len(vocab) + 1]), label.reshape([-1, 1]))
            acc_history.append(acc.item())
            epochs.set_description(
                "epoch___{}___step___{}_loss___{:.5f}_acc__{:.5f}".format(
                    epoch, end, loss.item(), np.mean(acc_history)))
            optimizer.clear_grad()
            loss.backward()
            optimizer.step()

        paddle.save(net.state_dict(), "model_{}.paddle".format(epoch))


def train_data_lora(lora_one_name):
    """Fine-tune the LoRA adapter of a pre-trained CvFo model.

    Loads base weights from basic.paddle, trains on the pickled dataset and
    saves only the adapter weights to model_<lora_one_name>.paddle.
    """
    voc_id = pd.read_pickle("voc_data.pandas_pickle")
    # BUG FIX: the original passed only five positional args, so `True`
    # landed in group_num and `lora` silently kept its default False.
    net = CvFo(len(voc_id) + 1, 128, 2, 2, 2, lora=True)
    # NOTE(review): these hyper-parameters (row_layers=2, heads=2) differ
    # from train_data_func's (5, 8); basic.paddle must match this shape.
    net.load_dict(paddle.load("basic.paddle"))
    loss_func = paddle.nn.CrossEntropyLoss(ignore_index=-1)
    # NOTE(review): every parameter is optimised although only the adapter
    # is saved below — consider parameters=net.lora.parameters() to freeze
    # the base model.
    opt = paddle.optimizer.Adam(learning_rate=0.00001, parameters=net.parameters())
    bar = tqdm(range(1700))
    batch_size = 1200
    data_set = pd.read_pickle("train_data.pandas_pickle")

    acc_list = []
    for epoch in bar:
        np.random.shuffle(data_set)
        for i in range(0, len(data_set), batch_size):
            j = i + batch_size

            data = paddle.to_tensor(data_set[i:j]).astype("int64")

            # BUG FIX: was data[:, -1:] — a single label column cannot match
            # the seq_len-1 predictions in `out`, which raises a shape error
            # in CrossEntropyLoss; use the shifted window as in
            # train_data_func.
            label = data[:, 1:]
            input_data = data[:, :-1]

            out = net(input_data)
            loss = loss_func(out.reshape([-1, out.shape[-1]]), label.reshape([-1]))
            acc = paddle.metric.accuracy(out.reshape([-1, len(voc_id) + 1]), label.reshape([-1, 1]))
            acc_list.append(acc.item())
            bar_name = "epoch___{}___step___{}_loss___{:.5f}_acc__{:.5f}"
            bar.set_description(bar_name.format(epoch, j, loss.item(), np.mean(acc_list)))
            opt.clear_grad()
            loss.backward()
            opt.step()

        # use the dedicated helper so saving stays symmetric with load_lora
        net.save_lora("model_{}.paddle".format(lora_one_name))


if __name__ == '__main__':
    # Entry point: on a fresh checkout run gen_basic_data() once to build
    # the dataset pickles, then train the base model.
    # gen_basic_data()
    train_data_func()
