
import torch

import pandas as pd
import polars as pl
import numpy as np
from jieba import lcut
from tqdm import tqdm

from infer_model import SamOut


def load_model_and_voc(device="cpu"):
    """Build the two-level vocabulary table and load the pretrained SamOut model.

    The base vocabulary gets ids 0..len-1 directly (with 8 reserved control
    tokens at ids 0..7).  The extended vocabulary ("voc1") is folded onto the
    same id range: ``voc_id`` cycles through the base-id space while
    ``voc_num`` records which fold ("page") the token landed on, so a small
    output layer can address a larger surface vocabulary.

    Args:
        device: torch device string the checkpoint is mapped and moved to.

    Returns:
        Tuple ``(net, dl)``: the model in eval mode on ``device`` and a polars
        DataFrame with columns (voc, voc_id, voc_num).
    """
    voc = pd.read_pickle("total_voc_new.pkl")["voc"]
    # 8 reserved control tokens occupy ids 0..7 ahead of the sorted base vocab.
    voc = ["<|sos|>", "<|user|>", "<|agent|>", "<|pad|>", "<|history|>", "<|unk|>", "<|end|>", "<|next|>"] + sorted(
        set(voc))
    voc_size = len(voc) - 8  # size of the base vocab excluding the 8 specials
    voc1 = pd.read_pickle("total_voc_new.pkl")["voc1"]
    # voc1 is a token -> count mapping; keep only tokens absent from the base
    # vocab, most frequent first, so frequent tokens land on the lowest pages.
    voc1 = pd.DataFrame({"voc": voc1.keys(), "count": voc1.values()})
    voc1 = voc1[~voc1["voc"].isin(voc)]
    voc1 = voc1.sort_values("count", ascending=False)
    # Fold extended tokens onto base ids: voc_id cycles through 8..voc_size+7,
    # voc_num counts the fold; base-vocab rows get voc_num 8, folds start at 9.
    voc1["voc_id"] = np.array(list(range(len(voc1)))) % voc_size + 8
    voc1["voc_num"] = np.array(list(range(len(voc1)))) // voc_size + 9

    dl = pl.DataFrame(
        {"voc": voc + voc1["voc"].values.tolist(), "voc_id": list(range(len(voc))) + voc1["voc_id"].values.tolist(),
         "voc_num": [8] * len(voc) + voc1["voc_num"].values.tolist()})

    # NOTE(review): hidden size 1024+512, 64 heads(?), 16 layers(?) — confirm
    # the SamOut constructor signature against infer_model.py.
    net = SamOut(len(voc), 1024 + 512, 64, 16)
    # net = SamOut(len(voc["voc"]), 512, 32, 8)
    # Print the total trainable-parameter count (matrix + bias terms).
    print(sum([i.shape[0] * i.shape[1] for i in net.parameters() if len(i.shape) > 1]) + sum(
        [i.shape[0] for i in net.parameters() if len(i.shape) == 1]))

    # net.load_state_dict(torch.load("pretrain_768.pth", map_location=device))
    # net.load_state_dict(torch.load("pretrain_sft_single.pth", map_location=device))
    net.load_state_dict(torch.load("pretrain_sft_single_1024.pth", map_location=device))
    # net.load_state_dict(torch.load("pretrain.pth", map_location=device))
    net.to(device)
    net.eval()
    return net, dl


def gen_token(voc, voc_dict, model, prompt, max_len, rp=1.2, temp=0.13, top_k=16, device="cuda"):
    """Autoregressively sample up to ``max_len`` tokens and stream them to stdout.

    Args:
        voc: polars DataFrame with columns (voc, voc_id, voc_num) mapping
            surface tokens to their folded (id, page) pair.
        voc_dict: dict mapping "voc_id_voc_num" -> surface token string.
        model: network returning ``(logits, state)``; accepts an optional
            recurrent/cache state as second argument.
        prompt: list of surface tokens to condition on; extended in place.
        max_len: maximum number of decoding steps.
        rp: repetition penalty — logits of already-generated ids are divided
            by this factor.
        temp: temperature divisor.  NOTE(review): it is applied to the
            already-softmaxed probabilities, so multinomial proportions are
            unchanged — confirm whether it was meant to divide the logits.
        top_k: maximum number of candidates kept after nucleus filtering.
        device: torch device string used for inference.
    """
    voc_list = pl.DataFrame({"voc": prompt}).join(voc, on="voc", how="left").to_numpy()
    prompt_list = voc_list[:, 1].tolist()
    print("agent:", end="", flush=True)
    model.to(device)
    current_voc = 8  # page selected by the most recent "<|next|>" token

    state = None
    for _ in range(max_len):
        if state is None:
            # First step: feed the whole prompt to build the model state.
            out, state = model(torch.Tensor([prompt_list]).to(device).long())
        else:
            # Later steps: feed only the newest token and reuse the state.
            out, state = model(torch.Tensor([prompt_list[-1:]]).to(device).long(), state)

        out = out[:, -1:]
        # Repetition penalty: damp the logit of every id generated so far.
        # (Bug fix: the original iterated ``enumerate(prompt_list)`` and thus
        # indexed with (position, id) tuples, penalizing positions 0..len-1
        # instead of the actual token ids.)
        for token_id in set(prompt_list):
            out[:, :, token_id] /= rp
        score = torch.softmax(out, -1)[0, 0]

        score, score_index = torch.sort(score, descending=True)
        # .cpu() is a no-op on CPU tensors, so one path covers both devices.
        score = score.detach().cpu().numpy()
        score_index = score_index.detach().cpu().numpy()

        # Nucleus (top-p=0.9) filtering: keep the smallest prefix covering
        # 90% of the mass; always keep at least the top candidate.
        score_sum = np.cumsum(score)
        nucleus = score[score_sum < 0.9]
        score = nucleus if nucleus.size else score[:1]

        keep = min(top_k, score.size)
        score_index = score_index[:keep]
        weights = score[:keep] / temp  # multinomial renormalizes internally

        pick = torch.multinomial(torch.Tensor(weights), num_samples=1)
        idx_next = score_index[pick.item()]
        if prompt_list[-1] == 7:
            # Previous token was "<|next|>": this id selects the vocab page.
            current_voc = idx_next

        if idx_next == 7:
            # Page-switch marker: record it but emit nothing.
            prompt += ["<|next|>"]
            prompt_list.append(7)
            continue
        if idx_next == 6:
            break  # "<|end|>" terminates generation

        out_item = voc_dict.get("{}_{}".format(idx_next, current_voc))
        if not out_item:
            # Bug fix: the original fell through and re-printed the previous
            # token when the (id, page) pair had no surface form.
            continue
        prompt += [out_item]
        prompt_list.append(idx_next)

        # Suppress printing around page-switch markers.
        if prompt_list[-1] == 7 or prompt_list[-2] == 7:
            continue
        print(prompt[-1], end="", flush=True)


def t_infre():
    """Interactive REPL: read a user line, stream the model's reply, repeat."""
    model, voc = load_model_and_voc()
    # Flatten the (voc, voc_id, voc_num) table into "id_page" -> token lookups.
    voc_dict = {}
    for token, vid, vnum in voc.to_numpy().tolist():
        voc_dict["{}_{}".format(vid, vnum)] = token

    while True:
        text = input("user:")
        # Fresh prompt each turn: control tokens wrapping the segmented input.
        prompt = ["<|next|>", "\x00", "<|user|>"] + lcut("{}".format(text)) + ["<|agent|>"]
        gen_token(voc, voc_dict, model, prompt, 256)
        print()


if __name__ == '__main__':
    # Entry point: launch the interactive inference loop.
    t_infre()
