
import numpy as np
import pandas as pd
import torch
from tqdm import tqdm

from infer_model import SamOut




def load_model_and_voc(device="cpu"):
    """Load the vocabulary pickle and the SFT checkpoint, ready for inference.

    :param device: torch device string the weights are mapped and moved to.
    :return: tuple ``(net, voc)`` — model in eval mode on ``device``, and the
        vocabulary dict (with at least the "voc" and "voc0" keys used below).
    """
    voc = pd.read_pickle("total_voc.pkl")

    net = SamOut(len(voc["voc"]), 1024 + 512, 64, 16)
    # Report the total parameter count. numel() is correct for tensors of any
    # rank, unlike the previous shape[0]*shape[1] arithmetic which assumed
    # every parameter was 1-D or 2-D.
    print(sum(p.numel() for p in net.parameters()))

    # NOTE(review): checkpoint filename is hard-coded; earlier experiments used
    # other files ("pretrain_768.pth", "pretrain_sft_single.pth", ...).
    net.load_state_dict(torch.load("pretrain_sft_single_1024.pth", map_location=device))
    net.to(device)
    net.eval()
    return net, voc


def gen_token(voc, model, prompt, max_len, rp=1.2, temp=0.13, top_k=16, device="cuda"):
    """Autoregressively sample up to ``max_len`` tokens, printing each one.

    :param voc: dict with "voc" (list of token strings) and "voc0" (fallback
        map from an unknown string to a sequence of known token strings).
    :param model: callable returning ``(logits, state)`` with logits shaped
        ``(1, seq, vocab)``; moved to ``device`` before use.
    :param prompt: list of token strings; EXTENDED IN PLACE with the
        generated tokens.
    :param max_len: maximum number of tokens to generate.
    :param rp: repetition-penalty divisor applied to the logits of every
        token id already present in the prompt.
    :param temp: softmax temperature (< 1 sharpens the distribution).
    :param top_k: maximum number of candidates kept for sampling.
    :param device: torch device string.

    Generation stops early when "<|sos|>" is sampled (end-of-reply marker).
    """
    print("agent:", end="", flush=True)
    model.to(device)

    state = None
    for _ in range(max_len):
        # Map token strings to ids; unknown strings fall back to voc0 pieces.
        prompt_list = []
        for tok in prompt:
            if tok not in voc["voc"]:
                # NOTE(review): voc["voc0"].get(tok) may be None for a truly
                # unseen character, which would raise here — confirm upstream.
                prompt_list += [voc["voc"].index(piece) for piece in voc["voc0"].get(tok)]
            else:
                prompt_list.append(voc["voc"].index(tok))

        if state is None:
            # First step: feed the whole prompt.
            out, state = model(torch.Tensor([prompt_list]).to(device).long())
        else:
            # Later steps: feed only the newest token, reusing the cached state.
            out, state = model(torch.Tensor([prompt_list[-1:]]).to(device).long(), state)

        out = out[:, -1:]

        # Repetition penalty: damp the logit of every token id already seen.
        # BUG FIX: the old loop iterated enumerate(prompt_list), so `token_id`
        # was an (index, id) TUPLE and out[:, :, token_id] penalized the
        # positional index as well as the actual token id.
        for token_id in set(prompt_list):
            out[:, :, token_id] /= rp

        # BUG FIX: temperature must scale the LOGITS before softmax. The old
        # code divided the final probabilities by `temp`, which is a no-op for
        # torch.multinomial (sampling is invariant to a constant scale).
        score = torch.softmax(out / temp, -1)[0, 0]

        score, score_index = torch.sort(score, descending=True)
        # .cpu() is a no-op for tensors already on the CPU, so one path
        # covers both devices.
        score = score.cpu().detach().numpy()
        score_index = score_index.cpu().detach().numpy()

        # Nucleus (top-p = 0.9) filter, then cap at top_k candidates.
        score_sum = np.cumsum(score)
        kept = score[score_sum < 0.9]
        if kept.size == 0:
            kept = score[:1]  # always keep at least the best candidate
        k = min(top_k, kept.size)
        weights = kept[:k]
        score_index = score_index[:k]

        idx_next = torch.multinomial(torch.Tensor(weights), num_samples=1, generator=None)
        next_token = voc["voc"][score_index[idx_next.item()]]
        if next_token == "<|sos|>":
            break
        prompt += [next_token]
        print(prompt[-1], end="", flush=True)


def t_infre():
    """Interactive REPL: read a user line, stream the agent's reply, repeat."""
    model, voc = load_model_and_voc()

    while True:
        text = input("user:")
        # Wrap the raw characters in the chat-role marker tokens.
        tokens = ["<|user|>"] + list("{}".format(text)) + ["<|agent|>"]
        gen_token(voc, model, tokens, 64)
        print()


# Script entry point: start the interactive chat loop.
if __name__ == '__main__':


    t_infre()


