from glob import glob
import numpy as np
import pandas as pd
from cvfo_gpt import CvFo
from tqdm import tqdm
from config import config
from check_data import gen_hash
import paddle


# Sampling controller: drives autoregressive text generation with the base
# CvFo model plus per-level LoRA adapter checkpoints.
class Sample:
    def __init__(self):
        """Load the vocabulary, base model weights, and index the LoRA checkpoints."""
        voc_id = pd.read_pickle(config["voc_data_path"])
        self.net = CvFo(len(voc_id) + 1, config["hidden_dim"], config["row_layers"],
                        config["heads"],
                        config["group_num"])
        self.net.load_dict(paddle.load(config["basic_model_path"]))
        # Cumulative length boundaries per compression level:
        # [0, 512, 512 + seq_len//2, 512 + 2*(seq_len//2), ...]
        seq_len_list = [0, 512]
        for _ in range(config["lora_level"]):
            seq_len_list.append(seq_len_list[-1] + config["seq_len"] // 2)
        self.level_seq_len = seq_len_list
        # Map "<hash>_<level>" -> checkpoint path, parsed from the trailing
        # underscore-separated fields of each file name.
        lora_model_path_list = glob(config["lora_model_dir"] + "*")
        self.lora_model_path_dict = {"_".join(i.split("_")[-3:-1]): i for i in lora_model_path_list}
        self.voc_dict = {k: v for v, k in enumerate(voc_id)}  # token -> id
        self.voc_dict_id = {k: v for k, v in enumerate(voc_id)}  # id -> token
        self.text_token = []  # running token-id context reused across sample() calls

    def sample(self, text, context_flag=False):
        """Generate one more token and append it to ``text``.

        Args:
            text: current text (a sequence of vocabulary tokens).
            context_flag: False on the first call (re-tokenize the whole text);
                True on subsequent calls (append only the newest token to the
                cached context).

        Returns:
            ``text`` extended with one greedily-sampled token.
        """
        path, level = self.get_lora_name(text)
        self.net.load_lora(path)
        text = self.sample_text(text, level)
        if context_flag is False:
            self.text_token = [self.voc_dict[i] for i in text]
        else:
            self.text_token += [self.voc_dict[text[-1]]]
        out = self.net(paddle.to_tensor(self.text_token).astype("int64").reshape([1, -1]))
        text += self.sample_func(out)
        return text

    def sample_func(self, two):
        """Greedy decode: return the vocabulary token with the highest logit
        at the last sequence position."""
        two = paddle.argmax(two[:, -1:], -1)
        return self.voc_dict_id[two.item()]

    def get_lora_name(self, one):
        """Select the LoRA checkpoint keyed by the last token's hash and the
        compression level implied by the text length.

        Returns:
            (checkpoint_path, level)
        """
        level = 0
        # FIX: iterate to len-1 so the [i + 1] lookahead stays in bounds — the
        # original ranged over the full list and raised IndexError for texts at
        # least as long as the last boundary. Texts past the last boundary now
        # fall back to level 0 — TODO confirm whether the last level is intended.
        for i in range(len(self.level_seq_len) - 1):
            if self.level_seq_len[i] <= len(one) < self.level_seq_len[i + 1]:
                level = i
                break

        name = "{}_{}".format(gen_hash(one[-1]), level)
        return self.lora_model_path_dict[name], level
        # Alternative kept from the original ("pick one at random for now"):
        # return list(self.lora_model_path_dict.values())[0], level

    @staticmethod
    def gen_basic(data_id):
        """Build the segment layout for compression level ``data_id``.

        Each iteration halves the existing segments and appends a fresh
        half-length one (up to 4 segments), then pads with leading zeros.

        Returns:
            (basic_len, basic): cumulative segment end offsets (np.ndarray of
            int) and the raw per-level segment-length matrix. For level 0 both
            are plain one-element lists containing ``seq_len``.
        """
        if data_id == 0:
            return [config["seq_len"]], [config["seq_len"]]
        seq_len_list = []
        basic = [config["seq_len"]]
        seq_len_list.append(list(basic))
        for _ in tqdm(range(data_id)):
            if len(basic) < 4:
                basic = [b // 2 for b in basic]
                basic.append(config["seq_len"] // 2)
            else:
                basic.insert(0, 0)

            seq_len_list.append(list(basic))
        basic = pd.DataFrame(seq_len_list).fillna(0).astype("int").values
        # Upper-triangular sum per row, then cumulative sum => absolute end
        # offset of each segment in the original text.
        basic_len = np.sum(np.triu(basic), -1)
        basic_len = np.cumsum(basic_len)
        return basic_len, basic

    def sample_text(self, one_data, data_id):
        """Compress ``one_data`` to fit the model context at level ``data_id``.

        Older portions of the text are downsampled more aggressively (stride
        8/4/2) while the most recent segment is kept verbatim; beyond level 3
        the oldest prefix is replaced by its hash.
        """
        if data_id == 0:
            return one_data[:config["seq_len"]]
        basic_len, basic = self.gen_basic(data_id)
        if data_id == 1:
            two_data = one_data[:basic_len[0]][::2]
            two_data += one_data[basic_len[0]:basic_len[1]]
        elif data_id == 2:
            two_data = one_data[:basic_len[0]][::4]
            two_data += one_data[basic_len[0]:basic_len[1]][::2]
            two_data += one_data[basic_len[1]:basic_len[2]]
        elif data_id == 3:
            two_data = one_data[:basic_len[0]][::8]
            two_data += one_data[basic_len[0]:basic_len[1]][::4]
            two_data += one_data[basic_len[1]:basic_len[2]][::2]
            two_data += one_data[basic_len[2]:basic_len[3]]
        else:
            end = basic[-1]
            # 64 is presumably seq_len // 8 (seq_len == 512) — TODO confirm.
            end_index = end.tolist().index(64)
            hash_text = one_data[:basic_len[end_index]]
            # FIX: the original contained a no-op `if hash_text == "":
            # hash_text = ""` branch; only hash when non-empty.
            if hash_text:
                hash_text = gen_hash(hash_text)
            two_data_list = []
            for one in range(end_index, basic_len.size - 1):
                if len(one_data) <= basic_len[one]:
                    break
                two_data = one_data[basic_len[one]:basic_len[one + 1]]
                two_data = two_data[::config["seq_len"] // end[one + 1] // 2]
                two_data_list.append(two_data)
            two_data = hash_text + "".join(two_data_list)
        return two_data


def sample_run(input_text="question:公共安全和四川省公共安全技术防范管理条例", seq_len_max=512):
    """Generate text autoregressively from ``input_text``, printing each
    intermediate result, until roughly ``seq_len_max`` characters are reached."""
    sampler = Sample()
    text = sampler.sample(input_text)
    remaining = seq_len_max - len(input_text)
    while remaining > 0:
        print(text)
        text = sampler.sample(text, True)
        remaining -= 1


if __name__ == '__main__':
    sample_run()
