# 直接使用T5
import torch
from utils.datasets import T5MLM
from torch.utils.data import DataLoader
import pickle
from transformers import T5Tokenizer, T5ForConditionalGeneration
from tqdm import tqdm
import json
from multiprocessing import Pool
import copy
from utils import stopwords, drug_names

from utils.datasets import BERTBin
from utils.models import Bin, OursBin
from torch.utils.data import DataLoader
from transformers import BertTokenizer
from confs import step1 as conf


# 合成语料1.5G,平均长度161，如果全部展开的话需要241.5G
# 如果这样做下去需要分块才行


class Func_:  # worker that builds T5-masked corpus chunks and saves them as .pt files
    def get_corp(self, corp, workernum, worker_id):
        """Mask every non-drug token in this worker's slice of the corpus.

        Each row of ``corp`` is assumed to be a list of tokens (confirm against
        caller). Rows are cut into windows of ``conf.MAXLEN`` tokens; for every
        token that is not a known drug name, one sample
        ``[masked_sentence, original_token]`` is produced with the token
        replaced by the T5 sentinel ``<extra_id_0>``. Samples are pickled to
        ``./corpus/tmp/worker_id_{worker_id}.pt``.

        Args:
            corp: full corpus; each worker processes only its own slice.
            workernum: total number of workers the corpus is split across.
            worker_id: this worker's index in [0, workernum).
        """
        with open("./drugeuphs.json", "r", encoding="utf-8") as f:
            drugnames = list(json.load(f).keys())
        filelen = len(corp)
        block = filelen // workernum

        # Slice for this worker; the last worker also takes the remainder rows
        # so no corpus rows are silently dropped when filelen % workernum != 0.
        l = worker_id * block
        r = filelen if worker_id == workernum - 1 else (worker_id + 1) * block
        masked_sents = []
        for row in corp[l:r]:
            sentsplits = [row[idx:idx + conf.MAXLEN] for idx in range(0, len(row), conf.MAXLEN)]
            for sentsplit in sentsplits:
                for wid in range(len(sentsplit)):
                    if sentsplit[wid] not in drugnames:
                        masked = copy.deepcopy(sentsplit)
                        masked[wid] = "<extra_id_0>"
                        masked_sents.append([" ".join(masked), sentsplit[wid]])

        # context manager so the output handle is closed even on error
        with open(f"./corpus/tmp/worker_id_{worker_id}.pt", "wb") as f:
            pickle.dump(masked_sents, f)

    @staticmethod
    def error_back(eb):
        """error_callback for Pool.apply_async: report the worker's exception."""
        print(f'error: {str(eb)}')


class Func_bert:  # worker that builds BERT-masked corpus chunks and saves them as .pt files
    def get_corp(self, corp, workernum, worker_id):
        """Mask candidate tokens in this worker's slice of the corpus.

        Each row of ``corp`` is assumed to be a list of tokens (confirm against
        caller). Rows are cut into windows of ``conf.MAXLEN`` tokens; every
        token that is neither a known drug name nor a stopword yields one
        sample ``[masked_sentence, original_token]`` with the token replaced by
        ``[MASK]``. Samples are pickled to
        ``./corpus/tmp/worker_id_{worker_id}.pt``.

        Args:
            corp: full corpus; each worker processes only its own slice.
            workernum: total number of workers the corpus is split across.
            worker_id: this worker's index in [0, workernum).
        """
        filelen = len(corp)
        block = filelen // workernum

        # Slice for this worker; the last worker also takes the remainder rows
        # so no corpus rows are silently dropped when filelen % workernum != 0.
        l = worker_id * block
        r = filelen if worker_id == workernum - 1 else (worker_id + 1) * block
        masked_sents = []
        for row in corp[l:r]:
            sentsplits = [row[idx:idx + conf.MAXLEN] for idx in range(0, len(row), conf.MAXLEN)]
            for sentsplit in sentsplits:
                for wid in range(len(sentsplit)):
                    # the word to mask must be neither a known drug name nor a stopword
                    if sentsplit[wid] not in drug_names and sentsplit[wid] not in stopwords:
                        masked = copy.deepcopy(sentsplit)
                        masked[wid] = "[MASK]"
                        masked_sents.append([" ".join(masked), sentsplit[wid]])

        # context manager so the output handle is closed even on error
        with open(f"./corpus/tmp/worker_id_{worker_id}.pt", "wb") as f:
            pickle.dump(masked_sents, f)

    @staticmethod
    def error_back(eb):
        """error_callback for Pool.apply_async: report the worker's exception."""
        print(f'error: {str(eb)}')


def start_(func, workers, corp):
    """Fan ``func.get_corp`` out over ``workers`` subprocesses and wait.

    Every worker receives the whole corpus plus its own worker id and slices
    out its share itself. ``.get()`` re-raises any worker exception here.
    """
    with Pool(processes=workers) as pool:
        pending = [
            pool.apply_async(
                func=func.get_corp,
                args=(corp, workers, worker_id,),
                error_callback=func.error_back,
            )
            for worker_id in range(workers)
        ]
        for job in pending:
            job.get()


def combine1(workers):
    """Concatenate the per-worker pickle dumps back into one list.

    Reads ``./corpus/tmp/worker_id_{i}.pt`` for i in [0, workers) — the files
    written by the ``get_corp`` workers — and returns their contents
    concatenated in worker order.

    Args:
        workers: number of worker files to merge.

    Returns:
        A single list with all workers' masked-sentence samples.
    """
    out = []
    for worker_id in range(workers):
        # context manager so each file handle is closed promptly
        with open(f"./corpus/tmp/worker_id_{worker_id}.pt", "rb") as f:
            out += pickle.load(f)
    return out


# 进程池实现

def getcorp(corp, workernum=5):
    """Build the BERT-masked corpus in parallel and return the merged result.

    Args:
        corp: corpus rows to mask (see ``Func_bert.get_corp``).
        workernum: number of worker processes; default 5 preserves the
            original hard-coded behavior.

    Returns:
        The concatenated masked-sentence samples from all workers.
    """
    func = Func_bert()
    start_(func, workernum, corp)
    return combine1(workernum)


# ----------------------------------------------------------------------------------------------------------


class Func1():
    """Worker: binary-filter masked sentences with a fine-tuned T5 classifier."""

    def inference(self, device, bindev, part_id, topn="USELESS"):
        """Keep only the ``bindev`` entries the T5 model labels positive.

        Loads the fine-tuned checkpoint, generates a label string per sample
        (any "1" in the decoded output counts as positive), and pickles the
        surviving samples to ``./corpus/tmp/inference_res_part{part_id}.pt``.

        Args:
            device: torch device string, e.g. "cuda:0".
            bindev: samples to classify (format defined by T5MLM).
            part_id: suffix for the output file.
            topn: unused in this binary step; kept for a uniform signature.
        """
        tokenizer = T5Tokenizer.from_pretrained(conf.T5)
        model = T5ForConditionalGeneration.from_pretrained(conf.T5).to(device)
        model.load_state_dict(torch.load("./models/usefulModels/Epoch9_BinT5-1.pt", map_location=device))

        # 2. define dataloader
        devset = T5MLM(conf, tokenizer, bindev, type="inf")
        dev_loader = DataLoader(
            devset,
            batch_size=800,
            # num_workers must stay unset: a pool worker may not spawn children
            pin_memory=True,
        )

        inference_idx = []
        model.eval()
        with torch.no_grad():
            for batch in dev_loader:
                inputs = batch[0]
                for k in inputs.keys():
                    inputs[k] = inputs[k].squeeze(1).to(device)
                gen = model.generate(**inputs)
                for eachid in range(gen.shape[0]):
                    decoded = tokenizer.decode(gen[eachid], skip_special_tokens=True)
                    inference_idx.append(1 if "1" in decoded else 0)
        assert len(inference_idx) == len(bindev)

        # keep only the samples classified as positive
        inference_res = [bindev[i] for i in range(len(inference_idx)) if inference_idx[i] == 1]
        # context manager so the output handle is closed even on error
        with open(f"./corpus/tmp/inference_res_part{part_id}.pt", "wb") as f:
            pickle.dump(inference_res, f)
        print(f"inference part {part_id} finished.")

    @staticmethod
    def error_back(eb):
        """error_callback for Pool.apply_async: report the worker's exception."""
        print(f'error: {str(eb)}')


class Func1bert():
    """Worker: binary-filter masked sentences with a fine-tuned BERT classifier."""

    def inference(self, device, bindev, part_id, topn="USELESS"):
        """Keep ``bindev`` entries ([masked sentence, masked word]) predicted 1.

        Survivors are pickled to ``./corpus/tmp/inference_res_part{part_id}.pkl``.

        Args:
            device: torch device string, e.g. "cuda:0".
            bindev: samples [sentence with mask, masked word].
            part_id: suffix for the output file.
            topn: unused in this binary step; kept for a uniform signature.
        """
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
        model = OursBin(conf).to(device)
        # context manager so the checkpoint handle is closed promptly
        # (the original passed an unclosed open() straight to torch.load)
        with open("./models/step1train/Epoch4_step1train_mlm2.pkl", "rb") as ckpt:
            model.load_state_dict(torch.load(ckpt, map_location=device))

        # 2. define dataloader over bindev [sentence with mask, masked word]
        devset = BERTBin(conf, tokenizer, bindev, type="inf")
        dev_loader = DataLoader(
            devset,
            batch_size=600,
            # num_workers must stay unset: a pool worker may not spawn children
            pin_memory=True,
        )

        preds = torch.Tensor().to(device)
        model.eval()
        with torch.no_grad():
            for d in dev_loader:
                for k in d[0].keys():
                    d[0][k] = d[0][k].squeeze(1).to(device)
                cls = model(d, type="inf")
                _, predicted = torch.max(cls.logits, dim=1)
                preds = torch.cat([preds, predicted], dim=0)

        assert len(preds) == len(devset.corp)
        # keep only the samples classified as 1
        inference_res = [devset.corp[idx] for idx in range(len(devset.corp)) if preds[idx] == 1]
        with open(f"./corpus/tmp/inference_res_part{part_id}.pkl", "wb") as f:
            pickle.dump(inference_res, f)
        print(f"inference_res part {part_id} finished.")

    @staticmethod
    def error_back(eb):
        """error_callback for Pool.apply_async: report the worker's exception."""
        print(f'error: {str(eb)}')


# -------------------------------------------------------------------------------------------------------


class Func2():
    """Skip step 1: run step-2 inference directly on the synthetic corpus.

    ``topn`` selects how many of the model's top-ranked predictions at the
    masked slot are checked against the known drug vocabulary.
    """

    def inference(self, device, corpfile, part_id, topn, modelname):
        """Keep samples whose masked slot ranks a known drug in the top-n.

        Results are pickled to ``./corpus/step2inf/{modelname}res_part{part_id}.pt``.

        Args:
            device: torch device string, e.g. "cuda:0".
            corpfile: corpus samples (format defined by T5MLM).
            part_id: suffix for the output file.
            topn: how many top-ranked token ids to scan per sample.
            modelname: checkpoint name under ./models/step2train/.
        """
        batchsize = 400
        tokenizer = T5Tokenizer.from_pretrained(conf.T5)
        with open("./drugeuphs.json", "r", encoding="utf-8") as f:
            drugjson = json.load(f)
        # map each known drug name to its first real token id;
        # T5/SentencePiece may emit a leading id 3 (presumably the empty "▁"
        # piece — confirm with the tokenizer) before the word itself, so skip it
        drug_id = {}
        for k in drugjson.keys():
            input_ids = tokenizer(k).input_ids
            if input_ids[0] != 3:
                drug_id[k] = input_ids[0]
            else:
                drug_id[k] = input_ids[1]
        model = T5ForConditionalGeneration.from_pretrained(conf.T5).to(device)
        model.load_state_dict(torch.load(f"./models/step2train/{modelname}.pt", map_location=device))

        # 2. define dataloader; shuffle must stay False so idx*batchsize+rank
        # maps back to the original corpfile position
        devset = T5MLM(conf, tokenizer, corpfile, type="inf")
        dev_loader = DataLoader(
            devset,
            batch_size=batchsize,
            shuffle=False
        )

        model.eval()
        output = []
        with torch.no_grad():
            for idx, batch in enumerate(dev_loader):
                inputs = batch[0]
                for k in inputs.keys():
                    inputs[k] = inputs[k].squeeze(1).to(device)
                outputs = model.generate(**inputs, return_dict_in_generate=True, output_scores=True)
                # score position of the predicted word, again skipping a
                # possible leading empty-piece id 3 in the generated sequence
                first_char_pos = 2 if outputs.sequences[0][2].item() != 3 else 3
                target_score = outputs.scores[first_char_pos]
                _, target_score_rank = torch.sort(target_score, descending=True)
                for rank in range(target_score_rank.shape[0]):
                    # hoist the top-n slice out of the per-drug loop
                    top_ids = target_score_rank[rank, 0:topn]
                    for d in drug_id.keys():
                        if drug_id[d] in top_ids:
                            output.append(corpfile[idx * batchsize + rank])
                            break

        # context manager so the output handle is closed even on error
        with open(f"./corpus/step2inf/{modelname}res_part{part_id}.pt", "wb") as f:
            pickle.dump(output, f)
        print(f"inference part {part_id} finished.")

    @staticmethod
    def error_back(eb):
        """error_callback for Pool.apply_async: report the worker's exception."""
        print(f'error: {str(eb)}')


# ------------------------------------------------------------------------------------------------------------
# 这里的workers应该只能是3
def start(func, workers, corp_part_li, part, topn, modelname=None):
    """Run ``func.inference`` on ``workers`` GPUs in parallel and wait for all.

    Worker i runs on device "cuda:i" over corpus slice ``corp_part_li[i]``
    with part id ``part - 2 + i`` (the caller's numbering convention — the
    comment above says workers should only be 3, presumably because of this
    arithmetic; confirm before changing).

    Args:
        func: object exposing ``inference`` and ``error_back``.
        workers: number of subprocesses / GPUs.
        corp_part_li: one corpus slice per worker.
        part: base part id.
        topn: forwarded to ``func.inference``.
        modelname: forwarded as an extra positional argument when given;
            Func2.inference requires it (the original call dropped it and
            crashed), Func1/Func1bert do not accept it.
    """
    with Pool(processes=workers) as pool:
        jobs = []
        for worker_id in range(workers):
            args = [f"cuda:{worker_id}", corp_part_li[worker_id], part - 2 + worker_id, topn]
            if modelname is not None:
                args.append(modelname)
            jobs.append(pool.apply_async(
                func=func.inference,
                args=tuple(args),
                error_callback=func.error_back,
            ))
        for job in jobs:
            job.get()


def start_inference(workernum, corp_part_li, part, inference="step_1", topn=None):
    """Dispatch a parallel inference run over ``workernum`` processes.

    Args:
        workernum: number of worker processes / GPUs.
        corp_part_li: one corpus slice per worker.
        part: base part id for output file naming.
        inference: "step1" (or the legacy default spelling "step_1") selects
            the BERT binary filter (Func1bert); "step2" selects the T5
            drug-ranking filter (Func2).
        topn: only meaningful for step 2.

    Raises:
        ValueError: for an unknown ``inference`` stage (the original fell
            through and crashed with UnboundLocalError on the default value).
    """
    if inference in ("step1", "step_1"):
        func = Func1bert()
    elif inference == "step2":
        func = Func2()
    else:
        raise ValueError(f"unknown inference stage: {inference!r}")
    start(func, workernum, corp_part_li, part, topn)
