# -*- coding: utf-8 -*-
"""
pretrain 的时候negative sample 的方法可以改, 比如替换部分药物，或者按照test方法
试验replace_loss 和 positive_rate

"""

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--nsp_train_epoch", type=int, default=5)
parser.add_argument("--mlm_train_epoch", type=int, default=10)  # tunable
parser.add_argument("--batch_size", type=int, default=24)
parser.add_argument("--negative_rate", type=int, default=2)  # tunable
parser.add_argument("--positive_rate", type=int, default=1, help="正采样率，与药物顺序随机打乱有关")  #
parser.add_argument("--save_pretrained_model_path", type=str, default="./herb_pretrained")
parser.add_argument("--mlm_save_pretrained_model_path", type=str, default="./mlm_herb_pretrained")
parser.add_argument("--run_type", type=str, default="infer", choices=["train", "evaluate", "both", "infer"])
parser.add_argument("--input_type", type=str, default="both", choices=["text", "id", "both"])
parser.add_argument("--token_type_num", type=int, default=2)
# NOTE(review): the default "both" is not among the declared choices; argparse does not
# validate defaults, so args.negative_sample_type == "both" silently matches none of the
# branches in build_negative_samples — confirm the intended default with the author.
parser.add_argument("--negative_sample_type", type=str, default="both",
                    choices=["random", "replace", "similar", "combine"])
# --mlm_type is read by the MLM collator (MyCollator.__call__) but was never declared,
# which crashed MLM training with AttributeError; declare it with the values the code tests.
parser.add_argument("--mlm_type", type=str, default="naive", choices=["naive", "sep", "combine"])
parser.add_argument("--replace_loss", action="store_true")
parser.add_argument("--replace_all", action="store_true", help="替换所有药物时给出replace_label")
parser.add_argument("--share_cls", action="store_true")
parser.add_argument("--same_position", action="store_true")
# --infer_path is read by inference() but was never declared; declare it with a default.
parser.add_argument("--infer_path", type=str, default="./infer_predictions.pkl")
# parser.add_argument("--log_path", type=str)

args = parser.parse_args()
device = 0  # CUDA device index used for all .to(device) calls below

import json
import random
import copy
import pickle
import os
import numpy as np
from tqdm import tqdm
import torch
from torch import nn
from torch.optim import AdamW
import torch.nn.functional as F
from torch.utils.data import DataLoader
from transformers import AutoTokenizer, AutoModel
from transformers import BertTokenizerFast, BertTokenizer
from transformers import RobertaTokenizer, RobertaForMaskedLM
from transformers import BertForPreTraining, TrainingArguments
from transformers import get_linear_schedule_with_warmup
import datasets
from datasets import load_dataset

# 自己的依赖
from roberta import RobertaForPreTraining
import utils

# Fix all RNG seeds (python, numpy, torch CPU and CUDA) for reproducibility.
np.random.seed(1234)
random.seed(1234)
torch.manual_seed(1234)
torch.cuda.manual_seed(1234)


def set_up_logging():
    """Create this run's logging directory and open the log writers.

    The run directory is ``./logging/<save_pretrained_model_path>``; every
    command-line argument is written at the top of ``log.txt``.

    Returns:
        (logging, logging_csv, log_path): a text-log callable, a csv-log
        callable (both produced by ``utils``), and the run's log directory.
    """
    log_path = os.path.join("./logging/", args.save_pretrained_model_path)
    # makedirs with exist_ok creates both ./logging/ and the run directory in one
    # call and is race-free; the original os.mkdir pair failed on nested paths
    # and raced between the exists() check and the mkdir().
    os.makedirs(log_path, exist_ok=True)
    logging = utils.logging(os.path.join(log_path, 'log.txt'))  # plain-text run log
    logging_csv = utils.logging_csv(os.path.join(log_path, 'record.csv'))  # csv metric record
    # Record the full configuration so the run is reproducible from the log alone.
    for k, v in vars(args).items():
        logging("%s:\t%s\n" % (str(k), str(v)))
    logging("\n")
    return logging, logging_csv, log_path


# Module-level logging handles; `logging` here is the project's log callable
# (it shadows nothing imported in this file).
logging, logging_csv, log_path = set_up_logging()


class Dataset(torch.utils.data.Dataset):
    """Pairs symptom descriptions (``text``) with prescriptions (``med``) and
    builds samples for NSP-style pretraining, MLM training, evaluation, or
    inference.

    Args:
        tokenizer: text tokenizer (BERT-style, provides pad/sep ids).
        herb_tokenizer: tokenizer whose extra vocabulary encodes herb names.
        examples: dict with "text" (symptom strings) and "med" (herb lists).
        dataset_type: one of "train"/"dev"/"eval"/"infer"; controls which
            builders run and what ``__getitem__`` returns.
        compare_examples: extra corpus used for similarity-based negatives
            when evaluating (so the eval set is not compared against itself).
        for_nsp: build sentence-pair (cls/replace) labels for NSP training.
        negative_rate: negatives generated per positive example.
        positive_rate: positives per example; >1 shuffles the herb order.
        input_type: "text", "id", or "both" — how herbs are encoded.
    """

    def __init__(self, tokenizer, herb_tokenizer, examples, dataset_type, compare_examples=None,
                 for_nsp=False, negative_rate=2, positive_rate=1, input_type="both"):
        self.tokenizer = tokenizer
        self.herb_tokenizer = herb_tokenizer
        # __getitem__ branches on the split type, so it must be stored
        # (bug fix: the original never assigned self.dataset_type).
        self.dataset_type = dataset_type
        # The sample builders read self.for_nsp, so it must be set before any
        # building happens (bug fix: the original assigned it only after the
        # "infer" early return, crashing build_inference_samples).
        self.for_nsp = for_nsp
        # examples has two parts: text (symptom description) and med (the
        # prescription's herb composition).
        self.text = examples["text"]
        self.meds = examples["med"]
        if dataset_type == "dev":
            # Randomly subsample a development set (bug fix: the original read
            # self.text before assigning it; also cap at the corpus size so
            # small corpora don't raise ValueError).
            sample_ids = random.sample(range(len(self.text)), min(1000, len(self.text)))
            self.text = [self.text[i] for i in sample_ids]
            self.meds = [self.meds[i] for i in sample_ids]
        self.sample_id = []
        self.input_ids = []
        self.token_type_ids = []
        self.attention_mask = []
        self.position_ids = []
        if dataset_type == "infer":
            self.build_inference_samples(input_type, for_nsp=True)
            return
        # Everything below is only needed for training/evaluation.
        self.cls_labels = []  # symptom/prescription match labels
        self.f1_score = []
        self.replace_labels = []  # marks which herbs were swapped in as negatives
        self.is_eval = dataset_type == "eval"
        if self.is_eval:
            assert positive_rate == 1
        self.build_positive_samples(positive_rate, input_type, for_nsp, self.is_eval)
        self.build_negative_samples(compare_examples, negative_rate, self.is_eval, input_type)

    def build_inference_samples(self, input_type="both", for_nsp=True):
        """Tokenize every (text, meds) pair as-is; no labels are produced."""
        for i in range(len(self.text)):
            text, meds = self.text[i], self.meds[i]
            tok_result = self.tokenize(self.tokenizer, self.herb_tokenizer, text, meds, input_type,
                                       same_med_position_id=args.same_position, for_nsp=self.for_nsp)
            self.append_tokenize_result(tok_result)

    def build_positive_samples(self, positive_rate, input_type, for_nsp, is_eval):
        """Emit `positive_rate` positive samples per example (cls label 0)."""
        for i in range(len(self.text)):
            # Copy the herb list so later shuffling/negative construction
            # cannot mutate the original data.
            text, meds = self.text[i], copy.deepcopy(self.meds[i])
            if len(text) == 0 or len(meds) == 0:
                continue
            # Build the positives.
            for _ in range(positive_rate):
                if positive_rate > 1:
                    # With multiple positive samples, randomly shuffle the
                    # herb order inside the prescription for variety.
                    random.shuffle(meds)
                tok_result = self.tokenize(self.tokenizer, self.herb_tokenizer, text, meds, input_type,
                                           same_med_position_id=args.same_position, for_nsp=self.for_nsp)
                self.append_tokenize_result(tok_result, cls_label=0)
                # The nsp label for BertForPreTraining is 0 for a true
                # sentence pair and 1 for a random pair.
                self.sample_id.append(i)
                if for_nsp and not is_eval:
                    # Initialise replace_labels for this positive sample.
                    if args.replace_all:
                        # Prescription-level negatives are also annotated, so
                        # positives carry explicit 0 ("not replaced") labels.
                        self.replace_labels.append([0 for _ in tok_result['input_ids']])
                    else:
                        self.replace_labels.append([-100 for _ in tok_result['input_ids']])
                    assert len(self.replace_labels[-1]) == len(self.attention_mask[-1])
                self.f1_score.append((1., 1., 1.))

    def build_negative_samples(self, compare_examples, negative_rate, is_eval, input_type):
        """Emit negative samples (cls label 1) per the configured strategy."""
        if is_eval:
            total_dup_num = 0
            for i in range(len(self.text)):
                if len(self.text[i]) == 0 or len(self.meds[i]) == 0:
                    continue
                if compare_examples is not None:
                    # For the eval set, do not compare against itself — draw
                    # the most similar prescriptions from an external corpus.
                    negative_samples, duplicate_num = self.get_similar_negative_samples(self.text[i], self.meds[i],
                                                                                        compare_examples["text"],
                                                                                        compare_examples["med"],
                                                                                        negative_rate)
                    total_dup_num += duplicate_num
                    for _, _, fake_med in negative_samples:
                        tok_result = self.tokenize(self.tokenizer, self.herb_tokenizer, self.text[i], fake_med,
                                                   input_type, for_nsp=self.for_nsp,
                                                   same_med_position_id=args.same_position)
                        self.append_tokenize_result(tok_result, cls_label=1)
                        self.sample_id.append(i)
                        self.f1_score.append(self.F1_score(fake_med, self.meds[i]))

                else:
                    # No external corpus: pair this text with every other
                    # prescription in the eval set.
                    for fake_med_id in range(len(self.meds)):
                        if fake_med_id == i:
                            continue
                        text, med = self.text[i], self.meds[fake_med_id]
                        tok_result = self.tokenize(self.tokenizer, self.herb_tokenizer, text, med, input_type,
                                                   same_med_position_id=args.same_position, for_nsp=self.for_nsp)
                        self.append_tokenize_result(tok_result, cls_label=1)
                        self.sample_id.append(i)
                        self.f1_score.append(self.F1_score(self.meds[fake_med_id], self.meds[i]))
            logging("duplicate number %d" % total_dup_num)
        else:
            if self.for_nsp:  # make some negative samples for cls
                omitted_empty_num = 0  # count of examples missing text or meds
                for i in range(len(self.text)):
                    if len(self.meds[i]) == 0 or len(self.text[i]) == 0:
                        omitted_empty_num += 1
                        continue
                    flag = 0
                    if args.negative_sample_type == "combine":
                        # Alternate between the two strategies, half each.
                        if random.random() < 0.5:
                            flag = 1
                        else:
                            flag = 2
                    if args.negative_sample_type == "random" or len(self.meds[i]) <= 2 or len(
                            self.text[i]) <= 3 or flag == 1:
                        # Whole-prescription (or whole-text) replacement; tiny
                        # prescriptions/texts always use this strategy because
                        # partial replacement would be degenerate.
                        for _ in range(negative_rate):
                            self.negative_sample_prescription(i, input_type)
                    elif args.negative_sample_type == "replace" or flag == 2:
                        # Replace a subset of the herbs with random ones.
                        for _ in range(negative_rate):
                            text, med = self.text[i], self.meds[i]
                            med, to_replace_id = self.replace_random_med(copy.deepcopy(med))
                            tok_result = self.tokenize(self.tokenizer, self.herb_tokenizer, text, med, input_type,
                                                       rep_med_id=to_replace_id, for_nsp=self.for_nsp,
                                                       same_med_position_id=args.same_position)
                            self.append_tokenize_result(tok_result, cls_label=1, append_replace_labels=True)
                assert len(self.cls_labels) == len(self.replace_labels)
                logging("omitted empty sample number %d\n" % omitted_empty_num)
        if not self.for_nsp:
            assert len(self.replace_labels) == 0

    def negative_sample_prescription(self, real_index, input_type):
        """Build one negative by swapping in a random other prescription or text."""
        if random.random() < 0.5:
            # Half the time: keep the text, swap in a random other prescription.
            fake_med_id = random.choice(range(len(self.text)))
            while fake_med_id == real_index or len(self.meds[fake_med_id]) == 0:
                fake_med_id = random.choice(range(len(self.text)))
            text, med = self.text[real_index], self.meds[fake_med_id]
            real_med = self.meds[real_index]
        else:
            # Other half: keep the prescription, swap in a random other text.
            fake_text_id = random.choice(range(len(self.text)))
            while fake_text_id == real_index or len(self.text[fake_text_id]) == 0:
                fake_text_id = random.choice(range(len(self.text)))
            text, med = self.text[fake_text_id], self.meds[real_index]
            real_med = self.meds[fake_text_id]

        if args.replace_all:
            # Prescription-level negatives also get per-herb change labels.
            to_replace_id = self.find_difference(real_med, med)
            tok_result = self.tokenize(self.tokenizer, self.herb_tokenizer, text, med,
                                       input_type, rep_med_id=to_replace_id, for_nsp=self.for_nsp,
                                       same_med_position_id=args.same_position)
        else:
            tok_result = self.tokenize(self.tokenizer, self.herb_tokenizer, text, med,
                                       input_type, for_nsp=self.for_nsp,
                                       same_med_position_id=args.same_position)
        self.append_tokenize_result(tok_result, cls_label=1, append_replace_labels=True)
        assert len(self.replace_labels[-1]) == len(self.attention_mask[-1])

    def append_tokenize_result(self, tok_result, cls_label=None, append_replace_labels=False):
        """Append one tokenized sample's tensors (and optional labels) to the dataset."""
        self.input_ids.append(tok_result['input_ids'])
        self.token_type_ids.append(tok_result['token_type_ids'])
        self.attention_mask.append(tok_result['attention_mask'])
        self.position_ids.append(tok_result["position_ids"])
        if append_replace_labels:
            if tok_result['replace_labels'] is not None:
                assert len(tok_result['replace_labels']) == len(tok_result['attention_mask'])
                self.replace_labels.append(tok_result['replace_labels'])
            else:
                # No per-herb labels produced: mask the whole sequence out of
                # the replace loss with the ignore index.
                self.replace_labels.append([-100 for _ in tok_result['input_ids']])
                assert len(self.replace_labels[-1]) == len(self.attention_mask[-1])
        if cls_label is not None:
            self.cls_labels.append(cls_label)

    def replace_random_med(self, med):
        """Replace one herb (short prescriptions) or 2..len/2 herbs with random herb
        tokens; returns the modified list and the replaced positions."""
        if len(med) <= 4:
            # Herb vocabulary ids start right after the text tokenizer's ids.
            rep_med = random.choice(range(len(self.tokenizer), len(self.herb_tokenizer)))
            to_replace_id = random.choice(range(len(med)))
            med[to_replace_id] = self.herb_tokenizer.convert_ids_to_tokens(rep_med)
            to_replace_id = [to_replace_id]
        else:
            replace_num = random.choice(range(2, round(len(med) / 2) + 1))
            to_replace_id = random.sample(range(len(med)), replace_num)
            rep_meds = random.sample(range(len(self.tokenizer), len(self.herb_tokenizer)), replace_num)
            for i, m in zip(to_replace_id, rep_meds):
                med[i] = self.herb_tokenizer.convert_ids_to_tokens(m)
        return med, to_replace_id

    @staticmethod
    def find_difference(med, fake):
        """Return indices of herbs in `fake` that do not occur in `med`."""
        to_replace_id = []
        med_set = set(med)
        for i, m in enumerate(fake):
            if m not in med_set:
                to_replace_id.append(i)
        return to_replace_id

    @staticmethod
    def get_similar_negative_samples(text, med, compare_text, compare_meds, sample_num=20):
        """Return up to `sample_num` (jaccard, text, meds) triples from the compare
        corpus most similar to `text`, excluding exact text duplicates and
        identical prescriptions; also returns the duplicate count."""
        candidates = []
        duplicate_num = 0
        for i in range(len(compare_text)):
            if len(compare_meds[i]) == 0:
                continue
            js = Dataset.jaccard(text, compare_text[i])
            if js == 1:
                duplicate_num += 1
            if js == 0 or js == 1 or med == compare_meds[i]:
                continue
            candidates.append((js, compare_text[i], compare_meds[i]))
            # Keep only the current top-`sample_num` by similarity.
            candidates.sort(key=lambda k: k[0], reverse=True)
            candidates = candidates[:sample_num]
        return candidates, duplicate_num

    @staticmethod
    def jaccard(t1, t2):
        """Character-level Jaccard similarity of two strings (or token iterables)."""
        list1, list2 = set(list(t1)), set(list(t2))
        intersection = list1.intersection(list2)
        union = list1.union(list2)
        return len(intersection) / float(len(union))

    @staticmethod
    def F1_score(pred, label):
        """Set-based (precision, recall, F1) of a predicted herb list vs the gold one."""
        pred_set = set(pred)
        label_set = set(label)
        right_num = len(pred_set.intersection(label_set))
        if right_num == 0:
            return 0., 0., 0.
        precision = right_num / float(len(pred))
        recall = right_num / float(len(label))
        F1 = 2 * precision * recall / (precision + recall)
        return precision, recall, F1

    @staticmethod
    def tokenize(tokenizer, herb_tokenizer, text, meds, input_type, rep_med_id=None, for_nsp=False,
                 same_med_position_id=False):
        """Encode one (text, meds) pair.

        input_type selects the encoding:
          * "text": herbs are joined with "," and encoded as plain text.
          * "id":   text and herbs encoded separately, herb ids appended.
          * "both": herbs appear both as text and as herb ids in one sequence.

        Returns a dict with input_ids / attention_mask / token_type_ids /
        replace_labels (or None) / position_ids, all python lists of equal length.
        """
        assert len(text) > 0 and len(meds) > 0
        if input_type == "text":
            # Layout: CLS, text, SEP, med, SEP
            text_ids = tokenizer(text[:150], ",".join(meds), truncation=True, max_length=200, add_special_tokens=True,
                                 return_token_type_ids=True, return_attention_mask=True)
            input_ids = text_ids["input_ids"]
            attention_mask = text_ids["attention_mask"]
            token_type_ids = text_ids["token_type_ids"]
            assert len(input_ids) == len(attention_mask) and len(input_ids) == len(token_type_ids)
            # Position ids start at 2 (0/1 are presumably reserved — see
            # process_position_ids, which uses the same offset).
            position_ids = list(range(2, len(input_ids) + 2))
            return {"input_ids": input_ids, "attention_mask": attention_mask, "token_type_ids": token_type_ids,
                    "replace_labels": None, "position_ids": position_ids}
        elif input_type == "id":
            # Layout: [CLS, text, SEP] + [med ids] + [SEP]
            # Text and herbs are encoded by their own tokenizers.
            text_ids = tokenizer(text[:150], truncation=True, max_length=200, add_special_tokens=True,
                                 return_token_type_ids=True, return_attention_mask=True)
            real_herb_ids = herb_tokenizer(meds, is_split_into_words=True, truncation=True, max_length=50,
                                           return_attention_mask=True)
            replace_labels = Dataset.process_replace_labels(rep_med_id, real_herb_ids, text_ids, for_nsp,
                                                            adjust_sep=False)
            input_ids = text_ids["input_ids"] + real_herb_ids["input_ids"] + [tokenizer.sep_token_id]
            attention_mask = text_ids["attention_mask"] + real_herb_ids["attention_mask"] + [1]
            token_type_ids = text_ids["token_type_ids"] + [1] * (len(real_herb_ids["input_ids"]) + 1)
            position_ids = Dataset.process_position_ids(real_herb_ids, text_ids, input_ids, same_med_position_id,
                                                        adjust_sep=False)
            assert len(position_ids) == len(input_ids)
            assert replace_labels is None or len(replace_labels) == len(input_ids)
            return {"input_ids": input_ids, "attention_mask": attention_mask, "token_type_ids": token_type_ids,
                    "replace_labels": replace_labels, "position_ids": position_ids}

        elif input_type == "both":
            # Layout: CLS, text, SEP, med_text, SEP is rebuilt as
            # CLS, text, SEP, med_text, med_id, SEP (text SEP dropped via [:-1]).
            text_ids = tokenizer(text[:150], ",".join(meds), truncation=True, max_length=200, add_special_tokens=True,
                                 return_token_type_ids=True, return_attention_mask=True)
            real_herb_ids = herb_tokenizer(meds, is_split_into_words=True, truncation=True, max_length=50,
                                           return_attention_mask=True)
            replace_labels = Dataset.process_replace_labels(rep_med_id, real_herb_ids, text_ids, for_nsp,
                                                            adjust_sep=True)
            input_ids = text_ids["input_ids"][:-1] + real_herb_ids["input_ids"] + [tokenizer.sep_token_id]
            attention_mask = text_ids["attention_mask"][:-1] + real_herb_ids["attention_mask"] + [1]
            assert args.token_type_num == 2 or args.token_type_num == 3, args.token_type_num
            if args.token_type_num == 2:
                token_type_ids = text_ids["token_type_ids"] + [1] * len(real_herb_ids["input_ids"])
            else:  # args.token_type_num == 3: herb-id segment gets its own type
                token_type_ids = text_ids["token_type_ids"] + [2] * len(real_herb_ids["input_ids"])
            position_ids = Dataset.process_position_ids(real_herb_ids, text_ids, input_ids, same_med_position_id,
                                                        adjust_sep=True)
            assert len(position_ids) == len(input_ids)
            assert replace_labels is None or len(replace_labels) == len(input_ids)
            return {"input_ids": input_ids, "attention_mask": attention_mask, "token_type_ids": token_type_ids,
                    "replace_labels": replace_labels, "position_ids": position_ids}
        else:
            # Previously an unknown input_type silently returned None.
            raise ValueError("unknown input_type: %s" % input_type)

    @staticmethod
    def process_position_ids(real_herb_ids, text_ids, input_ids, same_med_position_id, adjust_sep):
        """Position ids for the combined sequence; with same_med_position_id every
        herb token shares position 513 so herb order carries no signal."""
        if same_med_position_id:
            # Herbs are an unordered set: give them all the same position id.
            if adjust_sep:
                position_ids = list(range(2, len(text_ids["input_ids"]) + 1)) + [513] * (
                        len(real_herb_ids["input_ids"]) + 1)
            else:
                position_ids = list(range(2, len(text_ids["input_ids"]) + 2)) + [513] * (
                        len(real_herb_ids["input_ids"]) + 1)
        else:
            position_ids = list(range(2, len(input_ids) + 2))
        return position_ids

    @staticmethod
    def process_replace_labels(rep_med_id, real_herb_ids, text_ids, for_nsp, adjust_sep):
        """Per-token replace labels: 1 for a replaced herb, 0 for an original herb,
        -100 (ignore index) everywhere else; None when not applicable."""
        if rep_med_id is not None and for_nsp:
            replace_labels = [0 for _ in real_herb_ids["input_ids"]]
            for med_id in rep_med_id:
                if med_id >= len(real_herb_ids["input_ids"]):
                    # The replaced herb was truncated away; skip it.
                    continue
                replace_labels[med_id] = 1
            if adjust_sep:
                replace_labels = [-100 for _ in text_ids["input_ids"][:-1]] + replace_labels + [-100]
            else:
                replace_labels = [-100 for _ in text_ids["input_ids"]] + replace_labels + [-100]
        else:
            replace_labels = None
        return replace_labels

    def __getitem__(self, idx):
        """Return one sample dict; the keys depend on the dataset_type."""
        if self.dataset_type == "eval":
            return {"input_ids": self.input_ids[idx], "attention_mask": self.attention_mask[idx],
                    "token_type_ids": self.token_type_ids[idx], "position_ids": self.position_ids[idx],
                    "cls_labels": self.cls_labels[idx], "sample_id": self.sample_id[idx],
                    "f1_score": self.f1_score[idx]}
        elif self.dataset_type == "infer":
            return {"input_ids": self.input_ids[idx], "attention_mask": self.attention_mask[idx],
                    "token_type_ids": self.token_type_ids[idx], "position_ids": self.position_ids[idx]}
        else:
            if len(self.replace_labels) > 0:
                assert self.for_nsp
                assert len(self.replace_labels[idx]) == len(self.attention_mask[idx]), (
                    len(self.replace_labels[idx]), len(self.attention_mask[idx]))
                return {"input_ids": self.input_ids[idx], "attention_mask": self.attention_mask[idx],
                        "token_type_ids": self.token_type_ids[idx], "position_ids": self.position_ids[idx],
                        "cls_labels": self.cls_labels[idx], "replace_labels": self.replace_labels[idx]}
            else:
                # Used by MLM training (no replace labels there).
                return {"input_ids": self.input_ids[idx], "attention_mask": self.attention_mask[idx],
                        "token_type_ids": self.token_type_ids[idx], "position_ids": self.position_ids[idx],
                        "cls_labels": self.cls_labels[idx]}

    def __len__(self):
        return len(self.input_ids)


class MyCollator:
    """Batch collator used in three modes:

    * MLM training  (for_infer=False, for_nsp=False): pads, then masks tokens
      and emits MLM ``labels``.
    * NSP training  (for_infer=False, for_nsp=True): pads and emits ``labels``
      (cls labels) plus padded ``replace_labels`` when replace loss is on.
    * Inference     (for_infer=True): padding only.

    Padded-out label positions use -100 (the ignore index of the loss).
    """

    def __init__(self, tokenizer, for_infer=False, for_nsp=False, mlm_probability=0.15, max_length=None,
                 pad_to_multiple_of=None, padding=True):
        self.tokenizer = tokenizer
        self.for_nsp = for_nsp
        self.for_infer = for_infer
        self.mlm_probability = mlm_probability
        self.max_length = max_length
        self.pad_to_multiple_of = pad_to_multiple_of
        self.padding = padding
        self.label_pad_token_id = -100  # ignore index for loss computation

    def __call__(self, features):
        """Collate a list of per-sample dicts into one padded tensor batch."""
        # Pull out the fields tokenizer.pad cannot handle; they are re-added
        # (hand-padded) after padding.
        position_ids = [feature["position_ids"] for feature in features]
        if "sample_id" in features[0]:
            sample_ids = [feature["sample_id"] for feature in features]
            f1_scores = [feature["f1_score"] for feature in features]
        else:
            sample_ids = None
            f1_scores = None
        if self.for_nsp and not self.for_infer:
            cls_labels = [feature["cls_labels"] for feature in features]
            if "replace_labels" in features[0]:
                replace_labels = [feature["replace_labels"] for feature in features]
            else:
                replace_labels = None
        features = [{k: feature[k] for k in feature if
                     k != "cls_labels" and k != "sample_id" and k != "f1_score" and k != "replace_labels" and k != "position_ids"}
                    for feature in features]
        # Features that can simply be padded with 0 go straight through tokenizer.pad.
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            # padding='max_length',
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="pt",
        )
        max_len = batch['input_ids'].shape[1]
        # position_ids were removed from features above, so pad them by hand
        # (truncate to max_len, then zero-pad on the right).
        batch['position_ids'] = [list(pos)[:max_len] + [0] * (max_len - len(pos)) for pos in position_ids]
        if not self.for_infer:
            if not self.for_nsp:  # mlm train
                flag = 0
                # NOTE(review): assumes args defines mlm_type in
                # {"naive", "sep", "combine"} — confirm it is declared on the parser.
                if args.mlm_type == "combine":
                    # Randomly alternate between segment-restricted and naive masking.
                    if random.random() < 0.5:
                        flag = 1
                    else:
                        flag = 2
                if args.mlm_type == "sep" or flag == 1:
                    assert args.token_type_num == 2
                    # Segment-aware masking: mask within only one segment at a time.
                    batch["input_ids"], batch["labels"] = self.mask_tokens(batch["input_ids"], batch["token_type_ids"])
                elif args.mlm_type == "naive" or flag == 2:
                    batch["input_ids"], batch["labels"] = self.mask_tokens(batch["input_ids"])
                else:
                    raise Exception("Wrong mlm type")
            else:
                batch["labels"] = cls_labels
                # Right padding is assumed by the label-padding code below.
                assert self.tokenizer.padding_side == "right"
                if args.replace_loss and replace_labels is not None:
                    # Pad replace labels with the ignore index so padded
                    # positions do not contribute to the replace loss.
                    batch['replace_labels'] = [
                        list(label)[:max_len] + [self.label_pad_token_id] * (max_len - len(label))
                        for label in replace_labels]
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        if 'replace_labels' not in batch:
            batch['replace_labels'] = None
        if sample_ids is not None:
            batch["sample_ids"] = sample_ids
            batch["f1_score"] = f1_scores

        return batch

    def mask_tokens(self, inputs, token_type_ids=None):
        """BERT-style random masking.

        Each non-special token is masked with probability mlm_probability and
        replaced by [MASK]; labels keep the original ids at masked positions
        and -100 elsewhere. NOTE(review): unlike standard BERT, there is no
        80/10/10 split — every selected token becomes [MASK].

        When token_type_ids is given, masking is restricted (with equal
        probability) to either segment 1 or segment 0 tokens only.
        """
        labels = inputs.clone()
        probability_matrix = torch.full(labels.shape, self.mlm_probability)
        special_tokens_mask = [self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in
                               labels.tolist()]
        special_tokens_mask = torch.tensor(special_tokens_mask, dtype=torch.bool)

        probability_matrix.masked_fill_(special_tokens_mask, value=0.)
        masked_indices = torch.bernoulli(probability_matrix).bool()
        if token_type_ids is not None:
            if random.random() < 0.5:
                masked_indices = masked_indices & token_type_ids.bool()
            else:
                masked_indices = masked_indices & ~token_type_ids.bool()
        labels[~masked_indices] = self.label_pad_token_id
        inputs[masked_indices] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
        return inputs, labels


def my_metrics(gold, pred, f1_scores):
    """Compute retrieval/ranking metrics over per-sample candidate lists.

    gold, pred and f1_scores are dicts keyed by sample id: gold[k] is a
    one-hot list marking the true candidate, pred[k] the predicted scores,
    and f1_scores[k] the (precision, recall, f1) tuple of every candidate
    against the gold prescription.
    """
    from irmetrics.topk import ndcg, rr
    from sklearn.metrics import average_precision_score as ap
    from sklearn.metrics import label_ranking_average_precision_score as mrr

    def order_by_score(scores):
        # Candidate indices sorted by descending score — an ordering
        # ("which candidate comes first"), as irmetrics expects, not ranks.
        indexed = sorted(enumerate(scores), key=lambda item: item[1], reverse=True)
        return [idx for idx, _ in indexed]

    # --- Matrix-style metrics (sklearn + project utils) -------------------
    # Gold rows stay one-hot; predicted rows are binarized on the argmax
    # (ties all receive 1).
    gold_matrix, pred_matrix = [], []
    for k in gold:
        assert sum(gold[k]) == 1, sum(gold[k])
        gold_matrix.append(gold[k])
        best = max(pred[k])
        pred_matrix.append([1 if s == best else 0 for s in pred[k]])
    my_mrr = utils.MRR(gold_matrix, pred_matrix)
    my_map = utils.MAP(gold_matrix, pred_matrix)
    gold_arr = np.array(gold_matrix)
    pred_arr = np.array(pred_matrix)
    ap_score = ap(gold_arr, pred_arr, average='samples')
    rr_score = mrr(gold_arr, pred_arr)

    # --- Per-sample ranking metrics ---------------------------------------
    rr_scores, precision, recall, f1_score = [], [], [], []
    p_at_5, r_at_5, f_at_5 = [], [], []
    for k in gold:  # k is the sample id; results are grouped per sample
        ordering = order_by_score(pred[k])
        # Top-1 candidate's precision/recall/F1 against the gold prescription.
        top1_p, top1_r, top1_f = f1_scores[k][ordering[0]]
        precision.append(top1_p)
        recall.append(top1_r)
        f1_score.append(top1_f)
        # Best value achievable within the top five candidates.
        top5 = ordering[:5]
        p_at_5.append(max(f1_scores[k][t][0] for t in top5))
        r_at_5.append(max(f1_scores[k][t][1] for t in top5))
        f_at_5.append(max(f1_scores[k][t][2] for t in top5))
        rr_scores.append(rr(np.argmax(gold[k]), ordering))
    return {"my_mrr": my_mrr, "my_map": my_map, "ir_MRR": np.average(rr_scores), "sk_MRR": rr_score, "sk_MAP": ap_score,
            "macro_precision": np.average(precision), "macro_recall": np.average(recall),
            "macro_f1": np.average(f1_score), "macro_p@5": np.average(p_at_5), "macro_r@5": np.average(r_at_5),
            "macro_f@5": np.average(f_at_5)}


def inference(model, infer_loader, infer_examples):
    """Run the model over infer_loader and pickle the pooled representations.

    Writes {sample_id: (infer_examples[sample_id], pooled_output_vector)} to
    args.infer_path. Only the first batch entry seen per sample id is kept
    (duplicates arise when a sample appears in several batches).
    """
    model.eval()
    predictions = {}
    # no_grad avoids building autograd graphs we never backprop through.
    with torch.no_grad():
        for batch in infer_loader:
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)
            token_type_ids = batch['token_type_ids'].to(device)
            position_ids = batch['position_ids'].to(device)
            sample_ids = batch['sample_ids']
            _, _, pooled_output = model(input_ids, attention_mask=attention_mask, position_ids=position_ids,
                                        token_type_ids=token_type_ids, return_dict=True)

            pooled_output = list(pooled_output.detach().cpu().numpy())
            for i, sid in enumerate(sample_ids):
                if sid not in predictions:
                    predictions[sid] = (infer_examples[sid], pooled_output[i])
    # with-statement closes the file (the original leaked the handle).
    with open(args.infer_path, "wb") as f:
        pickle.dump(predictions, f)


def evaluate(model, eval_loader, herb_tokenizer=None):
    """Score every (text, prescription) candidate pair in eval_loader and
    compute per-sample ranking metrics via my_metrics.

    When herb_tokenizer is given, the decoded inputs/herbs and their scores
    are also written to text_output.jsonl in the log directory.
    """
    model.eval()
    predictions = {}  # sample id -> list of predicted match scores
    labels = {}  # sample id -> list of reversed gold labels (1 = true pair)
    f1_scores = {}  # sample id -> list of (p, r, f1) of each candidate
    pred_input = {}  # sample id -> decoded full input sequences (text output only)
    pred_herbs = {}  # sample id -> decoded herb tokens (text output only)
    batch_num = 0
    for batch in eval_loader:
        input_ids = batch['input_ids'].to(device)
        attention_mask = batch['attention_mask'].to(device)
        token_type_ids = batch['token_type_ids'].to(device)
        # Position ids are deliberately not fed at evaluation time.
        # position_ids = batch['position_ids'].to(device)
        position_ids = None
        gold_labels = batch['labels']
        sample_ids = batch['sample_ids']
        f1_score = batch['f1_score']
        _, nsp_logits, _ = model(input_ids, attention_mask=attention_mask, position_ids=position_ids,
                                 token_type_ids=token_type_ids, return_dict=True)
        pred_labels = torch.softmax(nsp_logits, 1).detach().cpu().numpy().tolist()
        # labels.extend(gold_labels.numpy().tolist())
        for i in range(len(sample_ids)):
            sid = sample_ids[i]
            if sid not in predictions:
                predictions[sid] = []
                labels[sid] = []
                f1_scores[sid] = []
                pred_input[sid] = []
                pred_herbs[sid] = []
            assert 0 <= pred_labels[i][0] <= 1, pred_labels[i]
            predictions[sid].append(pred_labels[i][0])  # 0 is next sentence, 1 is random
            labels[sid].append(1. - gold_labels[
                i].item())  # 0 is next sentence, 1 is random, to calculate MRR and MAP we need to reverse it
            if herb_tokenizer is not None:
                pred_input[sid].append(herb_tokenizer.convert_ids_to_tokens(input_ids[i], skip_special_tokens=True))
                # NOTE(review): 27090 looks like the first herb-token id (ids at
                # or above it are herbs) — confirm against the herb tokenizer.
                pred_herbs[sid].append(herb_tokenizer.convert_ids_to_tokens([m for m in input_ids[i] if m >= 27090],
                                                                            skip_special_tokens=True))
            f1_scores[sid].append(f1_score[i])
        batch_num += 1
    if herb_tokenizer is not None:
        # Dump one JSON line per sample id with every candidate's decoded
        # input, herbs, predicted score and gold score.
        writer = open(os.path.join(log_path, "text_output.jsonl"), "w")
        for k in pred_input:
            writer.write(json.dumps([{"input": "".join(pred_input[k][j]), "herbs": ",".join(pred_herbs[k][j]),
                                      "prediction_score": "%.4f" % predictions[k][j], "gold_score": labels[k][j]} for j
                                     in range(len(pred_input[k]))], ensure_ascii=False) + "\n")
        writer.close()
    result = my_metrics(labels, predictions, f1_scores)
    return result


def pre_train(model, optim, train_data_loader, train_type, epoch_num, scheduler=None, dev_data_loader=None,
              eval_data_loader=None):
    """Run one pre-training stage (MLM or NSP) for `epoch_num` epochs.

    Args:
        model: the pre-training model, possibly wrapped in nn.DataParallel.
        optim: optimizer over the model's parameters.
        train_data_loader: DataLoader yielding batches for the chosen objective.
        train_type: "mlm" (masked language modeling) or "nsp"
            (prescription-pair classification); anything else raises.
        epoch_num: number of full passes over `train_data_loader`.
        scheduler: optional LR scheduler, stepped once per batch.
        dev_data_loader: if given, the model is evaluated every epoch and the
            best macro-F1 checkpoint is saved to args.save_pretrained_model_path.
        eval_data_loader: if given, evaluated every epoch for logging only.

    Returns:
        Best macro-F1 observed on `dev_data_loader` (0 if it was never given).

    Raises:
        ValueError: if `train_type` is neither "mlm" nor "nsp".
    """
    # Fail fast instead of hitting UnboundLocalError on `loss` below.
    if train_type not in ("mlm", "nsp"):
        raise ValueError("unknown train_type: %s" % train_type)
    max_f1 = 0
    for epoch in range(epoch_num):
        # Re-enter train mode at every epoch: evaluate() at the end of the
        # previous epoch presumably switches the model to eval mode, which
        # would otherwise disable dropout for all remaining epochs.
        model.train()
        logging("%d th epoch\n" % epoch)
        for batch in tqdm(train_data_loader):
            optim.zero_grad()
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)
            labels = batch['labels'].to(device)
            token_type_ids = batch['token_type_ids'].to(device)
            # position_ids = batch['position_ids'].to(device)
            position_ids = None
            # The replace-label auxiliary loss is only active when the batch
            # carries replace_labels AND --replace_loss was requested.
            if "replace_labels" in batch and batch['replace_labels'] is not None and args.replace_loss:
                replace_labels = batch['replace_labels'].to(device)
            else:
                replace_labels = None
            if train_type == "mlm":
                loss, _, _ = model(input_ids, attention_mask=attention_mask, mlm_labels=labels,
                                   position_ids=position_ids, token_type_ids=token_type_ids,
                                   output_hidden_states=False, return_dict=True)
            else:  # "nsp" — validated above
                loss, _, _ = model(input_ids, attention_mask=attention_mask, sequence_label=labels,
                                   replace_label=replace_labels, position_ids=position_ids,
                                   token_type_ids=token_type_ids, output_hidden_states=False,
                                   return_dict=True)
            # Under DataParallel the loss comes back as one value per GPU;
            # reduce to a scalar before backward.
            loss = torch.sum(loss)
            loss.backward()
            optim.step()
            if scheduler is not None:
                scheduler.step()
        if dev_data_loader is not None:
            result = evaluate(model, dev_data_loader)
            macro_f1 = result["macro_f1"]
            if macro_f1 > max_f1:
                max_f1 = macro_f1
                # Unwrap DataParallel before saving so the checkpoint loads
                # without the wrapper's "module." prefix.
                if torch.cuda.device_count() > 1:
                    model.module.save_pretrained(args.save_pretrained_model_path)
                else:
                    model.save_pretrained(args.save_pretrained_model_path)
            logging("evaluation at pretrain %s\n" % result)
        if eval_data_loader is not None:
            result = evaluate(model, eval_data_loader)
            logging("text book evaluation at %d th epoch pretrain %s\n" % (epoch, result))

    return max_f1


if __name__ == "__main__":
    device_ids = list(range(torch.cuda.device_count()))
    # Build the herb tokenizer: start from the classical-Chinese base
    # tokenizer and extend it with herb-name tokens.
    tokenizer = BertTokenizerFast.from_pretrained("./guwenbert-base",
                                                  cache_dir="/data2/private/liwei/.cache/transformers")
    if os.path.exists("./herb_tokenizer_new"):
        herb_tokenizer = BertTokenizerFast.from_pretrained("./herb_tokenizer_new")
    else:
        herb_tokenizer = copy.deepcopy(tokenizer)
        # Add herb names as extra tokens here.
        herbs = []
        for line in open("data/herb_vocab.txt"):
            # Expected vocab line format: "<herb> <count>"; skip malformed lines.
            tem = line.strip().split(' ')
            if len(tem) != 2:
                continue
            herb, count = tem
            # Only keep herbs that occur at least 5 times in the corpus.
            if int(count) >= 5:
                herbs.append(herb)
        assert len(herbs) > 0
        num_added_toks = herb_tokenizer.add_tokens(herbs)
        assert num_added_toks > 0
        herb_tokenizer.save_pretrained("./herb_tokenizer_new")

    print('loading dataset', flush=True)
    # NOTE(review): this rebinding shadows the imported `datasets` module for
    # the rest of the script; `load_dataset` is presumably brought into scope
    # elsewhere in this file — verify. Also `"infer": None` as a data_files
    # value looks suspicious — confirm load_dataset accepts it.
    datasets = load_dataset("json", data_files={"train": ["./data/prescription_pairs_text_train_ex.json"],
                                                "eval": ["./data/prescription_pairs_text_eval.json"],
                                                "test": ["./data/prescription_pairs_text_test.json"],
                                                "infer": None})

    if args.run_type == "train" or args.run_type == "both":
        # Training is split into two stages: MLM training then NSP training.
        if not os.path.exists(args.mlm_save_pretrained_model_path):
            # Stage 1: MLM training over herb ids, starting from ./mlm_plm.
            model = RobertaForPreTraining.from_pretrained("./mlm_plm", share_cls=args.share_cls)
            # model = RobertaForPreTraining.from_pretrained("./guwenbert-base")
            model.resize_token_embeddings(len(herb_tokenizer))
            model.resize_type_embeddings(new_num_types=args.token_type_num)
            model.tie_weights()
            optim = AdamW(model.parameters(), lr=5e-5)
            if torch.cuda.device_count() > 1:
                model = torch.nn.DataParallel(model, device_ids=device_ids)
            model.to(device)
            # mlm pre-training
            logging("preparing data for mlm\n")
            train_mlm_dataset = Dataset(tokenizer, herb_tokenizer, datasets["train"], dataset_type="train",
                                        for_nsp=False, input_type=args.input_type)
            data_collator_mlm = MyCollator(herb_tokenizer, max_length=200, for_nsp=False)
            train_mlm_loader = DataLoader(train_mlm_dataset, shuffle=True, batch_size=args.batch_size,
                                          collate_fn=data_collator_mlm)
            logging("start mlm training\n")
            pre_train(model, optim, train_mlm_loader, train_type="mlm", epoch_num=args.mlm_train_epoch)
            # Persist the MLM-stage weights (unwrap DataParallel if needed).
            if torch.cuda.device_count() > 1:
                model.module.save_pretrained(args.mlm_save_pretrained_model_path)
            else:
                model.save_pretrained(args.mlm_save_pretrained_model_path)
        else:
            # MLM stage already done: reload its checkpoint and skip to NSP.
            model = RobertaForPreTraining.from_pretrained(args.mlm_save_pretrained_model_path, share_cls=args.share_cls)
            optim = AdamW(model.parameters(), lr=5e-5)
            if torch.cuda.device_count() > 1:
                model = torch.nn.DataParallel(model, device_ids=device_ids)
            model.to(device)

        # Stage 2: NSP pre-training, with a fresh optimizer.
        optim = AdamW(model.parameters(), lr=5e-5)
        logging("preparing data for nsp\n")
        train_nsp_dataset = Dataset(tokenizer, herb_tokenizer, datasets["train"], dataset_type="train",
                                    compare_examples=datasets["train"],
                                    for_nsp=True, negative_rate=args.negative_rate, positive_rate=args.positive_rate,
                                    input_type=args.input_type)
        # Dev split is carved out of the train file (dataset_type="dev") with a
        # fixed high negative rate for ranking-style evaluation.
        dev_nsp_dataset = Dataset(tokenizer, herb_tokenizer, datasets["train"], dataset_type="dev",
                                  compare_examples=datasets["train"],
                                  for_nsp=True, negative_rate=20, input_type=args.input_type)
        data_collator_nsp = MyCollator(herb_tokenizer, max_length=200, for_nsp=True)
        train_nsp_loader = DataLoader(train_nsp_dataset, shuffle=True, batch_size=args.batch_size,
                                      collate_fn=data_collator_nsp)
        dev_nsp_loader = DataLoader(dev_nsp_dataset, shuffle=True, batch_size=args.batch_size,
                                    collate_fn=data_collator_nsp)
        logging("start nsp training\n")
        logging("preparing data for text book evaluation\n")
        # NOTE(review): unlike the dev dataset above, this one does not pass
        # for_nsp=True — confirm the Dataset default makes this intentional.
        eval_nsp_dataset = Dataset(tokenizer, herb_tokenizer, datasets["eval"], dataset_type="eval",
                                   compare_examples=datasets["train"], negative_rate=20, input_type=args.input_type)
        eval_nsp_loader = DataLoader(eval_nsp_dataset, shuffle=True, batch_size=args.batch_size,
                                     collate_fn=data_collator_nsp)
        # max_f1 = pre_train(model, optim, train_nsp_loader, train_type="nsp", epoch_num=args.nsp_train_epoch)
        # Linear warmup over the first 10% of steps, then linear decay.
        num_training_steps = int(args.nsp_train_epoch * len(train_nsp_dataset) / args.batch_size)
        scheduler = get_linear_schedule_with_warmup(optimizer=optim, num_warmup_steps=int(0.1 * num_training_steps),
                                                    num_training_steps=num_training_steps)
        max_f1 = pre_train(model, optim, train_nsp_loader, train_type="nsp", epoch_num=args.nsp_train_epoch,
                           scheduler=scheduler, dev_data_loader=dev_nsp_loader, eval_data_loader=eval_nsp_loader)
        logging("max f1 on dev set %.4f\n" % max_f1)
        # the model is saved inside pre_train (best dev checkpoint only)
    if args.run_type == "evaluate" or args.run_type == "both":
        # Evaluation path: reload the best NSP checkpoint and score both the
        # text-book (eval) and crawled (test) splits via evaluate().
        model = RobertaForPreTraining.from_pretrained(args.save_pretrained_model_path, share_cls=args.share_cls)
        if torch.cuda.device_count() > 1:
            model = torch.nn.DataParallel(model, device_ids=device_ids)
        model.to(device)
        data_collator_nsp = MyCollator(herb_tokenizer, max_length=200, for_nsp=True)
        logging("preparing data for text book evaluation\n")
        eval_nsp_dataset = Dataset(tokenizer, herb_tokenizer, datasets["eval"], dataset_type="eval",
                                   compare_examples=datasets["train"], negative_rate=20, input_type=args.input_type)
        eval_nsp_loader = DataLoader(eval_nsp_dataset, shuffle=True, batch_size=args.batch_size,
                                     collate_fn=data_collator_nsp)
        # Passing herb_tokenizer makes evaluate() also dump text_output.jsonl.
        logging("text book test result %s\n" % evaluate(model, eval_nsp_loader, herb_tokenizer))
        logging("preparing data for test\n")
        test_nsp_dataset = Dataset(tokenizer, herb_tokenizer, datasets["test"], dataset_type="eval",
                                   compare_examples=datasets["train"], negative_rate=20, input_type=args.input_type)
        test_nsp_loader = DataLoader(test_nsp_dataset, shuffle=True, batch_size=args.batch_size,
                                     collate_fn=data_collator_nsp)
        logging("crawl data test result %s\n" % evaluate(model, test_nsp_loader, herb_tokenizer))
    if args.run_type == "infer":
        # Inference path: no labels; the collator runs in for_infer mode and
        # the loader is unshuffled so predictions line up with the input order.
        model = RobertaForPreTraining.from_pretrained(args.save_pretrained_model_path, share_cls=args.share_cls)
        if torch.cuda.device_count() > 1:
            model = torch.nn.DataParallel(model, device_ids=device_ids)
        model.to(device)
        data_collator_nsp = MyCollator(herb_tokenizer, max_length=200, for_nsp=True, for_infer=True)
        infer_dataset = Dataset(tokenizer, herb_tokenizer, datasets["infer"], dataset_type="infer",
                                input_type=args.input_type)
        infer_loader = DataLoader(infer_dataset, shuffle=False, batch_size=args.batch_size,
                                  collate_fn=data_collator_nsp)
        inference(model, infer_loader, datasets["infer"])