import torch
from tqdm import tqdm
from torch.utils.data import RandomSampler,DataLoader
import torch.nn as nn
from transformers import BertModel, BertTokenizer,BertForMaskedLM
import copy,re,pickle,random
import numpy as np
random.seed(2021)  # fix the RNG so masking choices are reproducible across runs

# Tokenizer loaded from a local checkpoint directory; presumably its vocab has
# been extended with whole-word herb-name tokens (ids 21128-21662 per the
# comments further down) — TODO confirm against the checkpoint's vocab.txt.
tokenizer = BertTokenizer.from_pretrained('chinese_L-12_H-768_A-12')
# tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
vocab_size=len(tokenizer.vocab)

# med_map: looks like a mapping from non-standard herb names to standard ones
# (see its use in make_pred_data) — verify against how the pickle was built.
with open(r"data/med_map.pkl",'rb') as file:
    med_map=pickle.load(file)
# One standard herb name per line.
with open("data/standard_herb_list.txt",'r',encoding='utf-8') as file:
    content = file.readlines()
    herbs = [i.strip() for i in content]  # NOTE (from original): '盐' (salt) is missing from this list
print("len(herbs):", len(herbs))

class MyDataset():
    """In-memory dataset of (text_a, text_b) pairs read from a text file.

    Each non-empty line of the file holds two fields separated by the
    double-tab delimiter '\t\t'.
    """
    def __init__(self, path):
        # Use a context manager so the file handle is closed even on error
        # (the original leaked the handle if parsing raised). Split each line
        # once instead of twice, and skip blank lines, which previously
        # crashed with IndexError.
        self.lines = []
        with open(path, 'r', encoding='utf-8') as ff:
            for line in ff:
                line = line.strip()
                if not line:
                    continue
                fields = line.split('\t\t')
                self.lines.append((fields[0], fields[1]))

    def __getitem__(self, idx):
        return self.lines[idx]

    def __len__(self):
        return len(self.lines)

# 药名部分id:[21128, 21662] 盐的id
def mask_token(input_ids):
    """Apply MLM masking: ~30% of herb-name tokens and ~15% of other tokens.

    Standard herb-name ids occupy [21128, 21662] in the extended vocabulary.

    Args:
        input_ids: LongTensor of shape [1, seq_len].

    Returns:
        (masked_input_ids, labels): both LongTensor [1, seq_len]. labels keep
        the original id at masked positions and -100 elsewhere, so only masked
        tokens contribute to the MLM loss.
    """
    herb_ids = set(range(21128, 21663))  # whole-word standard herb-name ids
    # Ids never picked as ordinary-mask candidates: herb ids plus [UNK]=100,
    # [CLS]=101, [SEP]=102, [PAD]=0, [unused1]=1, and [MASK]=103 so a
    # position already masked in the herb pass cannot be picked again
    # (the original could re-select 103 positions).
    protected_ids = herb_ids | {100, 101, 102, 103, 0, 1}
    max_herb, max_cand = 15, 5  # hard caps on masked positions per sequence
    seq = list(input_ids.squeeze(0).numpy())
    original = list(seq)  # pristine copy used to build the labels

    # --- herb-name masking: ~30% of herb positions, capped at max_herb ---
    herb_positions = [i for i, tid in enumerate(seq) if tid in herb_ids]
    n_herb = min(max_herb, max(1, int(len(herb_positions) * 0.3)))
    random.shuffle(herb_positions)
    # Fixed off-by-one: the original's `> n_herb` break masked n_herb + 1.
    masked = set(herb_positions[:n_herb])
    for pos in masked:
        seq[pos] = 103  # [MASK]

    # --- ordinary-token masking: 15% of candidates, capped at max_cand ---
    cand_positions = [i for i, tid in enumerate(seq) if tid not in protected_ids]
    # Fixed: the original sized this from len(herb_list_special) (the id
    # whitelist, a constant 540 → always max_cand) instead of the number of
    # actual candidate positions, contradicting its own "15%" comment.
    n_cand = min(max_cand, max(1, int(len(cand_positions) * 0.15)))
    random.shuffle(cand_positions)
    for pos in cand_positions[:n_cand]:
        masked.add(pos)
        seq[pos] = 103

    # -100 is the ignore index for the MLM loss: only masked positions count.
    labels = [original[i] if i in masked else -100 for i in range(len(original))]
    return torch.LongTensor(seq).unsqueeze(0), torch.LongTensor(labels).unsqueeze(0)

def make_data(batch_data):
    """Build padded BERT training inputs for a batch of (text_a, text_b) pairs.

    text_b (sent[1]) embeds herb names between '[' and ']' markers, with '|'
    fencing the name itself. Standard herb names are additionally surrounded
    by their whole-word token (so they map to the extended-vocab ids); marked
    but non-standard names are fenced with '[unused1]'.

    Returns:
        (input_ids, token_type_ids, attention_mask, labels): each a
        LongTensor of shape [batch, 512]. labels come from mask_token.
    """
    input_ids, token_type_ids, attention_mask, labels = [], [], [], []
    for sent in batch_data:
        input_mask, token_type_id, tokens_1, tokens_2 = [], [], [], []
        tokens_1.extend(tokenizer.tokenize(sent[0]))
        for item in re.split(r'[\[\]]', sent[1]):
            name = item.strip('|')
            if name in herbs:
                # Standard herb name: whole-word token on both sides of its
                # per-character tokens.
                tokens_2.append(name)
                tokens_2.extend(list(name))
                tokens_2.append(name)
            elif '|' not in item:
                # Plain text outside herb markers: per-character tokens.
                tokens_2.extend(list(item))
            else:
                # Marked herb name that is not standard/known: fence it with
                # the special '[unused1]' token.
                tokens_2.append('[unused1]')
                tokens_2.extend(list(name))
                tokens_2.append('[unused1]')
        # Truncate tokens_2 so [CLS] + tokens_1 + [SEP] + tokens_2 + [SEP]
        # fits in 512 positions (threshold kept from the original; removed a
        # dead `tokens = tokens_1 + tokens_2` that was immediately overwritten).
        # NOTE(review): if tokens_1 alone exceeds ~506 tokens the slice below
        # goes negative and the sequence stays longer than 512 — confirm
        # upstream data keeps sent[0] short enough.
        if len(tokens_1) + len(tokens_2) > 509:
            tokens_2 = tokens_2[:(509 - len(tokens_1) - 3)]
        tokens = ["[CLS]"] + tokens_1 + ["[SEP]"] + tokens_2 + ["[SEP]"]
        input_id = tokenizer.convert_tokens_to_ids(tokens)
        token_type_id.extend([0] * (len(tokens_1) + 2))  # segment A incl. [CLS]/[SEP]
        token_type_id.extend([1] * (len(tokens_2) + 1))  # segment B incl. final [SEP]
        input_mask.extend([1] * len(tokens))
        # Pad everything to the fixed length of 512 (0 = [PAD]).
        while len(input_id) < 512:
            input_id.append(0)
            input_mask.append(0)
            token_type_id.append(0)
        assert len(input_id) == len(token_type_id)
        assert len(input_id) == len(input_mask)
        input_id = torch.LongTensor(input_id).unsqueeze(0)  # [1, 512]
        token_type_id = torch.LongTensor(token_type_id).unsqueeze(0)
        input_mask = torch.LongTensor(input_mask).unsqueeze(0)
        input_id, label = mask_token(input_id)  # apply MLM masking

        input_ids.append(input_id)
        token_type_ids.append(token_type_id)
        attention_mask.append(input_mask)
        labels.append(label)
    input_ids = torch.cat(input_ids, dim=0)
    token_type_ids = torch.cat(token_type_ids, dim=0)
    attention_mask = torch.cat(attention_mask, dim=0)
    labels = torch.cat(labels, dim=0)  # torch.Size([batch, 512])
    return input_ids, token_type_ids, attention_mask, labels

#################################预测部分数据构造###################################################################################################################

def mask_pred_token(input_ids):
    """Mask every standard herb-name token so the model must predict it.

    Args:
        input_ids: LongTensor of shape [1, seq_len].

    Returns:
        (masked_input_ids, labels, record): labels keep the original id at
        masked positions and -100 everywhere else (ignored by the loss);
        record maps masked position -> original herb id, used downstream to
        score prediction accuracy.
    """
    herb_ids = set(range(21128, 21663))  # herb-name id range [21128, 21662]
    seq = list(input_ids.squeeze(0).numpy())
    record = dict()
    labels = []
    # Single pass replaces the original's three O(n)/O(n^2) passes
    # (list membership test per label position) with identical results.
    for pos, tid in enumerate(seq):
        if tid in herb_ids:
            record[pos] = tid   # ground truth for accuracy computation
            labels.append(tid)
            seq[pos] = 103      # [MASK]
        else:
            labels.append(-100)
    return torch.LongTensor(seq).unsqueeze(0), torch.LongTensor(labels).unsqueeze(0), record


def make_pred_data(batch_data):
    """Build padded BERT inputs for prediction/evaluation.

    Only herb names found in med_map (with a correction that is a standard
    herb) are fenced by their corrected whole-word token — those are the only
    positions mask_pred_token will mask; all other text stays per-character.

    Returns:
        (input_ids, token_type_ids, attention_mask, labels, records): the
        first four are LongTensors of shape [batch, 512]; records is a list
        of per-sample {position: original herb id} dicts.
    """
    input_ids, token_type_ids, attention_mask, labels, records = [], [], [], [], []
    for sent in batch_data:
        input_mask, token_type_id, tokens_1, tokens_2 = [], [], [], []
        tokens_1.extend(tokenizer.tokenize(sent[0]))
        for item in re.split(r'[\[\]]', sent[1]):
            name = item.strip('|')
            if name in med_map and med_map[name] in herbs:
                # Non-standard name with a known correction: fence the
                # original characters with the corrected whole-word token.
                tokens_2.append(med_map[name])
                tokens_2.extend(list(name))
                tokens_2.append(med_map[name])
            # NOTE(review): separate 'if' (not 'elif') kept from the original —
            # an item that is both a med_map key and a standard herb would be
            # emitted twice; confirm this is intended.
            if name in herbs:
                # Standard herb names are tokenized per character here.
                tokens_2.extend(list(name))
            elif '|' not in item:
                # Plain text outside herb markers.
                tokens_2.extend(list(item))
        # Truncate so [CLS] + tokens_1 + [SEP] + tokens_2 + [SEP] fits in 512
        # (removed a dead `tokens = tokens_1 + tokens_2` that was overwritten).
        if len(tokens_1) + len(tokens_2) > 509:
            tokens_2 = tokens_2[:(509 - len(tokens_1) - 3)]
        tokens = ["[CLS]"] + tokens_1 + ["[SEP]"] + tokens_2 + ["[SEP]"]
        input_id = tokenizer.convert_tokens_to_ids(tokens)
        token_type_id.extend([0] * (len(tokens_1) + 2))  # segment A incl. [CLS]/[SEP]
        token_type_id.extend([1] * (len(tokens_2) + 1))  # segment B incl. final [SEP]
        input_mask.extend([1] * len(tokens))
        # Pad to the fixed length of 512 (0 = [PAD]).
        while len(input_id) < 512:
            input_id.append(0)
            input_mask.append(0)
            token_type_id.append(0)
        assert len(input_id) == len(token_type_id)
        assert len(input_id) == len(input_mask)
        input_id = torch.LongTensor(input_id).unsqueeze(0)
        token_type_id = torch.LongTensor(token_type_id).unsqueeze(0)
        input_mask = torch.LongTensor(input_mask).unsqueeze(0)
        input_id, label, record = mask_pred_token(input_id)  # mask herb tokens

        input_ids.append(input_id)
        token_type_ids.append(token_type_id)
        attention_mask.append(input_mask)
        labels.append(label)
        records.append(record)
    input_ids = torch.cat(input_ids, dim=0)
    token_type_ids = torch.cat(token_type_ids, dim=0)
    attention_mask = torch.cat(attention_mask, dim=0)
    labels = torch.cat(labels, dim=0)
    return input_ids, token_type_ids, attention_mask, labels, records



