# Batch-collation utilities for a BERT-based Chinese pipeline:
# a pickled-dataset wrapper plus collate functions for training and prediction.
import torch
import numpy as np
from transformers import BertTokenizer
import pickle, random, copy
# NOTE(review): np and copy are not used in this portion of the file — confirm
# they are needed elsewhere before removing.

# Fixed seed for reproducibility (random is not used in this view; presumably
# used by downstream shuffling — confirm).
random.seed(2021)
# Module-level tokenizer shared by make_data / make_predict, loaded from a
# local whole-word-masking Chinese BERT checkpoint directory.
tokenizer = BertTokenizer.from_pretrained('chinese_wwm_pytorch')

class Mytest():
    """Thin map-style dataset over a pickled list of examples.

    Loads the entire pickle file into memory at construction time and
    exposes it through ``__getitem__`` / ``__len__`` so it can be consumed
    by an index-based loader.
    """

    def __init__(self, path):
        # SECURITY NOTE: pickle.load executes arbitrary code — only load
        # trusted files.
        with open(path, 'rb') as fh:
            self.train_data = pickle.load(fh)
        print("len(sent_relp):", len(self.train_data))

    def __getitem__(self, idx):
        """Return the raw example tuple stored at position ``idx``."""
        return self.train_data[idx]

    def __len__(self):
        """Number of loaded examples."""
        return len(self.train_data)


def make_data(batch_data, max_seq_length=512):
    """Collate a batch of training examples into model-ready tensors.

    Each ``item`` in ``batch_data`` is expected to be a 5-tuple
    ``(sentence, fenci, head_idx, triple, pos)`` — see the example below
    this function. Only ``item[0]`` is tokenized; the remaining fields are
    passed through untouched as Python lists.

    Args:
        batch_data: iterable of example tuples.
        max_seq_length: pad/truncate length for the tokenizer (default 512).
            Bug fix: this parameter was previously ignored — the tokenizer
            call hard-coded ``max_length=512``.

    Returns:
        ``(input_ids, token_type_ids, attention_mask, org_tokens, fenci,
        head_idx, triple, pos)`` — the first three are tensors of shape
        ``[batch, max_seq_length]``; the rest are plain lists.
    """
    input_ids, token_type_ids, attention_mask = [], [], []
    org_tokens, fenci, head_idx, triple, pos = [], [], [], [], []
    for item in batch_data:
        encoded_dict = tokenizer(item[0], return_tensors="pt",
                                 padding='max_length',
                                 max_length=max_seq_length,  # was hard-coded 512
                                 truncation=True)
        input_ids.append(encoded_dict['input_ids'])
        token_type_ids.append(encoded_dict['token_type_ids'])
        attention_mask.append(encoded_dict['attention_mask'])
        org_tokens.append(item[0])  # raw sentence string
        fenci.append(item[1])       # word -> subtoken index groups (presumably; confirm)
        head_idx.append(item[2])    # dependency-style head indices (presumably; confirm)
        triple.append(item[3])      # relation triples
        pos.append(item[4])         # POS-tagged tokens
    # Stack per-example [1, max_seq_length] tensors into [batch, max_seq_length].
    input_ids = torch.cat(input_ids, dim=0)
    token_type_ids = torch.cat(token_type_ids, dim=0)
    attention_mask = torch.cat(attention_mask, dim=0)
    return input_ids, token_type_ids, attention_mask, org_tokens, fenci, head_idx, triple, pos
'''
make_data
batch[3] :'抓住这一环，就为提高产品质量打下了基础。'
batch[4] :[[0, 1], [2], [3], [4], [5], [6], [7], [8, 9]] 
batch[5] :[(8, 'sbj', 6), (8, 'obj', 14)]
batch[6] :[('抓住', 'obj', '环'), ('打下', 'obj', '基础')]
batch[7] :['抓住/v', '这/r', '一/m', '环/n', '，/w', '就/d', '为/p', '提高/v', '产品/n', '质量/n', '打下/v', '了/y', '基础/n', '。/w']
'''

def make_predict(batch_data, max_seq_length=512):
    """Collate a batch of prediction examples (no gold triples).

    Like ``make_data`` but each ``item`` is a 4-tuple
    ``(sentence, fenci, head_idx, pos)`` — prediction inputs carry no
    relation triples. Only ``item[0]`` is tokenized; the remaining fields
    pass through untouched.

    Args:
        batch_data: iterable of example tuples.
        max_seq_length: pad/truncate length for the tokenizer (default 512).
            Bug fix: this parameter was previously ignored — the tokenizer
            call hard-coded ``max_length=512``.

    Returns:
        ``(input_ids, token_type_ids, attention_mask, org_tokens, fenci,
        head_idx, pos)`` — the first three are tensors of shape
        ``[batch, max_seq_length]``; the rest are plain lists.
    """
    input_ids, token_type_ids, attention_mask = [], [], []
    org_tokens, fenci, head_idx, pos = [], [], [], []
    for item in batch_data:
        encoded_dict = tokenizer(item[0], return_tensors="pt",
                                 padding='max_length',
                                 max_length=max_seq_length,  # was hard-coded 512
                                 truncation=True)
        input_ids.append(encoded_dict['input_ids'])
        token_type_ids.append(encoded_dict['token_type_ids'])
        attention_mask.append(encoded_dict['attention_mask'])
        org_tokens.append(item[0])  # raw sentence string
        fenci.append(item[1])       # word -> subtoken index groups (presumably; confirm)
        head_idx.append(item[2])    # head indices (presumably; confirm)
        pos.append(item[3])         # POS-tagged tokens
    # Stack per-example [1, max_seq_length] tensors into [batch, max_seq_length].
    input_ids = torch.cat(input_ids, dim=0)
    token_type_ids = torch.cat(token_type_ids, dim=0)
    attention_mask = torch.cat(attention_mask, dim=0)
    return input_ids, token_type_ids, attention_mask, org_tokens, fenci, head_idx, pos
'''
make_predict
batch[3] :'抓住这一环，就为提高产品质量打下了基础。'
batch[4] :[[0, 1], [2], [3], [4], [5], [6], [7], [8, 9]] 
batch[5] :[8, 6]
batch[6] :['抓住/v', '这/r', '一/m', '环/n', '，/w', '就/d', '为/p', '提高/v', '产品/n', '质量/n', '打下/v', '了/y', '基础/n', '。/w']
'''




















# def make_train(batch_data):
#     input_ids, token_type_ids, attention_mask, fenci_idx, triple,org_tokens,appendix = [],[],[],[],[],[],[]
#     for item in batch_data:
#         input_ids.append(item[0])
#         token_type_ids.append(item[1])
#         attention_mask.append(item[2])
#         org_tokens.append(item[3])   # len=8['','',...,'']
#         fenci_idx.append(item[4])    # len=8[[],[],...,[]]
#         triple.append(item[5])       # len=8[(),(),...,()]
#         appendix.append(item[6])     # len=8[(),(),...,()]
        
#     input_ids = torch.cat(input_ids, dim=0)            # [8*512]
#     token_type_ids = torch.cat(token_type_ids, dim=0)  # [8*512]
#     attention_mask = torch.cat(attention_mask, dim=0)  # [8*512]  
#     return input_ids, token_type_ids, attention_mask,org_tokens,fenci_idx,triple,appendix
