#!/usr/bin/env python  
#-*- coding:utf-8 _*-  
""" 
@author:hello_life 
@license: Apache Licence 
@file: CLUE_dataset.py 
@time: 2022/05/05
@software: PyCharm 
description:
"""
import logging

import torch
from torch.utils.data import Dataset
from transformers import BertTokenizer

from utils.data_utils import load_data,labels_to_ids
from parameters.bert_crf_ner_config import Config


logger=logging.getLogger(__name__)
config=Config()
# Loaded once at import time so collate_fn/ner_tokenizer share a single
# tokenizer instance (NOTE: importing this module hits the filesystem).
tokenizer=BertTokenizer.from_pretrained(config.pretrained_model_path)

class CLUE_Dataset(Dataset):
    """Dataset over preprocessed CLUE NER data.

    Expects ``load_data`` to return a dict with parallel lists under
    "content" (text strings) and "label" (per-character tag sequences)
    — assumed from the access pattern below; confirm against data_utils.
    """

    def __init__(self, config, mode="train"):
        """
        :param config: project Config providing processed_*_data_path
        :param mode: "train" or "test" — selects which processed file to load
        :raises ValueError: for an unknown mode. (The original silently
            left ``self.data`` unset, which only crashed later with an
            opaque AttributeError in __len__/__getitem__.)
        """
        self.config = config
        if mode == "train":
            self.data = load_data(config.processed_train_data_path)
        elif mode == "test":
            self.data = load_data(config.processed_test_data_path)
        else:
            # Fail fast instead of leaving self.data undefined.
            raise ValueError(
                f"unknown mode: {mode!r} (expected 'train' or 'test')"
            )

    def __len__(self):
        # Explicit check instead of assert: asserts are stripped under -O.
        if len(self.data["content"]) != len(self.data["label"]):
            raise ValueError("content/label length mismatch in loaded data")
        return len(self.data["content"])

    def __getitem__(self, item):
        """Return (text, label sequence, label length) for sample ``item``."""
        x = self.data["content"][item]
        y = self.data["label"][item]
        # len(y) — no need to re-index self.data["label"] a second time.
        return x, y, len(y)


def collate_fn(batch):
    """Collate a batch of samples from CLUE_Dataset for the DataLoader.

    :param batch: iterable of (content, label_seq, label_len) triples,
        as produced by ``CLUE_Dataset.__getitem__``
    :return: (token_ids, masks, labels, label_lens) — the first three are
        int64 tensors on ``config.device``; label_lens stays a plain list.
    """
    token_ids, masks, labels, label_lens = [], [], [], []
    for content, label_seq, label_len in batch:
        ids, mask = ner_tokenizer(content)
        token_ids.append(ids)
        masks.append(mask)
        labels.append(labels_to_ids(label_seq, config))
        label_lens.append(label_len)

    # torch.tensor with explicit dtype/device avoids the intermediate
    # float32 tensor (and second device hop) that
    # torch.Tensor(...).to(int64).to(device) would create.
    return (
        torch.tensor(token_ids, dtype=torch.int64, device=config.device),
        torch.tensor(masks, dtype=torch.int64, device=config.device),
        torch.tensor(labels, dtype=torch.int64, device=config.device),
        label_lens,
    )


# NOTE: BertTokenizer/AutoTokenizer use wordpiece segmentation, which splits
# words (e.g. "CSOL" -> "CS", "##OL"); for char-level NER we need
# "C", "S", "O", "L" instead, so tokenization is done per character below.
def ner_tokenizer(content):
    """Char-level tokenization for NER.

    Splits the lowercased text into single characters (avoiding wordpiece
    sub-word splits), wraps them with [CLS]/[SEP], converts to vocabulary
    ids, and pads/truncates everything to ``config.seq_len``.

    :param content: raw text string
    :return: (token_ids, attention_mask), both lists of length config.seq_len
    """
    # Reserve two positions for the [CLS]/[SEP] special tokens.
    body = list(content.lower())[: config.seq_len - 2]
    tokens = ["[CLS]", *body, "[SEP]"]

    pad_len = config.seq_len - len(tokens)

    # Map characters to vocabulary ids, then right-pad with 0 ([PAD]).
    token_ids = tokenizer.convert_tokens_to_ids(tokens) + [0] * pad_len

    # Attention mask: 1 over real tokens, 0 over padding.
    mask = [1] * len(tokens) + [0] * pad_len

    return token_ids, mask


