def load_dict(dict_path):
    """Read a label dictionary file and map each line to its line index.

    Args:
        dict_path: path to a UTF-8 text file with one label per line.

    Returns:
        dict mapping label string -> integer id (0-based, in file order).
    """
    # enumerate replaces the original hand-rolled counter; behavior is identical.
    with open(dict_path, 'r', encoding='utf-8') as f:
        return {line.strip('\n'): i for i, line in enumerate(f)}



from functools import partial
import paddle
from paddlenlp.datasets import MapDataset
from paddlenlp.data import Stack,Pad,Tuple
from paddlenlp.transformers import ErnieTokenizer, ErnieForTokenClassification
from paddlenlp.metrics import ChunkEvaluator

# Module-level setup: label vocabulary and pretrained tokenizer.
# 'tag.dic' is expected in the working directory, one label per line.
label_vocab = load_dict('tag.dic')
model_name = 'ernie-1.0'
# Downloads/loads the pretrained ERNIE 1.0 tokenizer on first use.
tokenizer = ErnieTokenizer.from_pretrained(model_name)

def load_dataset(datafiles):
    """Load tab-separated tagging file(s) into MapDataset object(s).

    Each file has a header line (skipped), then lines of the form
    "words<TAB>labels", where the tokens within each field are joined
    by the '\002' separator.

    Args:
        datafiles: a single path (str), or a list/tuple of paths.

    Returns:
        One MapDataset for a str input; a list of MapDataset (one per
        file, in order) for a list/tuple input.
    """
    def read(data_path):
        with open(data_path, 'r', encoding='utf-8') as fp:
            next(fp)  # skip the header row
            # Iterate the file object directly; readlines() would
            # materialize the whole file for no benefit.
            for line in fp:
                words, labels = line.strip('\n').split('\t')
                words = words.split('\002')
                labels = labels.split('\002')
                yield words, labels

    if isinstance(datafiles, str):
        return MapDataset(list(read(datafiles)))
    elif isinstance(datafiles, (list, tuple)):
        # BUG FIX: original called read(datafiles) — passing the whole
        # tuple as a path for every element. Each dataset must be built
        # from its own file.
        return [MapDataset(list(read(datafile))) for datafile in datafiles]

# Build the three dataset splits from local files (header row is skipped
# inside load_dataset).
train_ds, dev_ds, test_ds = load_dataset(datafiles=('train.txt','dev.txt','test.txt'))

# Quick sanity check of the parsed data.
print(train_ds[:5])

# NOTE(review): label_vocab was already loaded at module top — this second
# load_dict call re-reads the same file and is redundant.
label_vocab = load_dict('tag.dic')



def convert_example(example, tokenizer, label_vocab):
    """Tokenize one (tokens, labels) example and attach integer label ids.

    Args:
        example: tuple of (tokens, labels) — two parallel lists of str.
        tokenizer: a PaddleNLP tokenizer called with is_split_into_words=True.
        label_vocab: dict mapping label string -> integer id.

    Returns:
        The tokenizer's output dict with an added 'labels' key holding the
        integer label ids, padded with 'O' for the [CLS]/[SEP] tokens.
    """
    tokens, labels = example
    tokenized_input = tokenizer(
        tokens, return_length=True, is_split_into_words=True
    )
    # BUG FIX: original assigned to a misspelled 'lables' and left the
    # 'labels' assignment incomplete (syntax error) with no return.
    # [CLS] and [SEP] special tokens both receive the 'O' label.
    labels = ['O'] + labels + ['O']
    tokenized_input['labels'] = [label_vocab[x] for x in labels]
    return tokenized_input

# BUG FIX: partial(convert_example) bound nothing, so calling
# trans_func(example) would raise TypeError for the missing tokenizer and
# label_vocab arguments. Bind the module-level objects so trans_func takes
# a single example.
trans_func = partial(convert_example, tokenizer=tokenizer, label_vocab=label_vocab)

