from fastNLP import Vocabulary

# from fastNLP.transformers.torch import BertTokenizer
from fastNLP.io import DataBundle
from transformers import AutoTokenizer


class BertPipe:
    """Preprocessing pipeline for token-classification with a BERT-style model.

    Tokenizes each word of the input field into sub-word (BPE) pieces with a
    HuggingFace tokenizer, and builds/applies a tag vocabulary over the target
    field of a fastNLP ``DataBundle``.
    """

    def __init__(self, model_name: str = "bert-base-uncased") -> None:
        # Loaded from the HuggingFace hub or local cache.
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)

    def bpe(self, raw_words):
        """Encode one sentence's words into a single BPE id sequence.

        The sequence is wrapped with the tokenizer's ``[CLS]``/``[SEP]`` ids.

        :param raw_words: list of words (str) of one sentence.
        :return: dict with
            ``input_ids`` — sub-token ids including cls/sep;
            ``input_len`` — ``len(input_ids)``;
            ``first`` — index into ``input_ids`` of the first sub-token of
            each word, with a leading 0 (cls) and a trailing sep position;
            ``seq_len`` — number of raw words.
        """
        bpes = [self.tokenizer.cls_token_id]
        first = [0]
        first_index = 1  # position of the first BPE piece of the next word
        for word in raw_words:
            pieces = self.tokenizer.encode(word, add_special_tokens=False)
            # NOTE(review): if a word ever encodes to zero pieces, consecutive
            # entries of ``first`` collide — assumed not to happen with this
            # tokenizer; confirm if unusual inputs are possible.
            bpes.extend(pieces)
            first.append(first_index)
            first_index += len(pieces)
        bpes.append(self.tokenizer.sep_token_id)
        first.append(first_index)
        return {
            "input_ids": bpes,
            "input_len": len(bpes),
            "first": first,
            "seq_len": len(raw_words),
        }

    def process(
        self,
        data_bundle,
        input_field_name="raw_words",
        target_field_name="raw_target",
        num_proc=4,
    ) -> "tuple[DataBundle, AutoTokenizer]":
        """BPE-encode the input field and numericalize the target field.

        :param data_bundle: fastNLP ``DataBundle`` holding the datasets; must
            contain a ``"train"`` dataset from which the tag vocabulary is built.
        :param input_field_name: field with the raw word lists to BPE-encode.
        :param target_field_name: field with the raw tag strings.
        :param num_proc: number of worker processes for ``apply_field_more``.
        :return: the (mutated) ``data_bundle`` and the tokenizer.
        """
        data_bundle.apply_field_more(
            self.bpe, field_name=input_field_name, num_proc=num_proc
        )

        # Tag vocabulary: a closed label set, so no padding/unknown entries.
        tag_vocab = Vocabulary(padding=None, unknown=None)
        # Build the vocabulary from the training split's raw targets.
        tag_vocab.from_dataset(
            data_bundle.get_dataset("train"), field_name=target_field_name
        )
        # Map raw targets to indices in every dataset, writing the result
        # into the new "target" field.
        tag_vocab.index_dataset(
            data_bundle.datasets.values(),
            field_name=target_field_name,
            new_field_name="target",
        )

        # Attach the vocabulary to the bundle so it can be reused later.
        data_bundle.set_vocab(tag_vocab, field_name="target")
        return data_bundle, self.tokenizer
