from transformers import BertTokenizer

# Path to a locally downloaded bert-base-uncased checkpoint, relative to this
# script's working directory — assumes the model was fetched ahead of time.
local_bert_model_path = "../bert_model/bert-base-uncased"
# Loaded once at import time and shared by all experiment blocks below.
bert_tokenizer = BertTokenizer.from_pretrained(local_bert_model_path)


# Inspect what the tokenizer adds at the start and end of an encoded sequence.
# The recorded outputs below show every encoding is wrapped in id 101 ([CLS]
# for bert-base-uncased) and id 102 ([SEP]).
if __name__ == '__main__':
    print(bert_tokenizer.encode("This is Louis!"))
    # [101, 2023, 2003, 3434, 999, 102]
    print(bert_tokenizer.encode("Hello, Louis!"))
    # [101, 7592, 1010, 3434, 999, 102]
    print(bert_tokenizer.encode("You are my super star."))
    # [101, 2017, 2024, 2026, 3565, 2732, 1012, 102]
    # The same sentence was repeated as a literal three times; bind it once so
    # the three measurements are guaranteed to refer to identical text.
    task_sentence = "Find the first aspect term and corresponding opinion term in the text"
    print(bert_tokenizer.encode(task_sentence))
    print(len(task_sentence.split()))  # 12 whitespace-split words
    print(len(bert_tokenizer.encode(task_sentence)))  # 14 ids: 12 tokens + the two special tokens

# Check punctuation handling: the recorded output shows that a whitespace-split
# "word" such as "Louis!" maps to more than one token id, because the
# tokenizer splits trailing punctuation into its own token.
if __name__ == '__main__':
    a_sentence = "This is Louis! Hello, Louis! You are my super star."
    print(bert_tokenizer.encode(a_sentence))
    print(a_sentence.split())
    for sub_sentence in a_sentence.split():
        # [1:-1] strips the 101/102 special-token ids the tokenizer wraps
        # around every encoded piece, leaving only the word's own token ids.
        print(bert_tokenizer.encode(sub_sentence)[1:-1])
    # [101, 2023, 2003, 3434, 999, 7592, 1010, 3434, 999, 2017, 2024, 2026, 3565, 2732, 1012, 102]
    # ['This', 'is', 'Louis!', 'Hello,', 'Louis!', 'You', 'are', 'my', 'super', 'star.']
    # [2023]
    # [2003]
    # [3434, 999]
    # [7592, 1010]
    # [3434, 999]
    # [2017]
    # [2024]
    # [2026]
    # [3565]
    # [2732, 1012]


# Check hyphenated words such as "co-worker": the recorded output shows the
# hyphen is split out as its own token (id 1011) between the two word parts.
if __name__ == '__main__':
    # [1:-1] strips the special-token ids wrapped around the encoding.
    print(bert_tokenizer.encode("co-worker")[1:-1])  # [2522, 1011, 7309]
