from transformers import BertTokenizer
import numpy

# Load a locally cached BERT tokenizer (path is machine-specific — TODO: make configurable).
bert_tokenizer = BertTokenizer.from_pretrained("/home/Dyf/code/models/pretrain_models/bert")
text = """ 腾讯云_S_L_B_青海-新疆-图片前端_43.136.204.63 已10分钟无上报数据"""

# text = "Replace me by 098098lksajdlkfjsladjf0 88 daflkjsdsadfsdfal any text you'd like."
encoded_input = bert_tokenizer(text, return_tensors='pt')

word2id_bert = bert_tokenizer.get_vocab()
# Invert the vocab (token -> id) into an id -> token lookup.
# Iterating .items() avoids a second dict lookup per entry.
id2word_bert = {token_id: token for token, token_id in word2id_bert.items()}

# Map each input id of the first (only) sequence back to its WordPiece token.
# int(i) converts the 0-d torch tensor element to a plain int key.
tokens = [id2word_bert[int(i)] for i in encoded_input["input_ids"][0]]
print("tokens", tokens)

# 初始化一个新的列表来存储处理后的tokens
def _merge_wordpieces(wordpiece_tokens):
    """Glue WordPiece continuation tokens ('##'-prefixed) back onto the
    preceding token, returning the merged token list.

    Special tokens like [CLS]/[SEP] pass through unchanged. Unlike the
    previous inline loop, a '##' token appearing first (with no word in
    progress) starts a new word instead of being silently dropped.
    """
    merged = []
    current = ''
    for tok in wordpiece_tokens:
        if tok.startswith('##'):
            # Continuation piece: append without the '##' marker.
            # Works even when current is '' (no word started yet).
            current += tok[2:]
        else:
            # New word begins: flush the word built so far, if any.
            if current:
                merged.append(current)
            current = tok
    # Flush the final word left in the accumulator.
    if current:
        merged.append(current)
    return merged


new_tokens = _merge_wordpieces(tokens)

# Print the merged token list.
print(new_tokens)
# sequence = index_set
#
# continuous_sequences = []
# current_sequence = []
#
# for i in range(len(sequence)):
#     if i == 0 or sequence[i] - sequence[i - 1] == 1:
#         current_sequence.append(sequence[i])
#     else:
#         continuous_sequences.append(current_sequence)
#         current_sequence = [sequence[i]]
#
# continuous_sequences.append(current_sequence)
#
# # 输出结果
# for i, seq in enumerate(continuous_sequences):
#     # print(f"连续子序列 {i + 1}: {seq}")
#     first = int(seq[0] - 1)
#     last = int(seq[-1] + 1)
#     print("".join([i.replace("##", "") for i in tokens[first:last]]))

# for index in index_set:
#     if first:
#         indices_with_double_hash = ""
#         indices_with_double_hash += tokens[index - 1].replace("##", "")
#         first = False
#     if last_index == 0:
#         last_index = index
#         indices_with_double_hash += tokens[index].replace("##", "")
#     if last_index + 1 == index:
#         last_index = index
#         indices_with_double_hash += tokens[index].replace("##", "")
#     else:
#         first = True
#         last_index == 0
#         print(indices_with_double_hash)
