import jieba
import re

def split_chinese(sentence):
    """Tokenize *sentence* with jieba and keep only Chinese tokens.

    A token is kept when it contains at least one character from the
    CJK Unified Ideographs block (U+4E00-U+9FFF).

    Fix: the original filter ``[\\w\\s]`` contradicted its own comment
    ("filter out non-Chinese characters") — ``\\w`` also matches ASCII
    letters, digits and underscore, and ``\\s`` keeps pure-whitespace
    tokens.

    Args:
        sentence: the raw Chinese text to segment.

    Returns:
        list[str]: jieba tokens that contain at least one Chinese char.
    """
    # Segment the sentence into word tokens with jieba.
    words = jieba.lcut(sentence)
    # Keep only tokens containing at least one CJK ideograph.
    han = re.compile(r'[\u4e00-\u9fff]')
    return [word for word in words if han.search(word)]

# Parallel (source, target) sentence pairs used to build the vocabulary
# and, later, the encoded training set.
training_data = [("你因错过太阳而流泪", "你也将错过群星"), ("夜幕降临", "群星闪烁")]

# Build the vocabulary: map each distinct token to a unique integer id,
# assigned in first-seen order.  Source and target sides share one
# vocabulary, so both texts of a pair are processed identically —
# the original duplicated this loop body and kept a manual `index`
# counter that is always equal to len(vocab).
vocab = {}
for input_text, target_text in training_data:
    for text in (input_text, target_text):
        for word in split_chinese(text):
            if word not in vocab:
                # The next free id is simply the current vocabulary size.
                vocab[word] = len(vocab)

print("Vocabulary:", vocab)



# Convert a text into its integer-id sequence.
def encode_text_sequence(text, vocab):
    """Encode *text* as a list of vocabulary ids.

    Fix: the original used ``text.split()``, which splits on whitespace.
    Chinese text contains no spaces, so the whole sentence came back as
    a single token that is never in *vocab*, raising ``KeyError`` on the
    training data above.  The text must be tokenized with the same
    :func:`split_chinese` tokenizer that built *vocab*.

    Args:
        text: the raw text to encode.
        vocab: mapping from token to integer id.

    Returns:
        list[int]: the id of each Chinese token in *text*.

    Raises:
        KeyError: if a token of *text* is not present in *vocab*.
    """
    return [vocab[word] for word in split_chinese(text)]



# Encode every (source, target) training pair into integer id sequences.
encoded_training_data = [
    (encode_text_sequence(src, vocab), encode_text_sequence(tgt, vocab))
    for src, tgt in training_data
]

print("encoded_training_data=", encoded_training_data)
# encoded_training_data now holds the integer-encoded pairs and can be
# fed to model training.


if __name__ == "__main__":
    # Entry-point marker: printed only when the module is run as a script,
    # confirming the top-level pipeline above completed.
    print("===over===")