import os
import random

# Predefined pool of Chinese and English sentences used as source material
# for the generated documents. Chinese/English pairs are rough translations
# of each other.
sentences = [
    # Add more sample sentences here to increase diversity.
    "科技的进步为我们的生活带来了前所未有的便利。",
    "Technological advancements have brought unprecedented convenience to our lives.",
    "保持持续的学习态度是适应快速变化世界的关键。",
    "Maintaining a continuous learning attitude is key to adapting to a rapidly changing world.",
    "环境保护和经济发展之间应该取得平衡。",
    "A balance should be struck between environmental protection and economic development.",
    "体育运动对于提高学生的身体素质非常重要。",
    "Physical exercise is very important for improving students' physical fitness.",
    "艺术和音乐教育对于培养孩子的创造力至关重要。",
    "Art and music education are crucial for cultivating children's creativity.",
    # ... more sentences
]

def randomly_modify_sentence(sentence, chance=0.3):
    """Randomly perturb a sentence by deleting and/or duplicating a word.

    Args:
        sentence: Input sentence; words are split on whitespace. Note that
            Chinese text without spaces is treated as a single "word".
        chance: Independent probability (0.0-1.0) that each perturbation
            (deletion, duplication) is applied.

    Returns:
        The possibly modified sentence, words re-joined with single spaces.
    """
    words = sentence.split()
    if random.random() < chance:
        # Randomly delete one word, but keep at least one word so the
        # sentence never collapses to nothing.
        if len(words) > 1:
            words.pop(random.randrange(len(words)))
    if random.random() < chance and words:
        # Randomly duplicate one word in place. The `words` guard fixes a
        # crash in the original: an empty/whitespace-only sentence yields
        # an empty word list, and randrange(0) raises ValueError.
        index = random.randrange(len(words))
        words.insert(index, words[index])
    return ' '.join(words)

def generate_similar_documents(file_path_1, file_path_2, sentences, target_word_count):
    """Create two line-aligned, similar documents.

    Sentences are drawn at random (with replacement) from `sentences`;
    each original sentence is written to `file_path_1` and a randomly
    perturbed copy of it to `file_path_2`, until the estimated combined
    word count reaches `target_word_count`.
    """
    estimated_words = 0
    with open(file_path_1, 'w', encoding='utf-8') as original_out, \
         open(file_path_2, 'w', encoding='utf-8') as modified_out:
        while estimated_words < target_word_count:
            picked = random.choice(sentences)
            perturbed = randomly_modify_sentence(picked)

            # The untouched sentence goes to the first file ...
            original_out.write(picked + "\n")
            # ... and its perturbed variant, on the same line number,
            # to the second.
            modified_out.write(perturbed + "\n")

            # Rough word estimate: assume ~3 UTF-8 bytes per word on
            # average across Chinese and English text.
            byte_total = len(picked.encode('utf-8')) + len(perturbed.encode('utf-8'))
            estimated_words += byte_total // 3

if __name__ == "__main__":
    # Guard the script entry point so importing this module does not write
    # files as a side effect; behavior when run directly is unchanged.
    generate_similar_documents('orig_1k.txt', 'orig_add_1k.txt', sentences, 1000)
    print("Files created successfully.")