import json

# 1️⃣ Load the raw training dialogues (a JSON list) from disk.
source_path = "D:\\yuanbei\\客服问答机器人需求表\\清洗数据\\data_process\\train.json"
with open(source_path, encoding="utf-8") as src_file:
    data = json.load(src_file)

# 2️⃣ Keywords to filter out (extend this list as needed).
# Each entry is an "@"-mention of a user account; any dialogue whose
# checked turn contains one of these substrings is dropped.
# NOTE(review): matching is case-sensitive substring matching — both
# '@DP_Lutee' and '@DP_lutee' appear below because case matters.
blocked_keywords = [
    '@DP_Adddda',
    '@DP_Adksl',
    '@DP_Akl',
    '@DP_Arh',
    '@DP_BOO',
    '@DP_Babee', '@DP_Bana', '@DP_Bing', '@DP_Biu',
    '@DP_Bom', '@DP_Cae', '@DP_CaePlease', '@DP_Caleb',
    '@DP_Calem', '@DP_Cami', '@DP_Casey', '@DP_Cecil',
    '@DP_Cici', '@DP_Click', '@DP_Cola', '@DP_Colin',
    '@DP_Cos', '@DP_Cross', '@DP_Doo', '@DP_EMOQ',
    '@DP_Emoqq', '@DP_Flora', '@DP_GVv', '@DP_Gali',
    '@DP_Gbfw', '@DP_Ggbond', '@DP_Gideon', '@DP_Gkoo',
    '@DP_GodV', '@DP_Greg', '@DP_Gtrr', '@DP_Gungun',
    '@DP_Guuo', '@DP_Jejue5', '@DP_Joy', '@DP_Joyeee5',
    '@DP_Kimi', '@DP_Lacey', '@DP_Laj', '@DP_Lana',
    '@DP_Lara', '@DP_Leahh', '@DP_Liberty', '@DP_Llai',
    '@DP_Lore', '@DP_Lucia', '@DP_Lucius', '@DP_Lutee',
    '@DP_Mald', '@DP_Mdl', '@DP_Monjietonoo', '@DP_Nance',
    '@DP_Nance1', '@DP_Nax', '@DP_Nicole', '@DP_QKimi',
    '@DP_Qcxd', '@DP_QcxdA', '@DP_Qkid', '@DP_Rip1',
    '@DP_Ryan', '@DP_Saimen', '@DP_Taaaaaaa', '@DP_Tbbbb', '@DP_Tcccccc', '@DP_Tddddd', '@DP_Teeee', '@DP_Tffff',
    '@DP_Tgggg', '@DP_Wawa', '@DP_Wtttttttttt', '@DP_YOo', '@DP_YY1', '@DP_YYYYYYY', '@DP_York', '@DP_Zara', '@DP_Zcc',
    '@DP_Zed', '@DP_Zero1', '@DP_Zing', '@DP_Zj1', '@DP_Zola1', '@DP_Zooper', '@DP_Zpppp', '@DP_aaaaaaaaa', '@DP_ada',
    '@DP_adaa', '@DP_ade', '@DP_aii', '@DP_baobao', '@DP_clala', '@DP_cre', '@DP_cri', '@DP_crystal', '@DP_eli',
    '@DP_flo', '@DP_gekko1', '@DP_hui', '@DP_hui2', '@DP_jac', '@DP_king2', '@DP_lad', '@DP_ladPlease', '@DP_lda',
    '@DP_lior', '@DP_lior1', '@DP_lior3', '@DP_lior4', '@DP_lior5', '@DP_lior6', '@DP_lior8', '@DP_lris', '@DP_lucy',
    '@DP_lutee', '@DP_lynn', '@DP_olala', '@DP_relax', '@DP_rigjt', '@DP_rittt3', '@DP_seven1223s', '@DP_whhhhhhh',
    '@dp_Andre', '@dp_Atsuki', '@dp_a000', '@dp_alj', '@dp_annu', '@dp_ass', '@dp_jj12', '@dp_june11', '@dp_just',
    '@dp_lewie14', '@dp_nanci', '@dp_rittt', '@dp_rittt2','@ABIN3950','@rbl','@BlackWolf_011','@XiaoWenn6666','@Talise01'
]

# 3️⃣ Run the filter: drop every dialogue whose second turn mentions a blocked user.
def _mentions_blocked_user(dialogue):
    """Return True if any blocked keyword occurs in the content of turn index 1."""
    # NOTE(review): assumes every dialogue has at least two turns and that
    # turn 1 is a dict with a "content" key — shorter/malformed items would
    # raise IndexError/KeyError here. Confirm against the dataset schema.
    text = dialogue[1]["content"]
    return any(tag in text for tag in blocked_keywords)

filtered_data = [dlg for dlg in data if not _mentions_blocked_user(dlg)]

# 4️⃣ Report how many items survived the filter.
print(f"原始数据条数: {len(data)}")
print(f"过滤后数据条数: {len(filtered_data)}")

# 5️⃣ Save the filtered data to a new file.
# Serialization is done manually (one dialogue item per line) rather than
# via json.dump(..., indent=4), so each record stays on a single line and
# the output is easy to diff and inspect. The result is still valid JSON.
output_path = 'D:\\yuanbei\\客服问答机器人需求表\\清洗数据\\data_process\\train_filtered.json'
with open(output_path, 'w', encoding='utf-8') as f:
    f.write("[\n")
    last = len(filtered_data) - 1  # hoisted out of the loop (invariant)
    for i, item in enumerate(filtered_data):
        item_str = json.dumps(item, ensure_ascii=False)
        # Every line except the last needs a trailing comma to keep the
        # top-level array valid JSON.
        if i == last:
            f.write(f"    {item_str}\n")
        else:
            f.write(f"    {item_str},\n")
    f.write("]")

print(f"✅ 已保存过滤后的数据到: {output_path}")
