# -*- coding: utf-8 -*-
# 分句 & 关键词筛选

import jieba, re, json, config, tqdm
from pathlib import Path

def split_sentences(text):
    """Split *text* into sentences, keeping each terminal 。！？ mark.

    Uses a zero-width lookbehind so the punctuation stays attached to its
    sentence; a trailing empty string may appear when *text* ends with a
    terminator (callers filter empties).
    """
    sentence_end = r"(?<=[。！？])"
    return re.split(sentence_end, text)

def risk_filter(sent):
    """Return True if *sent* contains any configured risk keyword.

    Scans every keyword list in config.RISK_KEYWORDS; the category names
    (dict keys) are irrelevant to the match, so only the values are
    iterated (the original walked .items() and ignored the key).
    """
    return any(
        kw in sent
        for kws in config.RISK_KEYWORDS.values()
        for kw in kws
    )

# Extract risk sentences from every corpus .txt and dump each file's
# matches as a JSON list under data/filtered/<stem>.json.
out_dir = Path("data/filtered")
# parents=True: also create "data/" if it does not exist yet
# (exist_ok alone raises FileNotFoundError in that case).
out_dir.mkdir(parents=True, exist_ok=True)
for txt_file in tqdm.tqdm(list(config.TXT_DIR.glob("*.txt"))):
    text = txt_file.read_text(encoding="utf-8")
    # Drop empty fragments produced by the splitter, then keep only
    # sentences that hit a risk keyword.
    sents = [s.strip() for s in split_sentences(text) if s.strip()]
    risk_sents = [s for s in sents if risk_filter(s)]
    out_path = out_dir / f"{txt_file.stem}.json"
    # write_text opens and closes the handle; the original
    # json.dump(..., open(...)) leaked the file object.
    out_path.write_text(
        json.dumps(risk_sents, ensure_ascii=False), encoding="utf-8"
    )
print("✅ 风险句子提取完成")

