"""
# 批量覆盖章节标题为：
#     简述 第{chapter_number}章
# - 简述来源：优先从正文里选取“与其他章节不同”的一句（去掉小说级公共前缀/模板），并过滤占位/模板句。
# - 若正文无法提取唯一短语，则退化为原标题清洗；仍无则跳过不改。
# 运行： python -m rag数据源.mysql_lx
"""
import asyncio
import re
from tortoise import Tortoise
from tool.sessing import TORTOISE_ORM
from models.m import Novel, Chapter, AuditStatus
from typing import List, Set

# --- Sentence and character handling patterns ---
# Splits text into sentences on CJK/ASCII terminators and line breaks.
SENT_SPLIT = re.compile(r"[。！？!?\n\r]+")
# Runs of whitespace and CJK/ASCII punctuation (collapsed or stripped away).
PUNCT_RE = re.compile(r"[\s\u3000，。,．.！!？?；;：:、,‘’“”\"'\-—_（）()【】\[\]《》<>…]+")
# Leading "第N章" chapter heading (Arabic or Chinese numerals) plus trailing separators.
ANY_CHAPTER_RE = re.compile(r"^第[\s0-9一二三四五六七八九十百千两零]+章[：:，,\.\s]*", re.IGNORECASE)
# A string consisting of nothing but a "第N章" heading.
ONLY_CHAPTER_RE = re.compile(r"^第[\s0-9一二三四五六七八九十百千两零]+章$", re.IGNORECASE)
# Any "第N章" token occurring anywhere in a string.
ANY_CHAPTER_TOKEN = re.compile(r"第[\s0-9一二三四五六七八九十百千两零]+章", re.IGNORECASE)
# Runs of characters worth keeping: CJK ideographs, ASCII letters, digits.
KEEP_CHARS_RE = re.compile(r"[\u4e00-\u9fa5A-Za-z0-9]+")
# Blacklist of placeholder/template sentence fragments (a match means skip the phrase).
BLACKLIST_PATTERNS = [
    r"占位内容",
    r"为了测试数据库容量",
    r"为了测试数据库",
    r"检索效果",
    r"主角推进剧情发生冲突",
    r"主角推进剧情",
    r"这是[\u4e00-\u9fa5A-Za-z0-9]+的占位",
    r"这是[\u4e00-\u9fa5A-Za-z0-9]+的第",
]
# Single alternation regex built from the blacklist above.
BLACKLIST_RE = re.compile("|".join(BLACKLIST_PATTERNS))


def _first_nonempty_line(text: str) -> str:
    if not text:
        return ""
    for line in str(text).replace("\r", "\n").split("\n"):
        l = line.strip()
        if l:
            return l
    return ""


def _lcp(a: str, b: str) -> str:
    n = min(len(a), len(b))
    i = 0
    while i < n and a[i] == b[i]:
        i += 1
    return a[:i]


def _novel_common_prefix(lines: List[str]) -> str:
    """Return the common prefix shared by all cleaned lines, or "" if it is
    shorter than 4 characters (too short to be a meaningful template prefix)."""
    # Strip chapter headings and punctuation before comparing lines.
    normalized = [PUNCT_RE.sub("", ANY_CHAPTER_RE.sub("", line)) for line in lines]
    candidates = [s for s in normalized if s]
    if not candidates:
        return ""
    shared = candidates[0]
    for other in candidates[1:]:
        shared = _lcp(shared, other)
        if not shared:
            break
    return shared if len(shared) >= 4 else ""


def _split_sentences(text: str) -> List[str]:
    """Split *text* on sentence terminators / line breaks, dropping blank pieces."""
    sentences: List[str] = []
    for piece in SENT_SPLIT.split(text or ""):
        trimmed = piece.strip()
        if trimmed:
            sentences.append(trimmed)
    return sentences


def _clean_sentence(sent: str) -> str:
    """Normalize one sentence: drop a leading chapter heading, remove all
    punctuation/whitespace, then strip any remaining "第N章" tokens."""
    without_heading = ANY_CHAPTER_RE.sub("", sent or "").strip()
    collapsed = PUNCT_RE.sub(" ", without_heading).strip().replace(" ", "")
    return ANY_CHAPTER_TOKEN.sub("", collapsed)


def _is_bad_phrase(phrase: str, novel_title: str) -> bool:
    """Return True when *phrase* is unusable as a title: empty, too short
    (< 4 chars), matches the placeholder blacklist, or embeds the first
    four characters of the novel title."""
    if not phrase or len(phrase) < 4:
        return True
    if BLACKLIST_RE.search(phrase) is not None:
        return True
    # A phrase that contains the book title's opening is likely boilerplate.
    return bool(novel_title) and novel_title[:4] in phrase


def pick_unique_phrase(content: str, used: Set[str], common_prefix: str, novel_title: str, max_len: int = 16) -> str:
    """Pick a short phrase from *content* that is not blacklisted, not derived
    from the novel title, and not already in *used*. Sentences are tried first;
    if none qualifies, a phrase is cut from the whole body. Returns "" when
    nothing usable is found. The caller is responsible for adding the returned
    phrase to *used*."""
    if not content:
        return ""

    def _drop_prefix(text: str) -> str:
        # Remove the novel-wide common prefix when present.
        if common_prefix and text.startswith(common_prefix):
            return text[len(common_prefix):]
        return text

    # First pass: sentence by sentence.
    for raw_sentence in _split_sentences(content):
        candidate = _drop_prefix(_clean_sentence(raw_sentence))[:max_len]
        if _is_bad_phrase(candidate, novel_title) or candidate in used:
            continue
        return candidate

    # Fallback: cut a phrase from the whole body.
    collapsed = PUNCT_RE.sub(" ", content).strip().replace(" ", "")
    fallback = ANY_CHAPTER_TOKEN.sub("", _drop_prefix(collapsed))[:max_len]
    if not _is_bad_phrase(fallback, novel_title) and fallback not in used:
        return fallback
    return ""


def clean_title(raw: str) -> str:
    """Clean a raw chapter title: strip chapter headings, leading decoration
    characters, punctuation/whitespace, and any residual "第N章" tokens."""
    if not raw:
        return ""
    title = ANY_CHAPTER_RE.sub("", raw.strip())
    title = re.sub(r"^[\-—\s·•:：]+", "", title)
    title = PUNCT_RE.sub(" ", title).strip().replace(" ", "")
    return ANY_CHAPTER_TOKEN.sub("", title)


async def rewrite_titles(batch_sleep: float = 0.05, phrase_len: int = 16):
    """Overwrite approved chapters' titles with "<phrase> 第{n}章".

    For each approved novel, a novel-wide common prefix is derived from the
    chapters' first non-empty lines; then, per chapter, a unique non-template
    phrase is extracted from the body (falling back to the cleaned original
    title). Chapters that yield no phrase, or whose phrase is just a bare
    "第N章" heading, are left unchanged.

    Args:
        batch_sleep: pause (seconds) after each DB write, to throttle updates.
        phrase_len: maximum length of the extracted phrase.
    """
    await Tortoise.init(config=TORTOISE_ORM)
    updated = 0
    total = 0
    try:
        novels = await Novel.filter(audit_status=AuditStatus.APPROVED).all()
        for novel in novels:
            # Collect each chapter's first non-empty line so the shared
            # novel-level prefix can be stripped from candidate phrases.
            first_lines = []
            chs = await Chapter.filter(novel=novel, audit_status=AuditStatus.APPROVED).order_by('chapter_number').all()
            for ch in chs:
                first_lines.append(_first_nonempty_line(ch.content or ch.title or ""))
            common_prefix = _novel_common_prefix(first_lines)

            used_phrases: Set[str] = set()
            for ch in chs:
                total += 1
                num = ch.chapter_number
                phrase = pick_unique_phrase(ch.content or "", used_phrases, common_prefix, novel.title or "", max_len=phrase_len)
                if not phrase:
                    # Fallback to the cleaned original title. NOTE(review):
                    # this path does not consult used_phrases, so duplicate
                    # phrases are possible here — confirm whether intended.
                    phrase = clean_title(ch.title or "")[:phrase_len]
                if not phrase or ONLY_CHAPTER_RE.fullmatch(phrase):
                    # Nothing usable extracted: keep the existing title.
                    continue
                new_title = f"{phrase} 第{num}章"
                # Reserve the phrase even when the title is already correct,
                # so later chapters cannot reuse it.
                used_phrases.add(phrase)
                if new_title != ch.title:
                    ch.title = new_title
                    await ch.save(update_fields=["title"])
                    updated += 1
                    await asyncio.sleep(batch_sleep)
        print(f"完成：检查 {total} 条，更新 {updated} 条 → 过滤占位句+去模板+去重复")
    finally:
        # Always release DB connections, even if an iteration raised.
        await Tortoise.close_connections()


if __name__ == "__main__":
    print("开始批量覆盖章节标题（过滤占位句+去模板+去重复）…")
    asyncio.run(rewrite_titles())
