import os
import shutil
import requests
import json
import mysql.connector
import time
import re
from pathlib import Path

# SiliconFlow API configuration. The key is read from the environment so a
# real credential is never committed to source control (the previous
# hard-coded key was a leak; rotate it and export SILICONFLOW_API_KEY instead).
SILICONFLOW_API_KEY = os.environ.get('SILICONFLOW_API_KEY', '')
SILICONFLOW_BASE_URL = 'https://api.siliconflow.cn/v1'

def generate_word_info_with_siliconflow(word):
    """
    Ask the SiliconFlow chat-completions API for detailed linguistic
    information about an English word.

    :param word: the English word to describe
    :return: dict parsed from the model's JSON answer, or None on any failure
             (network error, non-2xx status, or unparseable content)
    """
    url = f"{SILICONFLOW_BASE_URL}/chat/completions"
    headers = {
        'Content-Type': 'application/json',
        'Authorization': f'Bearer {SILICONFLOW_API_KEY}'
    }

    # The prompt instructs the model to answer with one strict JSON document.
    prompt = f"""请为英语单词 '{word}' 生成详细的语言学信息，包括：
    1. 词性 (part_of_speech)
    2. 基本词义 (meaning)
    3. 一词多义 (multiple_meanings) - 列表格式
    4. 时态变形 (tense_forms) - 字典格式
    5. 短语搭配 (phrases) - 列表格式
    6. 常用词组 (common_phrases) - 列表格式
    7. 相关句子 (sentences) - 列表格式，包含英文和中文翻译
    8. 图片描述 (image) - 描述与单词相关的视觉元素
    9. 音标 (phonetic) - 需要分别标记美式和英式音标，并区分元音和辅音
    10. 常见错误用法及其提示 (common_mistakes) - 列表格式
    11. 同义词 (synonyms) - 列表格式
    12. 反义词 (antonyms) - 列表格式
    13. 低中高频数据 (frequency) - 显示高频/中频/低频，并包含具体频次，如"高频304次中频21次"
    14. 谐音梗 (homophone) - 提供更有趣和相关的谐音梗，模仿示例格式

    请严格按照以下JSON格式返回结果，不要包含其他内容：
    {{
        "word": "{word}",
        "part_of_speech": "",
        "meaning": "",
        "multiple_meanings": [],
        "tense_forms": {{}},
        "phrases": [],
        "common_phrases": [],
        "sentences": [],
        "phonetic_us": "",
        "phonetic_uk": "",
        "vowels": [],
        "consonants": [],
        "common_mistakes": [],
        "synonyms": [],
        "antonyms": [],
        "frequency": "",
        "homophone": ""
    }}

    对于音标部分，请分别提供美式音标(phonetic_us)和英式音标(phonetic_uk)，并单独列出元音(vowels)和辅音(consonants)。
    对于频率部分，请使用如"高频304次中频21次"的格式。
    对于谐音梗，请提供更有趣和相关的谐音梗，模仿示例格式。
    """

    data = {
        "model": "Qwen/Qwen3-8B",
        "messages": [
            {"role": "user", "content": prompt}
        ],
        "temperature": 0.7
    }

    try:
        # Explicit timeout so a hung API call cannot block the import forever.
        response = requests.post(url, headers=headers, json=data, timeout=60)
        response.raise_for_status()
        result = response.json()

        content = result['choices'][0]['message']['content']
        # The model may wrap the JSON in prose; extract the outermost {...}.
        start = content.find('{')
        end = content.rfind('}') + 1
        # BUGFIX: rfind() returns -1 when '}' is absent, making end == 0, so
        # the old check `end != -1` was always true and json.loads received
        # an empty slice. Require a non-empty span instead.
        if start != -1 and end > start:
            json_str = content[start:end]
            word_info = json.loads(json_str)
            return word_info
        else:
            print(f"无法解析API返回的内容: {content}")
            return None
    except Exception as e:
        print(f"调用硅基流动API失败: {str(e)}")
        return None

def update_word_info_in_db(cursor, word_info_list):
    """
    Update t_edu_word rows with AI-generated structured info.

    :param cursor: DB cursor supporting execute()/fetchone()
    :param word_info_list: [(word, ai_json_dict), ...]
    :return: number of rows successfully updated (0 on total failure)

    Column mapping (DB column <- AI field):
      pronunciation <- part_of_speech, cn_name <- meaning,
      means_parts <- multiple_meanings (JSON), example <- sentences (JSON),
      tags <- common_phrases (JSON), yinbiao_us <- phonetic_us,
      yinbiao_en <- phonetic_uk, tense_forms/phrases/common_mistakes/
      synonyms/antonyms/vowels/consonants <- same-named fields (JSON),
      frequency <- frequency, homophone <- homophone.

    Merge policy: an existing non-empty DB value always wins; the AI value
    only fills columns that are currently empty.
    """
    def _json(value):
        # All JSON columns are stored with non-ASCII characters preserved.
        return json.dumps(value, ensure_ascii=False)

    def _text(value):
        return (value or '').strip()

    try:
        success = 0
        for word, info in word_info_list:
            try:
                # Fetch the current values for this word (case-insensitive).
                cursor.execute("""
                    SELECT pronunciation, cn_name, means_parts, example, tags,
                           yinbiao_us, yinbiao_en, tense_forms, phrases, common_mistakes,
                           synonyms, antonyms, frequency, homophone, vowels, consonants
                    FROM t_edu_word WHERE LOWER(word) = LOWER(%s) LIMIT 1
                """, (word,))
                existing = cursor.fetchone()
                if not existing:
                    continue

                # AI values, in the exact column order of the SELECT above.
                ai_values = [
                    _text(info.get('part_of_speech')),
                    _text(info.get('meaning')),
                    _json(info.get('multiple_meanings', [])),
                    _json(info.get('sentences', [])),
                    _json(info.get('common_phrases', [])),
                    _text(info.get('phonetic_us')),
                    _text(info.get('phonetic_uk')),
                    _json(info.get('tense_forms', {})),
                    _json(info.get('phrases', [])),
                    _json(info.get('common_mistakes', [])),
                    _json(info.get('synonyms', [])),
                    _json(info.get('antonyms', [])),
                    _text(info.get('frequency')),
                    _text(info.get('homophone')),
                    _json(info.get('vowels', [])),
                    _json(info.get('consonants', [])),
                ]

                # Keep existing non-empty values; only fill blanks with AI data.
                params = [
                    old if (old is not None and str(old).strip() != '') else new
                    for old, new in zip(existing, ai_values)
                ]
                params.append(word)

                update_sql = """
                UPDATE t_edu_word
                SET pronunciation=%s, cn_name=%s, means_parts=%s, example=%s, tags=%s,
                    yinbiao_us=%s, yinbiao_en=%s, tense_forms=%s, phrases=%s, common_mistakes=%s,
                    synonyms=%s, antonyms=%s, frequency=%s, homophone=%s, vowels=%s, consonants=%s
                WHERE LOWER(word) = LOWER(%s)
                """
                cursor.execute(update_sql, params)
                success += 1
            except Exception as e:
                print(f"更新单词 '{word}' 信息失败: {str(e)}")
        print(f"成功更新 {success} 个单词")
        return success
    except Exception as e:
        print(f"更新单词信息失败: {str(e)}")
        return 0

def clear_import_tables(cursor):
    """
    Truncate every table touched by the import.

    Relation/child tables are listed before their parent tables so the
    truncation order does not violate referential dependencies.
    Errors are reported (and a rollback attempted) rather than raised.
    """
    target_tables = (
        't_edu_word_video_rel',
        't_edu_catalog_word_rel',
        't_edu_word_video',
        't_edu_catalog',
        't_edu_textbook',
        't_edu_word',
    )
    try:
        for table in target_tables:
            cursor.execute(f'TRUNCATE TABLE {table}')
        if not _db_commit(cursor):
            print('[WARN] 清空表后提交失败，可能连接不支持自动提交')
        print('已清空导入相关表')
    except Exception as e:
        try:
            cursor.connection.rollback()
        except Exception:
            pass
        print(f'清空表失败: {str(e)}')


def _extract_phonetics_and_audio(fa_yin_list):
    """从fa_yin字段提取美/英音标及音频url"""
    yinbiao_us = ''
    yinbiao_en = ''
    read_us = ''  # 这里复用为音频URL
    read_en = ''
    try:
        for item in fa_yin_list or []:
            t = (item.get('type') or '').strip().lower()
            phon = item.get('phonetic') or ''
            audio = item.get('audio_url') or ''
            if '美' in t or 'am' in t or 'us' in t or 'american' in t:
                yinbiao_us = phon
                read_us = audio
            elif '英' in t or 'en' in t or 'uk' in t or 'british' in t:
                yinbiao_en = phon
                read_en = audio
            else:
                # 未标注，美音优先为空时填充
                if not yinbiao_us:
                    yinbiao_us = phon
                    read_us = audio
                elif not yinbiao_en:
                    yinbiao_en = phon
                    read_en = audio
    except Exception:
        pass
    return yinbiao_en, read_en, yinbiao_us, read_us

# 通用DB提交/回滚（避免直接依赖全局 conn）
def _db_commit(cursor):
    try:
        for attr in ('connection', '_connection'):
            conn = getattr(cursor, attr, None)
            if conn and hasattr(conn, 'commit'):
                conn.commit()
                return True
    except Exception:
        pass
    return False

# ---------- 表结构自省与动态INSERT构造工具 ----------

def _introspect_columns(cursor, table_name: str) -> dict:
    """返回 {lower_name: actual_name} 的列名映射，用于兼容下划线/驼峰命名差异。"""
    try:
        cursor.execute(
            """
            SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.COLUMNS
            WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = %s
            """,
            (table_name,)
        )
        rows = cursor.fetchall() or []
        return {str(r[0]).lower(): str(r[0]) for r in rows}
    except Exception:
        # 回退：DESCRIBE
        try:
            cursor.execute(f"DESCRIBE {table_name}")
            rows = cursor.fetchall() or []
            # DESCRIBE 返回 (Field, Type, Null, Key, Default, Extra)
            return {str(r[0]).lower(): str(r[0]) for r in rows}
        except Exception:
            return {}


def _pick_col(columns_map: dict, candidates: list[str]) -> str | None:
    """从 columns_map 中挑选第一个存在的候选列名，返回实际列名；若均不存在返回 None。"""
    for c in candidates:
        real = columns_map.get(c.lower())
        if real:
            return real
    return None


def _build_insert_sql(table: str, cols: list[str]) -> str:
    placeholders = ", ".join(["%s"] * len(cols))
    collist = ", ".join(cols)
    return f"INSERT INTO {table} ({collist}) VALUES ({placeholders})"



def _db_rollback(cursor):
    try:
        for attr in ('connection', '_connection'):
            conn = getattr(cursor, attr, None)
            if conn and hasattr(conn, 'rollback'):
                conn.rollback()
                return True
    except Exception:
        pass
    return False


def _determine_dir_level(name: str) -> int:
    """根据目录名猜测层级：返回 1/2/3（越小级别越高）"""
    n = (name or '').strip().lower()
    # 常见关键词层级映射
    l1 = ['unit', 'chapter', 'module', 'book', 'grade', 'level']
    l2 = ['lesson', 'part', 'topic', 'section']
    l3 = ['story', 'dialog', 'dialogue', 'page', 'task', 'exercise']
    for k in l1:
        if n.startswith(k + ' ') or n == k:
            return 1
    for k in l2:
        if n.startswith(k + ' ') or n == k:
            return 2
    for k in l3:
        if n.startswith(k + ' ') or n == k:
            return 3
    # 默认：作为次级目录
    return 2


def import_textbooks_from_import_test(cursor):
    """
    Read every JSON file under <this file>/../../import_test and load it into the DB.

    - each JSON file becomes one t_edu_textbook row
    - catalogs (t_edu_catalog) come from the JSON 'directories' list; parent/child
      links are built via parent_id using a level guessed from the directory name
    - word dedup: t_edu_word stores each English word once (case-insensitive match)
    - NOTE(review): the original docstring claimed word-less 'Unit X' entries get
      flattened into names like 'UnitX-Lesson Y'; the code below links them via
      parent_id instead — confirm which behavior downstream expects.

    Commits once per textbook; on any error the whole run is rolled back.
    """
    import_dir = Path(__file__).parent.parent / 'import_test'
    if not import_dir.exists():
        print(f'未找到导入目录: {import_dir}')
        return

    try:
        for json_file in sorted(import_dir.glob('*.json')):
            with open(json_file, 'r', encoding='utf-8') as f:
                data = json.load(f)

            course_name = data.get('course_name') or json_file.stem
            total_words = int(data.get('total_words') or 0)

            # 1) insert the textbook record
            cursor.execute(
                """
                INSERT INTO t_edu_textbook (textbook_name, word_count, deleted_flag)
                VALUES (%s, %s, 0)
                """,
                (course_name, total_words)
            )
            textbook_id = cursor.lastrowid
            print(f'教材: {course_name} -> textbook_id={textbook_id}, total_words={total_words}')

            # 2) walk the directories (dynamic parent/child structure linked by parent_id)
            directories = data.get('directories') or []
            catalog_order = 0
            parent_stack = []  # dynamic parent chain: [(level, catalog_id)]

            # Pre-scan: estimate each directory's level from its name, turning
            # the flat list into a level-annotated sequence.
            enriched = []
            for d in directories:
                dir_name = (d.get('directory_name') or '').strip()
                words = d.get('words') or []
                word_count = int(d.get('word_count') or len(words))
                expected_total = d.get('expected_total')
                level = _determine_dir_level(dir_name)
                enriched.append({
                    'name': dir_name,
                    'words': words,
                    'word_count': word_count,
                    'expected_total': expected_total,
                    'level': level
                })

            # Insert each directory in order:
            for d in enriched:
                dir_name = d['name']
                words = d['words']
                word_count = d['word_count']
                expected_total = d['expected_total']
                level = d['level']

                # Keep the parent chain exactly level-1 entries deep so the
                # top of the stack is this directory's parent.
                while len(parent_stack) >= level:
                    parent_stack.pop()
                parent_id = parent_stack[-1][1] if parent_stack else None

                # A word-less directory may be a pure grouping node (e.g. a Unit);
                # keep expected_total so the stored word_count still adds up.
                catalog_order += 1
                cursor.execute(
                    """
                    INSERT INTO t_edu_catalog (
                        catalog_name, catalog_value_id, textbook_id,
                        course_id, parent_id, parent_value_id, word_count, orders, type, deleted_flag
                    ) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, 0)
                    """,
                    (dir_name, None, textbook_id, None, parent_id, None,
                     int(expected_total or word_count or 0), catalog_order, None)
                )
                catalog_id = cursor.lastrowid
                parent_stack.append((level, catalog_id))

                for idx, w in enumerate(words, start=1):
                    word_en = (w.get('word_name') or '').strip()
                    if not word_en:
                        continue
                    # Reuse an existing word row if present (case-insensitive dedup).
                    cursor.execute("SELECT word_id FROM t_edu_word WHERE LOWER(word) = LOWER(%s) LIMIT 1", (word_en,))
                    row = cursor.fetchone()
                    if row:
                        word_id = row[0]
                    else:
                        yinbiao_en, read_en, yinbiao_us, read_us = _extract_phonetics_and_audio(w.get('fa_yin'))
                        cn_name = (w.get('zh_explain') or '').strip()
                        pic = (w.get('word_img_url') or '').strip()
                        video = ''
                        means_parts = (w.get('raw_text_blocks', {}).get('definitions_text') or '').strip()
                        example = ''
                        tags = (w.get('phrases') or '').strip()
                        cursor.execute(
                            """
                            INSERT INTO t_edu_word (
                                word_value_id, word, yinbiao_en, read_en, yinbiao_us, read_us,
                                pronunciation, cn_name, pic, video, means_parts, example, tags, deleted_flag
                            ) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, 0)
                            """,
                            (
                                '', word_en, yinbiao_en, read_en, yinbiao_us, read_us,
                                '', cn_name, pic, video, means_parts, example, tags
                            )
                        )
                        word_id = cursor.lastrowid

                    # Link the word to this catalog, preserving its order.
                    cursor.execute(
                        """
                        INSERT INTO t_edu_catalog_word_rel (
                            catalog_id, catalog_value_id, word_id, word_value_id, orders, deleted_flag
                        ) VALUES (%s, %s, %s, %s, %s, 0)
                        """,
                        (catalog_id, None, word_id, None, idx)
                    )

            _db_commit(cursor)
            print(f'[OUTPUT] 教材导入完成: {course_name}')
    except Exception as e:
        _db_rollback(cursor)
        print(f'[OUTPUT] 导入失败: {str(e)}')


# 基于 words 是否为空来动态推断层级：连续若干无词目录 + 一个有词的目录 = 若干父级 + 一个叶子
# 返回 levels 与 is_leaf 列表（与 directories 一一对应）
def _compute_levels_from_directories(directories: list[dict]):
    levels: list[int] = []
    is_leaf: list[bool] = []
    parent_stack_len = 0
    last_chain_len = 0
    after_leaf = False
    for d in directories:
        words = d.get('words') or []
        has_words = len(words) > 0
        if not has_words:
            if after_leaf:
                parent_stack_len = max(0, last_chain_len - 1)
                after_leaf = False
            level = parent_stack_len + 1
            levels.append(level)
            is_leaf.append(False)
            parent_stack_len += 1
        else:
            level = parent_stack_len + 1
            levels.append(level)
            is_leaf.append(True)
            last_chain_len = parent_stack_len
            after_leaf = True
    return levels, is_leaf


def analyze_output_levels():
    """
    Scan every JSON under <this file>/../../output and print level statistics:
      - how many directory entries were recognised at each level (1/2/3)
      - the distribution of each textbook's maximum level depth
      - the distribution of level-set patterns (e.g. {1,2}, {1,2,3}),
        plus a detailed tree printout for up to three example patterns
    """
    import_dir = Path(__file__).parent.parent / 'output'
    if not import_dir.exists():
        print(f'未找到目录: {import_dir}')
        return
    level_counts = {}  # directory-entry count per level
    max_depth_hist = {}  # textbook max depth -> number of textbooks
    pattern_hist = {}    # level-set pattern (e.g. (1,), (1,2), (1,2,3)) -> count
    example_for_pattern = {}  # first file seen for each pattern

    files = sorted(import_dir.glob('*.json'))
    print(f'输出目录文件数: {len(files)}')
    for jf in files:
        try:
            with open(jf, 'r', encoding='utf-8') as f:
                data = json.load(f)
        except Exception as e:
            print(f'读取失败: {jf.name}: {e}')
            continue
        dirs = data.get('directories') or []
        levels, _ = _compute_levels_from_directories(dirs)
        for lv in levels:
            level_counts[lv] = level_counts.get(lv, 0) + 1
        if levels:
            max_depth = max(levels)
            max_depth_hist[max_depth] = max_depth_hist.get(max_depth, 0) + 1
            pattern = tuple(sorted(set(levels)))
            pattern_hist[pattern] = pattern_hist.get(pattern, 0) + 1
            if pattern not in example_for_pattern:
                example_for_pattern[pattern] = jf.name

    print('层级目录计数:', dict(sorted(level_counts.items())))
    print('每本教材最大层级分布:', dict(sorted(max_depth_hist.items())))
    print('共发现不同的层级集合模式数:', len(pattern_hist))
    print('层级集合模式分布 (pattern -> count) 及示例:')
    for pattern, cnt in sorted(pattern_hist.items(), key=lambda x: (len(x[0]), x[0])):
        ex = example_for_pattern.get(pattern, '-')
        print('  ', pattern, '->', cnt, '| 示例:', ex)

    # Detail view: pick up to 3 distinct patterns and print the example
    # textbook's structure (name, leaf flag, level, parent relationship).
    shown = 0
    for pattern, ex_file in example_for_pattern.items():
        if shown >= 3:
            break
        try:
            with open(import_dir / ex_file, 'r', encoding='utf-8') as f:
                data = json.load(f)
            dirs = data.get('directories') or []
            levels, is_leaf = _compute_levels_from_directories(dirs)
            print('\n示例模式', pattern, '=>', ex_file)
            # Print the directory tree.
            parent_ids = []  # simulated parent chain for display only (no DB involved)
            for i, d in enumerate(dirs):
                name = (d.get('directory_name') or '').strip()
                lw = len(d.get('words') or [])
                level = levels[i]
                leaf = is_leaf[i]
                # Maintain a stack so parent/child relations can be visualised.
                while len(parent_ids) >= level:
                    parent_ids.pop()
                parent = parent_ids[-1] if parent_ids else None
                print(f"  - level={level} leaf={leaf} name='{name}' words={lw} parent='{parent or '-'}'")
                if not leaf:
                    parent_ids.append(name)
            shown += 1
        except Exception as e:
            print('展示示例失败:', ex_file, e)



def import_textbooks_from_dir(cursor, src_folder: str = 'crawler/english/lers/output_nested_enriched', commit_every: int | None = None):
    """
    从指定目录读取所有基于“3.0-nested-tree”结构的 JSON，并按真实表结构（自省）批量导入：
      - t_edu_textbook：插入教材记录
      - t_edu_catalog：按 children_directory 递归插入父子目录，通过 parentId/parent_id 建立父子关系
      - t_edu_word：按需插入单词（若不存在），并写入 n_* 与 v_info/n_info 等扩展字段
      - t_edu_catalog_word_rel：建立目录-单词关系
    细节：
      - schema_version 不做强校验
      - course_name 仅取去除尾部“[共xxx个]”后的教材名
      - total_words 重新计算（递归统计所有叶子 words 数）
      - commit_every 控制批次提交（None=每本教材一次提交；整数=每累计该数量的 INSERT 提交一次）
    """
    import_dir = Path(src_folder)
    if not import_dir.exists():
        print(f'未找到导入目录: {import_dir}')
        return

    # 读取表结构
    tb_cols = _introspect_columns(cursor, 't_edu_textbook')
    cg_cols = _introspect_columns(cursor, 't_edu_catalog')
    wd_cols = _introspect_columns(cursor, 't_edu_word')
    rel_cols = _introspect_columns(cursor, 't_edu_catalog_word_rel')

    # 映射常用列名（教材）
    tb_name = _pick_col(tb_cols, ['textbook_name', 'textbookName', 'name'])
    tb_wc = _pick_col(tb_cols, ['word_count', 'wordCount'])
    tb_deleted = _pick_col(tb_cols, ['deleted_flag', 'deletedFlag', 'deleted'])

    tb_insert_cols = [c for c in [tb_name, tb_wc, tb_deleted] if c]
    tb_insert_sql = _build_insert_sql('t_edu_textbook', tb_insert_cols)

    # 目录表列映射
    cg_name = _pick_col(cg_cols, ['catalog_name', 'catalogName', 'name'])
    cg_val_id = _pick_col(cg_cols, ['catalog_value_id', 'catalogValueId'])
    cg_textbook_id = _pick_col(cg_cols, ['textbook_id', 'textbookId'])
    cg_course_id = _pick_col(cg_cols, ['course_id', 'courseId'])
    cg_parent_id = _pick_col(cg_cols, ['parent_id', 'parentId'])
    cg_parent_val_id = _pick_col(cg_cols, ['parent_value_id', 'parentValueId'])
    cg_word_count = _pick_col(cg_cols, ['word_count', 'wordCount'])
    cg_orders = _pick_col(cg_cols, ['orders', 'order', 'sort', 'seq'])
    cg_type = _pick_col(cg_cols, ['type', 'catalog_type'])
    cg_deleted = _pick_col(cg_cols, ['deleted_flag', 'deletedFlag', 'deleted'])

    cg_cols_to_use = [c for c in [cg_name, cg_val_id, cg_textbook_id, cg_course_id, cg_parent_id,
                                  cg_parent_val_id, cg_word_count, cg_orders, cg_type, cg_deleted] if c]
    cg_insert_sql = _build_insert_sql('t_edu_catalog', cg_cols_to_use)

    # 单词表列映射
    wd_id_col = _pick_col(wd_cols, ['word_id', 'wordId', 'id']) or 'word_id'
    wd_word_col = _pick_col(wd_cols, ['word', 'word_name', 'wordName']) or 'word'
    wd_word_value_id = _pick_col(wd_cols, ['word_value_id', 'wordValueId'])
    wd_yb_en = _pick_col(wd_cols, ['yinbiao_en', 'yinbiaoEn'])
    wd_read_en = _pick_col(wd_cols, ['read_en', 'readEn'])
    wd_yb_us = _pick_col(wd_cols, ['yinbiao_us', 'yinbiaoUs'])
    wd_read_us = _pick_col(wd_cols, ['read_us', 'readUs'])
    wd_pron = _pick_col(wd_cols, ['pronunciation'])
    wd_cn = _pick_col(wd_cols, ['cn_name', 'cnName'])
    wd_pic = _pick_col(wd_cols, ['pic', 'picture'])
    wd_video = _pick_col(wd_cols, ['video'])
    wd_means = _pick_col(wd_cols, ['means_parts', 'meansParts'])
    wd_example = _pick_col(wd_cols, ['example'])
    wd_phrases_col = _pick_col(wd_cols, ['phrases'])
    wd_tags = _pick_col(wd_cols, ['tags'])
    wd_deleted = _pick_col(wd_cols, ['deleted_flag', 'deletedFlag', 'deleted'])
    # 确保扩展列存在：n_en_explanation, n_en_sentence, n_cn_translation, n_audio_src, n_en_translation, v_info, n_info
    # 注意：仅当实际表中不存在时才新增
    def ensure_column(table: str, col: str, col_def: str):
        cursor.execute(f"SHOW COLUMNS FROM {table} LIKE %s", (col,))
        if not cursor.fetchone():
            cursor.execute(f"ALTER TABLE {table} ADD COLUMN {col} {col_def}")
            print(f"[IMPORT] 添加列: {table}.{col} {col_def}")

    try:
        ensure_column('t_edu_word', 'n_en_explanation', 'TEXT NULL')
        ensure_column('t_edu_word', 'n_en_sentence', 'TEXT NULL')
        ensure_column('t_edu_word', 'n_cn_translation', 'TEXT NULL')
        ensure_column('t_edu_word', 'n_audio_src', 'VARCHAR(255) NULL')
        ensure_column('t_edu_word', 'n_en_translation', 'TEXT NULL')
        ensure_column('t_edu_word', 'v_info', 'TEXT NULL')
        ensure_column('t_edu_word', 'n_info', 'TEXT NULL')
    except Exception as e:
        print('[IMPORT] 确保扩展列存在失败（可忽略若权限不足）:', e)


    # 将扩展列拼到插入列集合末尾（若存在于表结构）
    ext_cols = []
    for c in ['n_en_explanation', 'n_en_sentence', 'n_cn_translation', 'n_audio_src', 'n_en_translation', 'v_info', 'n_info']:
        if c in wd_cols:
            ext_cols.append(c)
    wd_cols_to_use = [c for c in [wd_word_value_id, wd_word_col, wd_yb_en, wd_read_en, wd_yb_us, wd_read_us,
                                  wd_pron, wd_cn, wd_pic, wd_video, wd_means, wd_example,
                                  (wd_phrases_col or wd_tags), wd_deleted] if c] + ext_cols
    wd_insert_sql = _build_insert_sql('t_edu_word', wd_cols_to_use)

    # 目录-单词关系表
    rel_cg_id = _pick_col(rel_cols, ['catalog_id', 'catalogId'])
    rel_cg_val_id = _pick_col(rel_cols, ['catalog_value_id', 'catalogValueId'])
    rel_wd_id = _pick_col(rel_cols, ['word_id', 'wordId'])
    rel_wd_val_id = _pick_col(rel_cols, ['word_value_id', 'wordValueId'])
    rel_orders = _pick_col(rel_cols, ['orders', 'order', 'sort', 'seq'])
    rel_deleted = _pick_col(rel_cols, ['deleted_flag', 'deletedFlag', 'deleted'])

    rel_cols_to_use = [c for c in [rel_cg_id, rel_cg_val_id, rel_wd_id, rel_wd_val_id, rel_orders, rel_deleted] if c]
    rel_insert_sql = _build_insert_sql('t_edu_catalog_word_rel', rel_cols_to_use)


    try:
        json_files = sorted(import_dir.glob('*.json'))
        total_files = len(json_files)
        for index, json_file in enumerate(json_files):
            print(f'[IMPORT] 处理教材 {index+1}/{total_files}: {json_file.name}')
            with open(json_file, 'r', encoding='utf-8') as f:
                data = json.load(f)

            course_name = data.get('course_name') or json_file.stem
            total_words = int(data.get('total_words') or 0)

            # 1) 插入教材
            # 批次控制
            op_counter = {'ops': 0}
            def _maybe_commit(bump=1):
                op_counter['ops'] += bump
                if commit_every and op_counter['ops'] >= commit_every:
                    _db_commit(cursor)
                    op_counter['ops'] = 0

            tb_vals = []
            if tb_name:
                tb_vals.append(course_name)
            if tb_wc:
                tb_vals.append(total_words)
            if tb_deleted:
                tb_vals.append(0)
            cursor.execute(tb_insert_sql, tb_vals)
            _maybe_commit()
            textbook_id = cursor.lastrowid
            print(f'[IMPORT] 教材: {course_name} -> textbook_id={textbook_id}, total_words={total_words}')

            # 2) 递归插入目录与单词
            directories = data.get('directories') or []

            def insert_catalog_node(node: dict, parent_id: int | None, order_idx: int) -> int:
                name = (node.get('directory_name') or '').strip()
                words = node.get('words') or []
                word_count = int(node.get('word_count') or len(words))
                expected_total = node.get('expected_total')
                wc = int(expected_total or word_count or 0)

                cg_vals = []
                # 按列清单顺序压入值
                for col in cg_cols_to_use:
                    if col == cg_name:
                        cg_vals.append(name)
                    elif col == cg_val_id:
                        cg_vals.append(None)
                    elif col == cg_textbook_id:
                        cg_vals.append(textbook_id)
                    elif col == cg_course_id:
                        cg_vals.append(None)
                    elif col == cg_parent_id:
                        cg_vals.append(parent_id)
                    elif col == cg_parent_val_id:
                        cg_vals.append(None)
                    elif col == cg_word_count:
                        cg_vals.append(wc)
                    elif col == cg_orders:
                        cg_vals.append(order_idx)
                    elif col == cg_type:
                        cg_vals.append(None)
                    elif col == cg_deleted:
                        cg_vals.append(0)
                    else:
                        cg_vals.append(None)
                cursor.execute(cg_insert_sql, cg_vals)
                _maybe_commit()
                catalog_id = cursor.lastrowid

                # 插入单词与关系
                for idx, w in enumerate(words, start=1):
                    word_en = (w.get('word_name') or '').strip()
                    if not word_en:
                        continue
                    # 查询是否已存在
                    cursor.execute(f"SELECT {wd_id_col} FROM t_edu_word WHERE LOWER({wd_word_col}) = LOWER(%s) LIMIT 1", (word_en,))
                    row = cursor.fetchone()
                    if row:
                        word_id = row[0]
                        # 若已存在记录，补充更新扩展字段（不改变其他列）
                        try:
                            set_parts = []
                            set_vals = []
                            n0 = (w.get('n') or [])
                            n0 = n0[0] if isinstance(n0, list) and n0 else {}
                            mappings = {
                                'n_en_explanation': (n0.get('en_explanation') or '').strip(),
                                'n_en_sentence': (n0.get('en_sentence') or '').strip(),
                                'n_cn_translation': (n0.get('cn_translation') or '').strip(),
                                'n_audio_src': (n0.get('audio_src') or '').strip(),
                                'n_en_translation': (n0.get('en_translation') or '').strip(),
                                'v_info': (w.get('v_info') or '').strip(),
                                'n_info': (w.get('n_info') or '').strip(),
                            }
                            for col, val in mappings.items():
                                if col in wd_cols and val:
                                    set_parts.append(f"{col}=%s")
                                    set_vals.append(val)
                            if set_parts:
                                set_vals.append(word_id)
                                sql = f"UPDATE t_edu_word SET {', '.join(set_parts)} WHERE {wd_id_col}=%s"
                                cursor.execute(sql, set_vals)
                                _maybe_commit()
                        except Exception as e:
                            print('[IMPORT] 扩展字段更新失败:', e)
                    else:
                        yinbiao_en, read_en, yinbiao_us, read_us = _extract_phonetics_and_audio(w.get('fa_yin'))
                        cn_name = (w.get('zh_explain') or '').strip()
                        pic = (w.get('word_img_url') or '').strip()
                        video = ''
                        # 改为：means_parts 取 tense_info；phrases 取 words 中的 phrases
                        means_parts = (w.get('tense_info') or '').strip() if isinstance(w.get('tense_info'), str) else json.dumps(w.get('tense_info') or {}, ensure_ascii=False)
                        example = ''
                        phrases_val = (w.get('phrases') or '').strip()
                        # 取 n[0] 的扩展字段（如存在）
                        n0 = (w.get('n') or [])
                        n0 = n0[0] if isinstance(n0, list) and n0 else {}
                        n_en_explanation = (n0.get('en_explanation') or '').strip()
                        n_en_sentence = (n0.get('en_sentence') or '').strip()
                        n_cn_translation = (n0.get('cn_translation') or '').strip()
                        n_audio_src = (n0.get('audio_src') or '').strip()
                        n_en_translation = (n0.get('en_translation') or '').strip()
                        v_info = (w.get('v_info') or '').strip()
                        n_info = (w.get('n_info') or '').strip()

                        wd_vals = []
                        for col in wd_cols_to_use:
                            if col == wd_word_value_id:
                                wd_vals.append('')
                            elif col == wd_word_col:
                                wd_vals.append(word_en)
                            elif col == wd_yb_en:
                                wd_vals.append(yinbiao_en)
                            elif col == wd_read_en:
                                wd_vals.append(read_en)
                            elif col == wd_yb_us:
                                wd_vals.append(yinbiao_us)
                            elif col == wd_read_us:
                                wd_vals.append(read_us)
                            elif col == wd_pron:
                                wd_vals.append('')
                            elif col == wd_cn:
                                wd_vals.append(cn_name)
                            elif col == wd_pic:
                                wd_vals.append(pic)
                            elif col == wd_video:
                                wd_vals.append(video)
                            elif col == wd_means:
                                wd_vals.append(means_parts)
                            elif col == wd_example:
                                wd_vals.append(example)
                            elif col == (wd_phrases_col or wd_tags):
                                wd_vals.append(phrases_val)
                            elif col == wd_deleted:
                                wd_vals.append(0)
                            elif col == 'n_en_explanation':
                                wd_vals.append(n_en_explanation)
                            elif col == 'n_en_sentence':
                                wd_vals.append(n_en_sentence)
                            elif col == 'n_cn_translation':
                                wd_vals.append(n_cn_translation)
                            elif col == 'n_audio_src':
                                wd_vals.append(n_audio_src)
                            elif col == 'n_en_translation':
                                wd_vals.append(n_en_translation)
                            elif col == 'v_info':
                                wd_vals.append(v_info)
                            elif col == 'n_info':
                                wd_vals.append(n_info)
                            else:
                                wd_vals.append(None)
                        cursor.execute(wd_insert_sql, wd_vals)
                        _maybe_commit()
                        word_id = cursor.lastrowid

                    # 目录-单词关系
                    rel_vals = []
                    for col in rel_cols_to_use:
                        if col == rel_cg_id:
                            rel_vals.append(catalog_id)
                        elif col == rel_cg_val_id:
                            rel_vals.append(None)
                        elif col == rel_wd_id:
                            rel_vals.append(word_id)
                        elif col == rel_wd_val_id:
                            rel_vals.append(None)
                        elif col == rel_orders:
                            rel_vals.append(idx)
                        elif col == rel_deleted:
                            rel_vals.append(0)
                        else:
                            rel_vals.append(None)
                    cursor.execute(rel_insert_sql, rel_vals)
                    _maybe_commit()

                # 递归子目录
                children = node.get('children_directory') or []
                for child_idx, child in enumerate(children, start=1):
                    insert_catalog_node(child, catalog_id, child_idx)
                return catalog_id

            # 统计 total_words（递归累加所有叶子 words 数）
            def count_words(node: dict) -> int:
                """Recursively count words under *node*.

                Non-leaf nodes (with children_directory) contribute only the
                sum of their descendants; leaf nodes contribute len(words).
                """
                ch = node.get('children_directory') or []
                if ch:
                    return sum(count_words(c) for c in ch)
                return len(node.get('words') or [])
            computed_total = sum(count_words(n) for n in directories)

            # 规范化教材名：去掉末尾的 [共xxx个] 或 (2024)[共xxx个] 等尾部
            def normalize_course_name(name: str) -> str:
                """Normalize a textbook name by dropping a trailing bracketed
                suffix such as "[共xxx个]" or "(2024)[共xxx个]"'s bracket part."""
                s = name.strip()
                # Drop the last bracketed segment (and anything inside it) at the end
                s = re.sub(r"\[[^\]]*\]\s*$", "", s)
                # Trim whitespace left behind by the removal
                return s.strip()

            norm_name = normalize_course_name(course_name)

            # 若教材名需要更新，且 t_edu_textbook 有相应列，则更新名称与总词数
            if norm_name != course_name or (tb_wc is not None):
                try:
                    set_cols = []
                    set_vals = []
                    if tb_name is not None and norm_name != course_name:
                        set_cols.append(f"{tb_name}=%s"); set_vals.append(norm_name)
                    if tb_wc is not None:
                        set_cols.append(f"{tb_wc}=%s"); set_vals.append(computed_total)
                    if set_cols:
                        sql = f"UPDATE t_edu_textbook SET {', '.join(set_cols)} WHERE {('id' if 'id' in tb_cols else 'textbook_id')}=%s"
                        set_vals.append(textbook_id)
                        cursor.execute(sql, set_vals)
                except Exception:
                    pass

            # 处理顶层节点
            for top_idx, node in enumerate(directories, start=1):
                insert_catalog_node(node, None, top_idx)

            _db_commit(cursor)
            print(f'[IMPORT] 导入完成: {norm_name} (words={computed_total})')
    except Exception as e:
        _db_rollback(cursor)
        print(f'[IMPORT] 导入失败: {str(e)}')


def import_textbooks_from_output(cursor):
    """Deprecated no-op shim.

    Kept only so legacy call sites do not raise; new code should call
    import_textbooks_from_dir pointed at the output_nested directory.
    """
    warning = '[WARN] import_textbooks_from_output 已废弃，请改用 import_textbooks_from_dir(cursor, "crawler/english/lers/output_nested")'
    print(warning)
    return


# ========== 预览补充：从 definitions_text 中为 n 属性补齐中译 ==========

def _extract_chinese_only(s: str) -> str:
    try:
        # 保留中日韩字符与常见中文标点和空白
        m = re.findall(r"[\u4e00-\u9fff，。；：、“”‘’（）《》…！？，、\s]+", s)
        return ''.join(m).strip()
    except Exception:
        return s.strip()


def _find_after_sentence_dot(text: str, start_from: int) -> int:
    """从 start_from 起找到下一个句号 '.' 的后一个位置；若找不到则返回 start_from。"""
    i = text.find('.', start_from)
    if i == -1:
        return start_from
    # 包含后续空格
    j = i + 1
    while j < len(text) and text[j] == ' ':
        j += 1
    return j


def preview_fill_cn_translations_from_definitions(sample_paths: list[str] | None = None, per_file_max: int = 8):
    """Backward-compatible entry point.

    Delegates to the v2 previewer, which aligns multiple senses by
    splitting on "1、2、3、..." markers. Print-only; nothing is written back.
    """
    return preview_fill_cn_translations_from_definitions_v2(
        sample_paths=sample_paths,
        per_file_max=per_file_max,
    )


# 内部编号切分工具（供 v2 使用）
def _split_definitions_by_numbers(text: str) -> list[str]:
    s = (text or '').strip()
    if not s:
        return []
    matches = list(re.finditer(r"(?:(?<=^)|(?<=\s))(\d{1,2})、", s))
    if not matches:
        m2 = list(re.finditer(r"(?:(?<=^)|(?<=\s))2、", s))
        if m2:
            idx = m2[0].start()
            return [s[:idx].strip(), s[idx:].strip()]
        return []
    segs = []
    for i, m in enumerate(matches):
        start = m.start()
        end = matches[i+1].start() if i+1 < len(matches) else len(s)
        segs.append(s[start:end].strip())
    return segs


# 新版预览：支持多义项按“1、2、3、”切分对齐
def preview_fill_cn_translations_from_definitions_v2(sample_paths: list[str] | None = None, per_file_max: int = 8):
    """Print-only preview of the en/cn translations that could be back-filled
    into each word's `n` senses from its raw `definitions_text`.

    Multi-sense entries are aligned by splitting the definitions text on
    numeric markers ("1、2、3、...") via _split_definitions_by_numbers.
    Nothing is written back to disk.

    Args:
        sample_paths: explicit JSON files to scan; defaults to the first
            file of the package-level output_nested directory.
        per_file_max: cap on the number of printed samples per file.
    """
    base = Path(__file__).parent.parent / 'output_nested'
    files = sample_paths or [str(p) for p in sorted(base.glob('*.json'))[:1]]
    if not files:
        print('[PREVIEW v2] 没有可用的 JSON 文件')
        return

    # Fix: a local `split_defs` helper duplicating _split_definitions_by_numbers
    # used to be defined here but was never called; it has been removed.

    printed_total = 0
    for fp in files:
        try:
            with open(fp, 'r', encoding='utf-8') as f:
                data = json.load(f)
        except Exception as e:
            print('[PREVIEW v2] 打开失败:', fp, e)
            continue
        printed = 0
        print('\n[PREVIEW v2] 文件:', Path(fp).name)

        def walk(nodes):
            nonlocal printed, printed_total
            for d in nodes or []:
                for w in d.get('words', []) or []:
                    defs_text = (((w.get('raw_text_blocks') or {}).get('definitions_text')) or '').strip()
                    senses = w.get('n') or []
                    numbered = _split_definitions_by_numbers(defs_text) if defs_text and len(senses) > 1 else []
                    for si, sn in enumerate(senses):
                        seg_text = numbered[si] if (numbered and si < len(numbered)) else defs_text
                        en_exp = (sn.get('en_explanation') or '').strip()
                        en_sent = (sn.get('en_sentence') or '').strip()
                        # en_translation candidate: the text sandwiched between
                        # the English explanation and the English sentence.
                        en_tr = ''
                        if seg_text and en_exp and en_sent:
                            s0 = seg_text.find(en_exp)
                            s1 = seg_text.find(en_sent)
                            if s0 != -1 and s1 != -1 and s1 > s0:
                                mid = seg_text[s0 + len(en_exp): s1]
                                p = mid.find('|')
                                en_tr = (mid[p:].strip() if p != -1 else _extract_chinese_only(mid))
                        # cn_translation candidate: Chinese text after the
                        # sentence's closing '.', up to the next POS marker.
                        cn_tr = ''
                        if seg_text and en_sent:
                            pos = seg_text.find(en_sent)
                            if pos != -1:
                                start = _find_after_sentence_dot(seg_text, pos + len(en_sent))
                                # NOTE(review): unlike enrich_cn_translations_to_json,
                                # this token list omits ' vt. ' / ' vi. ' — confirm intended.
                                tokens = [' n. ', ' v. ', ' adj. ', ' adv. ', ' pron. ', ' prep. ', ' conj. ', ' det. ', ' num. ', ' art. ', ' interj. ', ' int. ']
                                ends = [seg_text.find(tk, start) for tk in tokens]
                                ends = [i for i in ends if i != -1]
                                end = min(ends) if ends else len(seg_text)
                                cn_tr = _extract_chinese_only(seg_text[start:end])
                        if (en_tr or cn_tr) and printed < per_file_max:
                            printed += 1
                            printed_total += 1
                            print(f"  - word='{w.get('word_name','')}' sense#{si+1}")
                            if en_tr:
                                print('    en_explanation -> en_translation:', en_tr)
                            if cn_tr:
                                print('    en_sentence -> cn_translation:', cn_tr)
                            if printed >= per_file_max:
                                return
                if printed >= per_file_max:
                    return
                walk(d.get('children_directory') or [])

        walk(data.get('directories') or [])
        if printed == 0:
            print('  (未找到可补充的样本，可能该文件不含 n 或 definitions_text)')
    if printed_total == 0:
        print('[PREVIEW v2] 没有任何可打印的样例')




# ========== 离线工具：AI补全到JSON & 目录一致性报告 ==========

def _map_ai_into_word_dict(word_obj: dict, ai: dict):
    """将AI结果字段合并到单词字典中（仅填补缺失，不覆盖已有非空）"""
    def put(k, v):
        if v is None:
            return
        old = word_obj.get(k)
        if old is None or (isinstance(old, str) and old.strip() == '') or (isinstance(old, (list, dict)) and len(old) == 0):
            word_obj[k] = v
    put('part_of_speech', (ai.get('part_of_speech') or '').strip())
    put('meaning', (ai.get('meaning') or '').strip())
    put('multiple_meanings', ai.get('multiple_meanings') or [])
    put('sentences', ai.get('sentences') or [])
    put('common_phrases', ai.get('common_phrases') or [])
    put('phonetic_us', (ai.get('phonetic_us') or '').strip())
    put('phonetic_uk', (ai.get('phonetic_uk') or '').strip())
    put('tense_forms', ai.get('tense_forms') or {})
    put('phrases', ai.get('phrases') or [])
    put('common_mistakes', ai.get('common_mistakes') or [])
    put('synonyms', ai.get('synonyms') or [])
    put('antonyms', ai.get('antonyms') or [])
    put('frequency', (ai.get('frequency') or '').strip())
    put('homophone', (ai.get('homophone') or '').strip())
    put('vowels', ai.get('vowels') or [])
    put('consonants', ai.get('consonants') or [])
    word_obj['ai_enriched'] = True


def enrich_textbook_json_file(json_path: Path, out_dir: Path, max_words_per_book: int | None = None) -> dict:
    """Run AI enrichment on one textbook JSON and save a copy to *out_dir*.

    Only words missing any key field (meaning, phonetic_us, phonetic_uk,
    part_of_speech) are sent to the AI; existing values are preserved by
    _map_ai_into_word_dict. Returns a stats dict with attempted/enriched
    counts and the output path.

    NOTE(review): only top-level directories are scanned — unlike the
    enrich/preview helpers this does not recurse into children_directory;
    confirm that is intentional.
    """
    out_dir.mkdir(parents=True, exist_ok=True)
    with open(json_path, 'r', encoding='utf-8') as f:
        data = json.load(f)
    course_name = data.get('course_name') or json_path.stem
    attempted = 0
    enriched = 0
    print(f'----开始进行 ai 补全------')

    def _needs_ai(word: dict) -> bool:
        # A word is incomplete when any key field is absent or blank.
        for key in ('meaning', 'phonetic_us', 'phonetic_uk', 'part_of_speech'):
            val = word.get(key)
            if val is None or (isinstance(val, str) and val.strip() == ''):
                return True
        return False

    for directory in data.get('directories', []):
        for word in directory.get('words', []):
            if max_words_per_book is not None and attempted >= max_words_per_book:
                break
            if not _needs_ai(word):
                continue
            attempted += 1
            ai = generate_word_info_with_siliconflow(word.get('word_name', ''))
            print(f'---- ai 生成的结果: {ai} ------')
            if ai:
                _map_ai_into_word_dict(word, ai)
                enriched += 1

    # Recompute the aggregate word count (used for validation only).
    data['total_words'] = sum(len(d.get('words', [])) for d in data.get('directories', []))
    out_path = out_dir / json_path.name
    with open(out_path, 'w', encoding='utf-8') as fo:
        json.dump(data, fo, ensure_ascii=False, indent=2)
    print(f'[AI] {course_name}: enriched {enriched}/{attempted}, saved -> {out_path}')
    return {'course_name': course_name, 'file': json_path.name, 'attempted': attempted, 'enriched': enriched, 'output': str(out_path)}



# 写回 JSON：为 words.n[*] 补充 en_translation/cn_translation 并保存到新目录
from typing import Tuple

def enrich_cn_translations_to_json(sample_paths: list[str] | None = None,
                                   src_dir: str | None = None,
                                   out_dir: str | None = None,
                                   max_files: int = 2) -> Tuple[int, list[str]]:
    root_dir = Path(__file__).parent.parent
    base = Path(src_dir) if src_dir else (root_dir / 'output_nested')
    all_files = [str(p) for p in sorted(base.glob('*.json'))]
    files = sample_paths or (all_files if (max_files is None) else all_files[:max_files])
    # 统一以代码所在目录为锚点，避免工作目录影响
    if out_dir is None:
        out_base = root_dir / 'output_nested_enriched'
    else:
        out_base = Path(out_dir)
        if not out_base.is_absolute():
            out_base = root_dir / out_base
    out_base.mkdir(parents=True, exist_ok=True)
    written = []
    updated_words_total = 0

    def process_word(w: dict) -> int:
        defs_text = (((w.get('raw_text_blocks') or {}).get('definitions_text')) or '').strip()
        senses = w.get('n') or []
        if not isinstance(senses, list):
            return 0
        numbered = _split_definitions_by_numbers(defs_text) if defs_text and len(senses) > 1 else []
        updated = 0
        for si, sn in enumerate(senses):
            if not isinstance(sn, dict):
                continue
            seg_text = numbered[si] if (numbered and si < len(numbered)) else defs_text
            en_exp = (sn.get('en_explanation') or '').strip()
            en_sent = (sn.get('en_sentence') or '').strip()
            # en_translation
            en_tr = (sn.get('en_translation') or '').strip()
            if not en_tr and seg_text and en_exp and en_sent:
                s0 = seg_text.find(en_exp)
                s1 = seg_text.find(en_sent)
                if s0 != -1 and s1 != -1 and s1 > s0:
                    mid = seg_text[s0 + len(en_exp): s1]
                    p = mid.find('|')
                    en_tr = (mid[p:].strip() if p != -1 else _extract_chinese_only(mid))
                    if en_tr:
                        sn['en_translation'] = en_tr
                        updated += 1
            # cn_translation
            cn_tr = (sn.get('cn_translation') or '').strip()
            if cn_tr == '.':
                cn_tr = ''
            if not cn_tr and seg_text and en_sent:
                pos = seg_text.find(en_sent)
                if pos != -1:
                    start = _find_after_sentence_dot(seg_text, pos + len(en_sent))
                    tokens = [' n. ', ' v. ', ' vt. ', ' vi. ', ' adj. ', ' adv. ', ' pron. ', ' prep. ', ' conj. ', ' det. ', ' num. ', ' art. ', ' interj. ', ' int. ']
                    ends = [seg_text.find(tk, start) for tk in tokens]
                    ends = [i for i in ends if i != -1]
                    end = min(ends) if ends else len(seg_text)
                    cn_tr = _extract_chinese_only(seg_text[start:end])
                    if cn_tr:
                        sn['cn_translation'] = cn_tr
                        updated += 1
        return updated

    def process_nodes(nodes: list[dict]) -> int:
        cnt = 0
        for d in nodes or []:
            for w in d.get('words', []) or []:
                cnt += process_word(w)
            cnt += process_nodes(d.get('children_directory') or [])
        return cnt

    for fp in files:
        try:
            data = json.load(open(fp, 'r', encoding='utf-8'))
        except Exception as e:
            print('[ENRICH] 打开失败:', fp, e)
            continue
        updated = process_nodes(data.get('directories') or [])
        updated_words_total += updated
        out_path = out_base / Path(fp).name
        try:
            with open(out_path, 'w', encoding='utf-8') as fo:
                json.dump(data, fo, ensure_ascii=False, indent=2)
            written.append(str(out_path))
            print(f"[ENRICH] 写回: {out_path} (updated_senses={updated})")
        except Exception as e:
            print('[ENRICH] 写回失败:', out_path, e)

    # 写入后做完整性校验：重新打开并解析，校验关键结构
    try:
        with open(out_path, 'r', encoding='utf-8') as fi:
            _chk = json.load(fi)
        # 校验必须字段
        assert isinstance(_chk.get('directories'), list)
        # 递归检查叶子 words 是否为列表
        def _check_dirs(nodes):
            for nd in nodes or []:
                wd = nd.get('words')
                if wd is not None and not isinstance(wd, list):
                    raise AssertionError('words 不是 list')
                _check_dirs(nd.get('children_directory') or [])
        _check_dirs(_chk.get('directories') or [])
    except Exception as e:
        print(f"[ENRICH][ERROR] 写入后校验失败: {out_path} -> {e}")


    return updated_words_total, written

# 添加 v_info / n_info 字段到 JSON（基于已分段文本的原始片段保存）

# 全量校验 enriched 目录下的 JSON 完整性
def check_enriched_outputs(enriched_dir: str | None = None) -> dict:
    base = Path(enriched_dir) if enriched_dir else (Path(__file__).parent.parent / 'output_nested_enriched')
    if not base.exists():
        print('[CHECK] 目录不存在:', base)
        return {'dir': str(base), 'total': 0, 'ok': 0, 'errors': 0, 'error_files': []}
    files = sorted(base.glob('*.json'))
    total = len(files)
    ok = 0
    errs = []
    for p in files:
        try:
            with open(p, 'r', encoding='utf-8') as f:
                data = json.load(f)
            # 结构校验
            if not isinstance(data.get('directories'), list):
                raise AssertionError('directories 不是 list')
            def _check(nodes):
                for nd in nodes or []:
                    wd = nd.get('words')
                    if wd is not None and not isinstance(wd, list):
                        raise AssertionError('words 不是 list')
                    _check(nd.get('children_directory') or [])
            _check(data.get('directories') or [])
            ok += 1
        except Exception as e:
            print(f"[CHECK][ERROR] {p.name}: {e}")
            errs.append(p.name)
    print(f"[CHECK] 完成：dir={base} total={total} ok={ok} errors={len(errs)}")
    return {'dir': str(base), 'total': total, 'ok': ok, 'errors': len(errs), 'error_files': errs}

def add_vn_info_to_json(in_path: str, out_path: str) -> int:
    """Extract verb/noun definition snippets into v_info / n_info fields.

    Reads the textbook JSON at *in_path*; for every word whose
    raw_text_blocks.definitions_text contains a " v. " or " n. " section,
    stores the raw snippet (from the marker up to the next part-of-speech
    token) in w['v_info'] / w['n_info'] when those fields are not already
    set. Writes the result to *out_path* and returns the number of fields
    that were filled.
    """
    # Fix: open files via context managers so both handles are closed
    # deterministically (the original leaked the read and write handles).
    with open(in_path, 'r', encoding='utf-8') as fi:
        data = json.load(fi)

    pos_tokens = [' n. ', ' v. ', ' adj. ', ' adv. ', ' pron. ', ' prep. ',
                  ' conj. ', ' det. ', ' num. ', ' art. ', ' interj. ', ' int. ']

    def process_word(w: dict) -> int:
        changed = 0
        defs_text = (((w.get('raw_text_blocks') or {}).get('definitions_text')) or '').strip()
        if not defs_text:
            return 0

        def extract_block(prefix: str) -> str:
            # Slice from the prefix up to the next part-of-speech marker.
            pos = defs_text.find(prefix)
            if pos == -1:
                return ''
            start = pos + len(prefix)
            ends = [i for i in (defs_text.find(tk, start) for tk in pos_tokens) if i != -1]
            end = min(ends) if ends else len(defs_text)
            return defs_text[pos:end].strip()

        v_block = extract_block(' v. ')
        n_block = extract_block(' n. ')
        if v_block and not w.get('v_info'):
            w['v_info'] = v_block
            changed += 1
        if n_block and not w.get('n_info'):
            w['n_info'] = n_block
            changed += 1
        return changed

    def walk(nodes) -> int:
        cnt = 0
        for d in nodes or []:
            for w in d.get('words', []) or []:
                cnt += process_word(w)
            cnt += walk(d.get('children_directory') or [])
        return cnt

    changed = walk(data.get('directories') or [])
    with open(out_path, 'w', encoding='utf-8') as fo:
        json.dump(data, fo, ensure_ascii=False, indent=2)
    # Fix: removed the unreachable `return updated_words_total, written`
    # that followed this return and referenced undefined names.
    return changed

def enrich_first_two_output_textbooks(out_folder: str = 'crawler/english/lers/output_enriched', max_words_per_book: int | None = 30):
    """AI-enrich the first two textbook JSONs under output_nested.

    Results are written into *out_folder*. Requires SILICONFLOW_API_KEY;
    when it is unset (or no JSONs are found) a warning is printed and []
    is returned. Otherwise returns one stats dict per processed file.
    """
    target_dir = Path(out_folder)
    src_dir = Path(__file__).parent.parent / 'output_nested'
    candidates = sorted(src_dir.glob('*.json'))[:2]
    if not candidates:
        print(f'[AI] 未在 {src_dir} 找到教材 JSON')
        return []
    if not SILICONFLOW_API_KEY:
        print('[AI] 未设置 SILICONFLOW_API_KEY，无法调用AI。请先在环境变量中配置。')
        return []
    return [enrich_textbook_json_file(jf, target_dir, max_words_per_book=max_words_per_book)
            for jf in candidates]


def generate_output_mismatch_report(report_path: str = 'crawler/english/lers/output_reports/mismatch_report.json') -> str:
    """Scan all textbook JSONs under the output directory and report
    directories whose expected_total disagrees with word_count.

    Writes a normalized JSON report to *report_path* (parent directories
    are created as needed) and returns the report path as a string.
    """
    src_dir = Path(__file__).parent.parent / 'output'
    report_file = Path(report_path)
    report_file.parent.mkdir(parents=True, exist_ok=True)
    issues = []
    for jf in sorted(src_dir.glob('*.json')):
        try:
            with open(jf, 'r', encoding='utf-8') as f:
                data = json.load(f)
        except Exception as e:
            print(f'[REPORT] 读取失败: {jf.name}: {e}')
            continue
        dirs = data.get('directories') or []
        mismatched = []
        for d in dirs:
            et = d.get('expected_total')
            wc = d.get('word_count')
            if isinstance(et, int) and isinstance(wc, int) and et != wc:
                # Fix: the original dict literal repeated the
                # 'directory_name' key; duplicates are silently collapsed,
                # so only the single key is kept.
                mismatched.append({
                    'directory_name': d.get('directory_name'),
                    'expected_total': et,
                    'word_count': wc
                })
        if mismatched:
            computed_total = sum(len(d.get('words', [])) for d in dirs)
            issues.append({
                'file': jf.name,
                'course_name': data.get('course_name') or jf.stem,
                'reported_total_words': data.get('total_words'),
                'computed_total_words': computed_total,
                'mismatched_directories': mismatched
            })
    payload = {
        'generated_at': time.strftime('%Y-%m-%d %H:%M:%S'),
        'root': str(src_dir),
        'issue_count': len(issues),
        'issues': issues
    }
    with open(report_file, 'w', encoding='utf-8') as fo:
        json.dump(payload, fo, ensure_ascii=False, indent=2)
    print(f'[REPORT] 保存: {report_file} (issues={len(issues)})')
    return str(report_file)
if __name__ == '__main__':
    # Demo: preview translation back-fill (print-only, no JSON writes) using
    # the v2 previewer, which splits multi-sense entries on numeric markers.
    # try:
    #     # Full run: do not limit max_files
    #     preview_fill_cn_translations_from_definitions_v2(per_file_max=10)
    #     updated, outputs = enrich_cn_translations_to_json(max_files=None)
    #     print(f'[MAIN] enrich done (ALL), updated_senses_total={updated}, files={len(outputs)}')
    #     for op in outputs[:5]:
    #         print('[MAIN] written (sample):', op)
    #     # Append v_info / n_info to the first output file as a sample
    #     if outputs:
    #         src = outputs[0]
    #         dst = str((Path(__file__).parent.parent / 'output_nested_enriched' / ('with_vn_' + Path(src).name)))
    #         changed = add_vn_info_to_json(src, dst)
    #         print(f'[MAIN] add_vn_info (sample): changed={changed}, out={dst}')
    # except Exception as e:
    #     print('[MAIN] 预览/写回失败:', e)

    # ========================= Configuration =========================
    DB_CONFIG = {
        'host': 'localhost',
        'port': '63306',
        'user': 'root',
        'password': 'root',
        'database': 'shixue-les'
    }
    # ========= Offline tools (commented out by default; enable as needed) =========
    # 1) Print directory-level statistics and sample textbooks for output
    # analyze_output_levels()

    # 2) Generate the mismatch report under output (expected_total vs word_count)
    # generate_output_mismatch_report('crawler/english/lers/output_reports/mismatch_report.json')

    # generate_output_mismatch_report('crawler/english/lers/output_reports/mismatch_report.json')

    # 3) AI-enrich the first two textbooks into a new directory (needs SILICONFLOW_API_KEY)
    # enrich_first_two_output_textbooks(out_folder='crawler/english/lers/output_enriched', max_words_per_book=10)

    # ========= Database import (enable/disable the lines below as needed) =========
    # 4) Import from the given directory (supports output or output_enriched)
    conn = mysql.connector.connect(**DB_CONFIG)
    cur = conn.cursor()
    print('DB connected:', conn.is_connected())
    # WARNING: the next call is ACTIVE and clears the import tables before
    # re-importing; comment it out to preserve existing data. (The previous
    # comment wrongly implied the call was disabled.)
    clear_import_tables(cur)
    src_dir = str((Path(__file__).parent.parent / 'output_nested_enriched').resolve())
    import_textbooks_from_dir(cur, src_dir, commit_every=500)
    cur.close(); conn.close()

    # 5) Quick row counts to verify the import
    # conn = mysql.connector.connect(**DB_CONFIG)
    # cur = conn.cursor()
    # for t in ['t_edu_textbook', 't_edu_catalog', 't_edu_word', 't_edu_catalog_word_rel']:
    #     cur.execute(f'SELECT COUNT(*) FROM {t}')
    #     print(t, cur.fetchone()[0])
    # cur.close(); conn.close()

    # 6) Optionally AI-fill missing words directly in the DB (use with care;
    #    this updates t_edu_word in place)
    # conn = mysql.connector.connect(**DB_CONFIG)
    # cur = conn.cursor()
    # enrich_missing_words_with_ai(cur, batch_size=20, max_rows=100)
    # cur.close(); conn.close()


# 批量 AI 补全函数（可在需要时手动调用）
# 使用示例：
#   conn = mysql.connector.connect(**DB_CONFIG)
#   cur = conn.cursor()
#   enrich_missing_words_with_ai(cur, batch_size=20, max_rows=200)
#   cur.close(); conn.close()


def clean_non_english_mismatch_records(mismatch_report_path: str = '/Users/pikabu/my/work/project/python-project/my/crawler/english/lers/util/_mismatch_report.json') -> bool:
    """Drop non-English-subject records from a _mismatch_report.json file.

    Filters out mismatch entries whose course or directory name mentions a
    non-English subject (Chinese, math, history, biology, geography,
    chemistry, ...) and writes the filtered data back in place.

    Args:
        mismatch_report_path: path of the mismatch report file.

    Returns:
        True on success; False on any failure (missing file, bad format,
        I/O or parse error).
    """
    try:
        # Keywords that mark a course as NOT being English-language study.
        non_english_keywords = [
            # Chinese-language subject names
            '语文', '数学', '历史', '生物', '地理', '化学', '物理', '政治',
            '中国文化', '文化常识', '古诗词', '文言文', '成语',
            # Marxism-related course names
            '马克思', '马列', '概论', '基本原理',
            # English subject names
            'chinese', 'math', 'mathematics', 'history', 'biology',
            'geography', 'chemistry', 'physics', 'politics', 'culture',
            'marxism', 'marx',
            # Other likely markers
            '必修', '选修', '高考', '中考', '小升初'
        ]

        if not os.path.exists(mismatch_report_path):
            print(f'文件不存在: {mismatch_report_path}')
            return False

        with open(mismatch_report_path, 'r', encoding='utf-8') as f:
            data = json.load(f)

        if 'mismatches' not in data:
            print('文件格式错误：缺少mismatches字段')
            return False

        original_count = len(data['mismatches'])
        print(f'原始记录数: {original_count}')

        def _is_non_english(record: dict) -> bool:
            # Case-insensitive keyword scan over both course and directory.
            course_lower = record.get('course', '').lower()
            directory_lower = record.get('directory', '').lower()
            return any(kw.lower() in course_lower or kw.lower() in directory_lower
                       for kw in non_english_keywords)

        kept = []
        removed_courses = []
        for record in data['mismatches']:
            if _is_non_english(record):
                removed_courses.append(record.get('course', ''))
            else:
                kept.append(record)

        data['mismatches'] = kept
        final_count = len(kept)
        print(f'过滤后记录数: {final_count}')
        print(f'删除记录数: {original_count - final_count}')

        # Show the removed courses (deduplicated).
        unique_removed = list(set(removed_courses))
        if unique_removed:
            print(f'\n删除的非英语课程 ({len(unique_removed)} 个):')
            for course in sorted(unique_removed):
                print(f'  - {course}')

        with open(mismatch_report_path, 'w', encoding='utf-8') as f:
            json.dump(data, f, ensure_ascii=False, indent=2)

        print(f'\n清理完成！已将过滤后的数据写回: {mismatch_report_path}')
        return True

    except Exception as e:
        print(f'清理非英语记录失败: {str(e)}')
        return False


def enrich_missing_words_with_ai(cursor, batch_size=50, max_rows=200):
    """
    Batch update missing word info using AI; preserve existing non-empty fields.
    Missing criteria: any of cn_name/pronunciation/yinbiao_us/yinbiao_en is empty.
    """
    try:
        cursor.execute(
            """
            SELECT word FROM t_edu_word
            WHERE (cn_name IS NULL OR cn_name='')
               OR (pronunciation IS NULL OR pronunciation='')
               OR (yinbiao_us IS NULL OR yinbiao_us='')
               OR (yinbiao_en IS NULL OR yinbiao_en='')
            LIMIT %s
            """,
            (max_rows,)
        )
        candidates = [row[0] for row in cursor.fetchall()]
        print(f'AI补全候选单词: {len(candidates)} 个')
        if not candidates:
            return 0

        total_updated = 0
        for offset in range(0, len(candidates), batch_size):
            chunk = candidates[offset:offset + batch_size]
            # Pair each word with its AI payload, skipping empty results.
            pending = [(word, info) for word in chunk
                       if (info := generate_word_info_with_siliconflow(word))]
            if not pending:
                continue
            total_updated += update_word_info_in_db(cursor, pending)
            try:
                # Best-effort commit; some cursor wrappers expose no connection.
                cursor.connection.commit()
            except Exception:
                pass
            print(f'已补全: {total_updated} 个')
        return total_updated
    except Exception as e:
        print('AI 批量补全失败:', e)
        return 0