#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
纯 API 版英语课程爬虫（不启用浏览器）
流程：
1) 登录（沿用 pa.login()，返回 requests.Session）
2) GET 课程页 HTML，解析每个课程卡片上的“学习本课”onclick 参数
3) 对每个课程发送 POST: LoadCourseGroupClass，解析返回 Html，提取“显示明细”onclick 参数
4) 发送 POST: LoadCourseGroupDetail，解析返回 Html，提取目录里的 GotoUrl('ubrowse.aspx?...')
5) 复用现有的 GetBrowseRecord POST 逻辑，逐页拉取单词，解密+解析
6) 每处理一个目录即时保存课程 JSON
注意：为便于自测，默认只处理“总词数<200”的第一个课程
"""

import os
import re
import json
import time
import logging
import urllib.parse as urlp
from typing import List, Dict, Any


import requests
from bs4 import BeautifulSoup

from pa import login, get_html_content  # type: ignore
from crypto import decrypt_nenver  # type: ignore

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

BASE = "https://les.znjiyi.com"
UCOURSE_URL = f"{BASE}/a/ucourse.aspx"
HANDLE_URL = f"{BASE}/a/Handle/ExamLib_Process.aspx"

class APICrawler:
    def __init__(self):
        self.session = None  # requests.Session
        self.output_dir = "output"
        self._processed: set[str] = set()
        self._skip_dir = os.path.join(self.output_dir, '.skip')
        os.makedirs(self._skip_dir, exist_ok=True)

        os.makedirs(self.output_dir, exist_ok=True)

    # ---------- 基础 ----------
    def _headers(self, referer: str) -> Dict[str, str]:
        """Build the XHR request headers for POSTs to the handler endpoint.

        Mirrors the headers Chrome sends for the site's own AJAX calls so
        the server treats us as an in-page request; only the referer varies.
        """
        header_pairs = (
            ('accept', 'text/html, */*; q=0.01'),
            ('accept-language', 'zh-CN,zh;q=0.9'),
            ('content-type', 'application/x-www-form-urlencoded; charset=UTF-8'),
            ('origin', BASE),
            ('priority', 'u=1, i'),
            ('referer', referer),
            ('sec-ch-ua', '"Not)A;Brand";v="8", "Chromium";v="138", "Google Chrome";v="138"'),
            ('sec-ch-ua-mobile', '?0'),
            ('sec-ch-ua-platform', '"macOS"'),
            ('sec-fetch-dest', 'empty'),
            ('sec-fetch-mode', 'cors'),
            ('sec-fetch-site', 'same-origin'),
            ('user-agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36'),
            ('x-requested-with', 'XMLHttpRequest'),
        )
        return dict(header_pairs)

    def _double_json_loads(self, text: str) -> Dict[str, Any]:
        # 服务器常返回字符串化的 JSON
        try:
            return json.loads(json.loads(json.dumps(text)))
        except Exception:
            try:
                return json.loads(text)
            except Exception:
                return {}

    def _seed_cookies(self):
        """Warm up the les.znjiyi.com session and plant the cookies the
        handler endpoints expect.

        Sequence: GET the course page (obtains ASP.NET_SessionId etc. for
        the ``les`` domain), force-set the classroom cookies, then fire one
        LoadMenuTop POST — the page's own scripts do the same on load.
        The POST is best-effort: its failure is swallowed.
        """
        # GET the course page to receive ASP.NET_SessionId & co. for the les domain.
        self.session.get(UCOURSE_URL, timeout=6000, verify=False)
        # Make sure the classroom cookies exist.
        cj = self.session.cookies
        cj.set('last_classroom_enabbr', 'les', domain='les.znjiyi.com', path='/')
        cj.set('last_classroom_name', urlp.quote('乐尔思', safe=''), domain='les.znjiyi.com', path='/')
        cj.set('HasGetTopMenu', '0', domain='.znjiyi.com', path='/')
        cj.set('HasGetSliderMenu', '0', domain='.znjiyi.com', path='/')
        # Call LoadMenuTop once to initialise the session (mirrors the page script).
        common = json.dumps([{ 'IsMobile': False, 'IsTablet': False, 'ScreenWidth': 1512, 'ScreenHeight': 701,
                               'WindowScreenHeight': 701, 'IsIOS': False, 'ReturnUrl': UCOURSE_URL,
                               'CurrentUrl': UCOURSE_URL, 'IsInWeixin': False,
                               'IP': '', 'Location': '{"region":"","city":"","country":""}' }],
                             separators=(',', ':'), ensure_ascii=False)
        data = { 'CommonJsonPara': common, 'processFlag': 'LoadMenuTop' }
        try:
            self.session.post(HANDLE_URL, headers=self._headers(UCOURSE_URL), data=data, timeout=6000, verify=False)
        except Exception:
            pass
        # NOTE(review): original comment said "visit the course page again" —
        # a second GET was apparently intended here but never written; confirm.
    def _is_login_or_fallback_html(self, html: str) -> bool:
        if not html:
            return True
        h = html.strip().lower()
        return ('login.aspx' in h) or ('__viewstate' in h and 'examl   ib_process.aspx' in h.replace(' ', '')) or ('<form' in h and 'examl' in h and 'process.aspx' in h)
    def _log_session_cookies(self, hint: str = ""):
        try:
            logging.info(f"[SESSION] {hint} cookies:")
            for c in self.session.cookies:
                logging.info(f"  - {c.domain} {c.name}={c.value}")
        except Exception as e:
            logging.warning(f"[SESSION] 打印cookies失败: {e}")


    def _relogin_and_seed(self):
        """Re-login and re-seed cookies after a suspected session expiry.

        Best-effort: failures are logged but never raised; callers simply
        retry their request afterwards.

        Fixes vs. previous revision:
        - proxies are disabled BEFORE any request is issued (the proxy
          bypass exists to cure ProxyError, but it used to be applied only
          after _seed_cookies had already gone through the broken proxy);
        - the final warm-up GET is guarded, so a failed login() no longer
          crashes with AttributeError on a None session;
        - the swallowed login failure is now logged instead of silent.
        """
        try:
            self.session = login()
            # Explicitly disable proxies to avoid ProxyError — must happen
            # before the seeding requests below.
            self.session.proxies = {'http': None, 'https': None}
            self._seed_cookies()
        except Exception as e:
            logging.warning(f"[SESSION] 重新登录失败: {e}")

        # Warm the course page again; skip when login() left no usable session.
        if self.session is not None:
            try:
                self.session.get(UCOURSE_URL, timeout=6000, verify=False)
            except Exception as e:
                logging.warning(f"[SESSION] 课程页预热失败: {e}")

    # ---------- 第0步：课程列表（POST: LoadUCourse） ----------
    def fetch_courses(self, page_size: int = 100, max_pages: int = 50) -> List[Dict[str, Any]]:
        """Fetch the "my courses" list via repeated POST LoadUCourse calls.

        Walks every result page (capped at *max_pages*, *page_size* items
        per page) and parses each course card into a descriptor dict. Two
        card shapes are handled:

        - mode 'class':  card has a "study this course" button whose
          onclick carries the LoadCourseGroupClass arguments;
        - mode 'detail': card is already expanded and only exposes a
          LoadCourseGroupDetail onclick.

        Side effects: sets self.total_courses / self.total_pages /
        self.page_size from the paging bar.

        Fixes vs. previous revision:
        - the paging bar is searched in PagingHtml first (as the old comment
          claimed but the code never did), falling back to Html;
        - the debug log reported len(html) twice; PagingHtmlLen is now real;
        - class_total / detail_total are actually accumulated (they were
          always logged as 0);
        - in-loop `from urllib.parse import unquote` replaced by the
          module-level urlp alias.
        """
        self.total_courses = 0
        self.page_size = page_size
        self.total_pages = 1
        courses: List[Dict[str, Any]] = []
        # Running per-mode totals (for debugging and consistency checks).
        class_total = 0
        detail_total = 0

        def enc(v: Any) -> str:
            # The handler expects doubly URL-encoded compact JSON here.
            return urlp.quote(urlp.quote(json.dumps(v, separators=(',', ':'), ensure_ascii=False), safe=''), safe='')

        current_page = 1
        while current_page <= self.total_pages and current_page <= max_pages:
            params_fore_obj = [{
                "IsFirstLoad": current_page == 1,
                "TabIndex": "1",
                "TabIdSuffix": "navtab",
                "CourseName": "",
                "CourseGradeKind": "undefined",
                "GroupClassID": "",
                "IsDIY": False
            }]
            common_json_obj = [{
                "IsMobile": False,
                "IsTablet": False,
                "ScreenWidth": 1512,
                "ScreenHeight": 322,
                "WindowScreenHeight": 322,
                "IsIOS": False,
                "ReturnUrl": UCOURSE_URL,
                "CurrentUrl": UCOURSE_URL,
                "IsInWeixin": False,
                "IP": "",
                "Location": '{"region":"","city":"","country":""}'
            }]
            page_info_obj = [{
                "IFlag": 0,
                "CurrentPage": current_page,
                "PageSize": page_size,
                "OrderByDefault": "LastUseTime desc, UCourseLastUpTime desc",
                "StrCondition": "",
                "TotalCount": -1,
                "ShowInDiv": "datacontent",
                "ShowInDivPaging": "",
                "IsPreLoad": 0,
                "IsAppendHtml": False,
                "PageJsCommon": "NenverLoadHtml"
            }]

            data = (
                'ParamsBack=&'
                f'ParamsFore={enc(params_fore_obj)}&'
                'ParamOrderByList=&'
                'ParamsOther=&'
                f'CommonJsonPara={urlp.quote(json.dumps(common_json_obj, separators=(",", ":"), ensure_ascii=False), safe="")}&'
                f'ParamsPageInfo={enc(page_info_obj)}&'
                'PageJs=LoadUCourse&'
                'processFlag=LoadUCourse'
            )
            res = self.session.post(HANDLE_URL, headers=self._headers(UCOURSE_URL), data=data.encode('utf-8'), timeout=6000, verify=False)
            res.encoding = 'utf-8'
            parsed = self._double_json_loads(res.text)
            html = parsed.get('Html') or parsed.get('html') or ''
            paging_html = parsed.get('PagingHtml') or ''
            # Debug: sizes of both payload parts.
            logging.info(f"LoadUCourse: HtmlLen={len(html)}, PagingHtmlLen={len(paging_html)}")

            # Paging info: prefer PagingHtml for "共X条/Y页", fall back to Html.
            m_xy = re.search(r"共\s*(\d+)\s*条\s*/\s*(\d+)\s*页", paging_html)
            if not m_xy:
                m_xy = re.search(r"共\s*(\d+)\s*条\s*/\s*(\d+)\s*页", html)
            if m_xy:
                self.total_courses = int(m_xy.group(1))
                self.total_pages = int(m_xy.group(2))

            soup = BeautifulSoup(html, 'html.parser')
            # Last resort: try the paging bar embedded inside Html.
            if not m_xy:
                page_info_div = soup.select_one(".div-center-center")
                if page_info_div:
                    txt = page_info_div.get_text(' ', strip=True)
                    m_xy2 = re.search(r"共\s*(\d+)\s*条\s*/\s*(\d+)\s*页", txt)
                    if m_xy2:
                        self.total_courses = int(m_xy2.group(1))
                        self.total_pages = int(m_xy2.group(2))

            # Debug counts of card containers and key buttons.
            cnt_cards_datacontent = len(soup.select('#datacontent div.card'))
            cnt_cards_all = len(soup.select('div.card'))
            cnt_btn_class = len(soup.select("button[onclick*='LoadCourseGroupClass']"))
            cnt_btn_detail = len(soup.select("[onclick*='LoadCourseGroupDetail']"))
            logging.info(
                f"LoadUCourse: cards(#datacontent)={cnt_cards_datacontent}, cards(all)={cnt_cards_all}, "
                f"class_btn={cnt_btn_class}, detail_btn={cnt_btn_detail}"
            )

            # Parse the course cards (deduplicated; also handles the
            # "already expanded by default" shape — only the container's
            # first detail button is taken).
            page_count_this = 0
            if not hasattr(self, '_seen_course_keys'):
                self._seen_course_keys = set()
            class_page = 0
            detail_page = 0

            def parse_id_from_detail(call: str) -> str | None:
                # Use the first argument of the LoadCourseGroupDetail call
                # (or the first long hex token) as the course key.
                qa = re.findall(r"'([^']*)'", call)
                hx = re.findall(r"[0-9A-F]{16,}", call, flags=re.I)
                if qa:
                    return qa[0]
                if len(hx) >= 1:
                    return hx[0]
                return None

            containers = soup.select('#datacontent div.card')
            if not containers:
                logging.warning("  ⚠️ 未匹配到 #datacontent div.card，回退到 div.card")
                containers = soup.select('div.card')

            for container in containers:
                try:
                    title = container.select_one('div.card-header.p-2 > div.card-title')
                    if not title:
                        continue
                    show_name = title.get_text(strip=True)
                    # Preferred shape: a "study this course" button.
                    btn = container.select_one("div[id^='div_groupdetail_'] button[onclick*='LoadCourseGroupClass']") or \
                          container.select_one("button[onclick*='LoadCourseGroupClass']")
                    if btn:
                        onclick = btn.get('onclick', '')
                        m = re.search(r"LoadCourseGroupClass\('([^']+)'\s*,\s*'([^']*)'\s*,\s*'[^']*'\s*,\s*'([^']*)'\s*,\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)", onclick)
                        if not m:
                            continue
                        cid, usestate, cname, wordcount, courseclassid, studydegreeid = m.groups()
                        display_name = show_name or urlp.unquote(cname)
                        key = f"class:{cid}"
                        if key in self._seen_course_keys:
                            continue
                        self._seen_course_keys.add(key)
                        total = int(wordcount) if wordcount.isdigit() else None
                        courses.append({
                            'mode': 'class',
                            'name': display_name,
                            'course_name': cname,
                            'id': cid,
                            'usestate': usestate,
                            'word_total': total,
                            'courseclassid': courseclassid,
                            'studydegreeid': studydegreeid,
                        })
                        page_count_this += 1
                        class_page += 1
                        continue
                    # No class button, but the container already exposes a
                    # "show detail" onclick (card expanded by default).
                    detail_el = container.select_one("[onclick*='LoadCourseGroupDetail']")
                    if detail_el:
                        onclick_detail = detail_el.get('onclick', '')
                        cid2 = parse_id_from_detail(onclick_detail) or show_name
                        key = f"detail:{cid2}"
                        if key in self._seen_course_keys:
                            continue
                        self._seen_course_keys.add(key)
                        courses.append({
                            'mode': 'detail',
                            'name': show_name,
                            'course_name': show_name,
                            'detail_onclick': onclick_detail,
                            'id': cid2 or '',
                            'usestate': '071F06C7DB04AB16',
                            'word_total': None,
                            'courseclassid': '1',
                            'studydegreeid': '2',
                        })
                        page_count_this += 1
                        detail_page += 1
                except Exception:
                    continue

            # Accumulate the running per-mode totals (was missing before).
            class_total += class_page
            detail_total += detail_page
            # Per-page statistics log with mode breakdown.
            logging.info(f"📄 第 {current_page}/{self.total_pages} 页，解析到 {page_count_this} 条（class={class_page}, detail={detail_page}），本次累计 {len(courses)} 条")
            current_page += 1

        # Final consistency check against the paging bar.
        if self.total_courses:
            if len(courses) != self.total_courses:
                logging.warning(
                    f"  ⚠️ 课程总量与分页不一致：分页共 {self.total_courses} 条/{self.total_pages} 页，实际解析 {len(courses)} 条；"
                    f"差异={len(courses) - self.total_courses}，class累计={class_total}, detail累计={detail_total}"
                )
            else:
                logging.info(f"📚 课程总数：{self.total_courses} 条；总页数：{self.total_pages}（每页 {self.page_size} 条）；class累计={class_total}, detail累计={detail_total}")
        else:
            logging.info(f"📚 课程总数：{len(courses)} 条（分页条缺失或为空，按解析结果为准）；class累计={class_total}, detail累计={detail_total}")
        return courses

    # ---------- 第1步：LoadCourseGroupClass ----------
    def load_course_group_class(self, course: Dict[str, Any]) -> str:
        """POST LoadCourseGroupClass for one course and return the raw HTML.

        Unlike most handler endpoints, this one answers with plain HTML
        rather than JSON. The response is also dumped under tmp_api_html/
        for offline debugging; dump failures are ignored.
        """
        common_para = json.dumps([{
            'IsMobile': False,
            'IsTablet': False,
            'ScreenWidth': 1512,
            'ScreenHeight': 701,
            'WindowScreenHeight': 701,
            'IsIOS': False,
            'ReturnUrl': UCOURSE_URL,
            'CurrentUrl': UCOURSE_URL,
            'IsInWeixin': False,
            'IP': '',
            'Location': '{"region":"","city":"","country":""}'
        }], separators=(',', ':'), ensure_ascii=False)
        payload = {
            'ID': course['id'],
            'UseState': course['usestate'],
            'BuyHtml': '',
            'CourseName': urlp.quote(course['course_name'], safe=''),
            'WordCount': str(course['word_total'] or ''),
            'CourseClassID': str(course['courseclassid']),
            'StudyDegreeID': str(course['studydegreeid']),
            'IsDIY': 'false',
            'WordBaseKind': '0',
            'IsDefCourse': '0',
            'CommonJsonPara': common_para,
            'IsMobile': 'false',
            'IsRandomGet': '1',
            'FuncSwitch': '0',
            'QuesByDef': '0',
            'PageJs': 'LoadCourseGroupClass',
            'processFlag': 'LoadCourseGroupClass'
        }
        response = self.session.post(HANDLE_URL, headers=self._headers(UCOURSE_URL), data=payload, timeout=6000, verify=False)
        response.encoding = 'utf-8'
        page_html = response.text  # endpoint returns the HTML fragment directly
        logging.info(f"  📥 LoadCourseGroupClass 返回 HTML 长度={len(page_html)}")
        if len(page_html) < 1000:
            logging.warning("  ⚠️ LoadCourseGroupClass 返回异常短，完整HTML如下：\n" + page_html)
        # Best-effort debug dump of the raw response.
        try:
            os.makedirs('tmp_api_html', exist_ok=True)
            dump_path = os.path.join('tmp_api_html', f"class_{course['id']}.html")
            with open(dump_path, 'w', encoding='utf-8') as dump_file:
                dump_file.write(page_html)
        except Exception:
            pass
        return page_html



    # ---------- 第2步：LoadCourseGroupDetail ----------
    def load_course_group_detail(self, class_html: str, course: Dict[str, Any]) -> str:
        """POST LoadCourseGroupDetail and return HTML holding the directory
        entries (GotoUrl('ubrowse.aspx?...') links).

        Candidate onclick calls are gathered from *class_html* (DOM
        attribute scan first, raw-regex fallback) and, for 'detail'-mode
        courses, from course['detail_onclick'], which is tried first.
        Each candidate is tried in order until one response actually
        contains a directory entry. Returns '' when none does.
        """
        # Collect "show detail" onclick invocations (multiple strategies).
        onclick_calls = []
        if class_html:
            soup = BeautifulSoup(class_html, 'html.parser')
            for el in soup.select("[onclick*='LoadCourseGroupDetail']"):
                oc = el.get('onclick', '')
                if oc:
                    onclick_calls.append(oc)
            if not onclick_calls:
                # Fallback: raw regex over the HTML in case the DOM scan missed it.
                m_call = re.findall(r"LoadCourseGroupDetail\([^)]*\)", class_html)
                onclick_calls.extend(m_call or [])
        # If the course carries its own detail_onclick (card expanded by
        # default), put it at the front of the candidate list.
        if course.get('mode') == 'detail' and course.get('detail_onclick'):
            onclick_calls.insert(0, course['detail_onclick'])
        if not onclick_calls:
            logging.warning("  ⚠️ 未找到 LoadCourseGroupDetail 按钮/参数，当前页面所有[onclick]如下：")
            # Dump every onclick on the page to help diagnose the miss.
            try:
                soup_all = BeautifulSoup(class_html, 'html.parser')
                all_oc = [el.get('onclick','') for el in soup_all.select('[onclick]') if el.get('onclick')]
                for i, oc in enumerate(all_oc[:50], 1):
                    logging.info(f"    onclick[{i}]: {oc}")
                if len(all_oc) > 50:
                    logging.info(f"    ...共 {len(all_oc)} 个 onclick（已截断）")
            except Exception:
                pass
            return ''

        def parse_id_gid(call: str) -> tuple[str, str] | tuple[None, None]:
            # Extract (ID, GroupClassID) from an onclick call: prefer quoted
            # arguments, fall back to long hex tokens. Heuristic — the
            # argument layout of the call is not fully known.
            quoted_args = re.findall(r"'([^']*)'", call)
            hex_args = re.findall(r"[0-9A-F]{16,}", call, flags=re.I)
            if len(quoted_args) >= 3:
                id0 = quoted_args[0]
                gid = hex_args[1] if len(hex_args) >= 2 else quoted_args[2]
                return id0, gid
            if len(hex_args) >= 2:
                return hex_args[0], hex_args[1]
            return None, None

        # Try each candidate until a response contains a directory entry,
        # i.e. GotoUrl('ubrowse.aspx...').
        for call in onclick_calls:
            id0, gid = parse_id_gid(call)
            if not id0 or not gid:
                continue
            data = {
                'ID': id0,
                'IsDiy': '0',
                'GroupClassID': gid,
                'UseState': course['usestate'],
                'BuyHtml': '',
                'CourseName': urlp.quote(course['course_name'], safe=''),
                'CourseClassID': str(course['courseclassid']),
                'StudyDegreeID': str(course['studydegreeid']),
                'CourseGroupShowMethod': '0',
                'IsDefCourse': '0',
                'FuncSwitch': '0',
                'QuesByDef': '0',
                'IsRandomGet': '1',
                'WordBaseKind': '0',
                'CommonJsonPara': json.dumps([{
                    'IsMobile': False,
                    'IsTablet': False,
                    'ScreenWidth': 1512,
                    'ScreenHeight': 701,
                    'WindowScreenHeight': 701,
                    'IsIOS': False,
                    'ReturnUrl': UCOURSE_URL,
                    'CurrentUrl': UCOURSE_URL,
                    'IsInWeixin': False,
                    'IP': '',
                    'Location': '{"region":"","city":"","country":""}'
                }], separators=(',', ':'), ensure_ascii=False),
                'ReturnUrl': UCOURSE_URL,
                'IsMobile': 'false',
                'PageJs': 'LoadCourseGroupDetail',
                'processFlag': 'LoadCourseGroupDetail'
            }
            res = self.session.post(HANDLE_URL, headers=self._headers(UCOURSE_URL), data=data, timeout=6000, verify=False)
            res.encoding = 'utf-8'
            html = res.text
            # Quick check: does the response contain a directory entry?
            if "GotoUrl('ubrowse.aspx" in html:
                logging.info(f"  📥 LoadCourseGroupDetail 成功（GroupClassID={gid}）HTML 长度={len(html)}")
                try:
                    os.makedirs('tmp_api_html', exist_ok=True)
                    with open(os.path.join('tmp_api_html', f"detail_{course['id']}_{gid}.html"), 'w', encoding='utf-8') as f:
                        f.write(html)
                except Exception:
                    pass
                return html
            else:
                # Debug: if it looks like a login page / expired session,
                # log the first 500 characters.
                if self._is_login_or_fallback_html(html):
                    logging.warning("  ⚠️ LoadCourseGroupDetail 疑似会话失效/登录页，返回前500字符：\n" + html[:500])
                logging.info(f"  ⏭️  GroupClassID={gid} 返回不含目录入口，尝试下一个")

        logging.warning("  ⚠️ 所有 LoadCourseGroupDetail 调用均未返回目录入口")
        return ''

    # ---------- 第3步：解析目录 GotoUrl ----------
    def extract_directories_from_html(self, detail_html: str) -> List[Dict[str, Any]]:
        """Parse directory entries out of LoadCourseGroupDetail HTML.

        Scans for the "浏览播放" (browse/play) eye icons whose onclick is
        GotoUrl('ubrowse.aspx?...'), then resolves each entry's name and
        expected word count ("共 xx") from the surrounding info-box, falling
        back to the enclosing card's title. Entries whose count cannot be
        determined are skipped — callers rely on expected_total for
        completeness checks.

        Returns a list of {'name': str, 'browse_url': str,
        'expected_total': int} dicts.
        """
        dirs: List[Dict[str, Any]] = []
        if not detail_html:
            return dirs
        soup = BeautifulSoup(detail_html, 'html.parser')
        # The clickable "eye" i element (browse/play) carries the GotoUrl call.
        for i_tag in soup.select("i[title='浏览播放'][onclick*='GotoUrl']"):
            onclick = i_tag.get('onclick', '')
            m = re.search(r"GotoUrl\('([^']+)'\)", onclick)
            if not m:
                continue
            url = f"{BASE}/a/{m.group(1)}"

            # Directory name and the "共 xx" count: prefer the info-box,
            # fall back to the card title.
            name = ''
            expected_total = None

            info_box = i_tag.find_parent(class_='info-box')
            if info_box:
                icon = info_box.select_one('.info-box-icon')
                if icon:
                    name = icon.get_text(strip=True)
                desc = info_box.select_one('.info-box-content .progress-description')
                if desc:
                    mcount = re.search(r"共\s*([0-9]+)", desc.get_text(strip=True))
                    if mcount:
                        expected_total = int(mcount.group(1))

            card = i_tag.find_parent('div', class_='card')
            if card and expected_total is None:
                # Parse "共 xx" out of the card title instead.
                card_title = card.select_one('div.card-title')
                if card_title:
                    title_text = card_title.get_text(' ', strip=True)
                    mcount2 = re.search(r"共\s*([0-9]+)", title_text)
                    if mcount2:
                        expected_total = int(mcount2.group(1))
                # Also try to pick up the directory name (the <b> tag).
                if not name and card_title:
                    bt = card_title.find('b')
                    if bt:
                        name = bt.get_text(strip=True)

            if not name:
                name = '未命名目录'

            # expected_total is mandatory — treat a missing count as an
            # anomaly and skip the directory.
            if expected_total is None:
                logging.warning(f"  ⚠️ 目录[{name}] 未能解析到‘共 xx’，跳过（URL: {url}）")
                continue

            entry = {'name': name, 'browse_url': url, 'expected_total': expected_total}
            dirs.append(entry)
        logging.info(f"  📂 解析到 {len(dirs)} 个目录入口（均包含总数）")
        return dirs

    # ---------- 第4步：通过 POST 拉单词（复用 test.py 逻辑） ----------
    def fetch_words_by_post(self, browse_url: str, course_name: str, directory_name: str, expected_total: int | None = None) -> List[Dict[str, Any]]:
        """Pull every word of one directory via paged POST GetBrowseRecord.

        *browse_url* is the ubrowse.aspx URL extracted from the directory's
        GotoUrl onclick; its query string supplies the request parameters.
        Each returned record is decrypted (decrypt_nenver) and parsed
        (get_html_content); the raw decrypted HTML is kept under
        'popup_html'. No de-duplication and no truncation happen here —
        the caller compares the count against *expected_total* and decides
        whether to retry.

        Fixes vs. previous revision: the first page now goes through the
        same retry path as later pages (it previously had none), and the
        duplicated request/decrypt/parse code for page 1 vs. pages 2..N is
        factored into shared helpers with identical payloads and logs.
        """
        words: List[Dict[str, Any]] = []
        # Parse the required identifiers from the directory URL.
        from urllib.parse import urlparse, parse_qs
        pu = urlparse(browse_url)
        q = parse_qs(pu.query)
        course_id = (q.get('courseid') or [''])[0]
        group_id = (q.get('groupid') or [''])[0]
        course_group_id = (q.get('coursegroupid') or [''])[0]
        course_class_id = (q.get('courseclassid') or [''])[0]
        group_class_id = (q.get('groupclassid') or [''])[0]
        study_degree_id = (q.get('studydegreeid') or [''])[0] or '2'
        memory_type = (q.get('memorytype') or [''])[0] or '0'
        grasp_degree = (q.get('graspdegree') or [''])[0] or '6'
        order_method = (q.get('ordermethod') or [''])[0] or '0'
        word_base_kind = (q.get('wordbasekind') or [''])[0] or '0'
        vocab_group_type = (q.get('vocabgrouptype') or [''])[0] or '0'
        # Abort early when a mandatory parameter is missing.
        required = {
            'courseid': course_id,
            'groupid': group_id,
            'coursegroupid': course_group_id,
            'courseclassid': course_class_id,
        }
        missing = [k for k, v in required.items() if not v]
        if missing:
            logging.error(f"    ❌ 目录URL缺少必要参数: {missing}，URL={browse_url}")
            return words

        # Request payload objects (doubly URL-encoded below) — built
        # strictly from the raw parameters extracted from the URL.
        params_fore_obj = [{
            "IsBackLook": False,
            "UserID": "",
            "CourseID": course_id,
            "GroupID": group_id,
            "CourseGroupID": course_group_id,
            "ToStudyCourseGroupIDs": course_group_id,
            "GroupClassID": group_class_id or "100",
            "VocabGroupType": vocab_group_type,
            "CourseName": urlp.quote(course_name, safe=''),
            "GroupName": urlp.quote(directory_name, safe=''),
            "IsDIY": False,
            "IsDefCourse": 0,
            "StudyDegreeID": study_degree_id,
            "CourseClassID": course_class_id,
            "WordBaseKind": word_base_kind,
            "StartTime": "",
            "EndTime": "",
            "MemoryType": memory_type,
            "ShowMethod": False,
            "SearchWord": "",
            "GraspDegree": grasp_degree,
            "OrderMethod": order_method
        }]
        common_json_obj = [{
            "IsMobile": False,
            "IsTablet": False,
            "ScreenWidth": 1512,
            "ScreenHeight": 291,
            "WindowScreenHeight": 291,
            "IsIOS": False,
            "ReturnUrl": urlp.quote(browse_url, safe=''),
            "CurrentUrl": urlp.quote(browse_url, safe=''),
            "IsInWeixin": False,
            "IP": "",
            "Location": '{"region":"","city":"","country":""}'
        }]
        page_info_obj = [{
            "IFlag": 72,
            "CurrentPage": 1,
            "PageSize": 100,
            "OrderByDefault": "",
            "StrCondition": "",
            "TotalCount": -1,
            "ShowInDiv": "",
            "ShowInDivPaging": "",
            "IsPreLoad": 0,
            "ParamsForeJs": "IParamForeUBrowse",
            "LoadHtmlSuccessJs": "ILoadHtmlSuccessUBrowse",
            "IsAppendHtml": False,
            "PageJsCommon": "CommonLoadHtml"
        }]

        def enc(v: Any) -> str:
            # The handler expects doubly URL-encoded compact JSON.
            return urlp.quote(urlp.quote(json.dumps(v, separators=(',', ':'), ensure_ascii=False), safe=''), safe='')

        headers = self._headers(browse_url)

        def build_body() -> str:
            # page_info_obj[0]['CurrentPage'] is mutated before each call.
            return (
                'ParamsBack=&'
                f'ParamsFore={enc(params_fore_obj)}&'
                'ParamOrderByList=&'
                'ParamsOther=&'
                f'CommonJsonPara={urlp.quote(json.dumps(common_json_obj, separators=(",", ":"), ensure_ascii=False), safe="")}&'
                f'ParamsPageInfo={enc(page_info_obj)}&'
                'PageJs=GetBrowseRecord&'
                'processFlag=GetBrowseRecord'
            )

        def post_page(page: int) -> Dict[str, Any]:
            # POST one page, retrying transient ChunkedEncodingError up to 3
            # times; any other exception is logged and re-raised.
            max_retries = 3
            retry_count = 0
            while True:
                try:
                    r = self.session.post(HANDLE_URL, headers=headers, data=build_body().encode('utf-8'), timeout=6000, verify=False)
                    r.encoding = 'utf-8'
                    return self._double_json_loads(r.text)
                except requests.exceptions.ChunkedEncodingError as e:
                    retry_count += 1
                    logging.warning(f"      ⚠️ 第{page}页请求失败 (尝试 {retry_count}/{max_retries}): {e}")
                    if retry_count >= max_retries:
                        raise
                    time.sleep(1)  # brief backoff before retrying
                except Exception as e:
                    logging.error(f"      ⚠️ 第{page}页请求出现未知错误: {e}")
                    raise

        def collect_words(parsed: Dict[str, Any]) -> int:
            # Decrypt + parse every record on one page; returns how many
            # word dicts were appended to `words`.
            added = 0
            html_arr = parsed.get('Html')
            if html_arr:
                for it in json.loads(html_arr):
                    enc_html = it.get('Html')
                    if not enc_html:
                        continue
                    dec = decrypt_nenver(enc_html)
                    wd = get_html_content(dec)
                    if not wd:
                        continue
                    wd['popup_html'] = dec
                    words.append(wd)
                    added += 1
            return added

        total_pages = 1
        cumulative = 0
        current = 1
        while current <= total_pages:
            page_info_obj[0]['CurrentPage'] = current
            parsed = post_page(current)
            if current == 1:
                # Total page count comes from the first response's paging bar.
                m = re.search(r'/\s*(\d+)页', parsed.get('PagingHtml', ''))
                if m:
                    total_pages = int(m.group(1))
            page_added = collect_words(parsed)
            cumulative += page_added
            logging.info(f"      - 请求第{current}/{total_pages}页，返回{page_added}条记录；累计已获取{cumulative}条记录/期望{expected_total if expected_total is not None else '?'}")
            if current > 1:
                time.sleep(0.2)  # be gentle with the server between pages
            current += 1

        logging.info(f"    ✅ 目录词汇获取完成：{len(words)} 个")
        # Not truncated here; the caller decides via expected_total whether to retry.
        return words

    # ---------- 保存 ----------
    def save_course(self, course_name: str, directories_data: List[Dict[str, Any]]):
        safe = re.sub(r'[<>:"/\\|?*]', '_', course_name)
        json_path = os.path.join(self.output_dir, f"{safe}.json")
        skip_path = os.path.join(self._skip_dir, f"{safe}.skip")

        total_words = sum(len(d.get('words', [])) for d in directories_data)
        total_dirs = len(directories_data)
        # 如果所有目录都没有获取到任何单词，则不保存 JSON，仅写 skip 标记
        if total_words == 0:
            try:
                with open(skip_path, 'w', encoding='utf-8') as sf:
                    sf.write(time.strftime('%Y-%m-%d %H:%M:%S'))
            except Exception:
                pass
            self._processed.add(safe)
            logging.warning(f"⚠️ 课程 [{course_name}] 未获取到任何单词，跳过保存文件")
            return

        payload = {
            'schema_version': '2.0-nested',
            'schema': 'course -> directories[] -> words[]',
            'course_name': course_name,
            'total_directories': total_dirs,
            'total_words': total_words,
            'directories': directories_data,
            'crawl_time': time.strftime('%Y-%m-%d %H:%M:%S')
        }
        with open(json_path, 'w', encoding='utf-8') as f:
            json.dump(payload, f, ensure_ascii=False, indent=2)
        logging.info(f"💾 保存（schema=2.0-nested）：{json_path}")
        # 同时写 processed 集合与 skip 标记删除（如存在）
        self._processed.add(safe)
        try:
            if os.path.exists(skip_path):
                os.remove(skip_path)
        except Exception:
            pass

    # ---------- Main flow (batch crawl; already-crawled courses are skipped) ----------
    def run(self) -> None:
        """Crawl every course end-to-end.

        Steps: log in and seed cookies, enumerate all courses, then for each
        course not yet processed load its directory listing (with one
        re-login retry on apparent session expiry), fetch and de-duplicate
        the words of every directory, and save the course JSON after each
        directory so progress survives interruption.
        """
        # 1) Log in and seed session cookies.
        self.session = login()
        self._seed_cookies()
        # 2) Fetch the course list (also records total pages / total count).
        courses = self.fetch_courses(page_size=100)
        logging.info(f"📚 一共有 {self.total_courses} 门课程，共 {self.total_pages} 页（每页 {self.page_size} 条）")

        # 3) Process each course; skip any whose output JSON already exists.
        for idx, course in enumerate(courses, start=1):
            safe = re.sub(r'[<>:"/\\|?*]', '_', course['name'])
            out_path = os.path.join(self.output_dir, f"{safe}.json")
            skip_path = os.path.join(self._skip_dir, f"{safe}.skip")
            if os.path.exists(out_path) or os.path.exists(skip_path) or (safe in self._processed):
                logging.info(f"⏭️  跳过（已处理或已存在）：{course['name']}")
                continue

            logging.info(f"🔄 处理第 {idx}/{len(courses)} 门课程：{course['name']}")
            # LoadCourseGroupClass, or LoadCourseGroupDetail directly
            # (courses in 'detail' mode expand their first volume by default).
            if course.get('mode') == 'detail':
                html1 = ''  # no class-level request needed
                html2 = self.load_course_group_detail('', course)
            else:
                html1 = self.load_course_group_class(course)
                html2 = self.load_course_group_detail(html1, course)

            # If the response looks like a login/fallback page, the session has
            # probably expired: re-login once and retry loading the directory.
            if self._is_login_or_fallback_html(html2 or '') or (not html2 and self._is_login_or_fallback_html(html1 or '')):
                logging.warning("  ⚠️ 可能会话过期/返回登录页，尝试重新登录并重试加载课程目录")
                self._log_session_cookies("重登前")
                self._relogin_and_seed()
                self._log_session_cookies("重登后")
                if course.get('mode') == 'detail':
                    html1 = ''
                    html2 = self.load_course_group_detail('', course)
                else:
                    html1 = self.load_course_group_class(course)
                    html2 = self.load_course_group_detail(html1, course)

            # Extract directory entries and pull the words for each of them.
            directories = self.extract_directories_from_html(html2 or html1)
            if not directories:
                logging.warning(f"  ⚠️ 未解析到目录入口：{course['name']}")
                # Save an empty structure (this writes a skip marker) so the
                # course is not retried on the next run.
                self.save_course(course['name'], [])
                continue
            data = []
            for d in directories:
                expected = d.get('expected_total')
                logging.info(f"  📁 目录：{d['name']}（页面共 {expected if expected is not None else '?'} 个）")

                # First fetch attempt.
                words = self.fetch_words_by_post(d['browse_url'], course['name'], d['name'], expected_total=None)

                # Verify and retry: if the count disagrees with the page,
                # re-parse the onclick handler and issue a fresh request.
                if expected is not None and len(words) != expected:
                    logging.warning(f"  ⚠️ 目录[{d['name']}] 数量不一致：页面{expected}，抓到{len(words)}，准备重试")
                    # Reset: carry nothing over from the previous attempt.
                    words = []
                    # Refine via reload: re-fetch the detail HTML, re-extract
                    # the directory list, and take this directory's latest browse_url.
                    html2_retry = self.load_course_group_detail(html1, course)
                    dirs_retry = self.extract_directories_from_html(html2_retry or html1)
                    # Match the same directory by name (first hit wins on duplicates).
                    target = next((x for x in dirs_retry if x.get('name') == d['name']), None)
                    if target:
                        words = self.fetch_words_by_post(target['browse_url'], course['name'], target['name'], expected_total=None)
                        logging.info(f"  🔁 重试完成：抓到 {len(words)} 条")
                    else:
                        logging.warning("  ⚠️ 重试时未找到同名目录，跳过")

                # De-duplicate by word name, preserving first occurrence.
                dedup = []
                seen = set()
                for w in words:
                    k = w.get('word_name')
                    if not k or k in seen:
                        continue
                    seen.add(k)
                    dedup.append(w)
                words = dedup

                data.append({'directory_name': d['name'], 'expected_total': expected, 'word_count': len(words), 'words': words})
                # Save immediately so progress survives interruption.
                self.save_course(course['name'], data)


def main():
    """Entry point: construct the API crawler and execute the full crawl."""
    APICrawler().run()


if __name__ == '__main__':
    # Run the crawler only when executed as a script, not when imported.
    main()