import json
import os
import time
import logging
import requests
import re
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from urllib.parse import urlparse, parse_qs, unquote, quote_plus, quote
from pa import login, get_html_content
from crypto import decrypt_nenver

# Configure root logging: INFO level, timestamped messages with severity.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


class EnglishCourseCrawler:
    def __init__(self):
        """Initialize the crawler: lazy driver/session, ensure the output directory."""
        self.driver = None        # Selenium driver, created in setup_driver_with_login()
        self.session = None       # requests session obtained from login()
        self.output_dir = "output"  # per-course JSON files are written here
        self.ensure_output_dir()

    def ensure_output_dir(self):
        """Create the output directory if it does not exist.

        Uses makedirs(exist_ok=True) instead of an exists() check followed
        by makedirs(), which is race-prone (TOCTOU) and raises if the
        directory appears between the check and the create.
        """
        os.makedirs(self.output_dir, exist_ok=True)

    def setup_driver_with_login(self):
        """Log in via pa.login(), start Chrome, and copy the session cookies into it.

        Returns True on success, False on any failure (logged).
        """
        try:
            logging.info("正在登录...")
            self.session = login()

            # Snapshot the cookies produced by the requests-based login.
            cookies = {c.name: c.value for c in self.session.cookies}
            for name in cookies:
                logging.info(f"获取到cookie: {name}")

            chrome_opts = Options()
            # chrome_opts.add_argument('--headless')  # keep the window visible while debugging
            chrome_opts.add_argument(
                'user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36')
            for flag in ('--no-sandbox', '--disable-dev-shm-usage', '--disable-gpu'):
                chrome_opts.add_argument(flag)

            # Rely on the system-resolved chromedriver.
            self.driver = webdriver.Chrome(service=Service(), options=chrome_opts)

            # A page on the target domain must be open before cookies can be set.
            self.driver.get("https://les.znjiyi.com/a/Login.aspx")
            time.sleep(2)

            # Session-level cookies need the parent-domain scope.
            domain_scoped = ('Lenwi_ZNJY_SessionId', 'HasGetTopMenu', 'HasGetSliderMenu')
            for name, value in cookies.items():
                payload = {"name": name, "value": value}
                if name in domain_scoped:
                    payload["domain"] = ".znjiyi.com"
                try:
                    self.driver.add_cookie(payload)
                    logging.info(f"成功添加cookie: {name}")
                except Exception as e:
                    logging.warning(f"添加cookie {name} 失败: {e}")

            return True

        except Exception as e:
            logging.error(f"设置驱动和登录失败: {e}")
            return False

    def access_course_page(self):
        """Open the course list page; fail if the server bounces us to login.

        Also attempts to switch the listing to 100 entries per page.
        Returns True when the course page loaded, False otherwise.
        """
        try:
            logging.info("正在访问课程页面...")
            self.driver.get("https://les.znjiyi.com/a/ucourse.aspx")
            time.sleep(5)

            current_url = self.driver.current_url
            # A redirect back to Login.aspx means the session is not valid.
            redirected = "Login.aspx" in current_url
            if redirected:
                logging.error(f"被重定向到登录页面: {current_url}")
                return False

            logging.info(f"成功访问课程页面: {current_url}")
            self.set_page_size_to_100()
            return True

        except Exception as e:
            logging.error(f"访问课程页面失败: {e}")
            return False

    def set_page_size_to_100(self):
        """Switch the list's page-size selector to 100 items per page.

        Tries several candidate CSS selectors for the <select> control and
        clicks its "100" option when found.

        Returns:
            True when the option was clicked, False when no selector with a
            "100" option exists (the page keeps its default page size).
        """
        try:
            # Candidate CSS selectors for the page-size <select> control.
            page_size_selectors = [
                'select[onchange*="PageSize"]',
                'select[name*="pagesize"]',
                'select[id*="pagesize"]',
                '.form-control'
            ]

            for selector in page_size_selectors:
                try:
                    select_elements = self.driver.find_elements(By.CSS_SELECTOR, selector)
                    for select_elem in select_elements:
                        options = select_elem.find_elements(By.TAG_NAME, 'option')
                        option_values = [opt.get_attribute('value') for opt in options]

                        if '100' in option_values:
                            for option in options:
                                if option.get_attribute('value') == '100':
                                    option.click()
                                    time.sleep(3)  # let the list reload
                                    logging.info("✅ 已设置每页显示100条")
                                    return True
                # Narrowed from a bare `except:` which also swallowed
                # SystemExit/KeyboardInterrupt.
                except Exception:
                    # Selector absent or element went stale — try the next one.
                    continue

            logging.warning("⚠️  未找到页面大小选择器，使用默认设置")
            return False

        except Exception as e:
            logging.error(f"设置页面大小时出错: {e}")
            return False

    def get_total_course_pages(self):
        """Return the total number of course-list pages.

        Parses the pagination text (e.g. "条，共110条/11页", or "共 X 页"),
        falling back to the highest numeric pagination button. Defaults to 1
        when nothing matches or any lookup fails.
        """
        try:
            # Pagination text looks like "条，共110条/11页".
            page_elements = self.driver.find_elements(By.XPATH,
                                                      '//div[contains(text(), "页") or contains(text(), "/")]')
            for elem in page_elements:
                text = elem.text
                # Preferred format: ".../11页"
                match = re.search(r'/(\d+)页', text)
                if match:
                    return int(match.group(1))
                # Fallback format: "共 11 页"
                match = re.search(r'共\s*(\d+)\s*页', text)
                if match:
                    return int(match.group(1))

            # Last resort: the highest numeric pagination button.
            page_buttons = self.driver.find_elements(By.CSS_SELECTOR, '.pagination .page-item')
            if page_buttons:
                for btn in reversed(page_buttons):
                    text = btn.text.strip()
                    if text.isdigit():
                        return int(text)
        # Narrowed from a bare `except: pass`, which also hid Ctrl-C.
        except Exception as e:
            logging.debug(f"解析总页数失败，使用默认值1: {e}")
        return 1

    def goto_course_page(self, page_num):
        """Navigate the course list to the given page number.

        Prefers a direct page-number link; otherwise clicks "下一页"
        page_num - 1 times. Returns True on (apparent) success, False when
        an exception occurred while clicking.
        """
        try:
            # Direct page-number link, if present.
            page_buttons = self.driver.find_elements(By.XPATH,
                                                     f'//a[contains(@class, "page-link") and text()="{page_num}"]')
            if page_buttons:
                ActionChains(self.driver).move_to_element(page_buttons[0]).click().perform()
                time.sleep(3)
                return True

            # No direct link — advance one page at a time via "下一页".
            for _ in range(page_num - 1):
                next_buttons = self.driver.find_elements(By.XPATH,
                                                         '//a[contains(@class, "page-link") and contains(text(), "下一页")]')
                if next_buttons:
                    ActionChains(self.driver).move_to_element(next_buttons[0]).click().perform()
                    time.sleep(2)
                else:
                    break
            return True
        # Narrowed from a bare `except:`; also record why navigation failed.
        except Exception as e:
            logging.warning(f"跳转课程页失败: {e}")
            return False

    def get_courses_on_current_page(self):
        """Collect the course cards on the current list page.

        Returns:
            list of dicts with keys 'name', 'index', 'card_element',
            'card_container'. Sub-group headers (常规分组 etc.) are skipped.
        """
        courses = []

        try:
            # Main course titles only — exclude anything inside the #nested detail area.
            course_cards = self.driver.find_elements(By.XPATH,
                                                     '//div[@class="card-header p-2"]/div[@class="card-title" and not(ancestor::div[@id="nested"])]')

            for i, card in enumerate(course_cards):
                try:
                    course_name = card.text.strip()
                    # Filter out sub-group headers.
                    if course_name and len(course_name) > 2 and not any(
                            x in course_name for x in ['常规分组', '按目录分组学习', '按字母分组学习']):
                        # Locate the card container that holds this title.
                        card_container = card.find_element(By.XPATH, './ancestor::div[contains(@class, "card")]')

                        course_info = {
                            'name': course_name,
                            'index': i,
                            'card_element': card,
                            'card_container': card_container
                        }
                        courses.append(course_info)
                        logging.info(f"  📚 发现课程: {course_name}")
                # Narrowed from a bare `except:`.
                except Exception:
                    # Stale element or missing ancestor — skip this card.
                    continue

        except Exception as e:
            logging.error(f"获取当前页课程时出错: {e}")

        return courses

    def expand_course_if_collapsed(self, course):
        """Expand a collapsed course card.

        Returns True when an expand click was performed, False when the card
        was already expanded or the attempt failed.
        """
        try:
            container = course['card_container']

            # A fa-plus icon on the collapse widget marks a collapsed card.
            plus_icons = container.find_elements(By.CSS_SELECTOR,
                                                 'button[data-card-widget="collapse"] i.fa-plus')

            if not plus_icons:
                logging.info(f"  ✅ 课程 {course['name']} 已展开")
                return False

            logging.info(f"  🔽 展开课程: {course['name']}")
            toggle = plus_icons[0].find_element(By.XPATH, "./parent::button")
            self.driver.execute_script("arguments[0].scrollIntoView({block: 'center'});", toggle)
            time.sleep(1)
            ActionChains(self.driver).move_to_element(toggle).click().perform()
            time.sleep(3)
            return True

        except Exception as e:
            logging.warning(f"展开课程时出错: {e}")
            return False

    def enter_course_study(self, course, was_expanded=False):
        """Enter study mode for a course, then open its detail listing.

        Clicks the course's "学习本课" button when present; either way it
        finishes by opening the "显示明细" view. Returns the result of
        click_directory_group_detail(), or False on error.
        """
        try:
            container = course['card_container']
            # "学习本课" button scoped to this course's card only.
            study_buttons = container.find_elements(By.XPATH,
                                                    './/button[contains(@onclick, "LoadCourseGroupClass") and contains(text(), "学习本课")]')

            if study_buttons:
                logging.info(f"  📖 点击学习本课: {course['name']}")
                target = study_buttons[0]
                self.driver.execute_script("arguments[0].scrollIntoView({block: 'center'});", target)
                time.sleep(1)
                ActionChains(self.driver).move_to_element(target).click().perform()
                time.sleep(3)

                # After entering, open "显示明细" under "按目录分组学习".
                return self.click_directory_group_detail()

            # No "学习本课" button: the course is already open, go straight
            # to the detail listing.
            if not was_expanded:
                logging.info(f"  🔍 查找显示明细按钮...")
            return self.click_directory_group_detail()

        except Exception as e:
            logging.error(f"进入课程学习时出错: {e}")
            return False

    def click_directory_group_detail(self):
        """Click the '显示明细' button under the '按目录分组学习' group.

        Falls back to trying every '显示明细' button on the page and
        verifying that the #nested detail area appeared afterwards.

        Returns:
            True when a detail view was opened, False otherwise.
        """
        try:
            # Method 1: locate the "按目录分组学习" card and its detail button.
            directory_group_titles = self.driver.find_elements(By.XPATH,
                                                               '//div[@class="card-title" and contains(text(), "按目录分组学习")]')

            for title in directory_group_titles:
                try:
                    card_container = title.find_element(By.XPATH, './ancestor::div[contains(@class, "card")]')
                    detail_buttons = card_container.find_elements(By.XPATH,
                                                                  './/button[contains(@onclick, "LoadCourseGroupDetail") and contains(text(), "显示明细")]')

                    if detail_buttons:
                        logging.info(f"  🔍 点击按目录分组学习的显示明细按钮")
                        btn = detail_buttons[0]
                        self.driver.execute_script("arguments[0].scrollIntoView({block: 'center'});", btn)
                        time.sleep(1)
                        ActionChains(self.driver).move_to_element(btn).click().perform()
                        time.sleep(3)
                        return True
                # Narrowed from a bare `except:`.
                except Exception:
                    # Missing ancestor card or stale element — try the next title.
                    continue

            # Method 2: try every detail button on the page and check that
            # the #nested element appears.
            logging.info("  🔍 未找到'按目录分组学习'，查找所有显示明细按钮...")
            detail_buttons = self.driver.find_elements(By.XPATH,
                                                       '//button[contains(@onclick, "LoadCourseGroupDetail") and contains(text(), "显示明细")]')

            for btn in detail_buttons:
                try:
                    logging.info(f"  🔍 尝试点击显示明细按钮...")
                    self.driver.execute_script("arguments[0].scrollIntoView({block: 'center'});", btn)
                    time.sleep(1)
                    ActionChains(self.driver).move_to_element(btn).click().perform()
                    time.sleep(3)

                    # Did the detail area render?
                    nested_elements = self.driver.find_elements(By.ID, 'nested')
                    if nested_elements:
                        logging.info(f"  ✅ 成功点击显示明细按钮")
                        return True

                    # Give the page a moment and re-check.
                    time.sleep(2)
                    nested_elements = self.driver.find_elements(By.ID, 'nested')
                    if nested_elements:
                        logging.info(f"  ✅ 成功点击显示明细按钮（延迟检测）")
                        return True
                except Exception as e:
                    logging.warning(f"  ⚠️  点击显示明细按钮失败: {e}")
                    continue

            logging.warning("  ⚠️  未找到可用的显示明细按钮")
            return False

        except Exception as e:
            logging.error(f"点击目录分组详情时出错: {e}")
            return False

    def get_course_directories(self, course):
        """Collect the browsable directories listed in the #nested detail area.

        Returns:
            list of dicts with keys 'name', 'browse_url' (absolute URL into
            /a/ubrowse.aspx) and 'element' (the clickable icon). Empty on
            any failure.
        """
        directories = []

        try:
            # Wait for the detail area to render.
            try:
                WebDriverWait(self.driver, 10).until(
                    EC.presence_of_element_located((By.ID, 'nested'))
                )
            # Narrowed from a bare `except:` (a TimeoutException here).
            except Exception:
                logging.warning("  ⚠️  未找到nested元素")
                return directories

            nested_element = self.driver.find_element(By.ID, 'nested')

            # Each directory row carries a "浏览播放" icon with a GotoUrl onclick.
            browse_buttons = nested_element.find_elements(By.CSS_SELECTOR, 'i[title="浏览播放"][onclick*="GotoUrl"]')

            for i, browse_btn in enumerate(browse_buttons):
                try:
                    onclick = browse_btn.get_attribute('onclick')
                    if onclick and 'ubrowse.aspx' in onclick:
                        # Pull the relative URL out of GotoUrl('...').
                        url_match = re.search(r"GotoUrl\('([^']+)'\)", onclick)
                        if url_match:
                            browse_url = url_match.group(1)

                            dir_name = self.extract_directory_name(browse_btn, i)

                            directory_info = {
                                'name': dir_name,
                                'browse_url': f"https://les.znjiyi.com/a/{browse_url}",
                                'element': browse_btn
                            }
                            directories.append(directory_info)
                            logging.info(f"    📁 找到目录: {dir_name}")
                # Narrowed from a bare `except:`.
                except Exception:
                    # Stale element or malformed onclick — skip this entry.
                    continue

        except Exception as e:
            logging.error(f"获取课程目录时出错: {e}")

        return directories

    def extract_directory_name(self, browse_btn, index):
        """Best-effort extraction of a directory's display name.

        Tries, in order: the bold card-title in the same table row, any
        ancestor card-title (excluding group headers), the first line of the
        row's text; falls back to a positional name ("目录<index+1>").
        """
        try:
            # Method 1: the <b> inside the card-title of the same row.
            parent_row = browse_btn.find_element(By.XPATH, "./ancestor::tr")

            try:
                card_title_b = parent_row.find_element(By.XPATH, ".//div[@class='card-title']//b")
                dir_name = card_title_b.text.strip()
                if dir_name and len(dir_name) > 1:
                    return dir_name
            # All bare `except:` clauses here narrowed to Exception.
            except Exception:
                pass

            # Method 2: any ancestor card-title, skipping group headers.
            try:
                card_titles = browse_btn.find_elements(By.XPATH, "./ancestor::*//div[@class='card-title']//b")
                for title in card_titles:
                    title_text = title.text.strip()
                    # Filter out unrelated group headers.
                    if title_text and len(title_text) > 1 and not any(
                            x in title_text for x in ['按目录分组学习', '常规分组', '按字母分组']):
                        return title_text
            except Exception:
                pass

            # Method 3: first line of the row's text.
            try:
                row_text = parent_row.text.strip()
                if row_text:
                    first_line = row_text.split('\n')[0].strip()
                    if first_line and len(first_line) > 1:
                        return first_line
            except Exception:
                pass

            # Positional fallback.
            return f"目录{index + 1}"

        except Exception as e:
            logging.warning(f"提取目录名称时出错: {e}")
            return f"目录{index + 1}"

    def crawl_directory_words_by_post(self, directory, course_name):
        """Fetch every word in a directory via the ExamLib_Process POST API.

        Opens the directory's browse page in the browser first (to establish
        server-side session state), issues one POST to discover the page
        count, then one POST per page, decrypting and parsing each word entry.

        Bug fixed: the per-page request block was previously dedented OUT of
        the paging loop, so it ran once after the loop with page_num equal to
        the last page — only the final page of words was ever fetched. The
        per-page work now lives inside the loop.

        Args:
            directory: dict with at least 'name' and 'browse_url'.
            course_name: display name of the enclosing course.

        Returns:
            list of word dicts (possibly empty on any failure).
        """
        words = []

        try:
            logging.info(f"    🚀 使用POST请求获取单词数据: {directory['browse_url']}")

            # Visit the page in the browser so the server session matches.
            logging.info(f"    🌐 先访问目录页面建立session...")
            self.driver.get(directory['browse_url'])
            time.sleep(3)

            # Pull the identifying GUIDs out of the browse URL.
            parsed_url = urlparse(directory['browse_url'])
            params = parse_qs(parsed_url.query)

            course_id = params.get('courseid', [''])[0]
            group_id = params.get('groupid', [''])[0]
            course_group_id = params.get('coursegroupid', [''])[0]
            course_class_id = params.get('courseclassid', [''])[0]

            if not all([course_id, group_id, course_group_id, course_class_id]):
                logging.warning(f"    ⚠️  URL参数不完整，跳过该目录")
                return words

            # Browser-identical request headers (mimics the site's own XHR).
            headers = {
                "host": "les.znjiyi.com",
                "sec-ch-ua-platform": '"macOS"',
                "x-requested-with": "XMLHttpRequest",
                "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36",
                "accept": "text/html, */*; q=0.01",
                "sec-ch-ua": '"Not)A;Brand";v="8", "Chromium";v="138", "Google Chrome";v="138"',
                "content-type": "application/x-www-form-urlencoded; charset=UTF-8",
                "sec-ch-ua-mobile": "?0",
                "origin": "https://les.znjiyi.com",
                "sec-fetch-site": "same-origin",
                "sec-fetch-mode": "cors",
                "sec-fetch-dest": "empty",
                "referer": directory['browse_url'],
                "accept-encoding": "gzip, deflate, br, zstd",
                "accept-language": "zh-CN,zh;q=0.9",
                "priority": "u=1, i"
            }

            logging.info(f"    🔍 POST请求参数:")
            logging.info(f"      CourseID: {course_id.lower()}")
            logging.info(f"      GroupID: {group_id.lower()}")
            logging.info(f"      CourseGroupID: {course_group_id.lower()}")
            logging.info(f"      CourseClassID: {course_class_id.lower()}")

            # Prefer the URL-encoded Chinese names from the URL when present.
            course_name_decoded = course_name
            directory_name_decoded = directory['name']
            try:
                if 'cname' in params:
                    course_name_decoded = unquote(params['cname'][0], encoding='utf-8')
                if 'gname' in params:
                    directory_name_decoded = unquote(params['gname'][0], encoding='utf-8')
            except Exception:
                pass

            # POST payload pieces, mirroring the browser's own request format.
            params_fore_obj = [{
                "IsBackLook": False,
                "UserID": "",
                "CourseID": course_id.lower(),
                "GroupID": group_id.lower(),
                "CourseGroupID": course_group_id.lower(),
                "ToStudyCourseGroupIDs": course_group_id.lower(),
                "GroupClassID": "100",
                "VocabGroupType": "6",
                "CourseName": course_name_decoded,
                "GroupName": directory_name_decoded,
                "IsDIY": False,
                "IsDefCourse": 0,
                "StudyDegreeID": "4",
                "CourseClassID": course_class_id.lower(),
                "WordBaseKind": 0,
                "StartTime": "",
                "EndTime": "",
                "MemoryType": "0",
                "ShowMethod": False,
                "SearchWord": "",
                "GraspDegree": "6",
                "OrderMethod": "0"
            }]

            common_json_obj = [{
                "IsMobile": False,
                "IsTablet": False,
                "ScreenWidth": 1512,
                "ScreenHeight": 796,
                "WindowScreenHeight": 796,
                "IsIOS": False,
                "ReturnUrl": directory['browse_url'],
                "CurrentUrl": directory['browse_url'],
                "IsInWeixin": False,
                "IP": "183.212.182.43",
                "Location": '{"region":"","city":"","country":""}'
            }]

            page_info_obj = [{
                "IFlag": 72,
                "CurrentPage": 1,
                "PageSize": 100,
                "OrderByDefault": "",
                "StrCondition": "",
                "TotalCount": -1,
                "ShowInDiv": "",
                "ShowInDivPaging": "",
                "IsPreLoad": 0,
                "ParamsForeJs": "IParamForeUBrowse",
                "LoadHtmlSuccessJs": "ILoadHtmlSuccessUBrowse",
                "IsAppendHtml": False,
                "PageJsCommon": "CommonLoadHtml"
            }]

            # These two parts never change between pages — encode once.
            params_fore_encoded = quote(json.dumps(params_fore_obj, separators=(',', ':'), ensure_ascii=False))
            common_json_encoded = quote(json.dumps(common_json_obj, separators=(',', ':'), ensure_ascii=False))

            def build_post_body():
                """Assemble the form body from the current page_info_obj state."""
                params_page_encoded = quote(json.dumps(page_info_obj, separators=(',', ':'), ensure_ascii=False))
                return (
                    'ParamsBack=&'
                    f'ParamsFore={params_fore_encoded}&'
                    'ParamOrderByList=&'
                    'ParamsOther=&'
                    f'CommonJsonPara={common_json_encoded}&'
                    f'ParamsPageInfo={params_page_encoded}&'
                    'PageJs=GetBrowseRecord&'
                    'processFlag=GetBrowseRecord'
                )

            session = self.session

            # NOTE(review): these hardcoded cookie values were copied from
            # test.py and overwrite the freshly obtained login cookies —
            # confirm they are still required/valid for the API to respond.
            session.cookies.set('ASP.NET_SessionId', 'sycgc4uwu2j1qs5ftuzm2a23')
            session.cookies.set('Lenwi_ZNJY_SessionId', 'e00ee726-fee2-44a5-8a33-85150a23cfff')
            session.cookies.set('last_classroom_enabbr', 'les')
            session.cookies.set('last_classroom_name', '%E4%B9%90%E5%B0%94%E6%80%9D')
            session.cookies.set('HasGetTopMenu', '0')
            session.cookies.set('HasGetSliderMenu', '0')
            session.cookies.set('MMForUStudy2', '101259839')
            session.cookies.set('user_ip_global', '183.212.182.43')
            session.cookies.set('user_location_global', '{"region":"","city":"","country":""}')

            url = 'https://les.znjiyi.com/a/Handle/ExamLib_Process.aspx'

            def post_page():
                """Send one API request; body manually UTF-8 encoded like test.py."""
                resp = session.post(url, headers=headers,
                                    data=build_post_body().encode('utf-8'),
                                    verify=False, timeout=30)
                resp.raise_for_status()
                resp.encoding = 'utf-8'
                return resp

            # First request: discover the total page count.
            response = post_page()

            # A date-prefixed body or "WordError" marks a server-side failure.
            if response.text.startswith("2025/") or response.text == "WordError":
                logging.error(f"    ❌ 服务器返回错误: {response.text}")
                return words

            # The body is a JSON object (the old dumps/loads/loads round-trip
            # was equivalent to a single loads of the response text).
            parsed_data = json.loads(response.text)
            paging_html = parsed_data.get('PagingHtml', '')

            total_pages = 1
            match = re.search(r'共(\d+)条/(\d+)页', paging_html)
            if match:
                total_num = int(match.group(1))
                total_pages = int(match.group(2))
                logging.info(f"    📄 通过POST请求检测到总条数: {total_num}, 总页数: {total_pages}")
            else:
                match = re.search(r'/(\d+)页', paging_html)
                if match:
                    total_pages = int(match.group(1))
                    logging.info(f"    📄 通过POST请求检测到 {total_pages} 页单词")

            # One request per page — this body is now INSIDE the loop.
            for page_num in range(1, total_pages + 1):
                logging.info(f"        📖 处理第 {page_num}/{total_pages} 页...")

                page_info_obj[0]['CurrentPage'] = page_num
                response = post_page()

                parsed_data = json.loads(response.text)
                html_obj_arr = parsed_data.get('Html')

                if html_obj_arr:
                    html_items = json.loads(html_obj_arr)
                    logging.info(f"        🔤 找到 {len(html_items)} 个单词")

                    for item in html_items:
                        encrypted_html = item.get('Html')
                        if not encrypted_html:
                            continue
                        try:
                            # Decrypt the entry, then parse the word fields
                            # out of the decrypted HTML (per pa.py).
                            decrypted_content = decrypt_nenver(encrypted_html)
                            word_data = get_html_content(decrypted_content)

                            if word_data:
                                word_data['popup_html'] = decrypted_content
                                words.append(word_data)
                                logging.info(f"        ✅ {word_data.get('word_name', 'Unknown')}")
                        except Exception as e:
                            logging.error(f"        ❌ 解析单词数据时出错: {e}")
                            continue

                time.sleep(0.5)  # throttle between pages

            logging.info(f"    ✅ POST请求完成，获取 {len(words)} 个单词")
            return words

        except Exception as e:
            logging.error(f"爬取目录单词时出错: {e}")
            return words

    def save_course_data(self, course_name, directories_data):
        """Write one course's directories and words to output/<course>.json."""
        try:
            # Replace characters that are illegal in filenames.
            safe_filename = re.sub(r'[<>:"/\\|?*]', '_', course_name)
            filepath = os.path.join(self.output_dir, f"{safe_filename}.json")

            total_words = sum(len(entry['words']) for entry in directories_data)

            # Output record in the agreed format.
            payload = {
                'course_name': course_name,
                'total_directories': len(directories_data),
                'total_words': total_words,
                'directories': directories_data,
                'crawl_time': time.strftime('%Y-%m-%d %H:%M:%S')
            }

            with open(filepath, 'w', encoding='utf-8') as f:
                json.dump(payload, f, ensure_ascii=False, indent=2)

            logging.info(f"💾 课程 '{course_name}' 数据已保存: {total_words} 个单词")

        except Exception as e:
            logging.error(f"保存课程数据时出错: {e}")

    def process_single_course(self, course, page_num, course_index):
        """Crawl one course end-to-end: relocate it, expand, enter, fetch, save.

        Returns True when the course's data was saved, False otherwise.
        """
        try:
            # Reload the list page so all element references are fresh.
            self.driver.get("https://les.znjiyi.com/a/ucourse.aspx")
            time.sleep(3)

            self.set_page_size_to_100()

            # Navigate back to the page the course lives on.
            if page_num > 1:
                self.goto_course_page(page_num)
                time.sleep(3)

            current_courses = self.get_courses_on_current_page()

            # Relocate the course: by index first, then by name.
            target_course = None
            if course_index < len(current_courses):
                candidate = current_courses[course_index]
                if candidate['name'] == course['name']:
                    target_course = candidate
                else:
                    target_course = next(
                        (c for c in current_courses if c['name'] == course['name']), None)

            if target_course is None:
                logging.warning(f"⚠️  未找到课程 '{course['name']}'，跳过")
                return False

            # Expand the card if collapsed, then enter study mode.
            was_expanded = self.expand_course_if_collapsed(target_course)

            if not self.enter_course_study(target_course, was_expanded):
                logging.warning(f"⚠️  无法进入课程学习: {course['name']}")
                return False

            directories = self.get_course_directories(target_course)
            if not directories:
                logging.warning(f"⚠️  课程 '{course['name']}' 没有找到目录")
                return False

            # Fetch words for every directory via the POST API.
            directories_data = []
            for j, directory in enumerate(directories):
                logging.info(f"  📁 处理目录 {j + 1}/{len(directories)}: {directory['name']}")

                words = self.crawl_directory_words_by_post(directory, course['name'])

                directories_data.append({
                    'directory_name': directory['name'],
                    'word_count': len(words),
                    'words': words
                })

                logging.info(f"  ✅ 目录 '{directory['name']}' 完成，获取 {len(words)} 个单词")

            self.save_course_data(course['name'], directories_data)
            return True

        except Exception as e:
            logging.error(f"处理单个课程时出错: {e}")
            return False

    def crawl_all_courses(self):
        """Top-level driver: log in, walk every list page, crawl every course.

        Failures of individual courses are logged and skipped; the browser
        is always closed in the finally block.
        """
        try:
            if not self.setup_driver_with_login():
                logging.error("登录失败，无法继续")
                return

            if not self.access_course_page():
                logging.error("访问课程页面失败，无法继续")
                return

            total_pages = self.get_total_course_pages()
            logging.info(f"📄 课程列表共有 {total_pages} 页（每页100条）")

            for page_num in range(1, total_pages + 1):
                logging.info(f"\n📖 处理第 {page_num}/{total_pages} 页课程...")

                # Page 1 is already displayed.
                if page_num > 1:
                    self.goto_course_page(page_num)
                    time.sleep(3)

                courses = self.get_courses_on_current_page()
                logging.info(f"📚 第 {page_num} 页找到 {len(courses)} 个课程")

                for i, course in enumerate(courses):
                    try:
                        logging.info(f"\n🔄 处理第 {page_num} 页第 {i + 1}/{len(courses)} 个课程: {course['name']}")

                        success = self.process_single_course(course, page_num, i)

                        if success:
                            logging.info(f"✅ 课程 '{course['name']}' 处理完成")
                        else:
                            logging.warning(f"⚠️  课程 '{course['name']}' 处理失败")

                    except Exception as e:
                        # logging.exception records the full traceback through
                        # the logging system, replacing the previous inline
                        # `import traceback; traceback.print_exc()`.
                        logging.exception(f"❌ 处理课程 '{course['name']}' 时出错: {e}")
                        continue

            logging.info("\n🎉 所有课程爬取完成！")

        except Exception as e:
            logging.error(f"爬取过程中出错: {e}")
        finally:
            if self.driver:
                self.driver.quit()

    def test_access(self):
        """Smoke test: log in, open the course page, look for '显示明细' buttons.

        Returns True when at least one detail button is found; always quits
        the browser before returning.
        """
        try:
            if not self.setup_driver_with_login():
                print("❌ 登录失败")
                return False

            if not self.access_course_page():
                print("❌ 访问课程页面失败")
                return False

            # Any node containing the detail-button label counts.
            detail_buttons = self.driver.find_elements(By.XPATH, "//*[contains(text(), '显示明细')]")
            print(f"🔍 找到 {len(detail_buttons)} 个'显示明细'按钮")

            if not detail_buttons:
                print("⚠️  没有找到'显示明细'按钮，页面结构可能不同")
                return False

            print("🎉 登录成功，可以开始爬取！")
            return True

        except Exception as e:
            logging.error(f"测试访问时出错: {e}")
            return False
        finally:
            if self.driver:
                self.driver.quit()


def main():
    """Entry point: run the smoke test when invoked with 'test', else crawl everything."""
    import sys

    crawler = EnglishCourseCrawler()
    run_smoke_test = len(sys.argv) > 1 and sys.argv[1] == 'test'
    if run_smoke_test:
        crawler.test_access()
    else:
        crawler.crawl_all_courses()


# Run only when executed as a script, not on import.
if __name__ == "__main__":
    main()