from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException  # 引入TimeoutException
from bs4 import BeautifulSoup
import time
import random
import os  # 用于创建文件夹

# -----------------------------------------------------------------------------
# !!! IMPORTANT: set the absolute path of your manually downloaded ChromeDriver
# here. Make sure the path is correct and the chromedriver binary has execute
# permission (macOS/Linux).
# -----------------------------------------------------------------------------
CHROME_DRIVER_MANUAL_PATH = "/Users/promoriarty/PycharmProjects/xuexi-crawler/chromedriver-mac-x64/chromedriver"
# -----------------------------------------------------------------------------
# Directory where per-page HTML snapshots are written for offline debugging.
DEBUG_HTML_DIR = "debug_html_pages"
# exist_ok=True avoids the check-then-create race of os.path.exists + makedirs.
os.makedirs(DEBUG_HTML_DIR, exist_ok=True)


# -----------------------------------------------------------------------------
def setup_driver_old_selenium(driver_executable_path):
    """Create and return a headless Chrome WebDriver via the legacy Selenium API.

    Uses the pre-Selenium-4 ``executable_path`` keyword rather than a
    ``Service`` object, hence "old selenium" in the name.

    Args:
        driver_executable_path (str): Absolute path to the ChromeDriver binary.

    Returns:
        webdriver.Chrome: An initialized WebDriver instance.

    Raises:
        Exception: Re-raises whatever the WebDriver constructor raised after
            printing a diagnostic message.
    """
    options = webdriver.ChromeOptions()
    options.add_argument('--headless')
    options.add_argument('--disable-gpu')
    options.add_argument('--no-sandbox')
    options.add_argument('--disable-dev-shm-usage')
    # BUG FIX: the original wrapped the UA value in literal single quotes
    # ("user-agent='Mozilla...'"), which made the quotes part of the
    # User-Agent header actually sent to the server. They are removed here.
    options.add_argument(
        "user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36")

    try:
        print(f"尝试使用 ChromeDriver 路径: {driver_executable_path}")
        driver = webdriver.Chrome(executable_path=driver_executable_path, options=options)
        print("WebDriver (旧版方式) 初始化成功。")
        return driver
    except Exception as e:
        print(f"初始化WebDriver (旧版方式) 失败，路径: {driver_executable_path}")
        print(f"错误: {e}")
        raise


def _choose_title(title_from_attr, title_from_text):
    """Pick the more informative of the two title candidates.

    Prefers the anchor's ``title=""`` attribute only when it is strictly longer
    than the visible link text (the attribute often carries the untruncated
    headline).

    Args:
        title_from_attr (str): Stripped value of the <a> tag's title attribute.
        title_from_text (str): Stripped visible text of the <a> tag.

    Returns:
        str: The chosen title (may be empty if both candidates are empty).
    """
    # The strictly-longer comparison also covers the "text empty, attribute
    # non-empty" case, so the original's extra elif branch was dead code.
    if title_from_attr and len(title_from_attr) > len(title_from_text):
        return title_from_attr
    return title_from_text


def scrape_single_page_data_selenium(driver, url, page_identifier):
    """Scrape every news title and date from a single CCDI list page.

    Args:
        driver: Selenium WebDriver instance.
        url (str): URL of the page to fetch.
        page_identifier (str): Identifier used to name debug HTML snapshots
            (e.g. "index", "index_1").

    Returns:
        list: List of dicts with 'title' and 'date' keys. Empty on any error,
        when a captcha page is detected, or when nothing could be parsed.
    """
    debug_file_path = os.path.join(DEBUG_HTML_DIR, f"debug_page_{page_identifier}.html")
    actual_sleep_time = 0
    try:
        driver.get(url)

        # NOTE: a WebDriverWait on the first item's title link
        # ("ul.list_news_dl2 li:first-child div.title a") would be the precise
        # way to wait for content; a randomized sleep is used instead to look
        # less bot-like and give JS time to render.
        actual_sleep_time = random.uniform(3, 6)
        print(f"为 {url} 等待 {actual_sleep_time:.2f} 秒...")
        time.sleep(actual_sleep_time)

        html_content = driver.page_source

        # Always snapshot the raw HTML so selector problems can be diagnosed
        # offline after the run.
        with open(debug_file_path, "w", encoding="utf-8") as f:
            f.write(html_content)
        print(f"完整 HTML 内容已保存到: {debug_file_path}")

        # Bail out early if the site served its anti-bot captcha page.
        if "seccaptcha.haplat.net" in html_content or "captchaPage" in html_content:
            print(f"警告: 在 {url} 检测到验证码页面，无法提取数据。")
            return []

        soup = BeautifulSoup(html_content, 'lxml')

        # Selectors match the observed page structure: a
        # <ul class="list_news_dl2"> with one direct <li> per news item.
        news_items_container = soup.find('ul', class_='list_news_dl2')
        if not news_items_container:
            print(f"警告: 在 {url} 未能找到新闻列表容器 (ul.list_news_dl2)。请检查 {debug_file_path}。")
            return []

        list_items = news_items_container.find_all('li', recursive=False)
        if not list_items:
            print(f"警告: 在 {url} 容器 (ul.list_news_dl2) 中未能找到新闻条目 (li)。请检查 {debug_file_path}。")
            return []

        page_results = []
        for item_li in list_items:
            title_div = item_li.find('div', class_='title')
            more_div = item_li.find('div', class_='more')
            # Skip malformed items that lack either the title or the date div.
            if not (title_div and more_div):
                continue

            title_tag = title_div.find('a')
            if not title_tag:
                continue

            title = _choose_title(title_tag.get('title', '').strip(),
                                  title_tag.get_text(strip=True))
            date_str = more_div.get_text(strip=True)

            if title and date_str:
                page_results.append({'title': title, 'date': date_str})

        if not page_results and list_items:
            print(f"警告: 在 {url} 找到了{len(list_items)}个列表项，但未能解析出任何标题和日期。请检查 {debug_file_path} 并仔细核对选择器。")

        return page_results

    except Exception as e:
        print(f"处理网页 {url} 内容时发生未知错误: {e}")
        import traceback
        traceback.print_exc()
        # Best effort: also save the HTML as it looked at the moment of failure.
        try:
            error_html_content = driver.page_source
            with open(os.path.join(DEBUG_HTML_DIR, f"error_page_{page_identifier}.html"), "w",
                      encoding="utf-8") as f_err:
                f_err.write(error_html_content)
            print(f"发生错误时的HTML内容已保存到 error_page_{page_identifier}.html")
        except Exception as save_err:
            print(f"尝试保存错误页面HTML时也发生错误: {save_err}")
        return []


if __name__ == "__main__":
    # Crawl the CCDI news list pages one by one and dump every extracted
    # title/date pair into a plain-text file.
    base_url = "https://www.ccdi.gov.cn/scdcn/zggb/zjsc/"
    total_pages = 19  # number of list pages to crawl
    all_news_items = []
    output_filename = "ccdi_news_extracted.txt"

    driver = None
    try:
        driver = setup_driver_old_selenium(CHROME_DRIVER_MANUAL_PATH)

        print(f"开始爬取中央纪委国家监委网站新闻，共 {total_pages} 页...")

        for page_num in range(total_pages):
            # Page 0 is index.html; every later page is index_<n>.html.
            page_name_for_debug = "index" if page_num == 0 else f"index_{page_num}"
            current_url = base_url + page_name_for_debug + ".html"

            print(f"\n正在爬取第 {page_num + 1}/{total_pages} 页: {current_url}")

            page_data = scrape_single_page_data_selenium(driver, current_url, page_name_for_debug)

            if page_data:
                all_news_items.extend(page_data)
                print(f"成功从第 {page_num + 1} 页获取 {len(page_data)} 条数据。")
            else:
                print(f"未能从第 {page_num + 1} 页获取数据或页面无内容/遇到验证码/解析失败。")

            # Polite randomized delay between pages (skipped after the last one).
            if page_num < total_pages - 1:
                sleep_time = random.uniform(4, 7)
                print(f"暂停 {sleep_time:.2f} 秒...")
                time.sleep(sleep_time)

    except Exception as e:
        print(f"爬虫主程序发生错误: {e}")
        import traceback

        traceback.print_exc()
    finally:
        # Always release the browser, even on failure.
        if driver:
            print("正在关闭WebDriver...")
            driver.quit()
            print("WebDriver已关闭。")

    if all_news_items:
        print(f"\n共爬取到 {len(all_news_items)} 条新闻。")
        print(f"正在将数据写入文件: {output_filename}")

        try:
            with open(output_filename, 'w', encoding='utf-8') as f:
                f.writelines(
                    f"标题: {item['title']} 时间: {item['date']}\n"
                    for item in all_news_items
                )
            print(f"数据成功保存到 {output_filename}")
        except IOError as e:
            print(f"写入文件时发生错误: {e}")
    else:
        print(f"\n未能获取到任何新闻条目。请检查 '{DEBUG_HTML_DIR}' 目录下的html文件以分析后续页面是否遇到验证码或其他问题。")