import requests
from bs4 import BeautifulSoup
import json
import time
import random
from selenium import webdriver
from selenium.webdriver.edge.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.edge.options import Options


def _parse_chinese_count(text):
    """Parse a Zhihu count string such as '1.2 万', '3K' or '456' into an int.

    Strips the '赞同' (upvote) label and thousands separators, then applies
    the 万 (×10,000) / K (×1,000) multiplier. Returns 0 when no number can
    be extracted.
    """
    cleaned = text.replace('赞同', '').replace(',', '').strip()
    multiplier = 1
    if '万' in cleaned:
        multiplier = 10000
        cleaned = cleaned.replace('万', '')
    elif 'k' in cleaned.lower():
        multiplier = 1000
        cleaned = cleaned.lower().replace('k', '')
    try:
        # float() first so fractional counts like '1.5万' parse correctly
        # (the old replace-based approach produced '1.50000' and raised).
        return int(float(cleaned) * multiplier)
    except ValueError:
        return 0


def get_zhihu_comments_selenium(question_url,
                                driver_path=r'C:\Program Files (x86)\Microsoft\Edge\Application\msedgedriver.exe'):
    """
    Scrape the first page of answers under a Zhihu question using Selenium (Edge).

    :param question_url: URL of the Zhihu question page.
    :param driver_path: filesystem path to msedgedriver.exe; defaults to the
        standard Edge install location on Windows.
    :return: list of dicts, one per answer, with keys 'author', 'content',
        'voteup_count' and 'comment_count'. Empty on failure.
    """
    all_comments = []

    # Path to the EdgeDriver binary (now parameterized; old hard-coded
    # default preserved for backward compatibility).
    service = Service(driver_path)

    edge_options = Options()
    # Uncomment the next line to run without a visible browser window.
    # edge_options.add_argument("--headless")

    # No user-data-dir / profile arguments: every run starts a fresh session.
    # edge_options.add_argument(r"user-data-dir=C:\Users\your_username\AppData\Local\Microsoft\Edge\User Data")
    # edge_options.add_argument("profile-directory=ZhihuScraper")

    # Spoof a regular desktop user agent and strip common automation
    # fingerprints so Zhihu's bot detection is less likely to block us.
    edge_options.add_argument(
        "user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0")
    edge_options.add_argument("--disable-blink-features=AutomationControlled")
    edge_options.add_argument("--disable-infobars")
    edge_options.add_argument("--disable-extensions")
    edge_options.add_argument("--no-sandbox")
    edge_options.add_argument("--disable-dev-shm-usage")
    edge_options.add_argument("window-size=1920,1080")

    driver = webdriver.Edge(service=service, options=edge_options)

    # Hide navigator.webdriver before any page script runs — a common
    # automation tell that anti-bot scripts check for.
    driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
        "source": """
            Object.defineProperty(navigator, 'webdriver', {
                get: () => undefined
            })
        """
    })

    print(f"开始使用 Selenium 爬取：{question_url}")

    try:
        driver.get(question_url)

        # Wait until at least one answer card has been rendered.
        WebDriverWait(driver, 20).until(
            EC.presence_of_element_located((By.CSS_SELECTOR, 'div.List-item'))
        )

        soup = BeautifulSoup(driver.page_source, 'html.parser')

        for answer_item in soup.find_all('div', class_='List-item'):
            content_div = answer_item.find('div', class_='RichContent-inner')
            content = content_div.get_text(separator='\n', strip=True) if content_div else ""

            author_tag = answer_item.find('a', class_='UserLink-link')
            author = author_tag.get_text(strip=True) if author_tag else "匿名用户"

            voteup_count = 0
            vote_button = answer_item.find('button', class_='VoteButton--up')
            if vote_button:
                # Prefer the dedicated Voters span; fall back to the button's
                # own label ("赞同 1.2 万") when the span is absent.
                voters_span = vote_button.find('span', class_='Voters')
                source = voters_span if voters_span else vote_button
                voteup_count = _parse_chinese_count(source.get_text(strip=True))

            # Fix: the old find(..., text=lambda ...) never matched — the
            # deprecated text= filter only fires on tags with a single string
            # child, and Zhihu's action buttons contain nested icon spans.
            # Match on one stable class and inspect the rendered label instead.
            comment_count = 0
            for button in answer_item.find_all('button', class_='ContentItem-action'):
                label = button.get_text(strip=True)
                if '评论' in label:
                    digits = ''.join(filter(str.isdigit, label))
                    if digits:
                        comment_count = int(digits)
                    break

            if content:
                all_comments.append({
                    'author': author,
                    'content': content,
                    'voteup_count': voteup_count,
                    'comment_count': comment_count
                })

    except Exception as e:
        # Best-effort scrape: report and return whatever was collected.
        print(f"爬取过程中发生错误：{e}")
    finally:
        driver.quit()

    return all_comments


if __name__ == "__main__":
    # Only the first results page of this question is scraped.
    zhihu_question_url = "https://www.zhihu.com/question/46863675"

    comments_data = get_zhihu_comments_selenium(zhihu_question_url)

    if not comments_data:
        print("未获取到任何回答内容。")
    else:
        output_filename = "zhihu_answers.json"
        # Persist the answers as pretty-printed UTF-8 JSON.
        with open(output_filename, 'w', encoding='utf-8') as f:
            json.dump(comments_data, f, ensure_ascii=False, indent=4)
        print(f"回答内容已保存到 {output_filename}")