# Part 3: main function and data handling (Person 3)
# person3_code.py
from person1_code import setup_driver
from person2_code import load_page_and_get_html, parse_answer_item
from bs4 import BeautifulSoup
import json


def get_zhihu_comments_selenium(question_url):
    """
    Scrape the answers under a Zhihu question with Selenium, treating the
    answers as a stand-in for comments. Only the first page is fetched.

    :param question_url: URL of the Zhihu question page.
    :return: list of parsed answer records (one entry per answer that
             parse_answer_item accepted); empty on any failure.
    """
    results = []
    browser = None

    try:
        browser = setup_driver()
        page_source = load_page_and_get_html(browser, question_url)
        dom = BeautifulSoup(page_source, 'html.parser')

        # Each 'List-item' div holds one answer; skip any the parser rejects.
        for node in dom.find_all('div', class_='List-item'):
            record = parse_answer_item(node)
            if not record:
                continue
            results.append(record)

    except Exception as e:
        # Top-level boundary for the whole scrape: report and fall through
        # so the browser is still shut down and partial results are returned.
        print(f"爬取过程中发生错误：{e}")
    finally:
        if browser:
            browser.quit()

    return results


if __name__ == "__main__":
    # Fetch first-page answers for one hard-coded question.
    zhihu_question_url = "https://www.zhihu.com/question/46863675"
    comments_data = get_zhihu_comments_selenium(zhihu_question_url)

    if not comments_data:
        print("未获取到任何回答内容。")
    else:
        # Persist as pretty-printed JSON, keeping CJK characters readable.
        output_filename = "zhihu_answers.json"
        with open(output_filename, 'w', encoding='utf-8') as f:
            json.dump(comments_data, f, ensure_ascii=False, indent=4)
        print(f"回答内容已保存到 {output_filename}")