# 爬取部分（后续要修改）
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
import time
import pymysql
# 导入Select类
from selenium.webdriver.support.ui import Select

# Create a WebDriver object.
# NOTE(review): `wd` and this test page are never used anywhere below —
# crawl_zhihu() creates its own driver. This looks like leftover
# experiment code; consider removing these two lines (they open a real
# browser window as a module-level side effect).
wd = webdriver.Chrome(service=Service(r'D:\tools\chromedriver-win64\chromedriver.exe'))
wd.get('https://www.byhy.net/cdn2/files/selenium/test2.html')

# NOTE(review): duplicate imports — `time` and `pymysql` are already
# imported at the top of the file; these two lines are redundant.
import time
import pymysql

# Database connection configuration (consumed by insert_data_to_db via
# pymysql.connect(**db_config)).
# NOTE(review): credentials are hard-coded in source; move them to
# environment variables or a config file before sharing or deploying.
db_config = {
    'host': 'localhost',
    'port': 3306,
    'user': 'root',
    'password': '123456',
    'database': 'test',
    'charset': 'utf8mb4'  # full Unicode (incl. emoji) for scraped answer text
}


def insert_data_to_db(user_id, voteup_count, comment_count, publication_time, content):
    """Insert one scraped answer into the ``answers_1`` table.

    Args:
        user_id: Author nickname (string; '未知' when unavailable).
        voteup_count: Upvote count (int, 0 when unparsable).
        comment_count: Comment count (int, 0 when unparsable).
        publication_time: Publish timestamp string from the page's
            ``datetime`` attribute — presumably ISO-8601, TODO confirm.
        content: Answer body text.

    Errors are reported to stdout; the function never raises — any
    ``pymysql.Error`` is caught so one bad row does not abort the crawl.
    """
    # Bind conn before the try block: if pymysql.connect() itself raises,
    # the original code hit a NameError in `finally` ("if conn:") that
    # masked the real database error.
    conn = None
    try:
        conn = pymysql.connect(**db_config)
        # Parameterized SQL: scraped text is untrusted input, so never
        # interpolate it into the statement string.
        sql = "INSERT INTO answers_1 (UserID, VoteupCount, CommentCount, PublicationTime, Content) VALUES (%s, %s, %s, %s, %s)"
        # `with` closes the cursor even if execute() fails.
        with conn.cursor() as cursor:
            cursor.execute(sql, (user_id, voteup_count, comment_count, publication_time, content))
        conn.commit()
        print("数据插入成功")
    except pymysql.Error as e:
        print(f"数据插入失败: {e}")
    finally:
        if conn is not None:
            conn.close()


def crawl_zhihu(url):
    """Scrape answers from a Zhihu question page and store them in MySQL.

    Opens the page in Chrome, scrolls to the bottom a few times to
    trigger lazy loading, then for each ``.AnswerItem`` extracts the
    author, upvote count, comment count, publish time and body text and
    hands the record to insert_data_to_db(). The browser is always
    closed on exit.

    Args:
        url: Zhihu question page URL, e.g. https://www.zhihu.com/question/<id>.
    """
    # Removed a dead local here: the original built an unused
    # Service('path/to/chromedriver') that was immediately shadowed by
    # the real driver below.
    # NOTE(review): chromedriver path is hard-coded; consider reading it
    # from configuration, or letting Selenium Manager resolve it.
    driver = webdriver.Chrome(service=Service(r'D:\tools\chromedriver-win64\chromedriver.exe'))
    driver.get(url)

    # Crude fixed wait for the initial render; WebDriverWait with an
    # expected condition would be more robust.
    time.sleep(5)

    try:
        # Scroll the page to load more answers (Zhihu lazy-loads them).
        for _ in range(3):  # adjust the scroll count as needed
            driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            time.sleep(3)

        # All answer cards currently in the DOM.
        answer_elements = driver.find_elements(By.CSS_SELECTOR, '.AnswerItem')

        for answer in answer_elements:
            # Per-answer try/except: one malformed card must not abort
            # the whole crawl.
            try:
                # NOTE(review): find_element() raises NoSuchElementException
                # instead of returning a falsy value, so the
                # `if element else ...` fallbacks below never trigger —
                # a missing element lands in the except clause instead.

                # Author nickname.
                user_id_element = answer.find_element(By.CSS_SELECTOR, '.AuthorInfo-name a')
                user_id = user_id_element.text if user_id_element else '未知'

                # Upvote count; button text may be non-numeric (e.g. a
                # localized label), in which case fall back to 0.
                voteup_count_element = answer.find_element(By.CSS_SELECTOR, '.VoteButton--up')
                voteup_count = int(voteup_count_element.text) if voteup_count_element.text.isdigit() else 0

                # Comment count: first whitespace-separated token of the
                # second action button's label.
                comment_count_element = answer.find_element(By.CSS_SELECTOR, '.ContentItem-actions button:nth-child(2)')
                comment_count_text = comment_count_element.text.split(' ')[0]
                comment_count = int(comment_count_text) if comment_count_text.isdigit() else 0

                # Publish time from the <time> element's datetime attribute.
                publication_time_element = answer.find_element(By.CSS_SELECTOR, '.ContentItem-time time')
                publication_time = publication_time_element.get_attribute(
                    'datetime') if publication_time_element else '未知'

                # Answer body text.
                content_element = answer.find_element(By.CSS_SELECTOR, '.RichContent-inner')
                content = content_element.text if content_element else '无内容'

                # Persist the record.
                insert_data_to_db(user_id, voteup_count, comment_count, publication_time, content)

            except Exception as e:
                print(f"提取回答信息失败: {e}")

    except Exception as e:
        print(f"爬取过程中出现错误: {e}")
    finally:
        # Always release the browser, even on failure.
        driver.quit()


if __name__ == "__main__":
    # 替换为你要爬取的知乎问题页面 URL
    url = 'https://www.zhihu.com/question/xxxxxx'
    crawl_zhihu(url)
