import traceback

from selenium.webdriver.chrome.webdriver import WebDriver

import mysql_connect
from selenium import webdriver
import os
import time
import re
from bs4 import BeautifulSoup
import random
from selenium.webdriver.common.by import By

class LabelData:
    """A level-2 label row: human-readable text plus its Zhihu topic id.

    Attributes:
        content: display text of the label.
        data_id: Zhihu topic id, used to build the topic URL.
    """

    def __init__(self, content: str, data_id: str):
        self.content = content
        self.data_id = data_id

    def __repr__(self) -> str:
        return f"LabelData(content={self.content}, data_id={self.data_id})"

    # str() and repr() are intentionally identical.
    __str__ = __repr__

def get_label_data():
    """Load all active (del_flag = 0) level-2 labels from the shane_label table.

    Returns:
        list[LabelData]: one entry per row; empty if the query fails.
    """
    label_list: list[LabelData] = []
    conn, cursor = mysql_connect.get_conn()
    try:
        sql = """
            SELECT
	            sl.content,
                sl.zhihu_data_id
            FROM
                shane_label AS sl
            WHERE
                sl.del_flag = 0
                AND sl.`level` = 2
        """
        cursor.execute(sql)
        rows = cursor.fetchall()
        label_list = [LabelData(content, data_id) for content, data_id in rows]
    except Exception:
        # Was a bare `except:`, which would also swallow KeyboardInterrupt/SystemExit.
        traceback.print_exc()
    finally:
        # Always release the connection, even on failure.
        cursor.close()
        conn.close()

    return label_list

def get_top_answers_list_html_one(label_data: LabelData, browser: WebDriver):
    """Fetch one topic's top-answers page and save its prettified HTML.

    Args:
        label_data: label whose ``data_id`` names the Zhihu topic.
        browser: an already-started Chrome WebDriver.

    Side effects: writes ``other_files/top_answers_list_htmls/<data_id>.html``.
    """
    the_url = f"https://www.zhihu.com/topic/{label_data.data_id}/top-answers"

    browser.get(the_url)
    # Randomized pauses to look less like a bot.
    time.sleep(random.uniform(2, 3))
    close_login(browser)
    # Scroll far down to trigger lazy loading of more answers.
    browser.execute_script("var q=document.documentElement.scrollTop=10000")
    time.sleep(random.uniform(2, 3))

    soup = BeautifulSoup(browser.page_source, "html.parser")
    html_text = soup.prettify()
    # Make sure the output directory exists; the original crashed with
    # FileNotFoundError on a fresh checkout.
    os.makedirs("other_files/top_answers_list_htmls", exist_ok=True)
    with open(f"other_files/top_answers_list_htmls/{label_data.data_id}.html", "w", encoding="utf-8") as w_file:
        w_file.write(html_text)

def close_login(browser: WebDriver):
    """Dismiss the Zhihu login modal if one is currently displayed.

    No-op when the modal is absent. Bug fix: the original indexed
    ``find_elements(...)[0]`` before checking the list, so an absent modal
    raised IndexError and the existence check was never reached.
    """
    modals = browser.find_elements(by=By.CLASS_NAME, value="Modal-inner")
    if modals:
        close_icons = browser.find_elements(by=By.CLASS_NAME, value="Modal-closeIcon")
        if close_icons:
            close_icons[0].click()
            # Short pause so the dismissal animation finishes before scrolling.
            time.sleep(random.uniform(1, 1.5))


def get_already_spider_list():
    """Return the data_ids whose top-answers HTML has already been saved.

    Returns:
        list[str]: file basenames (extension stripped) found in the output
        folder; empty if the folder does not exist yet.
    """
    folder_path = 'other_files/top_answers_list_htmls/'
    # First run: nothing has been crawled yet (original raised FileNotFoundError).
    if not os.path.isdir(folder_path):
        return []
    # splitext only strips the final extension, so ids containing dots survive
    # (the original `split(".")[0]` truncated them).
    return [os.path.splitext(name)[0] for name in os.listdir(folder_path)]

def get_top_answers_list_htmls():
    """Crawl the top-answers listing page for every level-2 label.

    Labels already saved to disk are skipped; a failure on one label is
    logged and the loop continues with the next one.
    """
    # Original annotations used list literals (`: [LabelData]`), which are
    # not valid type hints — corrected to real generics.
    label_data_list: list[LabelData] = get_label_data()
    already_spider: set[str] = set(get_already_spider_list())  # set for O(1) skip checks
    option = webdriver.ChromeOptions()
    # option.add_argument('headless')
    option.add_argument("disable-blink-features=AutomationControlled")  # hides the webdriver automation fingerprint from Chrome
    option.add_experimental_option("detach", True)  # keep the browser window open after the script exits
    # option.add_argument(r"--user-data-dir=C:\Users\wbz12\AppData\Local\Google\Chrome\User Data")
    # NOTE(review): the positional driver-path argument was removed in
    # Selenium 4.10+; switch to Service(executable_path=...) if upgrading.
    browser = webdriver.Chrome(r'D:\Driver\chromedriver.exe', options=option)
    for index, label_data in enumerate(label_data_list):
        if label_data.data_id in already_spider:
            continue  # already saved on a previous run
        sleep_time = random.uniform(1, 1.5)
        print(f"开始爬取第{index + 1}个，一共{len(label_data_list)}个")
        print(f"{label_data.content} {label_data.data_id}")
        try:
            get_top_answers_list_html_one(label_data, browser)
        except Exception:
            # Was a bare `except:`; narrowed so Ctrl-C still stops the crawl.
            print("大失败！")
            traceback.print_exc()
        finally:
            # Pause between topics regardless of outcome.
            time.sleep(sleep_time)


if __name__ == "__main__" :
    # Entry point: crawl the top-answers listing page for every label.
    get_top_answers_list_htmls()
    # get_already_spider_list()
    # get_top_answers_list_htmls()