import traceback

from selenium import webdriver
import os
import time
import re
from bs4 import BeautifulSoup
import random
import requests

from selenium.webdriver.common.by import By
import mysql_connect


class ZhihuEssay:
    """One Zhihu essay record: its URL, title, preview text, and data id.

    url is stored protocol-relative in the DB (e.g. "//zhuanlan.zhihu.com/p/123"),
    so callers prepend "https:" before fetching — TODO confirm against the data.
    """

    def __init__(self, url, title, display_text, zhihu_data_id):
        self.url = url
        self.title = title
        self.display_text = display_text
        self.zhihu_data_id = zhihu_data_id

    def __str__(self):
        # Bug fix: the closing parenthesis used to appear before zhihu_data_id,
        # producing a malformed "ZhihuEssay(...), zhihu_data_id=..." string.
        return (f"ZhihuEssay(url={self.url}, title={self.title}, "
                f"display_text={self.display_text}, zhihu_data_id={self.zhihu_data_id})")

    # __repr__ was a verbatim duplicate of __str__; alias it to avoid drift.
    __repr__ = __str__

def get_data() -> list[ZhihuEssay]:
    """Load all distinct (title, url) pairs from the zhihu_essay table.

    Returns:
        A list of ZhihuEssay objects with display_text and zhihu_data_id
        left as None.  Returns an empty list if the query fails (the
        error is printed, not raised).
    """
    conn, cursor = mysql_connect.get_conn()
    result_list: list[ZhihuEssay] = []
    try:
        sql = """
            SELECT DISTINCT
                zq.title,
                zq.url
            FROM
                zhihu_essay AS zq;
        """
        cursor.execute(sql)
        # Row layout is (title, url); ZhihuEssay takes (url, title, ...).
        result_list = [ZhihuEssay(row[1], row[0], None, None)
                       for row in cursor.fetchall()]
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; keep the original best-effort print-and-continue.
        traceback.print_exc()
    finally:
        cursor.close()
        conn.close()

    return result_list

def get_one_html(question: ZhihuEssay, already_list: list[str]) -> bool:
    """Download one essay page and save its prettified HTML to disk.

    Args:
        question: the essay whose page to fetch; the last path segment of
            its URL is used as the page id / output file name.
        already_list: page ids already saved on disk; matching ids are skipped.

    Returns:
        False when the page was already downloaded (skipped),
        True after a successful fetch and save.

    Raises:
        requests.RequestException: on network errors, timeout, or a
            non-2xx HTTP status.
    """
    page_id = question.url.split('/')[-1]
    if page_id in already_list:
        print("这个跳过喽！")
        return False
    header = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, sdch',
        'Accept-Language': 'zh-CN,zh;q=0.8',
        'Connection': 'keep-alive',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.235',
        "Cookie": """_zap=aa73886a-e368-4332-a732-67e854600ad1; d_c0=AGCYZtHCpxaPTjCAaTP9VRiKH8L0bqJfs78=|1681995372; YD00517437729195%3AWM_TID=YJHQ4pfKYCFERQEFQRc7%2FURz245Mnc74; __snaker__id=m99RKmKSTHtHwb4Y; YD00517437729195%3AWM_NI=oLRPwWyh8FIqo2ONE8hwpD%2Bk98nvFNska4yJg4j0VH1kPcYSIB%2FgRvSQJTgbxRlP3PedGeVRRSOokk0ok3CX8MCJ8JtbBYLsuaNbgbD63ySLiZqh94wtGaxowp%2BZtH8%2BUUQ%3D; YD00517437729195%3AWM_NIKE=9ca17ae2e6ffcda170e2e6eeb6d121b187a691ca70ae928bb3d55a829e9eadd5648c8dfea6ea6188bfbcaed52af0fea7c3b92aad949eb1f65dbbb8f8a7cc7badee81afc834aca9c0a5aa61fbe8a297c546b1b5afd5fc42a5eca6d1d544f3b5a084dc598b9ee188d17e918f86b5e66abc8ea4b1b25ca7bc8986d55a928d8190ea61f4b1a5d4c254fc938dabbb42b8be8ebacf79b7bf8283e66a88eafd9ad248ab86bf89bc40b5ac8982fb65b5b0e5bbf744fcf59bb6d437e2a3; q_c1=d333a471c0e84dffa5befaae67831722|1684920221000|1684920221000; q_c1=d333a471c0e84dffa5befaae67831722|1692624665000|1684920221000; __utmv=51854390.100--|2=registration_date=20160703=1^3=entry_date=20160703=1; _xsrf=b52003193babcd2f3a19472296f30a04; z_c0=2|1:0|10:1693130116|4:z_c0|80:MS4xQWZRMEF3QUFBQUFtQUFBQVlBSlZUWU5yMkdYWjNhOWdUV0hzRm5SYTVodDlPS3pPMUkxMFBBPT0=|d845d5db9929b06cc93d9081676c65136a44d6861d1ec002e5877b8764739b05; __utma=51854390.1016281673.1692624666.1693312041.1693460941.4; __utmz=51854390.1693460941.4.4.utmcsr=cn.bing.com|utmccn=(referral)|utmcmd=referral|utmcct=/; Hm_lvt_98beee57fd2ef70ccdd5ca52b9740c49=1693125876,1693312050,1693459829,1693469818; tst=r; SESSIONID=jbDM19VYhEad21HHeT1eCUo2MgSamzQYuAnUtess7Ar; JOID=V1sVAE2D5_9oy8c4VIVCa1K5Q3xF74a4IPn4bQ7opJ0P_PV2HwlmCQrOwjhSBgj28dyH1ad7zE69MwXMijVQl-c=; osd=Ul8UBUiG4_5tzsI8VYBHbla4RnlA64e9Jfz8bAvtoZkO-fBzGwhjDA_Kwz1XAwz39NmC0aZ-yUu5MgDJjzFRkuI=; Hm_lpvt_98beee57fd2ef70ccdd5ca52b9740c49=1693483424; KLBRSID=d6f775bb0765885473b0cba3a5fa9c12|1693483568|1693483405"""
    }

    # The stored URL is protocol-relative, so prepend the scheme.
    # timeout added so a stalled connection cannot hang the whole run.
    req = requests.get(f"https:{question.url}", headers=header, timeout=30)
    req.raise_for_status()  # fail loudly on 4xx/5xx instead of saving an error page
    req.encoding = 'utf-8'
    soup = BeautifulSoup(req.text, "html.parser")
    html_text = soup.prettify()
    with open(f"other_files/question_title_htmls/title_htmls/{page_id}.html", "w", encoding="utf-8") as w_file:
        w_file.write(html_text)
    return True

def get_htmls() -> None:
    """Fetch and save the HTML page of every essay not yet downloaded.

    Pages already present in other_files/question_title_htmls/title_htmls
    are skipped.  A random 1-3 s sleep follows every actual network request
    (including failed ones) to avoid hammering the server; skipped pages
    continue immediately.  Errors are printed and the loop continues.
    """
    question_list: list[ZhihuEssay] = get_data()
    # File names minus extension == page ids already saved on disk.
    already_list = [file_name.split(".")[0]
                    for file_name in os.listdir("other_files/question_title_htmls/title_htmls")]
    total = len(question_list)  # hoisted: invariant across iterations
    for index, question in enumerate(question_list):
        sleep_time_get_url = random.uniform(1, 3)
        print(f"现在是第{index}个，总共{total}个,即将休眠{sleep_time_get_url}秒")

        try:
            # Only sleep when a request was actually made.
            if get_one_html(question, already_list):
                time.sleep(sleep_time_get_url)
        except Exception:
            # Narrowed from a bare `except:` so Ctrl-C still stops the run;
            # log the failure and move on to the next essay.
            traceback.print_exc()
            time.sleep(sleep_time_get_url)




if __name__ == "__main__":
    # Script entry point: download every not-yet-saved essay page.
    get_htmls()