import os.path

import requests
from lxml import etree

# -- Module-level setup: target blogger, counters, and a shared HTTP session --

author_name = input('请输入博主id：')  # CSDN blogger id, used as a URL path segment
Max_PAGE_NUM = 200  # upper bound on pages to crawl (not used in this chunk)
i = 1  # running article counter shown in progress messages

# BUG FIX: requests.Session must be *called* to create an instance; the
# original `sess = requests.Session` bound the class itself, so the
# `sess.headers[...]` assignment below raised AttributeError.
sess = requests.Session()
# Spoof a desktop-browser User-Agent so CSDN serves the full article page.
sess.headers['User-Agent'] = (
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
    '(KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36 '
    'MicroMessenger/7.0.20.1781(0x6700143B) NetType/WIFI '
    'MiniProgramEnv/Windows WindowsWechat/WMPF XWEB/8391'
)


def crawler_blog_by(author_name, article_id, title, i):
    """Download one CSDN article and save its body as a local HTML file.

    Args:
        author_name: CSDN blogger id (URL path segment).
        article_id: id of the article to fetch (URL path segment).
        title: article title, used to build the saved file name.
        i: 1-based count of articles saved so far, used in the progress
           message.

    Returns:
        ``i + 1`` so the caller can keep the running count (the original
        incremented a local ``i``, which had no effect outside the call).
    """
    article_request_url = (
        f"https://blog.csdn.net/{author_name}/article/details/"
        f"{article_id}?spm=1001.2014.3001.5502"
    )
    response = sess.get(article_request_url)

    selector = etree.HTML(response.text)
    # BUG FIX: the original extracted "//hand" (a typo for "//head"), which
    # matched nothing and raised IndexError on `[0]`. The extracted head was
    # never written to the output file, so the lookup is dropped entirely;
    # the saved file supplies its own minimal <head> below.
    body_msg = selector.xpath('//div[@id="content_views"]')[0]
    body_str = etree.tostring(body_msg, encoding='utf8', method='html').decode()

    if not os.path.exists("c_articles"):
        os.mkdir('c_articles')
    # Strip characters that are illegal in file names on common platforms.
    # (The original's second `.replace(': ', '')` was dead code: after all
    # ':' are removed, ': ' can no longer occur.)
    title = title.replace('/', '-').replace(':', '')

    # BUG FIX: the original passed '.html' as a separate os.path.join
    # component, producing "c_articles/<name>/.html" — a file inside a
    # directory that does not exist — instead of "c_articles/<name>.html".
    save_file_name = os.path.join('c_articles', f'{author_name}-{title}-{article_id}.html')
    with open(save_file_name, 'w', encoding='utf8') as writer:
        writer.write(f"""
        <head><meta charset="utf-8"></head>
        {body_str}
        """)
    # html_to_pdf(save_file_name)

    print(f'[info]: {author_name}第{i}篇博文{title}-{article_id}.html 保存文件成功')
    return i + 1

if __name__ == '__main__':
    # BUG FIX: the original called crawler_blog_by() with no arguments,
    # which always raised TypeError (the function takes four parameters).
    # Prompt for the remaining details and pass the module-level state.
    article_id = input('请输入文章id：')
    article_title = input('请输入文章标题：')
    crawler_blog_by(author_name, article_id, article_title, i)