import os
import re
import urllib.parse as up
import urllib.request as ur

import lxml.etree as le
# https://so.csdn.net/so/search/s.do?p=3&q=orc&t=blog&viparticle=&domain=&o=&s=&u=&l=&f=&rbg=0
# https://so.csdn.net/so/search/s.do?p=3&q=%E8%AF%81%E4%BB%B6%E8%AF%86%E5%88%AB&t=blog&viparticle=&domain=&o=&s=&u=&l=&f=&rbg=0
# ?p=2&q=证件识别&t=&viparticle=&domain=&o=&s=&u=&l=&f
# Search-result URL template for CSDN's blog search.
# {page} is the 1-based result-page number; {keyword} is presumably expected
# to be percent-encoded already (the __main__ block applies up.quote first).
url='https://so.csdn.net/so/search/s.do?p={page}&q={keyword}&t=blog&viparticle=&domain=&o=&s=&u=&l=&f=&rbg=0'

# Fetch a URL and return the raw response body.
# A browser-like User-Agent is sent because many sites reject the default
# urllib client string; cookies are not required for these pages.
def getResponse(url):
    """Return the raw bytes of the HTTP response for *url*.

    BUG FIX: the header key was misspelled 'Use-Agent', so the UA string
    was never sent as an actual User-Agent header.
    """
    req = ur.Request(
        url=url,
        headers={
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36',
        },
    )
    # Use a context manager so the connection is closed promptly
    # instead of leaking until garbage collection.
    with ur.urlopen(req) as resp:
        return resp.read()


if __name__ == '__main__':
    keyword=input('关键词')
    # URL-encode the keyword with parse.quote so non-ASCII characters
    # survive transmission in the query string.
    keyword=up.quote(keyword)
    pn_start=int(input('起始页'))
    pn_end=int(input('结束页'))

    # Make sure the output directory exists before writing any blog pages.
    os.makedirs('blog', exist_ok=True)

    for page in range(pn_start,pn_end+1):
        # Level-1 page: one search-result listing per page.
        # Reuse the module-level template instead of duplicating the URL.
        response=getResponse(url=url.format(page=page,keyword=keyword))

        # BUG FIX: everything below was previously OUTSIDE this loop, so
        # `response` was overwritten each iteration and only the LAST
        # page's results were ever scraped and downloaded.
        # `response` is raw bytes; parse it into an lxml HTML tree to
        # extract the per-blog (level-2) links.
        hrefs=le.HTML(response).xpath('//div[@class="search-list-con"]/dl//span[@class="mr16"]/../../dt/div/a[1]/@href')
        for href in hrefs:
            response_blog=getResponse(url=href)
            titles=le.HTML(response_blog).xpath('//h1[@class="title-article"]/text()')
            if not titles:
                # Page layout did not match (no article title) — skip it
                # instead of crashing with an IndexError.
                continue
            # Strip characters that are illegal in Windows file names.
            title=re.sub(r'[/\\:*"<>|?]', '', titles[0])
            filepath='blog/%s.html'  %title
            with open(filepath,'wb') as f:
                f.write(response_blog)
            print(title)