import os
import re
import urllib.parse as up
import urllib.request as ur

import lxml.etree as le
# Template for a CSDN blog-search results page; {page} and {keyword} are
# filled in with str.format before the request is made.
url = 'https://so.csdn.net/so/search/s.do?p={page}&q={keyword}&t=blog&viparticle=&domain=&o=&s=&u=&l=&f=&rbg=0'

def getResponse(url):
    """Fetch *url* and return the raw response body as bytes.

    Sends a desktop-browser User-Agent header because CSDN rejects the
    default urllib client string.

    Raises:
        urllib.error.URLError: on network failure or timeout.
    """
    req = ur.Request(
        url=url,
        headers={
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36'
        }
    )
    # Use a context manager so the connection is always closed (the original
    # leaked it), and a timeout so one stalled server can't hang the scraper.
    with ur.urlopen(req, timeout=30) as response:
        return response.read()

if __name__ == '__main__':
    keyword = input('关键字:')        # search keyword
    pnstart = int(input('起始页:'))   # first results page
    pnend = int(input('结束页:'))     # last results page (inclusive)

    # BUG FIX: the original did urlencode({'p': keyword}), which yields the
    # string "p=<encoded>"; substituted into "q={keyword}" that produced a
    # malformed query "q=p%3D...". Percent-encode only the value itself.
    keyword = up.quote(keyword)

    # BUG FIX: writing to 'blog/...' crashed with FileNotFoundError when the
    # directory didn't exist yet.
    os.makedirs('blog', exist_ok=True)

    for page in range(pnstart, pnend + 1):
        # Level 1: fetch one page of search results (reuse the module-level
        # url template instead of duplicating the literal).
        response = getResponse(url=url.format(page=page, keyword=keyword))

        # Level 2: pull the blog-post links out of the result list.
        hrefs = le.HTML(response).xpath(
            '//div[@class="search-list-con"]/dl//span[@class="mr16"]/../../dt/div/a[1]/@href'
        )
        for href in hrefs:
            response_blog = getResponse(url=href)

            # BUG FIX: xpath(...)[0] raised IndexError on any post whose page
            # lacks the expected <h1>; skip those instead of crashing.
            titles = le.HTML(response_blog).xpath('//h1[@class="title-article"]/text()')
            if not titles:
                continue
            # Strip characters that are illegal in Windows file names.
            title = re.sub(r'[/\\:*"<>|?]', '', titles[0])
            filepath = 'blog/%s.html' % title
            with open(filepath, 'wb') as f:
                f.write(response_blog)
            print(title)

