import urllib.request as ur
import lxml.etree as le
import re

def getResponse(url):
    """Fetch *url* with a browser-like User-Agent and return the raw body bytes.

    Parameters
    ----------
    url : str
        Absolute URL to download.

    Returns
    -------
    bytes
        Raw (undecoded) response body.
    """
    req = ur.Request(
        url=url,
        headers={
            # Spoof a desktop Chrome UA so the server serves the normal page
            # instead of rejecting the default urllib client.
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36'
        }
    )
    # Use a context manager so the underlying connection is always closed;
    # the original `ur.urlopen(req).read()` leaked the response object.
    with ur.urlopen(req) as response:
        return response.read()

if __name__ == '__main__':
    import os

    # Page range of the CSDN search results to crawl (inclusive).
    pn_start = int(input('起始页'))
    pn_end = int(input('终止页'))

    # Ensure the output directory exists; the original crashed with
    # FileNotFoundError when 'blog/' was missing.
    os.makedirs('blog', exist_ok=True)

    for page in range(pn_start, pn_end + 1):
        # Level-1 page: one page of search results for the query "python".
        response = getResponse(
            url='https://so.csdn.net/so/search/s.do?p={page}&q=python&t=&viparticle=&domain=&o=&s=&u=&l=&f='.format(page=page)
        )
        # Extract the blog-post links (level-2 pages) from the result list.
        hrefs = le.HTML(response).xpath('//div[@class = "search-list-con"]/dl//span[@class="mr16"]/../../dt/div/a[1]/@href')
        for href in hrefs:
            response_blog = getResponse(
                url=href,
            )
            titles = le.HTML(response_blog).xpath('//h1[@class="title-article"]/text()')
            if not titles:
                # Page layout didn't match (no article title) — skip instead
                # of raising IndexError like the original did.
                continue
            # Remove characters that are illegal in Windows filenames, then
            # strip surrounding whitespace/newlines so the filename is clean.
            title = re.sub(r'[/\\:*"<>|?]', '', titles[0]).strip()
            filepath = 'blog/%s.html' % title
            # Save the raw HTML of the blog post.
            with open(filepath, 'wb') as f:
                f.write(response_blog)
            print(title)