# -*- coding: utf-8 -*-
import os
import re
import urllib.parse as up
import urllib.request as ur

import lxml.etree as le

# NOTE(review): this module-level URL template appears unused — the __main__
# loop below builds its own URL string instead of formatting this one; verify
# no other module imports it before removing.
url = 'https://so.csdn.net/so/search/s.do?p={page}&q={keyword}&t=blog&viparticle=&domain=&o=&s=&u=&l=&f=&rbg=0'

def getResponse(url):
    """Fetch *url* with a browser-like User-Agent and return the body as text.

    Parameters:
        url: absolute URL to request (str).

    Returns:
        The response body decoded as UTF-8; undecodable bytes are dropped
        (errors='ignore') so partially mis-encoded pages still return.

    Raises:
        urllib.error.URLError / HTTPError on network or HTTP failures.
    """
    req = ur.Request(
        url=url,
        headers={
            # Desktop Chrome UA so the site serves the normal HTML page.
            'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.122 Safari/537.36'
        }
    )
    # Use a context manager so the HTTP connection is closed deterministically
    # (the original leaked the response object on every call).
    with ur.urlopen(req) as resp:
        return resp.read().decode('utf-8', 'ignore')

if __name__ == '__main__':
    # Interactive parameters: search keyword and inclusive page range.
    keyword = input('关键词')
    pn_start = int(input('起始页'))
    pn_end = int(input('终止页'))

    # URL-encode the keyword so non-ASCII (e.g. Chinese) text is safe in the
    # query string; yields e.g. 'q=%E4%B8%AD%E6%96%87'.
    data = up.urlencode(
        {'q':keyword}
    )

    # Ensure the output directory exists up front — the original crashed with
    # FileNotFoundError on the first write when 'blog/' was missing.
    os.makedirs('blog', exist_ok=True)

    for page in range(pn_start, pn_end + 1):
        # Level-1 page: one page of search results.
        response = getResponse(
            url = 'https://so.csdn.net/so/search/s.do?p={page}&{data}&t=blog&viparticle=&'
                  'domain=&o=&s=&u=&l=&f=&rbg=0'.format(page=page, data=data)

        )
        # Level-2 links: hrefs of the individual blog posts on this page.
        # hrefs = le.HTML(response).xpath('//span[@class="mr16"]/../../dt/div/a[1]/@href')
        hrefs = le.HTML(response).xpath('//div[@class="limit_width"]/a/@href')
        print(hrefs)
        # Visit each blog post and save its HTML.
        for href in hrefs:
            response_blog = getResponse(
                url = href,
            )
            # Article title; skip pages whose layout lacks the expected <h1>
            # instead of raising IndexError on [0] as the original did.
            titles = le.HTML(response_blog).xpath('//h1[@class="title-article"]/text()')
            if not titles:
                continue
            # Strip characters illegal in Windows file names (plus spaces).
            title = re.sub(
                r'[/\\:*"<>|? ]','',titles[0]
            )
            # Save the raw page HTML under the sanitized title.
            filepath = 'blog/%s.html' % title
            with open(filepath,'w',encoding='utf-8') as f:
                f.write(response_blog)
            print(title)