import ssl
# Globally disable TLS certificate verification so HTTPS requests do not fail
# on hosts with invalid/untrusted certificates.
# WARNING: this is insecure (allows man-in-the-middle); acceptable only for a
# throwaway scraping script.
ssl._create_default_https_context = ssl._create_unverified_context

import urllib.request as ur
import urllib.parse as up
import lxml.etree as le
import re

def Getresponse(url):
    """Download *url* with a browser-like User-Agent and return the raw bytes.

    Parameters
    ----------
    url : str
        Absolute URL to fetch.

    Returns
    -------
    bytes
        The raw (undecoded) response body.

    Raises
    ------
    urllib.error.URLError
        If the request fails.
    """
    request = ur.Request(
        url=url,
        headers={
            # Pretend to be a desktop Chrome browser so the site does not
            # reject the scripted request.
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Safari/537.36',
        }
    )
    # Use a context manager so the connection is closed deterministically
    # instead of being leaked until garbage collection.
    with ur.urlopen(request) as response:
        return response.read()


# Search-result page template; {page} is the 1-based result page number and
# {keyword} the percent-encoded search term.
url = 'https://so.csdn.net/so/search/s.do?p={page}&q={keyword}&t=blog&viparticle=&domain=&o=&s=&u=&l=&f=&rbg=0'

if __name__ == '__main__':
    import os

    keyword = input('keyword:')
    page_start = int(input('first page:'))
    page_end = int(input('last page:'))

    # Percent-encode the keyword for use in the URL query string.
    # NOTE: the previous code used up.urlencode({'q': keyword}), which yields
    # the full 'q=<value>' pair and, once substituted after 'q=' in the
    # template, produced a malformed 'q=q=<value>' parameter.
    keyword_e = up.quote(keyword)
    print(keyword_e)

    # Make sure the output directory exists before writing any blog pages.
    os.makedirs('blog', exist_ok=True)

    # Visit each search-result (level-1) page.
    for page in range(page_start, page_end + 1):
        page_url = url.format(page=page, keyword=keyword_e)

        response = Getresponse(page_url)
        print(page)

        # Extract blog links and visit each blog (level-2) page.
        hrefs = le.HTML(response).xpath('//div[@class="limit_width"]/a/@href')
        for href in hrefs:
            response_blog = Getresponse(href)
            titles = le.HTML(response_blog).xpath('//h1[@class="title-article"]/text()')
            if not titles:
                # Page layout does not match (e.g. removed or non-article
                # page) -- skip instead of raising IndexError.
                continue
            title = titles[0]

            # Strip characters that are illegal in file names.
            title_n = re.sub(r'[/\\:*"<>|?]', '', title)

            # Save the raw HTML under the sanitized article title.
            filepath = 'blog/%s.html' % title_n

            with open(filepath, 'wb') as f:
                f.write(response_blog)
            print(title)
