import lxml.etree as le
import urllib.request as ur
import urllib.response as up
import re
import ssl
# Globally disable TLS certificate verification so every urlopen() call
# accepts CSDN's certificate without a local CA bundle.
# NOTE(review): this weakens security for ALL HTTPS requests in this
# process — prefer ssl.create_default_context() with a proper CA store.
ssl._create_default_https_context = ssl._create_unverified_context

# Search-URL template with {page}/{keyword} placeholders.
# NOTE(review): appears unused — the __main__ block below builds the same
# URL inline via str.format; keep the two in sync or reuse this constant.
url = 'https://so.csdn.net/so/search/s.do?p={page}&q=+{keyword}&t=blog&viparticle=&domain=&o=&s=&u=&l=&f=&rbg=0'

# Helper: fetch a URL and return the raw response body.
def getResponse(url):
    """Fetch *url* with a browser-like User-Agent plus session cookies and
    return the response body as ``bytes``.

    Raises urllib.error.HTTPError / URLError on network failure, and
    socket.timeout if the server stalls longer than 30 seconds.
    """
    req = ur.Request(
        url=url,
        headers={
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:76.0) Gecko/20100101 Firefox/76.0',
            # BUG FIX: the HTTP request header is named 'Cookie', not
            # 'Cookies' — with the misspelled key the server never received
            # the session cookies at all.
            'Cookie': 'uuid_tt_dd=10_7844173860-1588341033077-836887; dc_session_id=10_1588341033077.513081; _ga=GA1.2.199969945.1588743496; c-toolbar-writeguide=1; UserName=yang126511; UserInfo=7999b24ec4be4eabadc9552bc073223b; UserToken=7999b24ec4be4eabadc9552bc073223b; UserNick=Quin_NaL; AU=1B4; UN=yang126511; BT=1589134794704; p_uid=U100000; Hm_up_6bcd52f51e9b3dce32bec4a3997715ac=%7B%22islogin%22%3A%7B%22value%22%3A%221%22%2C%22scope%22%3A1%7D%2C%22isonline%22%3A%7B%22value%22%3A%221%22%2C%22scope%22%3A1%7D%2C%22isvip%22%3A%7B%22value%22%3A%221%22%2C%22scope%22%3A1%7D%7D; Hm_ct_6bcd52f51e9b3dce32bec4a3997715ac=6525*1*10_7844173860-1588341033077-836887!5744*1*yang126511; searchHistoryArray=%255B%2522Python%2520%2522%252C%2522Python%2522%252C%2522python%2522%255D; dc_sid=59bd6eebf777e81a02dd29d35898935f; c_first_ref=default; c_first_page=https%3A//so.csdn.net/so/search/s.do%3Fp%3D2%26q%3D+python%26t%3D%26viparticle%3D%26domain%3D%26o%3D%26s%3D%26u%3D%26l%3D%26f%3D; Hm_lvt_6bcd52f51e9b3dce32bec4a3997715ac=1588576498,1588743498,1589116766,1589187075; TY_SESSION_ID=b17a4259-e189-4bd4-a672-6b12287496c2; c_utm_medium=distribute.pc_search_result.none-task-blog-2%7Eblog%7Efirst_rank_v2%7Erank_v25-17-88606228.nonecase; c_utm_term=+python; dc_tos=qa5u12; Hm_lpvt_6bcd52f51e9b3dce32bec4a3997715ac=1589188936'
        }
    )
    # Context manager guarantees the underlying socket is closed even when
    # read() raises (the original leaked the response object); the timeout
    # prevents a stalled server from hanging the scraper forever.
    with ur.urlopen(req, timeout=30) as resp:
        return resp.read()

if __name__ == '__main__':
    import os  # local import: only the script entry point needs it

    keyword = input('关键词')          # search keyword (str)
    pn_start = int(input('起始页'))    # first results page, inclusive
    pn_end = int(input('终止页'))      # last results page, inclusive

    # Ensure the output directory exists before any article is written;
    # the original crashed with FileNotFoundError on a fresh checkout.
    os.makedirs('blog', exist_ok=True)

    for page in range(pn_start, pn_end + 1):
        # Level-1 page: one page of search results, yielding article links.
        response = getResponse(
            url='https://so.csdn.net/so/search/s.do?p={page}&q=+{keyword}&t=blog&viparticle=&domain=&o=&s=&u=&l=&f=&rbg=0'.format(
                page=page, keyword=keyword))
        hrefs = le.HTML(response).xpath('//div[@class="search-list-con"]/dl//span[@class="mr16"]/../../dt/div/a[1]/@href')
        for href in hrefs:
            # Level-2 page: the individual blog article.  Best-effort: a
            # failure on one article must not abort the whole crawl.
            try:
                response_blog = getResponse(url=href)
                titles = le.HTML(response_blog).xpath('//h1[@class="title-article"]/text()')
                if not titles:
                    # Deleted article or changed page layout — skip instead
                    # of raising IndexError like the original [0] access.
                    print('no title found, skipping:', href)
                    continue
                # Strip characters that are illegal in Windows file names.
                title = re.sub(r'[/\\?*:"<>|]', '', titles[0])
                filepath = 'blog/%s.html' % title
                with open(filepath, 'wb') as f:
                    f.write(response_blog)
                print(title)
            except Exception as e:
                # Log and continue with the next link.
                print('failed to save', href, '->', e)