import urllib.request as ur
import lxml.etree as le
import ssl
import re

# Disable TLS certificate verification for every HTTPS request in this
# process (the target site's certificate chain apparently fails validation).
# NOTE(review): this is a global security downgrade — acceptable for a
# throwaway scraper, not for shared code.
ssl._create_default_https_context = ssl._create_unverified_context


# url = 'https://so.csdn.net/so/search/s.do?p={page}&q={keyword}&t=&viparticle=&domain=&o=&s=&u=&l=&f='


def getRseponse(url, timeout=30):
    """Fetch *url* and return the response body decoded as UTF-8.

    A desktop-browser User-Agent and a (stale, session-bound) Cookie header
    are sent so the site serves the normal HTML pages.  Undecodable byte
    sequences are silently dropped (``errors='ignore'``).

    Args:
        url: Absolute URL to request.
        timeout: Socket timeout in seconds.  Added (with a default) so a
            stalled server cannot hang the crawl forever; existing callers
            are unaffected.

    Returns:
        The response body as a ``str``.

    Raises:
        urllib.error.URLError: On connection failure or timeout.
    """
    req = ur.Request(
        url=url,
        headers={
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 Safari/537.36',
            'Cookie': 'JSESSIONID=D5389794B6EEA1AC8063EFC4CB6EB19F; TY_SESSION_ID=8e6ce042-d166-43f7-8049-30c335bc46f4; uuid_tt_dd=10_37479441810-1590415432279-636826; dc_session_id=10_1590415432279.292567; __gads=ID=02ee62f33796453f:T=1590415433:S=ALNI_Ma1_RJb6rC5l5JRrRN4mhcqqmJ2Kw; UserName=bhh19881225; UserInfo=4afb08d8ea3c4f8e9ab1af676cd7cc58; UserToken=4afb08d8ea3c4f8e9ab1af676cd7cc58; UserNick=bhh19881225; AU=BDA; UN=bhh19881225; BT=1590495050336; p_uid=U100000; Hm_ct_6bcd52f51e9b3dce32bec4a3997715ac=6525*1*10_37479441810-1590415432279-636826!5744*1*bhh19881225; Hm_up_6bcd52f51e9b3dce32bec4a3997715ac=%7B%22islogin%22%3A%7B%22value%22%3A%221%22%2C%22scope%22%3A1%7D%2C%22isonline%22%3A%7B%22value%22%3A%221%22%2C%22scope%22%3A1%7D%2C%22isvip%22%3A%7B%22value%22%3A%221%22%2C%22scope%22%3A1%7D%2C%22uid_%22%3A%7B%22value%22%3A%22bhh19881225%22%2C%22scope%22%3A1%7D%7D; searchHistoryArray=%255B%2522java_%2522%252C%2522python%2522%252C%2522java%2522%255D; dc_sid=7f52b833b20a8a8dfdc93fb10a0477ed; announcement=%257B%2522isLogin%2522%253Atrue%252C%2522announcementUrl%2522%253A%2522https%253A%252F%252Fbss.csdn.net%252Fm%252Ftopic%252Flive_recruit%253Futm_source%253Dannounce0515%2522%252C%2522announcementCount%2522%253A0%252C%2522announcementExpire%2522%253A3600000%257D; c_first_ref=www.baidu.com; c_first_page=https%3A//blog.csdn.net/qq_31285709/article/details/82380042; Hm_lvt_6bcd52f51e9b3dce32bec4a3997715ac=1590666612,1590674216,1590674423,1590674459; c_utm_medium=distribute.pc_relevant.none-task-blog-BlogCommendFromMachineLearnPai2-1.nonecase; c_ref=https%3A//edu.csdn.net/teach_course/detail/68; c_mod=popu_271; c_page_id=https%3A//so.csdn.net/so/search/s.do; dc_tos=qb1qsb; Hm_lpvt_6bcd52f51e9b3dce32bec4a3997715ac=1590677723'
        }
    )
    # Context manager closes the underlying socket promptly instead of
    # leaking it until garbage collection (the original never closed it).
    with ur.urlopen(req, timeout=timeout) as resp:
        return resp.read().decode('utf-8', 'ignore')


if __name__ == '__main__':
    import os

    keyword = 'Python_'
    pn_start = 1
    pn_end = 10

    # Create the output directory up front; the original crashed with
    # FileNotFoundError on open() when 'blog/' did not already exist.
    os.makedirs('blog', exist_ok=True)

    # Characters that are illegal in filenames on common filesystems.
    # Compiled once, outside the loops.
    illegal_chars = re.compile(r'[/\\:*"<>|?]')

    for page in range(pn_start, pn_end + 1):
        # Level-1 page: one page of search results for the keyword.
        response = getRseponse(
            url='https://so.csdn.net/so/search/s.do?p={page}&q={keyword}&t=&viparticle=&domain=&o=&s=&u=&l=&f='.format(
                page=page, keyword=keyword)
        )
        # Level-2 pages: the individual blog-post links in the result list.
        hrefs = le.HTML(response).xpath(
            '//div[@class="search-list-con"]/dl//span[@class="mr16"]/../../dt/div/a[1]/@href')
        for href in hrefs:
            response_blog = getRseponse(
                url=href,
            )
            title_s = le.HTML(response_blog).xpath('//h1[@class="title-article"]/text()')
            if not title_s:
                # Not a standard article page (no article title) — skip it.
                continue
            # Strip filename-hostile characters from the title before using
            # it as the output file name.
            title = illegal_chars.sub('', title_s[0])
            filepath = 'blog/%s.html' % title
            with open(filepath, 'w', encoding='utf-8') as f:
                f.write(response_blog)

            print(title)