import urllib.request as ur
import urllib.parse as up
import lxml.etree as le
from proxy import proxy_opener, proxy_handler
import re


def getResponse(url):
    """Fetch *url* through the proxy-enabled opener and return the raw body.

    Parameters
    ----------
    url : str
        Fully-formed URL to request.

    Returns
    -------
    bytes
        Raw bytes of the HTTP response body.
    """
    req = ur.Request(
        url=url,
        headers={
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3775.400 QQBrowser/10.6.4208.400',
            # NOTE(review): this is a hard-coded personal session cookie —
            # it will expire and should be loaded from config, not committed
            # to source. The literal is split with implicit string
            # concatenation (the original contained a raw newline inside the
            # quoted string, which is a syntax error).
            'Cookie': 'TY_SESSION_ID=dc8e5446-9d31-4dea-9cfe-d575e8672586; JSESSIONID=014B7FACDC57BC246FBA67A16F7F8306; uuid_tt_dd=10_10351383310-1597660145281-809420; dc_session_id=10_1597660145281.154708; UN=weixin_44549563; announcement=%257B%2522isLogin%2522%253Atrue%252C%2522announcementUrl%2522%253A%2522https%253A%252F%252Flive.csdn.net%252Froom%252Fyzkskaka%252Fats4dBdZ%253Futm_source%253D908346557%2522%252C%2522announcementCount%2522%253A0%257D; Hm_ct_6bcd52f51e9b3dce32bec4a3997715ac=6525*1*10_10351383310-1597660145281-809420!5744*1*weixin_44549563; dc_sid=247da785a74ce6a39f6f89781f1a22c2; c_first_page=https%3A//www.csdn.net/; c_segment=12; Hm_lvt_6bcd52f51e9b3dce32bec4a3997715ac=1598489443,1598514458,1598517556,1598520548; p_uid=U110000; c_first_ref=mycollege.html; Hm_up_facf15707d34a73694bf5c0d571a4a72=%7B%22islogin%22%3A%7B%22value%22%3A%221%22%2C%22scope%22%3A1%7D%2C%22isonline%22%3A%7B%22value%22%3A%221%22%2C%22scope%22%3A1%7D%2C%22isvip%22%3A%7B%22value%22%3A%221%22%2C%22scope%22%3A1%7D%2C%22uid_%22%3A%7B%22value%22%3A%22weixin_44549563%22%2C%22scope%22%3A1%7D%7D; Hm_ct_facf15707d34a73694bf5c0d571a4a72=6525*1*10_10351383310-1597660145281-809420!5744*1*weixin_44549563; Hm_lvt_facf15707d34a73694bf5c0d571a4a72=1598581707,1598581891; Hm_lpvt_facf15707d34a73694bf5c0d571a4a72=1598581891; c_ref=https%3A//blog.csdn.net/; c_utm_medium=distribute.pc_search_result.none-task-blog-2%7Eall%7Efirst_rank_ecpm_v3%7Epc_rank_v2-1-37607971.first_rank_ecpm_v3_pc_rank_v2; c_utm_term=java; c_page_id=default; log_Id_click=6; SESSION=ef900e87-539b-4c74-9cb2-a7db82325724; UserName=weixin_44549563; UserInfo=e51e6feb0607444785ed5cea90bf6374; UserToken=e51e6feb0607444785ed5cea90bf6374; UserNick=%E3%81%A5%E5%AE%89%E7%9C%A0%E4%B8%B6%E4%B9%90%E7%81%AC; AU=612; BT=1598598251970; '
                      'Hm_up_6bcd52f51e9b3dce32bec4a3997715ac=%7B%22islogin%22%3A%7B%22value%22%3A%221%22%2C%22scope%22%3A1%7D%2C%22isonline%22%3A%7B%22value%22%3A%221%22%2C%22scope%22%3A1%7D%2C%22isvip%22%3A%7B%22value%22%3A%221%22%2C%22scope%22%3A1%7D%2C%22uid_%22%3A%7B%22value%22%3A%22weixin_44549563%22%2C%22scope%22%3A1%7D%7D; searchHistoryArray=%255B%2522python%2522%252C%2522java%2522%255D; dc_tos=qfrib6; log_Id_pv=36; is_advert=1; Hm_lpvt_6bcd52f51e9b3dce32bec4a3997715ac=1598598259; log_Id_view=90'
        }
    )
    # Use a distinct local name: the original rebound `proxy_opener`,
    # shadowing the name imported from the `proxy` module at file level.
    opener = ur.build_opener(proxy_handler)
    # Context manager ensures the underlying connection is closed after
    # the body has been read (the original leaked the response object).
    with opener.open(req) as resp:
        return resp.read()


if __name__ == '__main__':
    # Interactive crawl: search CSDN for a keyword over a page range and
    # save each result's blog post as blog/<title>.html.
    keyword = input('请输入关键词:')
    pn_start = int(input('起始页:'))
    pn_end = int(input('终止页:'))
    for page in range(pn_start, pn_end + 1):
        # Level-1 page: the search-result listing. The keyword is
        # percent-encoded so non-ASCII (e.g. Chinese) and reserved
        # characters survive the query string — the original interpolated
        # the raw input.
        response = getResponse(
            url='https://so.csdn.net/so/search/s.do?q={keyword}&t=all&platform=pc&p={page}&pageSize=30'.format(
                page=page, keyword=up.quote(keyword))
        )
        # Dump the listing for debugging/inspection.
        with open('text.html', 'wb') as f:
            f.write(response)
        href_s = le.HTML(response).xpath('//div[@class="search-list-con"]/dl/dt//a[1]/@href')
        # Level-2 pages: each individual blog post linked from the listing.
        for href in href_s:
            response_blog = getResponse(url=href)
            # Parse once and reuse the tree; the original built the tree
            # and evaluated the same XPath twice per post.
            tree = le.HTML(response_blog)
            titles = tree.xpath('//h1[contains(@class,"title")]/text()')
            if not titles:
                continue
            # Drop characters that are illegal in Windows file names, then
            # trim surrounding whitespace so the filename is clean.
            title = re.sub(r'[/\\:*"<>|?]', '', titles[0]).strip()
            if title:
                filepath = 'blog/{title}.html'.format(title=title)
                with open(filepath, 'wb') as f:
                    f.write(response_blog)
                print(title)