import os
import re
import urllib.request as ur

import lxml.etree as le
import user_agent

# Prompt the user for the search keyword and the (inclusive) page range to crawl.
kw = input('请输入搜索的关键词:')
pn_start = int(input('请输入起始页：'))  # first result page; may raise ValueError on non-numeric input
pn_end = int(input('请输入终止页：'))  # last result page (inclusive)

# 获取一级页面的url
def getRequest(url):
    """Build a urllib Request for *url*.

    The request carries a randomized desktop User-Agent (from the
    third-party ``user_agent`` package) and a fixed CSDN session Cookie
    so the search pages can be fetched without being blocked.
    """
    headers = {
        'User-Agent': user_agent.get_user_agent_pc(),
        'Cookie':'acw_tc=2760821215760416377505673ee7959209d4396b127f7ed00d435788835f58; acw_sc__v2=5df07ca5776e2122e7dc173b113a363a14ce0d14; uuid_tt_dd=10_29485821600-1576041638364-588214; dc_session_id=10_1576041638364.367541; dc_tos=q2c1id; Hm_lvt_6bcd52f51e9b3dce32bec4a3997715ac=1576041639; Hm_lpvt_6bcd52f51e9b3dce32bec4a3997715ac=1576041639; Hm_ct_6bcd52f51e9b3dce32bec4a3997715ac=6525*1*10_29485821600-1576041638364-588214; c-login-auto=1; firstDie=1; announcement=%257B%2522isLogin%2522%253Afalse%252C%2522announcementUrl%2522%253A%2522https%253A%252F%252Fblogdev.blog.csdn.net%252Farticle%252Fdetails%252F103053996%2522%252C%2522announcementCount%2522%253A0%252C%2522announcementExpire%2522%253A3600000%257D'
    }
    return ur.Request(url=url, headers=headers)

# 配置代理IP
def getProxyOpener():
    """Return a urllib opener used for all HTTP requests in this script.

    NOTE(review): despite the name, no proxy is configured — the proxy
    lookup (an http://api.ip.data5u.com dynamic-IP call feeding a
    ProxyHandler) was disabled, so this returns a plain default opener.
    Re-introduce a ProxyHandler here if proxying is needed again.
    """
    return ur.build_opener()



# Crawl each requested search-result page and save every linked article.
# Ensure the output directory exists before writing (open() would
# otherwise raise FileNotFoundError on the first article).
os.makedirs('blog', exist_ok=True)

for pn in range(pn_start, pn_end + 1):
    request = getRequest(
        "https://so.csdn.net/so/search/s.do?p=%s&q=%s&t=&viparticle=&domain=&o=&s=&u=&l=&f=" % (pn, kw)
    )
    response = getProxyOpener().open(request).read()

    # Parse the result page and extract the article links.
    href_s = le.HTML(response).xpath('//span[@class="down fr"]/../span[@class="link"]/a/@href')
    # Visit each link, extract its title, and save the raw HTML to disk.
    for idx, href in enumerate(href_s):
        response_blog = getProxyOpener().open(
            getRequest(href)
        ).read()
        # xpath() returns a *list* of text nodes; the original formatted the
        # whole list into the filename (producing e.g. "['title'].html").
        # Take the first match, with a unique fallback when no title is found.
        title_s = le.HTML(response_blog).xpath('//h1[@class="title-article"]/text()')
        title = title_s[0].strip() if title_s else 'untitled_%s_%s' % (pn, idx)
        # Replace characters that are illegal in file names (e.g. "/", ":").
        safe_title = re.sub(r'[\\/:*?"<>|]', '_', title)
        with open('blog/%s.html' % safe_title, 'wb') as f:
            f.write(response_blog)




