import os
import urllib.request as ur

import lxml.etree as le
import user_agent

# Collect the search keyword and the inclusive page range from the user.
keyword = input('请输入关键字：')
pn_start, pn_end = (int(input(prompt)) for prompt in ('起始页:', '结束页:'))


# 创建request对象
# Build a Request object for the given URL.
def getRequest(url):
    """Return a urllib Request for *url* carrying a randomized PC
    User-Agent and a fixed CSDN session Cookie (needed so the search
    pages render for a logged-in user)."""
    headers = {
        'User-Agent': user_agent.get_user_agent_pc(),
        'Cookie': 'uuid_tt_dd=10_37481512220-1569308388465-175433; dc_session_id=10_1569308388465.523847; __yadk_uid=7zbhC31P4CzO6Nlh8AEzaWd1LICJIU31; smidV2=2019092416110425e145c7547d37c2cc42cafd9820c76a006e41e5b2b9608f0; UserName=weixin_37865353; UserInfo=90fb3b34efc7418889ee545fd6456f93; UserToken=90fb3b34efc7418889ee545fd6456f93; UserNick=Jason_cd91; AU=2A4; UN=weixin_37865353; BT=1572178872858; p_uid=U100000; Hm_ct_6bcd52f51e9b3dce32bec4a3997715ac=6525*1*10_37481512220-1569308388465-175433!1788*1*PC_VC!5744*1*weixin_37865353; acw_tc=2760820915749300245292902e5151d6f3fe17c266a6b9a06a5ab1e708bd9e; Hm_lvt_6bcd52f51e9b3dce32bec4a3997715ac=1574903656,1574926220,1575460047; firstDie=1; TY_SESSION_ID=99202988-00ed-4b9e-90f9-7c6f89bc2db0; announcement=%257B%2522isLogin%2522%253Atrue%252C%2522announcementUrl%2522%253A%2522https%253A%252F%252Fblogdev.blog.csdn.net%252Farticle%252Fdetails%252F103053996%2522%252C%2522announcementCount%2522%253A0%252C%2522announcementExpire%2522%253A3600000%257D; acw_sc__v2=5de7d1010bcb0f16f32819fd78499598a873c8ba; dc_tos=q1zutp; Hm_lpvt_6bcd52f51e9b3dce32bec4a3997715ac=1575473102',
    }
    return ur.Request(url=url, headers=headers)

#创建opener对象
def getProxyOpener():
    proxy_address = ur.urlopen(
        'http://api.ip.data5u.com/dynamic/get.html?order=c0214fd6861dc41ef74765aa845a32b3&sep=4').read().decode(
        'utf-8').strip()
    proxy_handler = ur.ProxyHandler(
        {
            'http': proxy_address
        }
    )
    return ur.build_opener(proxy_handler)


for pn in range(pn_start, pn_end + 1):
    # Visit one CSDN search-result page per iteration.
    request = getRequest(
        'https://so.csdn.net/so/search/s.do?p=%s&q=%s&t=blog&viparticle=&domain=&o=&s=&u=&l=&f=&rbg=0'
        % (pn, keyword))

    try:
        response = getProxyOpener().open(request).read()
        # Hot-article links: the <span class="link"> sibling of a
        # <span class="down fr"> vote counter.
        href_s = le.HTML(response).xpath(
            '//span[@class="down fr"]/../span[@class="link"]/a/@href')
        print(href_s)

        # Download each hot article into the blog/ folder, named by its title.
        for href in href_s:
            try:
                response_blog = getProxyOpener().open(getRequest(href)).read()
                # Extract the article title.
                title = le.HTML(response_blog).xpath(
                    '//h1[@class="title-article"]/text()')[0]
                # Titles can contain characters that are illegal in file
                # names (e.g. '/', '?', ':'); strip them so open() cannot
                # fail or write outside blog/.
                safe_title = ''.join(
                    c for c in title if c not in '\\/:*?"<>|').strip()
                # Make sure the target folder exists before writing.
                os.makedirs('blog', exist_ok=True)
                with open('blog/%s.html' % safe_title, 'wb') as f:
                    f.write(response_blog)
            except Exception as e:
                # Best-effort: skip a failed article, keep the page going.
                print(e)
    except Exception as e:
        # Best-effort: skip a failed page, continue with the next one.
        print(e)