import urllib.request as ur
import lxml.etree as le
import user_agent
# Interactive CLI parameters: the search keyword and the inclusive page
# range of CSDN search-result pages to crawl.
keyword=input('输入关键词')  # search keyword (prompt: "enter keyword")
pn_start=int(input('起始页'))  # first results page, 1-based (prompt: "start page")
pn_end=int(input('终止页'))  # last results page, inclusive (prompt: "end page")
def get_request(url):
    """Build an urllib Request for *url* with a randomized desktop User-Agent.

    A fresh UA string is drawn from the ``user_agent`` package on every
    call so consecutive requests do not share one fingerprint.
    """
    headers = {'User-Agent': user_agent.get_user_agent_pc()}
    return ur.Request(url=url, headers=headers)
def getproxyopener(api_url='http://api.ip.data5u.com/dynamic/get.html?order=9517ca30b02cf3af19aa4283ba88898e&sep=4'):
    """Return an urllib opener that routes HTTP traffic through a proxy.

    The proxy address is fetched from a rotating-proxy API on every call,
    so successive openers may exit through different IPs.

    :param api_url: endpoint returning one ``host:port`` proxy address as
        plain text (generalized from the previously hard-coded data5u URL;
        default preserves the old behavior).
    :returns: an ``OpenerDirector`` with an ``http`` ProxyHandler installed.
    :raises urllib.error.URLError: if the proxy API is unreachable or times out.
    """
    # timeout prevents a dead proxy API from hanging the whole crawl
    proxy_add = ur.urlopen(api_url, timeout=10).read().decode('utf-8').strip()
    proxy_handler = ur.ProxyHandler(
        {
            'http': proxy_add
        }
    )
    return ur.build_opener(proxy_handler)

# Crawl each requested search-results page, then download every blog post
# linked from it and save the raw HTML to a local file.
for pn in range(pn_start,pn_end+1):
    url='https://so.csdn.net/so/search/s.do?p=%s&q=%s&t=blog&viparticle=&domain=&o=&s=&u=&l=&f=&rbg=0'%(pn,keyword)
    request=get_request(url)
    try:
        response=getproxyopener().open(request).read()
        # first link under each <dt> entry is the blog-post URL
        # (original note: prints all blogs; no notion of "hot" vs not)
        href_s=le.HTML(response).xpath('//dt/div/a[1]/@href')
        for href in href_s:
            try:
                response_blog = getproxyopener().open(
                    get_request(href)
                ).read()
                title = le.HTML(response_blog).xpath('//h1[@class="title-article"]/text()')[0]
                print(title)
                # titles may contain '/', '?', ':' etc., which are illegal
                # in file names and would make open() fail — strip them
                safe_title = ''.join(c for c in title if c not in '\\/:*?"<>|').strip()
                with open('blog%s.html' % safe_title, 'wb') as f:
                    f.write(response_blog)
            except Exception as e:
                # one broken post must not abort the rest of the page
                print(e)
    except Exception as e:
        # was a bare `except: pass`, which silently hid all errors and even
        # trapped KeyboardInterrupt; log and continue with the next page
        print('page %s failed: %s' % (pn, e))
