import os
import urllib.request as ur

import lxml.etree as le
import user_agent


# Gather the search keyword and the inclusive page range to crawl.
keyword = input('请输入要查询的关键字：')
page_prompts = ('起始页码：', '终止页码：')
start_page, end_page = (int(input(prompt)) for prompt in page_prompts)

for page in range(start_page, end_page + 1):
    # CSDN blog-search results page for this keyword/page number.
    url = 'https://so.csdn.net/so/search/s.do?p=%s&q=%s&t=blog&domain=&o=&s=&u=&l=&f=&rbg=0' % (page, keyword)
    request = ur.Request(
        url=url,
        headers={
            # Randomized desktop User-Agent to avoid trivial bot blocking.
            'User-Agent': user_agent.get_user_agent_pc(),
        }
    )

    # NOTE(review): the proxy-pool API URL is missing — urlopen('') raises
    # ValueError. Fill in the real endpoint that returns a proxy address.
    # Bug fix: urlopen().read() returns bytes, so DECODE (the original
    # called .encode() on bytes, which is an AttributeError at runtime).
    proxy_address = ur.urlopen('').read().decode('utf-8').strip()
    proxy_handler = ur.ProxyHandler(
        {
            'http': proxy_address
        }
    )
    proxy_opener = ur.build_opener(proxy_handler)

    # Fetch the search results page through the proxy.
    response = proxy_opener.open(request).read()

    # Extract the links to the individual blog posts on this results page.
    href_s = le.HTML(response).xpath('//span[@class="down fr"]/../span[@class="link"]/a/@href')
    print(href_s)

    os.makedirs('blog', exist_ok=True)  # ensure the output directory exists
    for href in href_s:
        # Bug fix: fetch the blog post itself (href). The original re-opened
        # `request` (the search page), so every saved file was the search
        # results page rather than the blog post.
        blog_request = ur.Request(
            url=href,
            headers={
                'User-Agent': user_agent.get_user_agent_pc(),
            }
        )
        response_blog = proxy_opener.open(blog_request).read()
        title = le.HTML(response_blog).xpath('//h1[@class="title-article"]/text()')[0]
        print(title)
        # Replace path separators so the title is a safe filename component.
        safe_title = title.replace('/', '_')
        with open('blog/%s.html' % safe_title, 'wb') as f:
            f.write(response_blog)
