import os
import re
import urllib.parse
import urllib.request as ur

import lxml.etree as le
import user_agent

# Interactive configuration: search keyword and the (inclusive) page range to crawl.
keyword = input('请输入关键词:')  # search keyword typed by the user
pn_start = int(input('起始页:'))  # first result page to fetch (inclusive); raises ValueError on non-numeric input
pn_end = int(input('终止页:'))  # last result page to fetch (inclusive)

def getRequest(url):
    """Build a urllib Request for *url* carrying a randomized desktop User-Agent header."""
    headers = {'User-Agent': user_agent.get_user_agent_pc()}
    return ur.Request(url=url, headers=headers)

def _safe_filename(title):
    """Replace characters that are illegal in file names so a blog title can be used as one."""
    return re.sub(r'[\\/:*?"<>|]', '_', title).strip()


# Make sure the output directory exists before writing any blog page.
os.makedirs('blog', exist_ok=True)

for pn in range(pn_start, pn_end + 1):  # iterate over the requested result pages
    request = getRequest(
        # percent-encode the keyword: urlopen raises UnicodeEncodeError on a
        # non-ASCII URL, and the expected keywords are Chinese
        'https://so.csdn.net/so/search/s.do?p=%s&q=%s&t=blog&domain=&o=&s=&u=&l=&f=&rbg=0'
        % (pn, urllib.parse.quote(keyword))
    )
    try:
        response = ur.urlopen(request).read()  # raw HTML of the search-result page
        # extract the blog-post links from the result page
        href_s = le.HTML(response).xpath(
            '//dl/dd[@class="author-time"]/span[@class="mr16"]/../../dt/div/a/@href'
        )
        # NOTE(review): the original kept only every second link — presumably the
        # result list contains each link twice; preserved as-is, confirm against the page.
        for k, href in enumerate(href_s, start=1):
            if k % 2 == 0:
                try:
                    # fetch the blog post itself
                    response_blog = ur.urlopen(getRequest(href)).read()
                    # the post title, used as the output file name
                    title = le.HTML(response_blog).xpath(
                        '//h1[@class="title-article"]/text()'
                    )[0]
                    print(title)
                    # sanitize the title: a '/' or similar in it would break open()
                    with open('blog/%s.html' % _safe_filename(title), 'wb') as f:
                        f.write(response_blog)  # save the raw HTML of the post
                except Exception as e:
                    # best effort: one failing post must not abort the whole page
                    print(e)
    except Exception as e:
        # best effort: one failing page must not abort the remaining pages
        print(e)



