import os
import re
import urllib.parse as up
import urllib.request as ur

import lxml.etree as le
import user_agent


def getRequest(url):
    """Build a ``urllib.request.Request`` for *url*.

    The request carries a randomized desktop User-Agent (from the
    ``user_agent`` package) and a fixed CSDN session cookie so the
    site serves the full page instead of an anti-bot challenge.
    """
    headers = {
        'User-Agent': user_agent.get_user_agent_pc(),
        'Cookie': 'acw_tc=2760823115711433602705780e725725395657e75df5b8cd6c6bbef98fe835; uuid_tt_dd=10_19289830350-1571143360784-741305; dc_session_id=10_1571143360784.456860; acw_sc__v2=5da5bec036444cdf8c364f3514c54b69c7c253c8; Hm_lvt_6bcd52f51e9b3dce32bec4a3997715ac=1571143363; Hm_ct_6bcd52f51e9b3dce32bec4a3997715ac=6525*1*10_19289830350-1571143360784-741305; firstDie=1; dc_tos=pzf2yj; Hm_lpvt_6bcd52f51e9b3dce32bec4a3997715ac=1571144636; c-login-auto=4',
    }
    return ur.Request(url=url, headers=headers)


# Fetch the response body for a prepared request.
def getResponse(request):
    """Open *request*, read the whole body and return it decoded as UTF-8.

    Uses a ``with`` block so the underlying HTTP connection is always
    closed, even if ``read``/``decode`` raises — the original version
    leaked the socket on every call.
    """
    with ur.urlopen(request) as response:
        return response.read().decode('utf-8')


keyword = input('请输入要查询的关键字：')
start_page = int(input('起始页码：'))
end_page = int(input('终止页码：'))

# Make sure the output directory exists before any page is written.
os.makedirs('blog', exist_ok=True)

for page in range(start_page, end_page + 1):

    # Search-result page URL for this keyword/page. The keyword is
    # percent-encoded: interpolating a raw non-ASCII keyword (the prompt
    # expects Chinese input) raises UnicodeEncodeError inside urlopen.
    url = 'https://so.csdn.net/so/search/s.do?p=%s&q=%s&t=blog&domain=&o=&s=&u=&l=&f=&rbg=0' % (page, up.quote(keyword))
    print('url=', url)

    # Download the search-result page.
    item_request = getRequest(url)
    item_response = getResponse(item_request)

    # Extract the URL of each article listed on the result page.
    href_s = le.HTML(item_response).xpath('//span[@class="down fr"]/../span[@class="link"]/a/@href')
    print('href_s=', href_s)

    # Download every article and save it under blog/<title>.html.
    for href in href_s:
        try:
            blog_request = getRequest(href)
            blog_response = getResponse(blog_request)

            title = le.HTML(blog_response).xpath('//h1[@class="title-article"]/text()')[0]
            print('title=', title)

            # Replace characters that are illegal in file names; a title
            # containing e.g. '/' would otherwise crash open().
            safe_title = re.sub(r'[\\/:*?"<>|]', '_', title)
            with open('blog/%s.html' % safe_title, 'w', encoding='utf-8') as f:
                f.write(blog_response)

        except Exception as e:
            # Best-effort scrape: report the failure and continue with
            # the next article instead of aborting the whole run.
            print(e)