import urllib.request as ur

import ip_list
import user_agent
import lxml.etree as le

# https://so.csdn.net/so/search/s.do?p=2&q=python&t=blog&domain=&o=&s=&u=&l=&f=&rbg=0
'''
Return a Request object.
Accesses the URL using a header-spoofing strategy (fake User-Agent + cookie).
'''


def getRequest(url):
    """Build a urllib Request for *url* disguised as a desktop browser.

    A rotating desktop User-Agent (from the project's ``user_agent``
    helper) and a fixed CSDN session cookie are attached so the request
    looks like it comes from a logged-in browser session.
    """
    spoofed_headers = {
        'User-Agent': user_agent.get_user_agent_pc(),
        'Cookie': 'TY_SESSION_ID=a10a4b65-de90-4a8a-9206-8d9627be957e; JSESSIONID=62CFE8FF67EB70A0DAE5FA339E1B3FC0; uuid_tt_dd=10_36832054360-1516582551080-431999; ARK_ID=JS5ebee742bfd5f8a7ceaa734f048b9fe35ebe; _ga=GA1.2.795091608.1542351168; smidV2=201812141508172cadee73f3736e9bbdec1a81e5f736bd002b2a0dcb0fe8450; __yadk_uid=H7RFrwiqAyPDS98m581yaqVEt9KuTu2w; UM_distinctid=169800351632a6-014200b75daf85-4313362-144000-169800351651169; dc_session_id=10_1560126867411.936030; UserName=qq_35408086; UserInfo=f575835def074b56ac8183fb12729e76; UserToken=f575835def074b56ac8183fb12729e76; UserNick=qq_35408086; AU=884; UN=qq_35408086; BT=1563801032095; p_uid=U000000; Hm_ct_6bcd52f51e9b3dce32bec4a3997715ac=1788*1*PC_VC!5744*1*qq_35408086!6525*1*10_36832054360-1516582551080-431999; acw_tc=2760822215638464155398922e230fbc6cecaa50a8c583ce96408c48142f1e; Hm_lvt_eb5e3324020df43e5f9be265a8beb7fd=1566182075; Hm_ct_eb5e3324020df43e5f9be265a8beb7fd=5744*1*qq_35408086!6525*1*10_36832054360-1516582551080-431999; Hm_lvt_6bcd52f51e9b3dce32bec4a3997715ac=1566282847,1566282866,1566282874,1566282885; dc_tos=pwiwoe; Hm_lpvt_6bcd52f51e9b3dce32bec4a3997715ac=1566284271',
    }
    return ur.Request(url=url, headers=spoofed_headers)


'''
Return an opener object; calling .open(request) on it returns the response bytes.
'''


def getProxyHandler():
    """Return a urllib opener that routes HTTP traffic through a proxy.

    The proxy address is taken from the local database-backed IP pool
    (``ip_list.get_ip()``).  Call ``.open(request)`` on the returned
    opener to fetch a page through the proxy.

    Fix: the original version also issued a blocking HTTP request to the
    paid mogumiao proxy API on every call and then discarded the result
    (the fetched address was never used) — that dead, quota-consuming
    request has been removed.
    """
    proxy_address = ip_list.get_ip()
    # Debug trace of which proxy was picked from the pool.
    print('=====', proxy_address)
    proxy_handler = ur.ProxyHandler(
        {
            'http': proxy_address,
        }
    )
    return ur.build_opener(proxy_handler)


def main():
    """Prompt for a keyword and page range, then crawl CSDN blog search
    results through the proxy pool, printing each article title.

    Fixes over the original top-level script:
    - the bare ``except: pass`` (which also swallowed KeyboardInterrupt
      and hid every page-level failure) is narrowed to ``Exception`` and
      now reports the error;
    - the crawl no longer runs at import time — importing this module
      previously blocked on ``input()``.
    """
    keyword = input('输入关键词')
    pn_start = int(input('输入开始页数'))
    pn_end = int(input('输入结束页数'))

    for pn in range(pn_start, pn_end + 1):
        request = getRequest(
            'https://so.csdn.net/so/search/s.do?p=%s&q=%s&t=blog&domain=&o=&s=&u=&l=&f=&rbg=0' % (pn, keyword)
        )
        try:
            response = getProxyHandler().open(request).read()
            # Result links sit next to the <span class="down fr"> download badge.
            href_s = le.HTML(response).xpath('//span[@class="down fr"]/../span[@class="link"]/a/@href')
            for href in href_s:
                try:
                    response_blog = getProxyHandler().open(getRequest(href)).read()
                    title = le.HTML(response_blog).xpath('//h1[@class="title-article"]/text()')[0]
                    print(title)
                except Exception as e:
                    # One bad article must not abort the rest of the page.
                    print("=======", e)
        except Exception as e:
            # Page-level failure (proxy died, network error, parse failure):
            # report it and move on to the next page.
            print('page %s failed: %s' % (pn, e))


if __name__ == '__main__':
    main()
