import os
import urllib.request as ur

import lxml.etree as le
import user_agent

# Interactive crawl parameters: the search keyword and an inclusive
# range of result pages to fetch (prompts are in Chinese: keyword,
# start page, end page).
keyword = input('请输入关键词:')
pn_start = int(input('起始页:'))
pn_end = int(input('终止页:'))

def getRequest(url):
    """Build an urllib Request for *url* carrying a randomized desktop
    User-Agent and a fixed CSDN session cookie, so the request looks
    like it comes from a logged-in browser."""
    request_headers = {
        'User-Agent': user_agent.get_user_agent_pc(),
        'Cookie': 'uuid_tt_dd=10_21018046110-1578648542486-322152; dc_session_id=10_1578648542486.617614; Hm_lvt_6bcd52f51e9b3dce32bec4a3997715ac=1578648543; Hm_ct_6bcd52f51e9b3dce32bec4a3997715ac=6525*1*10_21018046110-1578648542486-322152; announcement=%257B%2522isLogin%2522%253Afalse%252C%2522announcementUrl%2522%253A%2522https%253A%252F%252Fblog.csdn.net%252Fblogdevteam%252Farticle%252Fdetails%252F103603408%2522%252C%2522announcementCount%2522%253A0%252C%2522announcementExpire%2522%253A3600000%257D; SESSION=ad59e784-9bac-4496-93ee-18bb3afdcf97; acw_tc=2760822815786486504635917e2990463c5d66d8ce2225fbd31731813e8d4d; acw_sc__v2=5e18444aa7599283de0cf0ec351ac0e82b0cf574; acw_sc__v3=5e18444ae36f84f42c56066e406b497708a09d9f; TY_SESSION_ID=f20f1ba2-8c67-4a93-a857-d65b59c281b7; searchHistoryArray=%255B%2522python%2522%255D; utm_source=jiansuoso; dc_tos=q3vxbf; Hm_lpvt_6bcd52f51e9b3dce32bec4a3997715ac=1578648940; c-login-auto=2',
    }
    return ur.Request(url=url, headers=request_headers)

def getProxyOpener():
    """Fetch a fresh HTTP proxy address from the data5u API and return
    an opener that routes traffic through it.

    A new proxy is requested on every call, so each page/blog fetch in
    the crawl loop may go out through a different IP.
    """
    api_url = 'http://api.ip.data5u.com/dynamic/get.html?order=d231b3b0a57410b2c011f9fe5ba0159f&sep=3'
    # The API answers with the proxy "host:port" as plain text.
    proxy_address = ur.urlopen(api_url).read().decode('utf-8').strip()
    handler = ur.ProxyHandler({'http': proxy_address})
    return ur.build_opener(handler)


# Crawl every search-results page in [pn_start, pn_end], pull the first
# link of each result entry, download the linked blog post and save its
# raw HTML under blog/<title>.html.
for pn in range(pn_start, pn_end + 1):
    request = getRequest(
        'https://so.csdn.net/so/search/s.do?p=%s&q=%s&t=blog&domain=&o=&s=&u=&l=&f=&rbg=0' % (pn, keyword)
    )
    try:
        response = getProxyOpener().open(request).read()
        print(response)
        # First <a> inside each result container carries the article URL.
        href_s = le.HTML(response).xpath('//div[@class="limit_width"]/a[position()=1]/@href')
        print(href_s)
        for href in href_s:
            try:
                response_blog = getProxyOpener().open(
                    getRequest(href)
                ).read()
                title = le.HTML(response_blog).xpath('//h1[@class="title-article"]/text()')[0]
                print(title)
                # Article titles can contain characters that are illegal in
                # file names (e.g. '/', '?', ':'); strip them so open() below
                # cannot fail or escape the blog/ directory.
                safe_title = ''.join(c for c in title if c not in '\\/:*?"<>|').strip()
                os.makedirs('blog', exist_ok=True)  # ensure the target directory exists
                with open('blog/%s.html' % safe_title, 'wb') as f:
                    f.write(response_blog)
            except Exception as e:
                print(e)
    except Exception as e:
        # Was a bare `except: pass`, which also swallowed KeyboardInterrupt
        # and hid every page-level error; report it and move on to the next
        # page instead of failing silently.
        print(e)



