import urllib.request as ur
import lxml.etree as le

def getRequest(url):
    """Build a urllib Request for *url* with a desktop-Chrome User-Agent.

    The fake UA header is needed because the target site rejects the
    default Python-urllib agent string.
    """
    ua = ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
          'AppleWebKit/537.36 (KHTML, like Gecko) '
          'Chrome/77.0.3865.75 Safari/537.36')
    return ur.Request(url=url, headers={'User-Agent': ua})

def getProxyHander():
    """Fetch one fresh proxy address from the rotation API and return an
    opener that routes requests through it.

    Returns:
        An ``OpenerDirector`` built around a ``ProxyHandler`` for the
        freshly fetched ``ip:port`` address.

    Note: each call consumes one address from the paid proxy API; callers
    intentionally invoke this per-request to rotate proxies.
    """
    # timeout so a dead proxy API cannot hang the whole script.
    proxyAddr = ur.urlopen(
        'http://t.11jsq.com/index.php/api/entry?method=proxyServer.generate_api_url&packid=0&fa=0&fetch_key=&qty=1&time=100&pro=&city=&port=1&format=txt&ss=1&css=&dt=1&specialTxt=3&specialJson=&usertype=16',
        timeout=10,
    ).read().decode('utf-8').strip()

    # BUG FIX: the original mapped only 'http', so the https:// URLs this
    # script actually fetches bypassed the proxy entirely.  Register the
    # address for both schemes.
    proxyHander = ur.ProxyHandler({
        'http': proxyAddr,
        'https': proxyAddr,
    })
    return ur.build_opener(proxyHander)

# --- script entry: search CSDN blogs for 'python' (page 1) and print each
# --- result article's title, fetching every page through a rotated proxy.
request = getRequest(
    'https://so.csdn.net/so/search/s.do?p=%s&q=%s&t=blog&domain=&o=&s=&u=&l=&f=&rbg=0' % (1, 'python')
)
response = getProxyHander().open(request).read()
html = le.HTML(response)
print('html==', html)   # e.g. html== <Element html at 0x12016d04ec8>
# Result links: the <a> inside span.link that shares a parent with span."down fr".
href_s = html.xpath('//span[@class="down fr"]/../span[@class="link"]/a/@href')
for href in href_s:
    # A fresh opener per request rotates the proxy address each time.
    response_blog = getProxyHander().open(getRequest(href)).read()
    titles = le.HTML(response_blog).xpath('//h1[@class="title-article"]/text()')
    # BUG FIX: pages without an h1.title-article (deleted post, captcha
    # interstitial) used to raise IndexError on [0]; skip them instead.
    if titles:
        print(titles[0])

