# Standard library
import os
import re
import urllib.request as ur

# Third-party
import lxml.etree as le

url = 'https://so.csdn.net/so/search/s.do?p={}&q={}&t=&viparticle=&domain=&o=&s=&u=&l=&f='.format(2, 'java')


def getRequest(url):
    """Build a urllib Request for *url* carrying a desktop-browser User-Agent.

    :param url: absolute URL to request.
    :return: a ``urllib.request.Request`` with the UA header set.
    """
    return ur.Request(
        url=url,
        headers={
            # BUG FIX: the header key was 'User_Agent' (underscore) — that is
            # not the standard 'User-Agent' header, so servers ignored the
            # browser spoof. Use the hyphenated header name.
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36'
        }
    )


def getProxyOpener():
    """Fetch a fresh proxy address from the data5u API and wrap it in an opener.

    :return: a ``urllib.request.OpenerDirector`` routing traffic through the
        fetched proxy.

    NOTE(review): the API response is assumed to be a bare ``host:port`` line;
    no validation is performed — confirm against the provider's format.
    """
    proxy_address = ur.urlopen('http://api.ip.data5u.com/dynamic/get.html?order=844f5d97771f73ced2136f0f85c1125d&sep=3').read().decode('utf-8').strip()
    proxy_handler = ur.ProxyHandler(
        {
            # BUG FIX: only 'http' was mapped, so the https:// URLs this
            # script actually crawls bypassed the proxy entirely. Map both
            # schemes to the same proxy endpoint.
            'http': proxy_address,
            'https': proxy_address,
        }
    )
    return ur.build_opener(proxy_handler)

if __name__ == '__main__':
    keyword = input('搜索词:')
    pn_start = int(input('起始:'))
    pn_end = int(input('终止:'))

    # BUG FIX: the crawl loop below was dedented OUTSIDE this guard, so merely
    # importing the module raised NameError on pn_start. It now runs only when
    # executed as a script.
    # BUG FIX: the output directory was never created, so the open() below
    # failed with FileNotFoundError on a fresh checkout.
    os.makedirs('blog', exist_ok=True)

    for page in range(pn_start, pn_end + 1):
        # Level-1 page: one search-results page for this keyword.
        request = getRequest(
            url='https://so.csdn.net/so/search/s.do?p={}&q={}&t=&viparticle=&domain=&o=&s=&u=&l=&f='.format(page, keyword)
        )
        try:
            response = getProxyOpener().open(request).read()
            # Article links: anchor inside each result entry that also has a
            # "mr16" span sibling (filters out ads/non-article rows).
            hrefs = le.HTML(response).xpath('//div[@class="search-list-con"]/dl//span[@class="mr16"]/../../dt/div/a[1]/@href')
            for href in hrefs:
                try:
                    # Level-2 page: the blog post itself.
                    response_blog = getProxyOpener().open(getRequest(href)).read()
                    title = le.HTML(response_blog).xpath('//h1[@class="title-article"]/text()')[0]
                    # Strip characters that are illegal in filenames.
                    title = re.sub(
                        r'[/\\:*"<>|?]', '', title
                    )
                    print(title)
                    filepath = 'blog/%s.html' % title
                    with open(filepath, 'wb') as f:
                        f.write(response_blog)
                except Exception as e:
                    print(e)
        except Exception as e:
            # BUG FIX: was a bare `except: pass`, which silently swallowed every
            # failure (including KeyboardInterrupt). Catch only Exception and
            # report the failed page so crawling can continue.
            print('page %s failed: %s' % (page, e))
