import lxml.etree as le
import urllib.request as ur
import urllib.parse as up
import urllib.error as ue
import os
import re


# 访问url得到response对象
# Open a URL and return the HTTP response object.
def getResponse(url):
    """Fetch *url* and return the ``http.client.HTTPResponse`` from urlopen.

    Sends a browser-like User-Agent plus a fixed CSDN session cookie so the
    request is treated as a logged-in browser visit.
    """
    request_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:74.0) Gecko/20100101 Firefox/74.0',
        'Cookie': 'uuid_tt_dd=10_30828397030-1585995362270-648854; dc_session_id=10_1585995362270.455629; Hm_lvt_6bcd52f51e9b3dce32bec4a3997715ac=1586093574,1586136416,1586136428,1586136436; Hm_ct_6bcd52f51e9b3dce32bec4a3997715ac=6525*1*10_30828397030-1585995362270-648854!5744*1*ielxd1989; __gads=ID=8446789263ead0aa:T=1586093576:S=ALNI_MYkNh4myO6u6zN34HwYMojNBR0G2w; UserName=ielxd1989; UserInfo=3d9fdcc4f2a9408bb6f8f5ef3e99c72c; UserToken=3d9fdcc4f2a9408bb6f8f5ef3e99c72c; UserNick=%E5%B9%B3%E5%B8%B8%E5%BF%83006; AU=15A; UN=ielxd1989; BT=1586094166682; p_uid=U000000; searchHistoryArray=%255B%2522csdn%2522%252C%2522java%2522%252C%2522ffff%2522%255D; dc_sid=a5f2dc23634e319772dc22e7cf63b4c9; Hm_lpvt_6bcd52f51e9b3dce32bec4a3997715ac=1586136535; TY_SESSION_ID=17b0f0c4-48b0-4dac-beba-4415b6144175; c_ref=http%3A//localhost%3A63342/%25E7%25AC%25AC%25E4%25BA%258C%25E7%25AB%25A0%25E4%25BD%259C%25E4%25B8%259A2/python%25E6%258A%2580%25E6%259C%25AF_articles/%2520Python%2520%25E6%258A%2580%25E6%259C%25AF%25E7%2582%25B9.html%3F_ijt%3Dcmjbrf2ueblp9kplbmge82hbbi; SESSION=cb9077d2-c383-4a28-ab0e-dfa3e7ba5f7f; dc_tos=q8ces6; announcement=%257B%2522isLogin%2522%253Atrue%252C%2522announcementUrl%2522%253A%2522https%253A%252F%252Fblog.csdn.net%252Fblogdevteam%252Farticle%252Fdetails%252F105203745%2522%252C%2522announcementCount%2522%253A0%252C%2522announcementExpire%2522%253A3600000%257D; utm_source=distribute.pc_search_result.none-task-blog-blog_SOOPENSEARCH-15'
    }
    return ur.urlopen(ur.Request(url=url, headers=request_headers))


def getXpathResult(resp_body_str, xpath_pattern):
    """Parse *resp_body_str* as HTML and return the nodes/values matching
    *xpath_pattern* (a list, as produced by lxml's ``xpath``)."""
    document = le.HTML(resp_body_str)
    return document.xpath(xpath_pattern)


def getUrlParam(page, kw):
    """Build the url-encoded query string for one CSDN blog-search page.

    *page* is the 1-based result page number, *kw* the search keyword; the
    remaining parameters are fixed, mostly-empty fields the site expects.
    """
    query_fields = [
        ('p', page),
        ('q', kw),
        ('t', 'blog'),
        ('viparticle', ''),
        ('domain', ''),
        ('o', ''),
        ('s', ''),
        ('u', ''),
        ('l', ''),
        ('f', ''),
        ('rbg', 0),
    ]
    return up.urlencode(query_fields)


if __name__ == '__main__':
    url_base = 'https://so.csdn.net/so/search/s.do?'
    kw = input('搜索关键字:')
    page_beg = int(input('起始页'))
    page_end = int(input('结束页'))

    # Directory that will hold the downloaded article pages.
    root_path = kw + "_articles"

    if not os.path.exists(root_path):
        os.makedirs(root_path, exist_ok=True)

    # A same-named non-directory entry would break the downloads below.
    if not os.path.isdir(root_path):
        print(root_path, 'exists, but not a dir')
        exit(1)

    for page in range(page_beg, page_end + 1):
        # Fetch one page of search results (level-1 page).
        url_param = getUrlParam(page, kw)
        print(url_param)
        search_result_page = getResponse(url_base + url_param)

        # Extract the URLs of articles that show a view counter
        # (the "mr16" span marks entries with read statistics).
        article_urls = getXpathResult(search_result_page.read().decode('utf-8'),
                                      '//span[@class="mr16"]/../../dt/div/a[1]/@href')

        for article_url in article_urls:
            try:
                article_page = getResponse(article_url)
            except ue.URLError as e:
                # URLError is the base class of HTTPError, so this also
                # survives DNS failures / refused connections, not just
                # HTTP error statuses, and moves on to the next article.
                print(e)
                continue

            resp_body_str = article_page.read().decode('utf-8')

            # Some pages (deleted articles, layout changes) lack the
            # expected <h1>; skip them instead of crashing on [0].
            titles = getXpathResult(resp_body_str, '//h1[@class="title-article"]/text()')
            if not titles:
                print('no title found, skipping', article_url)
                continue

            # Replace characters that are illegal in Windows file names.
            title = re.sub(r'[/:\\*"<>|,?]', ' ', titles[0])

            file_path = os.path.join(root_path, title + ".html")
            print(file_path)

            # Save the raw article HTML under its (sanitized) title.
            with open(file_path, 'w', encoding='utf-8') as f:
                f.write(resp_body_str)