import urllib.request as ur
import urllib.parse as up
import lxml.etree as le

# HTTP request headers sent with every request to CSDN.
# The User-Agent/Accept values imitate a desktop Chrome browser so the
# server returns the normal HTML pages instead of blocking the scraper.
# NOTE(review): the Cookie value is a captured CSDN browser session —
# presumably required to pass anti-bot checks; it is time-limited and
# will need to be refreshed manually when requests start failing.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/80.0.3987.149 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,'
              'application/signed-exchange;v=b3;q=0.9',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Cookie': 'TY_SESSION_ID=5677dd93-0b41-4f88-bada-6ffbbb890628; JSESSIONID=7F2C1D74CFF868DEE5C9A2C91860C7E6; '
              'dc_sid=7a6915c9a1ee40bb55bce5268dbd083f; '
              'c_ref=https%3A//blog.csdn.net/xufive/article/details/105215593; '
              'Hm_lpvt_6bcd52f51e9b3dce32bec4a3997715ac=1585811082; '
              'Hm_ct_6bcd52f51e9b3dce32bec4a3997715ac=6525*1*10_29367302380-1585105204028-498288; '
              'uuid_tt_dd=10_29367302380-1585105204028-498288; dc_tos=q85fnt; dc_session_id=10_1585105204028.397212; '
              'c-login-auto=10; searchHistoryArray=%255B%2522c%2523%25E8%25AF%25AD%25E8%25A8%2580%2522%252C'
              '%2522Python%25E8%25BF%259B%25E9%2598%25B6%25E4%25B9%258B%25E8%25B7%25AF%2522%252C%2522python%2522%252C'
              '%2522dfadfg%2522%255D; __gads=ID=272602d8edcca5b0:T=1585105206:S=ALNI_Mb9WjR7aQqxV6wDsbkMO-0SpZFb7g; '
              'c-toolbar-writeguide=1; announcement=%257B%2522isLogin%2522%253Afalse%252C%2522announcementUrl%2522'
              '%253A%2522https%253A%252F%252Fblog.csdn.net%252Fblogdevteam%252Farticle%252Fdetails%252F105203745%2522'
              '%252C%2522announcementCount%2522%253A1%252C%2522announcementExpire%2522%253A26754308%257D; '
              'Hm_lvt_6bcd52f51e9b3dce32bec4a3997715ac=1585105207,1585798830,1585808382',
}


# Characters that are invalid in Windows filenames, each mapped to '_'.
# One translate() pass replaces the nine chained .replace() calls.
_FILENAME_SANITIZER = str.maketrans({c: '_' for c in '"\\/*?:<>|'})


def save_blog_detail(url):
    """Download one blog article and save its raw HTML to '<title>.html'.

    Args:
        url: absolute URL of the blog article page.

    Side effects:
        Writes a UTF-8 '.html' file in the current directory, named after
        the article's <h1 class="title-article"> text (sanitized for use
        as a filename), and prints a status line.
    """
    request_blog = ur.Request(url=url, method='GET', headers=headers)
    # Close the HTTP response promptly instead of leaking the connection.
    with ur.urlopen(request_blog) as response:
        html_blog = response.read().decode('utf-8')
    # Parse the HTML so the article title can be extracted via XPath.
    html_blog_x = le.HTML(html_blog)
    titles = html_blog_x.xpath('//h1[@class="title-article"]/text()')
    if not titles:
        # Not an article page (or the site layout changed) — skip rather
        # than crash with an IndexError on titles[0].
        print('[' + url + '] no article title found, skipped.')
        return
    blog_title = titles[0].translate(_FILENAME_SANITIZER)
    # Save the raw page; 'with' guarantees the file is closed even if
    # write() raises (the original open()/close() pair leaked on error).
    with open(blog_title + '.html', 'w', encoding='utf-8') as f:
        f.write(html_blog)
    print('[' + blog_title + '.html] save success!')


if __name__ == '__main__':
    word = input('请输入查询关键字：')
    # Build the CSDN blog-search query string (page 1, type 'blog').
    query = {'p': 1, 'q': word, 't': 'blog', 'viparticle': '', 'domain': '', 'o': '', 's': '', 'u': '', 'l': '',
             'f': '', 'rbg': 0}
    search_url = 'https://so.csdn.net/so/search/s.do?' + up.urlencode(query)
    request = ur.Request(url=search_url, method='GET', headers=headers)
    # Fetch the search-result page; 'with' closes the response promptly.
    with ur.urlopen(request) as response:
        html = response.read().decode('utf-8')
    html_x = le.HTML(html)
    # Each search hit lives in a div.limit_width; slicing to the first 10
    # replaces the original manual count/break bookkeeping (same limit).
    for div in html_x.xpath('//div[@class="limit_width"]')[:10]:
        # The hit's first <a> links to the article itself.
        hrefs = div.xpath('./a[1]/attribute::href')
        if not hrefs:
            # Malformed hit without a link — skip instead of IndexError.
            continue
        # Download and save the article's HTML.
        save_blog_detail(hrefs[0])
