import os
import re
import urllib.request as ur

import lxml.etree as le
import user_agent


def getResponse(url):
    """Fetch *url* and return the raw response body as bytes.

    Sends a randomized desktop User-Agent plus a fixed CSDN session cookie
    so the request looks like a logged-in browser.

    :param url: absolute URL to fetch.
    :return: response body as ``bytes``.
    :raises urllib.error.URLError: on network/HTTP failure.
    """
    req = ur.Request(
        url=url,
        headers={
            # BUG FIX: header key was misspelled 'Use-Agent', so the
            # randomized UA was never actually sent. Correct name is
            # 'User-Agent'.
            'User-Agent': user_agent.get_user_agent_pc(),
            'Cookie': 'TY_SESSION_ID=aefa1eb1-f1a4-4a9b-b040-84a99a9cd401; JSESSIONID=ACAECACADDB3D50617BDE494EFC83CCD; uuid_tt_dd=10_17002952040-1586952773224-673718; dc_session_id=10_1586952773224.248718; Hm_ct_6bcd52f51e9b3dce32bec4a3997715ac=6525*1*10_17002952040-1586952773224-673718!1788*1*PC_VC!5744*1*weixin_43731151; UserName=weixin_43731151; UserInfo=96920829ed624ae58cb2df8c587473e5; UserToken=96920829ed624ae58cb2df8c587473e5; UserNick=%E9%BB%AF%E6%9C%88; AU=CD7; UN=weixin_43731151; BT=1586952783205; p_uid=U100000; Hm_up_6bcd52f51e9b3dce32bec4a3997715ac=%7B%22islogin%22%3A%7B%22value%22%3A%221%22%2C%22scope%22%3A1%7D%2C%22isonline%22%3A%7B%22value%22%3A%221%22%2C%22scope%22%3A1%7D%2C%22isvip%22%3A%7B%22value%22%3A%221%22%2C%22scope%22%3A1%7D%7D; __gads=ID=3f781f113913a902:T=1589125144:S=ALNI_MbyVwIe2Cohr3BnjKkZNNaafss5NA; dc_sid=28b5c1599179560dacc7acd657d9c715; Hm_lvt_6bcd52f51e9b3dce32bec4a3997715ac=1589792986,1589793485,1589794488,1589801389; c_first_page=https%3A//edu.csdn.net/; c_first_ref=default; c_page_id=default; announcement=%257B%2522isLogin%2522%253Atrue%252C%2522announcementUrl%2522%253A%2522https%253A%252F%252Fbss.csdn.net%252Fm%252Ftopic%252Flive_recruit%253Futm_source%253Dannounce0515%2522%252C%2522announcementCount%2522%253A0%252C%2522announcementExpire%2522%253A3600000%257D; __guid=129686286.4044704218408068600.1589801824170.1943; c_ref=https%3A//blog.csdn.net/; searchHistoryArray=%255B%2522html%2522%252C%2522python%2522%252C%2522Python%2522%255D; c_utm_term=html; c_utm_medium=distribute.pc_search_result.none-task-blog-2%7Eblog%7Esobaiduweb%7Edefault-0-81708229; monitor_count=7; dc_tos=qaj0dp; Hm_lpvt_6bcd52f51e9b3dce32bec4a3997715ac=1589803694',
        },
    )
    # Close the connection deterministically instead of leaking the
    # HTTPResponse object returned by urlopen().
    with ur.urlopen(req) as response:
        return response.read()


if __name__ == '__main__':
    # Interactive parameters: search keyword and inclusive page range.
    context = input('请输入你需要查询的主题：')
    start_page = int(input('请输入需要查询的起始页码：'))
    end_page = int(input('请输入你需要查询的结束页码：'))

    # BUG FIX: the output directory must exist before the first open(),
    # otherwise open('blog/...', 'wb') raises FileNotFoundError.
    os.makedirs('blog', exist_ok=True)

    for page in range(start_page, end_page + 1):
        # Build the request and fetch the first-level search-result page.
        response = getResponse(
            url='https://so.csdn.net/so/search/s.do?p={page}&q={context}&t=blog&viparticle=&domain=&o=&s=&u=&l=&f=&rbg=0'.format(
                page=page, context=context),
        )
        # Extract the popular-blog URLs from the result page.
        hrefs = le.HTML(response).xpath(
            '//div[@class="search-list-con"]/dl//span[@class="mr16"]/../../dt/div/a[1]/@href')
        # Visit each second-level page (the blog post) and save it.
        for href in hrefs:
            # BUG FIX: handle errors per post instead of one broad
            # try/except around the whole crawl, so a single bad link
            # no longer aborts every remaining page and post.
            try:
                response_blog = getResponse(url=href)
                # BUG FIX: xpath() may return an empty list for pages
                # without a title-article heading; the original [0]
                # raised IndexError there. Skip such pages.
                titles = le.HTML(response_blog).xpath(
                    '//h1[@class="title-article"]/text()')
                if not titles:
                    continue
                # Strip characters that are not allowed in file names
                # (note: backslash must be escaped in the character class).
                title = re.sub(r'[/\\:*"<>|?]', '', titles[0])
                filepath = 'blog/%s.html' % title
                with open(filepath, 'wb') as f:
                    f.write(response_blog)
                print(title)
            except Exception as e:
                # Report the failing URL and continue with the next post.
                print(href, e)
