import urllib.parse as up
import lxml.etree as le
from get_url_response import geturl

def sanitize_filename(title):
    """Return *title* with characters illegal in filenames replaced by spaces.

    Bug fix: the original code called
    ``str(title).replace(":;\\n\\\\/*\\"\\'<>?|", ' ')``, which only replaces
    the whole 12-character sequence when it appears verbatim — i.e. never.
    Each illegal character must be translated individually, which is what
    ``str.translate`` does in a single C-level pass.
    """
    illegal = ':;\n\\/*"\'<>?|'
    return str(title).translate(str.maketrans(illegal, ' ' * len(illegal)))


if __name__ == '__main__':
    # Keyword to search CSDN blogs for (prompt is user-facing Chinese text).
    word = input('请输入关键词：')
    data = {
        'q': word,    # search query
        't': 'blog',  # restrict results to blog posts
        'o': '',
        'u': '',
    }
    data_url = up.urlencode(data)

    # Crawl the user-specified page range of search results.
    start_page = int(input('请输入起始页码：'))
    end_page = int(input('请输入终止页码：'))
    for pg in range(start_page, end_page + 1):
        url = 'https://so.csdn.net/so/search/s.do?p={}&{}'.format(pg, data_url)
        try:
            response = geturl(url)

            # The first <a> of each search-result entry holds the article URL.
            href_s = le.HTML(response).xpath(
                '//div[contains(@class,"search-list-con")]/dl/dt/div/a[1]/@href')
            for href in href_s:
                blog_page = geturl(href)
                title = le.HTML(blog_page).xpath(
                    '//h1[@class="title-article"]/text()')[0]
                filename = 'blog_' + sanitize_filename(title) + '.html'
                # NOTE(review): 'wb' assumes geturl returns bytes — confirm
                # against get_url_response.geturl.
                with open(filename, 'wb') as f:
                    f.write(blog_page)

                print(filename)
        except Exception as exc:
            # Was a bare `except:` that swallowed every error (including
            # KeyboardInterrupt) without a trace; surface what went wrong.
            print("Errors occurred!", exc)

    print("End of the list!")

