from urllib.request import Request, urlopen
from urllib.parse import urlencode
from fake_useragent import UserAgent


def get_html(url):
    """Fetch *url* over HTTP(S) and return the response body decoded as text.

    Sends a spoofed Chrome User-Agent header so the target site does not
    reject the scripted request.

    :param url: fully-formed URL string to request.
    :return: response body decoded with the default codec (UTF-8).
    :raises urllib.error.URLError: on network / HTTP failures.
    """
    headers = {
        # Spoof a browser UA; a bare urllib UA is commonly blocked.
        "User-Agent": UserAgent().chrome
    }
    request = Request(url, headers=headers)
    # Use a context manager so the underlying socket is closed
    # deterministically instead of leaking until GC.
    with urlopen(request) as response:
        return response.read().decode()


def save_html(filename, html_bytes):
    """Write HTML content to *filename* as UTF-8 bytes.

    Backward-compatible generalization: the original only accepted ``str``
    (despite the parameter name), crashing on actual ``bytes`` input; now
    both are handled.

    :param filename: destination path.
    :param html_bytes: page content as ``str`` or ``bytes``.
    """
    if isinstance(html_bytes, bytes):
        data = html_bytes
    else:
        # .encode() with no argument uses UTF-8 — made explicit here.
        data = html_bytes.encode("utf-8")
    with open(filename, "wb") as f:
        f.write(data)


def main():
    """Interactively download N pages of CSDN search results for a keyword.

    Prompts for a search term and a page count, then fetches each result
    page and saves it under ``D:\spider_file_content`` as an HTML file.
    """
    content = input("请输入要下载的内容：")
    num = input("请输入您要下载多少页：")
    base_url = "https://so.csdn.net/so/search/s.do?{}"
    for i in range(int(num)):
        # NOTE(review): the query uses p=i (0-based) while the filename says
        # 第 i+1 页 — confirm whether CSDN's "p" parameter is 0- or 1-based.
        query = urlencode({
            "p": i,
            "q": content,
        })
        # Raw f-string: the original "D:\spider..." literals relied on the
        # invalid escapes \s and \C passing through unchanged — that emits
        # SyntaxWarning on Python 3.12+ and is slated to become an error.
        # The resulting path is byte-identical to the original's.
        filename = rf"D:\spider_file_content\CSDN之{content}第{i + 1}页.html"
        print("正在下载" + filename)
        # get_html already returns str, so no str() round-trip is needed.
        html = get_html(base_url.format(query))
        save_html(filename, html)
        # print("=================================分隔符======================================================")


if __name__ == '__main__':
    main()
