import os

import urllib3
# Define the web-page download helper
def download(url, user_agent='Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:128.0) Gecko/20100101 Firefox/128.0', request_max=3,
             file_path='D:\\Crawl_Results\\2023top.html'):
    """Download *url* via a GET request and save the HTML to *file_path*.

    Parameters:
        url:         address of the page to fetch.
        user_agent:  value sent in the ``User-agent`` request header.
        request_max: number of remaining retries on transient (5xx) errors.
        file_path:   local file the decoded HTML is written to
                     (defaults to the original hard-coded location).

    Returns:
        The raw response body as ``bytes`` on success, ``None`` on failure.
    """
    print("Starting Download, The URL is: {}".format(url))
    http = urllib3.PoolManager()
    # Bind up front so the finally-clause never hits a NameError when
    # http.request() itself raises before a response exists.
    response = None
    try:
        headers = {'User-agent': user_agent}
        response = http.request('GET', url, headers=headers)
        if response.status == 200:
            html_bytes = response.data
            # Decode to text so the file is written with explicit UTF-8.
            html_content = html_bytes.decode('utf-8')
            # Create the destination directory if it does not exist yet.
            os.makedirs(os.path.dirname(file_path), exist_ok=True)
            with open(file_path, 'w', encoding='utf-8') as file:
                file.write(html_content)
            return html_bytes
        print("遇到了错误，状态码是：{}".format(response.status))
        # Retry only on server-side (5xx) errors: 4xx responses are
        # permanent and retrying them is futile.  `> 0` makes request_max
        # the true number of retries (the old `>= 0` performed one extra).
        if request_max > 0 and 500 <= response.status < 600:
            # Propagate the retry's result — previously it was discarded,
            # so a successful retry still returned None to the caller.
            return download(url, user_agent, request_max - 1, file_path)
        return None
    except urllib3.exceptions.HTTPError as e:
        print("An error occurred: {}".format(e))
    except Exception as ex:
        print("An error occurred: {}".format(ex))
    finally:
        # Release the connection back to the pool only if we ever got one.
        if response is not None:
            response.release_conn()

if __name__ == "__main__":
    # Run the crawl only when executed as a script, not when imported.
    seed_url2 = "https://movie.douban.com/annual/2023/"
    download(seed_url2)
