import itertools
import os

import urllib3

http = urllib3.PoolManager()

# Download a web page, with a bounded number of retries.
def download(url,user_agent='wswp',request_max=3):
    print("Starting Download, The URL is: {}".format(url))
    # 发起GET请求
    headers = {'User-agent':user_agent}
    response = http.request('GET', url,headers=headers)
    try:
        if response.status == 200:
            htmlfile = response.data  # 或者 response.data.decode('utf-8') 如果需要字符串
            # 在这里处理 htmlfile，比如保存到文件或进行解析等
            #html_content = response.data.decode('utf-8')
            return htmlfile  # 如果需要返回下载的内容
        else:
            print("遇到了错误，状态码是：{}".format(response.status))
            if request_max >= 0:
                download(url,user_agent,request_max-1)
    except urllib3.exceptions.HTTPError as e:
        print("An error occurred: {}".format(e))
    except Exception as ex:
        print("An error occurred: {}".format(ex))
    finally:
        response.release_conn()  # 确保资源被释放，如果需要的话

# Example usage: download('http://www.meetup.com')

def crawl_do(url):
    max_errors = 5
    num_errors = 0
    for page in itertools.count(1):
        target_url = url
        # 保存的路径
        file_path = 'D:/Crawl_Results'
        target_url = "{}/e0{}".format(target_url,page)
        print("Crawling Url==>{}".format(target_url))
        html = download(target_url)
        file_save_path = "{}/htmls/{}.html".format(file_path,page)
        if html == None:
            print("啥也没有")
            num_errors += 1
            if num_errors >= max_errors:
                break
        else:
            print("有了")
            with open(file_save_path,'wb') as file:
                file.write(html)
            num_errors = 0

crawl_do('http://www.spiderbuf.cn/playground')
