import gzip
import os
import re

import chardet
import urllib3

# Shared connection pool reused by every request in this script.
http = urllib3.PoolManager()
# Root directory where crawl results are saved.
file_path = 'D:/Crawl_Results'
# Helper that downloads a web page (defined below).
def download(url, user_agent='wswp', request_max=3):
    """Download *url* via GET and return the raw response body as bytes.

    Retries up to *request_max* more times on a non-200 status code.
    Returns None when the download ultimately fails or an error occurs.

    :param url: address to fetch
    :param user_agent: value sent in the User-agent header
    :param request_max: number of retries remaining for non-200 responses
    """
    print("Starting Download, The URL is: {}".format(url))
    headers = {'User-agent': user_agent}
    response = None
    try:
        # The request itself must be inside the try: network failures raise
        # here, and the original code let them escape the handlers (and then
        # hit a NameError on `response` in finally).
        response = http.request('GET', url, headers=headers)
        if response.status == 200:
            # Raw bytes; the caller decides how (and whether) to decode.
            return response.data
        print("遇到了错误，状态码是：{}".format(response.status))
        if request_max > 0:
            # BUG FIX: return the retried result instead of discarding it —
            # previously a successful retry still yielded None to the caller.
            return download(url, user_agent, request_max - 1)
    except urllib3.exceptions.HTTPError as e:
        print("An error occurred: {}".format(e))
    except Exception as ex:
        print("An error occurred: {}".format(ex))
    finally:
        # Release the connection only if the request produced a response.
        if response is not None:
            response.release_conn()
    return None

#download('http://www.meetup.com')

def crawl_do(url):
    """Fetch a gzipped sitemap from *url*, save it, and mirror every <loc> link.

    Writes the decompressed sitemap to <file_path>/sitemap.xml and each linked
    page to <file_path>/htmls/<n>.html (n counting from 1).

    :param url: address of a .xml.gz sitemap
    """
    sitemap = download(url)
    if sitemap is None:
        print("这个网站没有sitemap，换个地方")
        return
    sitemap_decompress = gzip.decompress(sitemap)
    # Detect the character encoding of the decompressed sitemap bytes.
    encoding = chardet.detect(sitemap_decompress)['encoding']
    print("这个sitemap的编码是{}".format(encoding))
    with open("{}/sitemap.xml".format(file_path), "wb") as sitemapfile:
        sitemapfile.write(sitemap_decompress)
    # BUG FIX: the original `not encoding == None or encoding == ""` parsed as
    # `(not encoding == None) or (encoding == "")`, letting an empty-string
    # encoding reach decode() and raise LookupError. Truthiness covers both
    # None and "".
    if not encoding:
        print("这个sitemap里面啥都没有")
        return
    sitemap_str = sitemap_decompress.decode(encoding)
    links = re.findall('<loc>(.*?)</loc>', sitemap_str)
    if not links:
        print("一条链接也没有获取到")
        return
    # Ensure the output directory exists before writing pages into it; the
    # original crashed with FileNotFoundError when htmls/ was missing.
    os.makedirs("{}/htmls".format(file_path), exist_ok=True)
    for count, link in enumerate(links, start=1):
        print("链接==>{}".format(link))
        html = download(link)
        if html is not None:
            file_save_path = "{}/htmls/{}.html".format(file_path, count)
            with open(file_save_path, 'wb') as file:
                file.write(html)

# Script entry point: crawl Douban's gzipped sitemap and mirror its pages locally.
crawl_do('https://www.douban.com/sitemap_updated.xml.gz')
