import urllib.error as error
import urllib.request as request
import re

def download(url, user_agent='wswp', num_retries=2):
    """Download *url* and return the response body as bytes, or None on failure.

    Server-side (HTTP 5xx) errors are retried up to *num_retries* times;
    client errors (4xx) and connection failures are not retried.

    Args:
        url: The URL to fetch.
        user_agent: Value sent in the ``User-agent`` request header.
        num_retries: Remaining retry attempts for 5xx responses.

    Returns:
        The raw response body as ``bytes``, or ``None`` if the download failed.
    """
    print('Downloading:' + url)
    headers = {'User-agent': user_agent}
    req = request.Request(url, headers=headers)
    try:
        html = request.urlopen(req).read()
    except error.URLError as e:
        # URLError is the parent of HTTPError, so this also covers plain
        # connection failures (DNS error, refused connection) that the
        # original let propagate and crash the crawler.
        print('Download error:', e.reason)
        html = None
        if num_retries > 0 and hasattr(e, 'code') and 500 <= e.code < 600:
            # Retry only transient server errors.  Forward user_agent:
            # the original recursion dropped it, silently reverting a
            # caller-supplied agent back to the default on every retry.
            return download(url, user_agent, num_retries - 1)
    return html


def crawl_sitemap(url):
    """Crawl a sitemap: fetch it, extract each ``<url>`` entry, download
    every linked page and append the pages to ``download/sitemap.txt``.

    Links that fail to download are skipped instead of aborting the crawl.

    Args:
        url: Location of the sitemap file.
    """
    # Download the sitemap file.
    sitemap = download(url)
    if sitemap is None:
        # Nothing to crawl if the sitemap itself could not be fetched.
        return
    # Decode before matching so the regex runs over real text; the
    # original ran it over str(bytes), i.e. the "b'...'" repr.
    links = re.findall(r'<url>(.*?)</url>',
                       sitemap.decode('utf-8', errors='replace'))
    # Open the output file once instead of re-opening it per link, and
    # avoid shadowing the `sitemap` variable with the file handle.
    with open('download/sitemap.txt', 'a') as out:
        for link in links:
            html = download(link)
            if html is None:
                continue  # skip failed downloads instead of raising TypeError
            # download() returns bytes; decode so the text-mode write works
            # (writelines(bytes) iterated ints and always raised TypeError).
            out.write(html.decode('utf-8', errors='replace'))
            out.write('\n\n')