import requests, os, bs4

# Download every xkcd comic image by following each page's "prev" link,
# starting from the newest comic and stopping at comic #1 (whose prev
# link's href is '#').
url = 'https://xkcd.com/'
# exist_ok avoids the check-then-create race of os.path.exists + os.mkdir.
os.makedirs('xkcd', exist_ok=True)
while not url.endswith('#'):
    # Download the page.
    print('downloading page %s...' % url)
    res = requests.get(url)
    # Abort the crawl on any HTTP error status.
    res.raise_for_status()

    # Name the parser explicitly: a bare BeautifulSoup(res.text) emits a
    # GuessedAtParserWarning and may pick different parsers per machine.
    soup = bs4.BeautifulSoup(res.text, 'html.parser')

    # Find the comic image's URL on the page.
    comicElem = soup.select('#comic img')
    if comicElem == []:
        # Some comics (interactive/flash) have no <img>; skip them.
        print('not find')
    else:
        # src is scheme-relative ('//imgs.xkcd.com/...'), so prepend the scheme.
        comicUrl = 'https:' + comicElem[0].get('src')

        # Download the image.
        print('downloading img %s....'%(comicUrl))
        res = requests.get(comicUrl)
        res.raise_for_status()

        # Save the image; 'with' guarantees the handle is closed even if a
        # write raises (the original leaked the file object on error).
        with open(os.path.join('xkcd', os.path.basename(comicUrl)), 'wb') as imgFile:
            for chunk in res.iter_content(100000):
                imgFile.write(chunk)
    # Follow the "previous comic" button; on comic #1 its href is '#',
    # which terminates the while loop above.
    prevLink = soup.select('a[rel="prev"]')[0]
    url = 'https://xkcd.com'+prevLink.get('href')
print('done')