import requests, os, bs4

# Download every xkcd comic image by walking the "prev" links from the
# front page back to comic #1 (whose prev href is just '#').
url = "https://xkcd.com"
os.makedirs("xkcd", exist_ok=True)  # don't crash on re-run if dir exists
while not url.endswith('#'):  # comic #1's prev link points to '#'
    print("downloading page %s ..." % url)
    res = requests.get(url)
    res.raise_for_status()
    # Name the parser explicitly: avoids bs4's GuessedAtParserWarning and
    # keeps parsing consistent across machines.
    soup = bs4.BeautifulSoup(res.text, "html.parser")

    comicElm = soup.select('#comic img')
    if not comicElm:
        # Some pages (e.g. interactive comics) have no plain <img> here.
        print('could not find comic image.')
    else:
        # src is protocol-relative ("//imgs.xkcd.com/..."); use https to
        # match the site and skip the http->https redirect.
        comicUrl = "https:" + comicElm[0].get("src")
        print("downloading image %s..." % comicUrl)
        res = requests.get(comicUrl)
        res.raise_for_status()
        # 'with' guarantees the handle is closed even if a write raises.
        with open(os.path.join('xkcd', os.path.basename(comicUrl)), 'wb') as imagefile:
            for chunk in res.iter_content(10000):
                imagefile.write(chunk)

    # BUG FIX: this lookup used to live inside the else branch, so a page
    # without a comic image never advanced `url` and looped forever.
    preVlink = soup.select('a[rel="prev"]')[0]
    url = "https://xkcd.com" + preVlink.get('href')
    print(url)