#!/usr/local/bin/python3
# -*- coding: utf-8 -*-

import logging
import os
import signal
import time

import bs4
import requests

# Restore the default SIGINT behavior so Ctrl-C exits quietly
# instead of printing a KeyboardInterrupt traceback.
signal.signal(signal.SIGINT, signal.SIG_DFL)

# Root of the site being scraped; relative "prev" hrefs are joined onto this.
base_url = 'http://xkcd.com'
# Destination directory for downloaded images; no-op if it already exists.
os.makedirs('xkcd', exist_ok=True)
# Timestamped progress messages on the root logger at INFO level.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s')


def save_image(img_url):
    """Download *img_url* into the ``xkcd`` directory.

    The local file name is the last path component of the URL.  Files that
    already exist locally are skipped silently (resume-friendly).

    Raises:
        requests.HTTPError: if the server responds with an error status.
    """
    file_name = os.path.join('xkcd', os.path.basename(img_url))
    if os.path.exists(file_name):
        # Already downloaded on a previous run — skip without logging
        # a misleading "download" message.
        return
    # Lazy %-style args: the message is only formatted if INFO is enabled.
    logging.info('download image %s to file %s', img_url, file_name)
    # stream=True so the body is fetched in chunks as we write, instead of
    # being buffered entirely in memory before iter_content() runs.
    img_res = requests.get(img_url, stream=True)
    img_res.raise_for_status()
    with open(file_name, 'wb') as f:
        for chunk in img_res.iter_content(1 << 10):
            f.write(chunk)


def download(start_url):
    """Walk the comic archive backwards from *start_url*, yielding image URLs.

    Starting at *start_url*, each page's comic image URL is yielded, then the
    "Prev" link is followed.  Iteration stops when the Prev href is ``#``
    (the first comic) or when no Prev link is present at all.

    Args:
        start_url: site root, e.g. ``http://xkcd.com``.

    Yields:
        str: absolute URL of each comic image found.

    Raises:
        requests.HTTPError: if a page request returns an error status.
    """
    down_url = start_url
    while not down_url.endswith('#'):
        logging.info('download page %s ...', down_url)
        res = requests.get(down_url)
        res.raise_for_status()

        # take a break to be more like a human
        time.sleep(2)

        soup = bs4.BeautifulSoup(res.text, 'html5lib')
        img_elem = soup.select('#comic img')
        if not img_elem:
            logging.warning('could not find comic image.')
        else:
            # The src attribute is protocol-relative ("//imgs.xkcd.com/...");
            # prepend a scheme to make it fetchable.
            yield 'http:' + img_elem[0].get('src')

        # take a break to be more like a human
        time.sleep(2)

        # Follow the Prev button; stop cleanly if the page has none
        # instead of crashing with an IndexError on [0].
        prev_links = soup.select('a[rel="prev"]')
        if not prev_links:
            logging.warning('no prev link found on %s; stopping.', down_url)
            return
        down_url = start_url + prev_links[0].get('href')


# Crawl the whole archive, persisting every comic image we find.
for comic_url in download(base_url):
    save_image(comic_url)
