"""
十八、用python写一个网页归档程序（爬虫），把 http://quotes.toscrape.com/ 网站上的10个页面全部下载下来，保存到独立的文件中。

url地址列表：
http://quotes.toscrape.com/page/1/
http://quotes.toscrape.com/page/2/
http://quotes.toscrape.com/page/3/
http://quotes.toscrape.com/page/4/
http://quotes.toscrape.com/page/5/
http://quotes.toscrape.com/page/6/
http://quotes.toscrape.com/page/7/
http://quotes.toscrape.com/page/8/
http://quotes.toscrape.com/page/9/
http://quotes.toscrape.com/page/10/

"""
import requests


def fetch(url):
    """Download *url* and return the response body as text.

    Returns an empty string when the server answers with a non-success
    status code or when the request fails at the network level
    (connection error, timeout, ...), so callers always get a str back.
    """
    try:
        # A timeout keeps one stalled connection from hanging the
        # whole archiving run indefinitely.
        response = requests.get(url, timeout=10)
    except requests.RequestException:
        return ''
    return response.text if response.ok else ''


if __name__ == '__main__':
    # Pages 1-10 of quotes.toscrape.com; generate the list instead of
    # spelling out ten near-identical literals.
    urls = ['http://quotes.toscrape.com/page/%d/' % n for n in range(1, 11)]

    for url in urls:
        # URL shape is .../page/<n>/ so the page number is the
        # second-to-last path segment.
        page_num = url.split('/')[-2]
        print('fetching page %s ...' % page_num)
        html = fetch(url)
        fname = 'page_%s.html' % page_num
        # Context manager guarantees the handle is flushed and closed
        # even on error; explicit UTF-8 avoids UnicodeEncodeError on
        # platforms whose default encoding cannot represent the page.
        with open(fname, 'w', encoding='utf-8') as f:
            f.write(html)
