import os
import sys
import requests
import re


class Spider:
    """Breadth-first crawler that collects page titles from one site.

    A single list doubles as the BFS queue: :meth:`crawl` iterates over
    ``self.urls`` while appending newly discovered links, so every URL is
    visited at most once.
    """

    def __init__(self, url, max_pages=11, out_dir='/tmp/docs'):
        """Seed the crawler.

        :param url: start page; its ``scheme://host/`` prefix limits which
            discovered links are followed.
        :param max_pages: stop after this many pages (default 11 preserves
            the original hard-coded ``idx == 10`` test limit).
        :param out_dir: directory where fetched pages are saved.
        """
        self.urls = [url]      # visited + pending URLs (BFS queue)
        self.result = []       # accumulated (title, url) pairs
        self.max_pages = max_pages
        self.out_dir = out_dir

    def crawl(self):
        """Fetch pages breadth-first, saving each page and recording its title."""
        os.makedirs(self.out_dir, exist_ok=True)  # original assumed the dir existed
        for idx, url in enumerate(self.urls):
            text = self.fetch_page(url)
            data, new_urls = self.parse(text)
            self.result.append((data, url))
            self.urls.extend(new_urls)  # deliberately grows the list being iterated
            bname = os.path.basename(url)
            # `with` closes the file; the original leaked the handle
            with open(os.path.join(self.out_dir, bname), 'w') as f:
                f.write(text)
            print('finished:', url)
            if idx + 1 >= self.max_pages:  # page limit (originally `if idx == 10`)
                break

    def fetch_page(self, url):
        """Return the body of *url*.

        :raises RuntimeError: on a non-200 response.  A real ``raise``
            replaces the original ``assert``, which is stripped under
            ``python -O``.
        """
        r = requests.get(url)
        if r.status_code != 200:
            raise RuntimeError("failed to fetch page")
        return r.text

    def parse(self, text):
        """Extract ``(title, new_urls)`` from an HTML page.

        :returns: the page ``<title>`` (Python-docs suffix stripped, '' if the
            page has no title) and the set of same-site, fragment-free,
            query-free links not already queued.
        """
        # [^>]*? keeps the match inside one tag and also accepts a bare
        # `<a href=...>` (the original `.*? href` required an attribute
        # before href and `href` to be the last attribute).
        pat = re.compile(r'<a [^>]*?href="([^"]+)"')
        new_urls = pat.findall(text)
        new_urls = set(x.partition('#')[0] for x in new_urls)  # drop fragments
        # scheme://host/ prefix of the seed, e.g. 'https://docs.python.org/'
        left = '/'.join(self.urls[0].split('/')[:3]) + '/'
        new_urls = set(left + x if not x.startswith('http') else x for x in new_urls)
        new_urls = set(x for x in new_urls if x.startswith(left))         # same site only
        new_urls = set(x for x in new_urls if not (set(x) & set('?&%')))  # skip query/escaped URLs
        new_urls = new_urls - set(self.urls)  # drop already-queued URLs
        titles = re.findall(r'<title>([^<]+)</title>', text)
        title = titles[0] if titles else ''   # original raised IndexError on a missing <title>
        title = title.replace(' &#8212; Python 3.6.2 documentation', '')
        return (title, new_urls)


if __name__ == '__main__':
    # Crawl the Python 3 documentation index and print every page found.
    start_url = 'https://docs.python.org/3/contents.html'
    crawler = Spider(start_url)
    crawler.crawl()
    for page_title, page_url in crawler.result:
        print(page_title, page_url)
