import urllib
import urlparse
import re
import os
import subprocess

# Website crawler: mirrors a site to disk,
# behaving like 'wget -rx -nc URL' (recursive, create dirs, no re-download).

def call(cmd):
    """Execute *cmd* through the shell; raise CalledProcessError on nonzero exit.

    NOTE(review): shell=True hands the string to /bin/sh, so callers must
    never pass untrusted input here (shell-injection risk).
    """
    subprocess.check_call(cmd, shell=True)

def retrieve_children(html):
    """Return every href value ending in .html found across the lines of *html*.

    *html* is an iterable of strings (one per line); matches are returned
    in document order.  Links split across a line boundary are not found.
    """
    return [link
            for line in html
            for link in re.findall(r'href="(.*?\.html)"', line)]

def get_content(url):
    while True:
        html = urllib.urlopen(url).readlines()
        success = True
        for line in html:
            if 'Error' in line:
                success = False
        if success:
            return html
        else:
            print 'retrying..'

def search(url, html, pattern):
    print 'searching in %s..' % url,
    flag = False
    for line in html:
        if re.search(pattern, line):
            flag = True
    if flag:
        print 'Match!'
    else:
        print

def save(html, path):
    call('mkdir -p %s' % os.path.dirname(path))
    f = open(path, 'w')
    f.writelines(html)
    f.close()
    print 'saved to %s' % path

class Spider:

    def __init__(self, home_url, outdir = None, pattern = None):
        self.home_url = home_url
        self.hostname = urlparse.urlparse(self.home_url).hostname
        if outdir is not None:
            self.outdir = outdir
        else:
            self.outdir = self.hostname
        self.pattern = pattern
        self.crawled = 0
        self.cache = {}

    def calpath(self, url):
        path = url.replace(self.home_url, self.outdir)
        if os.path.isdir(path):
            path = os.path.join(path, 'index.html')
        return path

    def crawl(self, url = None):
        if url is None:
            url = self.home_url
        try:
            self._crawl(url)
        except Exception, e:
            print 'ERROR:', e
        except KeyboardInterrupt:
            print '\rERROR: KeyboardInterrupt'
        except:
            print 'ERROR: unknown'
        print 'Total webpages crawled:', self.crawled

    def _crawl(self, url):
        if urlparse.urlparse(url).hostname != self.hostname:
            return
        path = self.calpath(url)
        if os.path.exists(path) and (self.pattern is None or not self.pattern.match(url)):
            print 'skipping %s.. (already crawled previously)' % url
            return
        if url in self.cache:
            print 'skipping %s.. (already crawled this time)' % url
            return
        self.cache[url] = 1

        print 'crawling %s..' % url
        html = get_content(url)
        save(html, path)
        self.crawled += 1

        for child in retrieve_children(html):
            self._crawl(urlparse.urljoin(url, child))

def main():
    """Mirror www.example.com into output/, always re-fetching never-skip.html."""
    never_skip = re.compile(r'http://www.example.com/never-skip.html')
    spider = Spider('http://www.example.com/',
                    outdir = 'output/',
                    pattern = never_skip)
    spider.crawl()

# Run the crawler only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
