#!/usr/bin/env python
#coding:utf-8
#A website crawler program, from "Core Python Programming"
#//wyatt@2011/04/10 18:23:52

from sys import argv
from os import makedirs, unlink, sep
from os.path import dirname, exists, isdir, splitext
from htmllib import HTMLParser
from string import replace, find, lower
from urlparse import urlparse, urljoin
from formatter import DumbWriter, AbstractFormatter
from cStringIO import StringIO
from urllib import urlretrieve

class Retriever(object):
    '''Download a single web page and mirror it into a local directory
    tree that matches the URL's path structure.'''

    def __init__(self, url):
        self.url = url
        self.file = self.filename(url)

    def filename(self, url, deffile='index.html'):
        '''Map *url* to a local file path mirroring the site's structure,
        creating any missing directories.  Returns the local path.'''
        parsedurl = urlparse(url, 'http:', 0)  # parse, defaulting the scheme
        path = parsedurl[1] + parsedurl[2]     # netloc + path (scheme dropped)
        ext = splitext(path)
        # No file extension: treat the URL as a directory and store the
        # default file name inside it.
        if ext[1] == '':
            if path[-1] == '/':
                path += deffile
            else:
                path += '/' + deffile
        ldir = dirname(path)
        if sep != '/':  # translate URL separators on non-POSIX systems
            ldir = replace(ldir, '/', sep)
        if not isdir(ldir):
            # A plain file may already occupy the spot where we need a
            # directory; remove it before creating the tree.
            if exists(ldir):
                unlink(ldir)
            makedirs(ldir)
        return path

    def download(self):
        '''Fetch self.url into self.file.

        On success returns urlretrieve()'s (filename, headers) tuple; on
        failure returns a 1-tuple whose only element is a string starting
        with "***ERROR" (callers test the first character).'''
        try:
            retval = urlretrieve(self.url, self.file)
        except IOError:
            retval = ('***ERROR: invalid URL "%s"' % self.url,)
        return retval

    def parseAndGetLinks(self):
        '''Parse the downloaded file and return its list of anchor URLs.'''
        self.parser = HTMLParser(AbstractFormatter(
            DumbWriter(StringIO())))
        # BUG FIX: the original did open(self.file).read() and leaked the
        # file handle; close it explicitly even if feed() raises.
        f = open(self.file)
        try:
            self.parser.feed(f.read())
        finally:
            f.close()
        self.parser.close()
        return self.parser.anchorlist

class Crawler(object):
    '''manage entire crawling process'''
    count = 0

    def __init__(self, url):
        self.q = [url]
        self.seen = []
        self.dom = urlparse(url)[1]  #从解析的网址中得到域名

    def getPage(self, url):
        r = Retriever(url)
        retval = r.download()
        if retval[0] == '*':
            print retval, '...skipping parse'
            return
        Crawler.count += 1
        print '\n(', Crawler.count, ')'
        print 'URL:', url
        print 'FILE:', retval[0]
        self.seen.append(url)
        links = r.parseAndGetLinks()
        for eachLink in links:
            if eachLink[:4] != 'http' and find(eachLink, '://') == -1:
                eachLink = urljoin(url, eachLink)
            print '* ', eachLink,

            if find(lower(eachLink,), 'mailto:') != -1:
                print '... discarded, mailto link'
                continue

            if eachLink not in self.seen:
                if find(lower(eachLink), self.dom) == -1:
                    print '... discarded, not in domain'
                else:
                    if eachLink not in self.q:
                        self.q.append(eachLink)
                        print '... new, added to Q'
                    else:
                        print '... discarded, already in Q'
            else:
                print '... discarded, already processed'

    def go(self):
        while self.q:
            url = self.q.pop()
            self.getPage(url)

def main():
    '''Entry point: take the starting URL from the command line, or
    prompt for one, then run the crawler.'''
    if len(argv) > 1:
        url = argv[1]
    else:
        try:
            url = raw_input('Enter starting URL: ')
        except (KeyboardInterrupt, EOFError):
            url = ''

    # BUG FIX: these three lines were indented inside the else branch, so
    # a URL supplied on the command line was read but never crawled.
    if not url:
        return
    robot = Crawler(url)
    robot.go()

if __name__ == '__main__':
    main()
