#!/usr/bin/env python
#coding=utf-8 

# Web crawler: downloads pages starting from one URL, staying in its domain

from sys import argv
from os import makedirs, unlink, sep
from os.path import dirname, exists, isdir, splitext
from string import replace, find, lower
from htmllib import HTMLParser
from urllib import urlretrieve
from urlparse import urlparse, urljoin
from formatter import DumbWriter, AbstractFormatter
from cStringIO import StringIO


class Retriever(object):
	"""Download a single URL into a local file that mirrors the URL's
	server/path layout, and extract the anchor links from the page.

	Attributes:
		url  -- the URL this retriever is responsible for
		file -- local path the page is (or will be) saved to
	"""
	def __init__(self, url):
		self.url = url
		self.file = self.filename(url)

	def filename(self, url, deffile='index.htm'):
		"""Map *url* to a local file path and return it.

		The path is netloc + path of the URL; a URL ending in '/' or
		lacking a file extension gets *deffile* appended.  Any missing
		directories are created as a side effect (a plain file that is
		in the way of a needed directory is removed first).
		"""
		# NOTE: the default scheme is 'http' -- the original passed
		# 'http:', which urlparse treats as a literal scheme string.
		# Only elements [1] and [2] are used, so behavior is unchanged.
		parsedurl = urlparse(url, 'http', 0)
		path = parsedurl[1] + parsedurl[2]  # netloc + path
		ext = splitext(path)
		if ext[1] == '':  # no file extension: fall back to deffile
			if path[-1] == '/':
				path += deffile
			else:
				path += '/' + deffile
		ldir = dirname(path)
		if sep != '/':  # convert URL separators to the platform's
			ldir = ldir.replace('/', sep)
		if not isdir(ldir):
			if exists(ldir):
				unlink(ldir)  # a file blocks the directory; remove it
			makedirs(ldir)
		return path

	def download(self):
		"""Fetch self.url into self.file.

		Returns urlretrieve()'s (filename, headers) tuple on success,
		or a 1-tuple whose single string starts with '***' on failure
		(the caller inspects the first element to detect errors).
		"""
		try:
			retval = urlretrieve(self.url, self.file)
		except IOError:
			retval = ('*** ERROR: invalid URL "%s"'% self.url,)
		return retval

	def parseAndGetLinks(self):
		"""Parse the downloaded HTML file and return its anchor list."""
		self.parser = HTMLParser(AbstractFormatter(
			DumbWriter(StringIO())))
		f = open(self.file)
		try:
			# fix: the original open(...).read() leaked the file handle
			self.parser.feed(f.read())
		finally:
			f.close()
		self.parser.close()
		return self.parser.anchorlist

class Crawler(object):# 管理所有的爬虫
	count = 0
	def __init__(self, url):
		self.q = [url] # 队列中存放这一个url
		self.seen = [] # GU： 已经看过的url
		self.dom = urlparse(url)[1] # 服务器的核心地址

	def getPage(self, url):
		r = Retriever(url) # 生成一个下载页面的对象
		retval = r.download() # 下载页面
		if retval[0] == '*':
			print retval, '...skipping parse'
			return
		Crawler.count += 1
		print '\n(', Crawler.count, ')'
		print 'URL:', url
		print 'FILE:', retval[0]
		self.seen.append(url)

		links = r.parseAndGetLinks() # 解析的链接
		for eachLink in links:
			if eachLink[:4] != 'http' and \
				find(eachLink, '://') == -1:
				eachLink = urljoin(url, eachLink)
			print '* ', eachLink

			if find(lower(eachLink), 'mailto:') != -1:
				print '...discarded, mailto link'
				continue

			if eachLink not in self.seen:
				if find(eachLink, self.dom) == -1:
					print '... discarded, not in domain',self.dom
				else:
					if eachLink not in self.q:
						self.q.append(eachLink)
						print '... new, added to Q'
					else:
						print '... discarded, already in Q'
			else:
				print '... discarded, already processed'
	def go(self):
		while self.q: # 如果队列中有url的话
			url = self.q.pop() # 队列中弹出
			self.getPage(url) # 通过url 下载页面

def main():
	"""Entry point: take the start URL from the command line, or
	prompt for one, then run the crawler from it."""
	if len(argv) > 1:
		url = argv[1]
	else:
		try:
			url = raw_input('Enter starting URL:')
		except (KeyboardInterrupt, EOFError):
			url = ''
	# nothing to do without a starting URL
	if url:
		crawler = Crawler(url)
		crawler.go()

# Run the crawler only when executed as a script, not when imported.
if __name__ == "__main__":
	main()

