import errno
import os
import re
import urlparse
from sgmllib import SGMLParser, SGMLParseError

#file extensions that are kept as-is when mapping a url to a local file;
#anything else gets '.html' appended by normalize_to_html_url
EXT=set([
	'asp','htm','html','txt','xml','php','jsp','aspx','shtml','shtm','css','js'
])

#http://www.baidu.com=>http://www.baidu.com.html
#http://www.baidu.com/=>http://www.baidu.com.html
#http://www.baidu.com/a.xpz=>http://www.baidu.com/a.xpz.html
def normalize_to_html_url(url):
	"""Return *url* normalized to the name of its local '.html' mirror file.

	A trailing '/' is replaced by '.html'; a url whose last path segment
	has no extension, or an extension not in EXT, gets '.html' appended;
	urls already ending in a known extension are returned unchanged.
	An empty string is returned as-is (the original crashed on url[-1]).
	"""
	url=url.strip()
	if not url:#guard: original raised IndexError on empty input
		return url
	if url[-1]=='/':
		return url[:-1]+'.html'
	idxLastSlash=url.rfind('/')
	idxLastDot=url.rfind('.')
	#only a dot AFTER the last slash marks a file extension; a dot inside
	#a directory segment (http://x/a.b/c) must not be mistaken for one
	if idxLastDot<=idxLastSlash:
		return url+'.html'
	ext=url[idxLastDot+1:]
	if ext not in EXT:
		return url+'.html'
	return url


#http://www.baidu.com/index.htm=>www.baidu.com
#http://www.baidu.com.html=>''
def url_to_local_dir(url):
	"""Return the local mirror directory for *url*: the text between the
	'http://' prefix and the last '/'; '' when there is no path part."""
	trimmed=url.strip()
	start=len('http://')
	end=trimmed.rfind('/')
	return trimmed[start:end]

#http://www.baidu.com/a/b/c.html=>www.baidu.com/a/b/c.html
def url_to_local_path(url):
	"""Drop the 'http://' scheme prefix so *url* maps onto the mirror tree."""
	scheme='http://'
	return url[len(scheme):]

class LocalPageParser(SGMLParser):
	"""SGML/HTML pass-through parser that rewrites <a>/<frame>/<link>/<img>
	urls pointing at already-fetched pages into relative local-mirror paths;
	all other markup is echoed through unchanged.

	Typical use: set_fetched_urls(urls), parse(baseUrl, html), get_html();
	call reset() before reusing the instance on another page.
	"""
	def __init__(self):
		#output buffers: _htmlSegs collects fragments during feed(),
		#_html holds the joined result after parse()
		self._html=''
		self._htmlSegs=[]
		SGMLParser.__init__(self)
	def get_html(self):
		"""Return the rewritten page produced by the last parse()."""
		return self._html
	def reset(self):
		"""Clear SGMLParser state and the accumulated output buffers."""
		SGMLParser.reset(self)
		self._htmlSegs=[]
		self._html=''
	def set_fetched_urls(self,urls):
		#urls: collection of absolute 'http://...' urls that have a local
		#copy; only links found in it are rewritten to relative paths
		self._fetchedUrls=urls
	def parse(self,baseUrl,data):
		"""Feed the whole page *data* (links resolved against *baseUrl*)
		through the parser and build the rewritten html in self._html."""
		#you should put all data in the html page once
		self._baseUrl=baseUrl
		u=self._baseUrl[:-1]#strip last '/' if exists
		#depth = path levels below the mirror root, e.g.
		#http://host/a/b/c.html -> relPath '../../..'
		depth=u.count('/')-2
		l=['..']*depth
		self._relPath='/'.join(l)
		SGMLParser.feed(self,data)
		#force feed all data
		self.close()
		self._html=''.join(self._htmlSegs)
	#
	#if cur base url is http://www.baidu.com/a/b/c.html
	#http://www.baidu.com/a/b.html=>../../../../www.baidu.com/a/b.html
	def __gen_rel_url(self,fullUrl):
		#map an absolute fetched url to a path relative to the current
		#page's directory inside the local mirror tree
		fullUrl=normalize_to_html_url(fullUrl)
		u=fullUrl[len('http://'):]
		if self._relPath:
			return self._relPath+'/'+u
		else:
			return u
	def __handle_url_tag(self,attrs,tag,urlAttr):
		#re-emit *tag*, rewriting its *urlAttr* attribute when it points at
		#a page we have fetched; other attributes pass through unchanged
		self._htmlSegs.append('<'+tag)
		for attr,val in attrs:
			if attr==urlAttr:
			#generate relative url
				fullUrl=urlparse.urljoin(self._baseUrl,val)
				if fullUrl.startswith('http://') and fullUrl  in self._fetchedUrls:
					url=self.__gen_rel_url(fullUrl)
				else:
					url=fullUrl
				#NOTE(review): attribute value is emitted without quotes --
				#breaks if the url contains spaces; confirm before relying on it
				self._htmlSegs.append(' '+urlAttr+"="+url)
			else:
				self._htmlSegs.append(' '+attr+'='+val)
		self._htmlSegs.append('>')

	def unknown_starttag(self,tag,attrs):
		#dispatch the four link-carrying tags to the url rewriter;
		#every other tag is echoed back verbatim (also unquoted values)
		if tag =='a':
			self.__handle_url_tag(attrs,'a','href')
		elif tag == 'frame':
			self.__handle_url_tag(attrs,'frame','src')
		elif tag == 'link':
			self.__handle_url_tag(attrs,'link','href')
		elif tag == 'img':
			self.__handle_url_tag(attrs,'img','src')
		else:
			self._htmlSegs.append('<'+tag)
			for attr,val in attrs:
				self._htmlSegs.append(' '+attr+'='+val)
			self._htmlSegs.append('>')

	def unknown_endtag(self,tag):
		#closing tags pass through unchanged
		self._htmlSegs.append('</'+tag+'>')
	def handle_data(self,data):
		#NOTE(review): strip() drops whitespace around every text node, so
		#inter-word spacing across markup boundaries can be lost in output
		data=data.strip()
		if data:
			self._htmlSegs.append(data)
class KeywordParser:
	"""Pull keyword matches out of raw html with a configurable regex.

	get_keywords() returns whatever re.findall produced for the pattern:
	a list of group-tuples (or plain strings for single-group patterns).
	"""
	#key->[(url,offset,type),]
	Type_UrlKeyword=0
	def set_keyword_regx(self,strRegx,x=None):
		"""Compile *strRegx* for later parse() calls.

		x: keyword type tag (e.g. Type_UrlKeyword); currently unused.
		Defaulted to None so callers may omit it -- the __main__ driver
		passes only the pattern, which used to raise TypeError.
		"""
		self.__regx=re.compile(strRegx)
	def parse(self,strHtml):
		"""Run the compiled pattern over *strHtml* and store all matches."""
		self._urlKeyword=self.__regx.findall(strHtml)
	def get_keywords(self):
		"""Return the matches found by the last parse()."""
		return self._urlKeyword
#planned keyword index: key -> (url, type), where type is anchor or text
#in the page the keyword appears as:  <#key> key
#to visit it locally:                 url#key

class DirMaker:
	"""Create directory paths like os.makedirs, memoizing what already exists."""
	DIRS=set()#already created dirs (class-wide cache)
	@classmethod
	def make(cls,path):
		"""Create every directory along *path* (e.g. 'a/b/c'), skipping
		ones already created or already present on disk.

		Created/seen dirs are cached in DIRS so repeated calls are cheap.
		Empty paths are a no-op; a trailing '/' is ignored.
		"""
		if not path:
			return
		if path[-1]=='/':#strip last '/'
			path=path[:-1]
		if path in cls.DIRS:
			return
		pathSegs=path.split('/')
		for i in range(1,len(pathSegs)+1):
			curDir='/'.join(pathSegs[:i])
			if curDir in cls.DIRS:
				continue
			elif curDir:#skip the '' seg produced by a leading '/'
				try:
					os.mkdir(curDir)
				except OSError as e:#'as' form works on py2.6+ and py3
					#only swallow 'already exists'; e[0] indexing is py2-only
					if e.errno!=errno.EEXIST:
						raise
				cls.DIRS.add(curDir)

if __name__=='__main__':
	#read the spider dump: records are introduced by a 'spider-url:' line,
	#followed by the raw html lines of that page
	fetchedUrls=set()
	pages={}#url -> list of html lines
	f=open('xxxsave.txt','rb')
	lines=f.readlines()
	f.close()
	curUrl=''
	curHtmlLines=[]
	for line in lines:
		#FIX: was line[:-1], which ate the last char of a final line
		#that has no trailing newline
		line=line.rstrip('\n')
		if line.startswith('spider-url:'):
			#a new page starts: remember its url and collect its html lines
			strUrl=line[len('spider-url:'):]
			curUrl=strUrl
			curHtmlLines=[]
			pages[curUrl]=curHtmlLines
			fetchedUrls.add(curUrl)
		elif curUrl:
			curHtmlLines.append(line)
	parser=LocalPageParser()
	parser.set_fetched_urls(fetchedUrls)
	keywordParser=KeywordParser()
	#FIX: set_keyword_regx takes (pattern, type); the type argument was
	#missing, which raised TypeError
	keywordParser.set_keyword_regx('<td class="category-table-td"><a href=\s*"([\w:\./]+)"\s*>(\w+)</a>',KeywordParser.Type_UrlKeyword)
	for url,htmlSegs in pages.iteritems():
		strHtml=''.join(htmlSegs)
		#get keywords
		keywordParser.parse(strHtml)
		result=keywordParser.get_keywords()
		#FIX: the loop variable was named 'url', clobbering the page url
		#before parser.parse(url,...) below corrupted which file got saved
		for keywordUrl,keyword in result:
			pass#TODO: build and store the keyword index (bdb, mysql, ...)

		#rewrite the page's links for local browsing and save it
		parser.parse(url,strHtml)
		strHtml=parser.get_html()
		url=normalize_to_html_url(url)
		localPath=url_to_local_path(url)
		localDir=url_to_local_dir(url)
		DirMaker.make(localDir)
		f=open(localPath,'wb')
		f.write(strHtml)
		f.close()
		parser.reset()
	#FIX: removed 'os.system(dd)' -- 'dd' was never defined (NameError)
	#how we build and store index:bdb,mysql,or other...


