#!/usr/bin/env python
from sys import argv
from multiprocessing import Process,Array,Queue
from urllib import urlopen
from htmllib import HTMLParser
from formatter import DumbWriter,AbstractFormatter
from StringIO import StringIO
from string import find,lower,replace
from urlparse import urlparse,urljoin
from os.path import dirname
import socket
import time

class glbls:
	"""Module-wide mutable state shared by ``main``.

	thrdlist -- handles of the worker processes spawned by ``main``.
	"""
	thrdlist = []

# Directory where rendered HTML cache pages are written.
sourcedirhtml = '/home/freedomljc/apache/htdocs/source/'
# Directory where the raw extracted <textarea> text is written.
sourcedirtxt = '/home/freedomljc/v1.4/source/'
# Seed URL for the crawl, taken from the command line.
# NOTE(review): argv[1] is read at import time, so running the script with
# no argument raises IndexError before main() starts -- confirm intended.
mainurl = argv[1]
# Maximum link depth (distance from the seed) a worker will expand.
MaxDepth = 5
# url -> link depth from the seed.
# NOTE(review): this plain dict is copied into each worker Process, not
# shared between them -- updates in one worker are invisible to the others.
depth = {}
# Seconds between crawl cycles (one day).
interval = 3600*24
# Domain/path prefix used by workers to keep the crawl on-site.
dom = dirname(mainurl)
# Template for the cached HTML page; %s is filled with the extracted text.
htmlpage = '''
<html>
	<head>
		<meta http-equiv="Content-type" content="text/html;charset=gbk">
		<title>cached for you can fly</title>
	</head>
	<body>
		<pre>
			<blockquote>
			"%s"
			</blockquote>
		</pre>
	</body>
</html>
'''
def foo(id,que,visited):
	"""Worker process body: pull URLs from ``que``, fetch each page, save
	the text found between <textarea> tags to disk, and enqueue in-domain
	links discovered on the page.

	id      -- numeric worker id (currently unused inside the body)
	que     -- multiprocessing.Queue of URLs still to crawl
	visited -- set of URLs already queued

	NOTE(review): ``visited`` and the module-level ``depth`` dict are plain
	Python objects; each Process receives its own copy, so mutations made
	here do NOT propagate to sibling workers or the parent. Confirm whether
	a multiprocessing.Manager proxy was intended.
	"""
	while True:
		try:
			# Block (block=1) up to 10 seconds for the next URL; a bare
			# except treats ANY failure -- normally Queue.Empty after the
			# timeout -- as "no more work" and ends this worker.
			topurl = que.get(1,10)
		except:
			break
	#	visited.add(topurl)
		print topurl + '@'
		# Stop once this URL is MaxDepth levels away from the seed.
		# NOTE(review): depth[topurl] can raise KeyError here, because the
		# entry may have been added in a *different* process (see docstring).
		if depth[topurl] >= MaxDepth:
			break
		try:
			fin = urlopen(topurl)
		except IOError:
			# Unreachable/failed fetch: skip this URL, keep the worker alive.
			continue
		# Line-oriented scan: 'flag' is 1 while we are inside a
		# <textarea>...</textarea> region; 'data' collects that region,
		# 'pagedata' keeps the whole page for later link extraction.
		data = ''
		flag = 0
		pagedata = ''
		for eachline in fin:
			pagedata += eachline
		#	print eachline
			if eachline.find('<textarea')!=-1:
				flag = 1
			elif eachline.find('</textarea')!=-1:
				flag = 0
			else:
				if flag == 1:
					data += eachline
			#	print eachline
	#	print data
		if data != '':
			print topurl
			print data
			# Drop the scheme so the URL can be flattened into a filename.
			if topurl[:7] == 'http://':
				path = topurl[7:]
			else:
				path = topurl
			# Encode path separators and query markers so the whole URL
			# becomes a single flat filename: '/' -> '^', '?' -> '$'.
			path2 = path.replace('/','^')
			path2 = path2.replace('?','$')
			# Write the extracted text wrapped in the HTML template ...
			fout = open(sourcedirhtml+path2+'.html','w')	
			htmlpage2 = htmlpage % data
			fout.write(htmlpage2)
			fout .close()
			# ... and again as a raw text file.
			fout = open(sourcedirtxt+path2,'w')
			fout.write(data)
			fout.close()
		# Parse the whole page to collect its anchors; parser output is
		# discarded (DumbWriter into a throwaway StringIO), only the
		# side-effect list parser.anchorlist is used.
		parser = HTMLParser(AbstractFormatter(DumbWriter(StringIO())))
		try:
			parser.feed(pagedata)
			parser.close()
		except:
			# Best-effort: malformed HTML must not kill the worker.
			pass
		urllinks = parser.anchorlist
		if urllinks is None:
			continue
		for urllink in urllinks:
		#	print '#'+urllink
			# Resolve relative links against the seed URL.
			# NOTE(review): resolving against mainurl rather than topurl
			# mis-resolves relative links on deeper pages -- verify.
			if urllink[:7] != 'http://':
				urllink = urljoin(mainurl,urllink)
			if urllink not in visited:
				# Keep the crawl on-site: skip links outside 'dom'.
				if find(urllink ,dom) == -1:
					continue
				print '#'+urllink
				visited.add(urllink)
				que.put(urllink)
				depth[urllink] = depth[topurl] + 1

	
		

def main():
	"""Crawl ``mainurl`` once every ``interval`` seconds, forever.

	Each cycle seeds the work queue with the main URL, starts a pool of
	worker processes running ``foo``, waits for all of them to finish,
	then sleeps until the next cycle.

	NOTE(review): ``visited`` and ``depth`` are plain Python objects, so
	each worker Process operates on its own copy -- confirm whether
	multiprocessing.Manager-backed shared state was intended.
	"""
	nthreads = 10
	while True:
		visited = set()   # URLs already queued this cycle
		depth.clear()     # drop stale per-cycle depth bookkeeping
		depth[mainurl] = 1
		que = Queue()
		que.put(mainurl)
		# Track only this cycle's workers. Previously glbls.thrdlist was
		# never cleared, so every cycle re-joined all dead processes from
		# earlier cycles and the list grew without bound.
		workers = []
		for i in range(nthreads):
			pf = Process(target=foo, args=(i, que, visited))
			workers.append(pf)
			pf.start()
		glbls.thrdlist = workers  # keep the module-level handle current
		for thrd in workers:
			thrd.join()
		time.sleep(interval)

if __name__ == '__main__':
	main()
