#!/usr/bin/env python
#encoding=utf-8
import MySQLdb
import spider_classes
import sys
import getopt
import re
import urllib

def main(argv):
	url=None	#initial URL
	thread_num=5	#number of threads
	count_limit=1000	#limit of URL number
	time_out=600	#limit of time out

	try:
		opts,args=getopt.getopt(argv,"u:s:l:t:h",["url","threads","countlimit","timeout","help"])
	except:
		sys.exit()
	
	#recieve args and check them out
	for opt,arg in opts:
		if opt in ("-u","--url"):
			url=arg
			try:
				html_txt=urllib.urlopen(url)
			except:
				print 'Please input a valid url'
				sys.exit()
		elif opt in ("-s","--threads"):
			try:
				threads_num=int(arg)
			except:
				print "Please input an [Integer] for number of threads."
				sys.exit()
		elif opt in ("-l","--countlimit"):
			try:
				count_limit=int(arg)
			except:
				print "Please input an Integer for limit of URL's number."
				sys.exit()
		elif opt in ("-t","--timeout"):
			try:
				time_out=float(arg)
			except:
				print "Please input an Integer or a float number for time_out limit."
				sys.exit()
		elif opt in ("-h","--help"):
			print """
				Args and Introduction:
				-u,--url string Initial URL
				-s,--threads int Numbers of threads
				-l,--countlimit int Limit of URL numbers
				-t,--timeout int Time limit
				-h,--help Print help information
				"""
	if url==None:
		while url==None:
			url=raw_input("Please input the initial URL: ")
			try:
				html_txt=urllib.urlopen(url)
			except:
				print 'Please input a valid url'
				sys.exit()
	del html_txt			
	
	#create an object of Spider and start to crawl the WWW pages
	crawler=spider_classes.Spider(url,thread_num,count_limit,time_out)
	crawler.run_spider()

#script entry point: forward everything after the program name to main()
if __name__=="__main__":
	main(sys.argv[1:]) 
