#!/usr/bin/env python
#encoding=utf-8

import MySQLdb	#mysqldb module
import urllib	#urllib module (urlopen)
from time import ctime
from datetime import datetime
import re	#regex module
import threading	#multi-threads module
import sys

class Spider(object):
	"""Manage the entire crawling process.

	Starting from a seed URL, worker threads download pages, store each
	page's HTML into the MySQL table ``spider`` and enqueue the links
	found on it, until either the page-count limit or the time limit is
	reached.
	"""

	count_page = 0	# number of pages stored so far (shared by all threads)
	pattern = r'<a\shref=\"(.*?)\"'	# regex capturing the href target of anchor tags
	thread_lock = threading.RLock()	# guards the shared queues and count_page

	def __init__(self, url, thread_num, count_limit, time_out):
		"""Set up the crawl state.

		url         -- seed URL the crawl starts from
		thread_num  -- maximum number of worker threads per batch
		count_limit -- stop after this many pages have been stored
		time_out    -- stop after this many seconds
		"""
		self.queue_visit = [url]	# links waiting to be visited
		self.queue_visited = []	# links that have already been visited
		self.count_limit = count_limit
		self.time_start = datetime.now()
		self.time_out = time_out
		self.thread_num = thread_num

	def download(self, url):
		"""Fetch the page at *url* and return its HTML text, or None on failure.

		Returning None instead of letting the exception escape keeps one
		bad link from killing its worker thread; getPage() checks for
		None (the original check was unreachable because download() could
		only raise, never return None).
		"""
		try:
			html_txt = urllib.urlopen(url).read()
		except Exception:	# network / HTTP error: report and skip this URL
			print('URL: %s download failed.---- %s' % (url, ctime()))
			return None
		print('URL: %s downloaded successfully.---- %s' % (url, ctime()))
		return html_txt

	def getLinks(self, txt):
		"""Return every href target found in the HTML text *txt*."""
		return re.findall(Spider.pattern, txt)

	def urlParse(self, url, link):
		"""Join base *url* and *link* into one URL.

		Collapses a doubled '/' and inserts a missing one, so exactly one
		separator ends up between the two parts.  Uses startswith/endswith
		rather than indexing so empty strings cannot raise IndexError.
		"""
		if url.endswith('/') and link.startswith('/'):
			return url[:-1] + link
		if url.endswith('/') or link.startswith('/'):
			return url + link
		return url + '/' + link

	def getPage(self, url):
		"""Download *url*, store it in the database and queue its links."""
		html_txt = self.download(url)

		# Mark the URL as visited whether or not the download succeeded,
		# so it is never scheduled again.
		Spider.thread_lock.acquire()
		self.queue_visited.append(url)
		Spider.thread_lock.release()

		if html_txt is None:	# download failed: nothing to store or parse
			return

		# Store the HTML in MySQL.  try/finally guarantees the connection
		# is closed even when the INSERT raises (the original leaked it).
		# NOTE(review): credentials are hard-coded -- move them to config.
		conn = MySQLdb.connect(host='localhost', user='root',
		                       passwd='Woshijiayu', db='test')
		try:
			cur = conn.cursor()
			# parameterized query: URL/HTML are escaped by the driver
			cur.execute("insert into spider values(%s,%s)", (url, html_txt))
			cur.close()
			conn.commit()
		finally:
			conn.close()

		print('URL: %s  was inserted to db.---- %s' % (url, ctime()))

		Spider.thread_lock.acquire()
		Spider.count_page += 1	# one more page stored
		Spider.thread_lock.release()

		# Queue every link found on this page.
		for link in self.getLinks(html_txt):
			if link[:4] == "java":	# skip javascript: pseudo-links
				continue
			if link[:4] != "http":	# relative link: join with the base URL
				link = self.urlParse(url, link)
			# Check-and-append under the lock: the original tested
			# membership outside the lock, racing with other workers.
			Spider.thread_lock.acquire()
			if (link not in self.queue_visit) and (link not in self.queue_visited):
				self.queue_visit.append(link)
			Spider.thread_lock.release()

	def run_spider(self):
		"""Drive the crawl with batches of worker threads.

		Loops until the time limit or the page-count limit is hit,
		printing a summary before returning.
		"""
		while True:
			elapsed = (datetime.now() - self.time_start).seconds

			# Stop condition 1: time limit exceeded.
			if elapsed > self.time_out:
				print('time out')
				print('total time is : %s seconds' % elapsed)
				print('total number of urls is : %s' % Spider.count_page)
				break

			# Stop condition 2: enough pages stored.
			if Spider.count_page > self.count_limit:
				print('number of links out of limit')
				print('total time is : %s seconds' % (datetime.now() - self.time_start).seconds)
				print('total number of urls is : %s' % Spider.count_page)
				break

			# Build one batch of at most thread_num workers (the original
			# off-by-one allowed thread_num+1 threads per batch).
			threads = []
			Spider.thread_lock.acquire()
			while self.queue_visit and len(threads) < self.thread_num:
				target_url = self.queue_visit.pop()
				t = threading.Thread(target=self.getPage, args=(target_url,))
				t.setDaemon(True)	# daemon: dies with the main thread
				threads.append(t)
			Spider.thread_lock.release()

			# Start the whole batch, then wait for every worker to finish.
			for t in threads:
				t.start()
			for t in threads:
				t.join()
