﻿#!/usr/bin/python
#coding: utf-8

################################
# file: crawler.py             #
# function: MM picture crawler #
# author: xsseroot@gmail.com   #
# time: 2014-9-1 21:02:27      #
################################

import logging
import math
import os
import re
import sys
import threading
import time
import urllib

from log import loger
from xmlHelp import xmlHelper
from httpHelp import httpHelper
from models import Column
from threadPool import *

#print sys.getdefaultencoding()
reload(sys)
sys.setdefaultencoding('utf-8')
#print sys.getdefaultencoding()

class Program():
	__config_file = ""
	__columnList = {}
	__logHandler = None
	__xmlHandler = None
	__httpHandler = None
	__savePath = ""
	__limitPicCount = -1
	__threadCount = 10
	__threadPool = None
	
	def __init__(self,confFile=None,logHandler=None,savepath="./pics",limit=-1,threadCount=10):
		if confFile != None:
			self.__config_file = confFile
		if logHandler != None:
			self.__logHandler = logHandler
		self.__savePath = savepath
		self.__LoadConfig()
		self.__httpHandler = httpHelper()
		self.__limitPicCount = limit
		self.__threadCount = threadCount
		#try:
			#self.__threadPool = ThreadPool(int(threadCount))
		#except NameError,Msg:
		#	self.__Error("%s%s" % (NameError,Msg))
			
		
	# load config.xml and begin adapting
	def __LoadConfig(self):
		try:
			host_regex = re.compile(r'http://(.*?)/(.*)') # uri
			port_regex = re.compile(r':(\d+)')
			if os.path.exists(self.__config_file):
				self.__logHandler.Info("Found configure file [%s]" % self.__config_file)
				self.__logHandler.Info("Loading Configure...")
				self.__xmlHandler = xmlHelper(self.__config_file)
				nodelist = self.__xmlHandler.GetNodeList("./menu/column")
				for node in nodelist:	
					col = Column()
					col.name = node.find("./name").text
					col.savepath = node.find("./savepath").text
					col.url = node.find("./url").text
					col.host_port = 80
					match = host_regex.match(col.url)
					if match:
						port_match = port_regex.match(match.group(1))
						if port_match:
							col.host_port = port_match.group(1)
						col.host_name = match.group(1)
						col.host_path = "/%s" % match.group(2)
						#print col.host_name,col.host_port,col.host_path
					
					pageDic = self.__xmlHandler.GetChildren(node,"./page")
					col.page_size = pageDic["size"]
					col.page_file = pageDic["file"]
					col.page_total_regex = re.compile(u'%s' % pageDic["total_regex"])
					#for item in pageDic:
					#	print item,":",pageDic[item]			
					
					regexlist = self.__xmlHandler.GetNodeList(node,"./regex/item")
					#col.regex[]
					for item in regexlist:
						col.regex[item.get("name")] = item.text.split(",,,")
						#print "regex/item:",item.text,
						#print item.get("name")
					self.__columnList[col.name] = col
					
			else:
				self.__logHandler.Error("Not found configure file [%s]" % self.__config_file)
		except NameError,Msg:
			self.__logHandler.Error((NameError,Msg))
			
	# start
	def Start(self):
		if len(self.__columnList) > 0:
			self.__logHandler.Info("Found %s columns. %s " % (len(self.__columnList),self.__columnList.keys()) )
			self.__logHandler.Info("Started crawling website ...")
			self.__threadPool = ThreadPool(self.__threadCount)
			for col in self.__columnList:
				#self.__CrawlReady(self.__columnList[col])
				self.__threadPool.add_job( self.__CrawlReady,self.__columnList[col],1)
				time.sleep(0.2)
			self.__threadPool.wait_for_complete()
				#while tp.resultQueue.qsize():
				#	log.Info( tp.resultQueue.get())	
		else:
			self.__logHandler.Warning("Not found column list.")
			
	# Ready
	def __CrawlReady(self,columnModel,args):
		columnModel = columnModel[0]
		pageTotal = 0
		recordTotal = 0
		self.__logHandler.Info("Current column:[%s], url:[%s]" % (columnModel.name,columnModel.url))
		html = self.__httpHandler.GetHtml(columnModel.host_name,columnModel.host_path)
		#self.__logHandler.Debug(html)
		listpage = columnModel.page_total_regex.search(html)
		if listpage:
			recordTotal =  listpage.group(1)
			pageTotal = int(math.ceil(int(recordTotal) / int(columnModel.page_size)))
			self.__logHandler.Info("Page total:%s, Page size:%s, Record Total:%s" % (pageTotal,columnModel.page_size,recordTotal))
			currentPageFile = ""
			for currPage in range(1,pageTotal+1):
				if currPage == 1:
					currentPageFile = "%sindex.html" % columnModel.host_path
				else:
					currentPageFile = "%s%s" % (columnModel.host_path,columnModel.page_file.replace("\d+",str(currPage)))
				self.__logHandler.Info(("Current page:%s, Path:%s" % (currPage,currentPageFile)))
				self.__CrawlList(columnModel,currentPageFile,"%s" % currPage)

	# Get List			
	def __CrawlList(self,columnModel,path,page):
		html = self.__httpHandler.GetHtml(columnModel.host_name,path)
		#self.__logHandler.Debug(html)
		#print columnModel.regex["list"][0]
		imgDiv = re.compile(r'%s' % columnModel.regex["list"][0] ).findall(html)
		if imgDiv:		
			# todo Verify imgDiv
			#self.__logHandler.Debug(imgDiv[1])
			imgList = re.compile(r'%s' % columnModel.regex["list"][1] ).findall(imgDiv[1])
			#print imgList
			dirIndex = 1
			for img in imgList:
				msg = "title:%s, href:%s" % (img[1].decode('utf-8'),img[0])
				self.__logHandler.Info(msg)
				columnModel.savepath = '%s/%s/%s/%s/' % (self.__savePath,columnModel.name,("page-%s" % page.zfill(3)),dirIndex)
				self.__logHandler.Info("Save path:%s" % columnModel.savepath)
				if not os.path.exists(columnModel.savepath):
					os.makedirs(columnModel.savepath)				
				self.__CrawlShow(columnModel,img[0])
				dirIndex = dirIndex + 1
				
	
	# Get Show
	def __CrawlShow(self,columnModel,path):
		html = self.__httpHandler.GetHtml(columnModel.host_name,path)
		imgCount = 0
		imgDiv = re.compile(r'%s' % columnModel.regex["show"][0] ).findall(html)
		if imgDiv:
			self.__logHandler.Info("Found %s pictures." % imgDiv[0])
			self.__logHandler.Info("Set limit picture:%s,Current thread count:%s" % (self.__limitPicCount,self.__threadCount))
			imgCount = int(imgDiv[0])
			#subPath = path.replace(".html","-{$d$}.html")
			#for currentImg in range(1,imgCount + 1):
				#currPath = path
				#if currentImg != 1:
			currPath = path.replace(".html","-%s.html" % imgCount)
			#self.__logHandler.Debug(currPath)
			self.__CrawlDetail(columnModel,currPath)
				
	# Get Detail
	def __CrawlDetail(self,columnModel,path):
		try:
			html = self.__httpHandler.GetHtml(columnModel.host_name,path)
			#self.__logHandler.Debug(html)
			imgDiv = re.compile(r'%s' % columnModel.regex["detail"][0] ).findall(html)
			self.__logHandler.Debug(imgDiv)
			if imgDiv:
				#self.__logHandler.Debug(imgDiv)
				imgList = re.compile(r'%s' % columnModel.regex["detail"][1] ).findall(imgDiv[0][1])
				self.__logHandler.Info(imgList)
				currnetImgIndex = 0
				# set thread count, defualt 10
				tp = ThreadPool(self.__threadCount)
				
				for img in imgList:					
					if self.__limitPicCount > 0 and self.__limitPicCount == currnetImgIndex:						
						break
					currnetImgIndex = currnetImgIndex + 1
					self.__logHandler.Info("[%s]Save file success. %s%s, thread id:%s" % (currnetImgIndex,columnModel.savepath,os.path.basename(img),threading.currentThread().getName()))
					#urllib.urlretrieve(img,("%s%s" % (columnModel.savepath,os.path.basename(img))))
					tp.add_job( self.__GetDownImg,currnetImgIndex,img.replace("big","pic"),("%s%s" % (columnModel.savepath,os.path.basename(img))))
					time.sleep(0.2)
				tp.wait_for_complete()
				#while tp.resultQueue.qsize():
				#	log.Info( tp.resultQueue.get())					
					
		except NameError,Msg:
			self.__logHandler.Error("%s%s" % (NameError,Msg))
	#
	def __GetDownImg(self,url,args):
		result = ""
		try:
			time.sleep(1)
			urllib.urlretrieve(url[1],url[2])
			result = "[%s]Save file success. %s" % (url[0],url[2])
		except NameError,Msg:
			self.__logHandler.Error("%s%s" % (NameError,Msg))
		return  result
		
if __name__ == "__main__":
	threadCount = 10
	savePath = "%s/pics" % os.getcwd()
	limitPicCount = -1
	log = loger("mm_crawler.log",logging.DEBUG)
	program = None
	if len(sys.argv) == 2:
		#sys.exit()
		if sys.argv[1] == "-h":
			print '''\
-------------------------------------------------------------
This program is crawling MM pictures. Version beta 1.0
by: xsseroot@gamil.com time: 2014-09
-------------------------------------------------------------
Option:
    -v : Prints the version number  
    -h : Display this help
    -n : The number of threads, the default is 10.
    -o : Crawl save path, the default is "./pics/" path.
    -l : Set the number of crawling pictures, the default is 0 not limit. 

Demo:
-------------------------------------------------------------
    crawler.py -n5 -o"d:/pics" -l20
    crawler.py -n5
    crawler.py -l15
    crawler.py -o/data/pics'''
			sys.exit()
			
		if sys.argv[1] == "-v":
			print "Version beta 1.1"
			sys.exit()
		
	for currParaIndex in range(1,len(sys.argv)):
		try:
			if sys.argv[currParaIndex].startswith("-"):
				option = sys.argv[currParaIndex][1:2]
				value = sys.argv[currParaIndex][2:]
				if option == "n":
					threadCount = int(value)
				if option == "o":
					if not os.path.exists(value):
						os.makedirs(value)
					savePath = value
				if option == "l":
					limitPicCount = int(value)
		except ValueError,Msg:
			log.Warning((ValueError,Msg))
		except NameError,Msg:	
			log.Error((NameError,Msg))
			sys.exit()
		
	# statrt execute
	log.Debug("Current thread count:%s" % threadCount )
	log.Debug("Output path:%s" % savePath )
	log.Debug("limit pictures count:%s" % limitPicCount )
	program = Program("./conf/config.xml",log,savePath,limitPicCount,threadCount)
	program.Start()
		
	
	