# FILE: WebCrawler.py
# DESC: Manages web crawling process
#

#from pytz import timezone
from datetime import datetime

from infra.crawler.Crawler import Crawler
from infra.logging import logger
from web.service.HtmlParser import HtmlParser
from web.service.HttpReader import HttpReader, HttpDocumentNotFound, HttpReadError
from web.filter.UrlDomainFilter import UrlDomainFilter
from web.validator.UrlTypeValidator import UrlTypeValidator
from geo.search.GeoSearchStrategy import GeoSearchException


class WebCrawler(Crawler):
	"""Crawls a customer's web documents: fetches each URL, re-parses the
	HTML when its content length changed, and runs a geo-search strategy
	over every (re)parsed document.

	Collaborators (injected via the constructor):
	  webDocMgr         -- provides and persists WebDocument objects
	  geoPlacemarkMgr   -- receives placemarks found by the geo search
	  geoSearchStrategy -- strategy executed against each parsed document
	"""

	def __init__(self, webDocMgr, geoPlacemarkMgr, geoSearchStrategy):
		self.webDocMgr_ = webDocMgr
		self.geoPlacemarkMgr_ = geoPlacemarkMgr
		self.geoSearchStrategy_ = geoSearchStrategy
		# Only follow links within the customer's own domain, and only
		# URL types the validator accepts.
		self.htmlParser_ = HtmlParser(UrlDomainFilter(self.webDocMgr_.getCustomer().getUrl()), UrlTypeValidator())
		log = logger.getLogger()
		# Lazy %-args: __dict__ is only formatted when DEBUG is enabled.
		log.debug("%s = %s", self, self.__dict__)


	def crawl(self):
		"""Crawl every web document currently known to the document manager."""
		webDocuments = self.webDocMgr_.getWebDocuments()
		log = logger.getLogger()
		log.info("%d WebDocument objects in the dictionary." % len(webDocuments))
		for webDoc in webDocuments.values():
			self.crawlWebDocument(webDoc)


	def crawlWebDocument(self, webDoc, recursive = True, visitor = None):
		"""Fetch, parse and geo-search a single web document.

		The document is only re-parsed when the HTTP content length
		differs from the one recorded at the previous crawl.

		webDoc    -- the WebDocument to crawl
		recursive -- when True, also crawl documents referenced by webDoc
		             that the manager does not know about yet
		visitor   -- optional object whose visitGeoSearchStrategy() hook
		             is invoked after a successful geo search

		A document that no longer exists on the server is removed from
		persistent storage; read errors are logged and swallowed; any
		other exception is logged and re-raised.
		"""
		url = webDoc.getAbsoluteUrl()
		log = logger.getLogger()

		try:
			log.info("Crawling url = \"%s\" ..." % url)

			httpReader = HttpReader()
			htmlDoc = httpReader.read(url)
			contentLength = httpReader.getContentLength()
			if webDoc.getContentLength() != contentLength:
				self.htmlParser_.parse(htmlDoc, webDoc)

				webDoc.setLastCrawl(datetime.now())
				webDoc.setContentLength(contentLength)

				self.webDocMgr_.addWebDocument(webDoc)

				try:
					self.geoSearchStrategy_.run(webDoc, self.geoPlacemarkMgr_)
					if visitor:
						visitor.visitGeoSearchStrategy(self.geoSearchStrategy_)
				except GeoSearchException as e:
					log.error("GeoSearchException exception caught, reason: %s" % e)
				finally:
					# Always reset the strategy so state from this document
					# cannot leak into the next crawled document (the
					# original only cleared on success).
					self.geoSearchStrategy_.clear()

				if recursive:
					for refWebDoc in webDoc.getWebDocuments():
						if not self.webDocMgr_.hasWebDocument(refWebDoc):
							self.crawlWebDocument(refWebDoc)
			else:
				log.info("Won't parse html doc as its content length has not changed.")

			log.info("Crawling completed.")

		except HttpDocumentNotFound:
			# Document is gone from the server; drop it from storage too.
			log.error("HttpDocumentNotFound exception caught.")
			if webDoc.isPersisted():
				self.webDocMgr_.removeWebDocument(webDoc)
		except HttpReadError as e:
			# Transient read failure: log and continue with other documents.
			log.error("HttpReadError exception caught, reason: %s" % e)
		except Exception as e:
			# Unexpected failure: log it, then let the caller decide.
			log.error("An exception caught, reason: %s" % e)
			raise


	def dump(self):
		"""Return the instance attribute dictionary for debugging/inspection."""
		return self.__dict__