#!/usr/bin/python
# Filename: dskb_download.py

import os
import time
import urllib
import re
import commands
import logging
import sys
import getopt
from pyPdf import PdfFileWriter, PdfFileReader

class DSKBDownloader:
	'''Download the DSKB newspaper from the web and merge the pages into
	a single pdf file.

	Typical use:
		DSKBDownloader('/tmp/paper').downloadPaper()
	'''
	def __init__(self, save_dir, date = None):
		'''Init the downloader.

		save_dir -- base directory where the pages and the merged paper
		            are stored.
		date     -- a time.struct_time giving the paper's date.  Defaults
		            to the current local time *at call time* (a default of
		            time.localtime() in the signature would be evaluated
		            only once, at definition time, freezing the date for
		            long-running processes).
		'''
		if date is None:
			date = time.localtime()
		self.date = date
		# base urls of the newspaper site
		self.base_url = 'http://hzdaily.hangzhou.com.cn/dskb/page/3/'
		self.base_url_2 = 'http://hzdaily.hangzhou.com.cn/dskb/'
		self.base_save_dir = save_dir
		# target path of the merged paper, e.g. <save_dir>/2011-03-23.pdf
		self.paper = os.path.join(self.base_save_dir, time.strftime('%Y-%m-%d', date) + '.pdf')
		# downloaded files smaller than this are treated as errors
		self.min_page_size = 1
		# list of (page_url, local_save_path) tuples, filled by
		# parsePageUrls / generatePageUrls
		self.pages = []
		# init logger
		self.initLogger()

	def initLogger(self):
		'''Initialize logging: full DEBUG log goes to downloader.log,
		a shorter format is echoed to the console.
		'''
		# file log (overwritten on every run)
		logging.basicConfig(level=logging.DEBUG,
				format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
				datefmt='%m-%d %H:%M',
				filename='downloader.log',
				filemode='w')
		# console handler with a terser format
		handler = logging.StreamHandler()
		formatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s')
		handler.setFormatter(formatter)
		# attach to the root logger so every message is echoed
		logging.getLogger('').addHandler(handler)
		# the logger this class logs through
		self.logger = logging.getLogger('downloader')

	def logDebug(self, msg):
		self.logger.debug(msg)

	def logError(self, msg):
		self.logger.error(msg)

	def logWarn(self, msg):
		self.logger.warn(msg)

	def downloadPaper(self):
		'''Download and merge the whole paper.
		Return 0 on success, 1 on any failure.
		'''
		# generatePageUrls() is the scraping-free fallback, kept unused
		#if 0 != self.generatePageUrls(): return 1
		if 0 != self.parsePageUrls(): return 1
		if 0 != self.downloadPages(): return 1
		if 0 != self.mergePages(): return 1
		return 0

	def downloadPages(self):
		'''Download every page in self.pages.

		Pages the server answers with an HTML error page (i.e. the page
		does not exist) are dropped from self.pages.  A corrupt pdf is
		deleted and re-downloaded, but only a bounded number of times:
		a page that stays corrupt is dropped as well, so this loop can
		no longer run forever on a permanently broken page.
		Return 0 on success, 1 on a download failure.'''
		max_invalid_retry = 3
		invalid_counts = {}	# page_url -> number of corrupt downloads
		i = 0
		while i < len(self.pages):
			page_url, save_path = self.pages[i]
			self.logDebug('download page from ' + page_url)
			self.logDebug('save to local ' + save_path)
			# download
			if 0 != self.downloadOnePage(page_url, save_path):
				return 1
			# an HTML answer means the page does not exist on the server
			if 0 != self.checkFileType(save_path):
				self.logDebug('the page is not found ' + page_url)
				self.logDebug('remove page from page list ' + page_url)
				self.pages.pop(i)
				continue
			# corrupt pdf: delete it and retry the download
			if 0 != self.checkFileValid(save_path):
				os.remove(save_path)
				invalid_counts[page_url] = invalid_counts.get(page_url, 0) + 1
				if invalid_counts[page_url] >= max_invalid_retry:
					self.logWarn('page stays invalid, drop it ' + page_url)
					self.pages.pop(i)
				else:
					self.logDebug('remove the invalid file and retry download.')
				continue
			i = i + 1
		return 0

	def downloadOnePage(self, page_url, save_path):
		'''Download one page to save_path, retrying on network errors.
		An existing, non-empty local copy is reused without downloading.
		Return 0 on success, 1 on failure.'''
		# reuse an existing plausible local copy
		if os.path.exists(save_path) and self.min_page_size <= os.path.getsize(save_path):
			self.logDebug('the target file exists. ' + save_path)
			return 0

		# make sure the save dir exists
		save_dir = os.path.dirname(save_path)
		if not os.path.exists(save_dir):
			self.logDebug('%s is created.' % str(save_dir))
			os.makedirs(save_dir)

		# download the page: at most (max_retry_count + 1) attempts
		max_retry_count = 3
		cur_retry_count = 0
		while cur_retry_count <= max_retry_count:
			try:
				urllib.urlretrieve(page_url, save_path)
			except Exception:
				# Exception (not a bare except) so KeyboardInterrupt
				# still aborts the run
				self.logError('failed to download page from ' + page_url)
				self.logDebug('download page retry count ' + str(cur_retry_count))
				cur_retry_count += 1
				continue
			break

		# verify something usable arrived on disk
		if not os.path.exists(save_path):
			self.logError('cant find local page ' + save_path)
			return 1
		if self.min_page_size > os.path.getsize(save_path):
			# empty/truncated download: remove it so a later run retries
			self.logError('get an error file ' + save_path)
			os.remove(save_path)
			return 1
		return 0

	def checkFileType(self, save_path):
		'''Check the downloaded file's type with the `file` command.
		Return  0 if it is a PDF document,
		        1 if it is HTML document text (the server's error page),
		        9 for any other recognized type,
		       -1 if the `file` command itself failed.'''
		# NOTE(review): the path is interpolated into a shell command;
		# safe here because save_path is generated by this program,
		# but do not pass untrusted paths in.
		cmd = 'file \'' + save_path + '\''
		stat, ret = commands.getstatusoutput(cmd)
		if stat != 0:
			self.logDebug('failed to get file type of ' + save_path)
			return -1
		if 'PDF document' in ret:
			return 0
		if 'HTML document text' in ret:
			return 1
		return 9

	def checkFileValid(self, save_path):
		'''Check whether save_path parses as a pdf file.
		Return 0 if valid, 1 otherwise.'''
		stream = None
		try:
			# constructing the reader parses header/xref, which is
			# enough to detect a truncated or bogus file
			stream = open(save_path, 'rb')
			PdfFileReader(stream)
		except Exception:
			self.logError('this maybe an error page')
			return 1
		finally:
			if stream is not None:
				stream.close()
		return 0

	def mergePages(self):
		'''Merge all downloaded pages into self.paper, in list order.
		Skips the merge if the target paper already exists.
		Return 0 on success, 1 on failure.'''
		if os.path.exists(self.paper):
			self.logDebug('the target paper exists. ' + self.paper)
			return 0

		self.logDebug('start merging...')
		out_file = PdfFileWriter()
		in_streams = []
		try:
			for page_url, save_path in self.pages:
				if not os.path.exists(save_path):
					self.logError('page does not exist. ' + save_path)
					return 1
				try:
					in_stream = open(save_path, 'rb')
					in_streams.append(in_stream)
					in_file = PdfFileReader(in_stream)
					for page_no in range(0, in_file.getNumPages()):
						out_file.addPage(in_file.getPage(page_no))
				except Exception:
					self.logError('failed to merge page ' + save_path)
					return 1
			# write the merged document
			out_stream = open(self.paper, 'wb')
			try:
				out_file.write(out_stream)
			finally:
				out_stream.close()
		finally:
			# pyPdf reads input streams lazily, so they must stay open
			# until out_file.write() has finished; close them all here
			for s in in_streams:
				s.close()
		self.logDebug('merge done.')
		return 0

	def parsePageUrls(self):
		'''Scrape the pdf page urls for self.date from the issue's index
		page and fill self.pages with (url, local_path) tuples.
		Return 0 on success, 1 on failure.'''
		year_month = time.strftime('%Y-%m', self.date)
		day = time.strftime('%d', self.date)
		# NOTE(review): os.path.join is only url-safe on posix systems
		master_url = os.path.join(self.base_url_2, 'html', year_month, day, 'node_85.htm')
		self.logDebug('parse pdf urls from page ' + master_url)
		# fetch the master (index) page
		u = urllib.urlopen(master_url)
		try:
			buf_page = u.read()
		finally:
			u.close()
		# the page list lives inside the <div id="pgn"> element
		re_pgn = re.compile(r'[\s\S]*(<div id="pgn"[\s\S]*?</div>)[\s\S]*')
		m = re_pgn.match(buf_page)
		if m is None:
			# fail cleanly instead of crashing with AttributeError
			self.logError('failed to parse pdf urls')
			return 1
		buf_pgn = m.group(1)
		re_pdf = re.compile(r'\.\./\.\./\.\./page/([\s\S]*?pdf\.pdf)')
		pdf_urls = re_pdf.findall(buf_pgn)
		self.logDebug('get %d pdf urls from master page' % len(pdf_urls))
		if len(pdf_urls) == 0:
			self.logError('failed to parse pdf urls')
			return 1
		# turn relative pdf paths into absolute urls + local save paths
		for pdf in pdf_urls:
			tmp_url = os.path.join(self.base_url_2, 'page', pdf)
			# mirror the url's path layout below base_save_dir
			tmp_path = os.path.join(self.base_save_dir, tmp_url[len(self.base_url):])
			self.pages.append((tmp_url,tmp_path))

		return 0

	def generatePageUrls(self):
		'''Generate one day's page urls without scraping the site.
		Kept as a fallback for parsePageUrls (currently unused, see
		downloadPaper).  Fills self.pages; always returns 0.'''
		year_month = time.strftime('%Y-%m', self.date)
		day = time.strftime('%d', self.date)
		year_month_day = time.strftime('%Y%m%d', self.date)
		suffix = '_pdf.pdf'

		# (edition prefix, first page no, one past the last page no)
		# url examples:
		#   http://hzdaily.hangzhou.com.cn/dskb/page/3/2011-03/23/01/2011032301_pdf.pdf
		#   http://hzdaily.hangzhou.com.cn/dskb/page/3/2011-03/23/A02/20110323A02_pdf.pdf
		editions = [('', 1, 2), ('A', 2, 25), ('B', 1, 17), ('C', 1, 17), ('D', 1, 17)]
		for prefix, page_no_from, page_no_to in editions:
			for page_no in range(page_no_from, page_no_to):
				page_name = '%s%.2d' % (prefix, page_no)
				tmp_url = os.path.join(
						self.base_url,
						year_month,
						day,
						page_name,
						year_month_day + page_name + suffix
						)
				tmp_path = os.path.join(self.base_save_dir, tmp_url[len(self.base_url):])
				self.pages.append((tmp_url,tmp_path))

		# 1 index page + 23 A + 16 B + 16 C + 16 D = 72
		self.logDebug('generate %d page urls, should be 72.' % len(self.pages))
		return 0

def usage():
	print '''
	-h, --help	usage.
	-d, --date	specify date to download.
	'''
def main():
	try:
		opts, args = getopt.getopt(sys.argv[1:], 'hds:', ['help', 'date=', 'save='])
	except getopt.GetoptError, err:
		print str(err)
		sys.exit(1)

	date = time.localtime()
	save_dir = '/tmp/paper'
	for o, a in opts:
		if o in ('-h', '--help'):
			usage()
			sys.exit()
		elif o in ('-d', '--date'):
			date = time.strptime(a, '%Y-%m-%d')
		elif o in ('-s', '--save'):
			save_dir = a
		else:
			assert False, 'unhandled option'

	downloader = DSKBDownloader(save_dir, date)
	downloader.downloadPaper()

# Script entry point: download today's paper (or the date given via -d).
if __name__ == '__main__':
	main()
