#!/usr/bin/python
# -*- coding: utf-8 -*-
# Filename: manga_downloader.py

import sys
import urllib
import re
import json
import os

class MangaDownloader:
	'''Manga downloader, download manga pictures from manhua.178.com
	'''

	def __init__(self, start_volume_url, save_dir):
		self.image_base_url = 'http://manhua.178.com/imgs'
		self.save_dir = save_dir
		self.manga_title = None

		self.cur_volume_url = start_volume_url
		self.cur_volume_name = None
		self.cur_volume_pages = []

		self.next_volume_url = None
		self.initRegex()

	def initRegex(self):
		self.r_manga_title = re.compile(r'[\s\S]*var g_comic_name = "(.*)"[\s\S]*')
		self.r_volume_name = re.compile(r'[\s\S]*var g_chapter_name = "(.*)"[\s\S]*')
		self.r_pages = re.compile(r"[\s\S]*var pages = '(\[.*\])'[\s\S]*")
		self.r_next_volume = re.compile(r'[\s\S]*下一话.*href="(.*\.shtml)">(.*)</a>[\s\S]*')
		self.r_image_suffix = re.compile(r'.*\.(.*)$')

	def parseVolumeUrl(self, volume_url):
		self.cur_volume_pages = []
		self.next_volume_url = None

		print 'parsing info from %s ' % volume_url
		# open volume_url
		opener = urllib.urlopen(volume_url)
		buff = opener.read()

		# parse manga title
		if not self.manga_title:
			m_manga_title = self.r_manga_title.match(buff)
			if not m_manga_title:
				print 'failed to find manga title'
				return 1
			self.manga_title = m_manga_title.group(1)
			print 'get manga title: %s' % self.manga_title
		else:
			print 'manga title exist: %s' % self.manga_title

		# parse current volume name
		m_volume_name = self.r_volume_name.match(buff)
		if not m_volume_name:
			print 'failed to find volume name'
			return 2
		self.cur_volume_name = m_volume_name.group(1)
		print 'get volumn name: %s' % self.cur_volume_name

		# parse current volume pages
		m_pages = self.r_pages.match(buff)
		if not m_pages:
			print 'failed to find pages urls'
			return 2
		pages = m_pages.group(1)
		j_pages = json.loads(pages)
		for p in j_pages:
			# os.path.join cant join sub-path startswith '/'
			# cut the head startswith '/'
			if p.startswith('/'):
				p = p[1:]
			self.cur_volume_pages.append(os.path.join(self.image_base_url, p))
		print 'get %d page urls' % len(self.cur_volume_pages)

		# parse next volume
		m_next_volume = self.r_next_volume.match(buff)
		if not m_next_volume:
			print 'failed to find next volume url'
			return 3
		self.next_volume_url = os.path.join(os.path.dirname(volume_url), m_next_volume.group(1))
		print 'get next volume url: %s' % self.next_volume_url

	def parseSuffix(self, page_url):
		suffix = 'jpg'
		m_suffix = self.r_image_suffix.match(page_url)
		if not m_suffix:
			print 'failed to parse image suffix, use default jpg'
		else:
			suffix = m_suffix.group(1)
		return suffix

	def downloadVolume(self):
		if len(self.cur_volume_pages) <= 0:
			print 'there is no page to download'
			return 1

		# check save dir
		tmp_path = os.path.join(self.save_dir, self.manga_title, self.cur_volume_name, 'tmp.jpg')
		if not os.path.exists(os.path.dirname(tmp_path)):
			os.makedirs(os.path.dirname(tmp_path))
			print '%s created' % str(os.path.dirname(tmp_path))

		page_index = 0
		for page in self.cur_volume_pages:
			image_suffix = self.parseSuffix(page)
			page_index += 1
			s_page_name = '%.3d.%s' % (page_index, image_suffix)
			save_path = os.path.join(self.save_dir, self.manga_title, self.cur_volume_name, str(s_page_name))
			if os.path.exists(save_path):
				print save_path, 'already exists, skip'
				continue
			if isinstance(page, unicode):
				page_url = str(page.encode('utf-8'))
			else:
				page_url = str(page.decode().encode('utf-8'))
			urllib.urlretrieve(page_url, save_path)
			print '%s --> %s' % (page_url, save_path)

	def downloadAll(self):
		self.next_volume_url = self.cur_volume_url
		while(self.next_volume_url):
			self.parseVolumeUrl(self.next_volume_url)
			self.downloadVolume()

if __name__ == '__main__':
	if len(sys.argv) < 3:
		print 'usage: %s start_url save_dir' % (sys.argv[0])
		sys.exit(1)
	start_url = sys.argv[1]
	save_dir = sys.argv[2]
	md = MangaDownloader(start_url, save_dir)
	md.downloadAll()

