#!/bin/env python
# -*- coding: utf8 -*-

from bs4 import BeautifulSoup

import sys
import os
import urllib2
import time
import re
from json import JSONDecoder
from urlparse import urljoin

def urlopen(url, timeout = 30, headers = None, refer=None):
	"""Fetch url and return the raw response body as a byte string.

	timeout -- socket timeout in seconds
	headers -- optional dict of extra request headers
	refer   -- optional Referer header value
	"""
	req = urllib2.Request(url)
	if refer: req.add_header('Referer', refer)
	req.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/37.0.2062.94 Chrome/37.0.2062.94 Safari/537.36')
	if headers:
		# items() instead of iteritems(): identical behavior here and not
		# tied to the py2-only dict API.
		for k, v in headers.items():
			req.add_header(k, v)

	# todo: support percentage with content-length, and save block
	r = urllib2.urlopen(req, timeout=timeout)
	try:
		return r.read()
	finally:
		# the original leaked the response object; close it even if read() raises
		r.close()

#def is_chapt_link(tag):
#	return tag.name =='a' and tag.has_attr('title') and tag.has_attr('target') and tag['target']=='_blank'

def to_num(text):
	"""Return the integer formed by the leading digit characters of
	text; 0 when text is empty or does not start with a digit."""
	value = 0
	for ch in text:
		if not ch.isdigit():
			break
		value = value * 10 + int(ch)
	return value
			

def parse_chapt_list(url):
	print "parsing chapter list from %s" % url
	html = urlopen(url)
	soup = BeautifulSoup(html)
	#for chap in soup.find_all(is_chapt_link):
	#	print chap
	#for chap in soup.find_all('a', attrs={'target': '_blank', 'class':'status0'}):
	#	print chap
	chap_list = soup.find('div', attrs={'class': 'chapter-list cf mt10'})
	#print chap_list
	chap_info = []
	for chap in chap_list.find_all('a'):
		#print chap['href'], to_num(chap.text)
		#chap_info[to_num(chap.text)] = cha['href']
		pg = chap.find('i')
		chap_info.append((to_num(chap.text), chap['href'], to_num(pg.text)))
	chap_info.sort(lambda x, y: cmp(x[0], y[0]))
	#print chap_info
	return chap_info


def parse_image_urls(url):
	"""Scrape a chapter page and return the list of image path fragments
	('fs' entries) from its embedded "var cInfo = {...};" script blob.
	Returns [] when no such blob is found on the page."""
	html = urlopen(url)
	soup = BeautifulSoup(html)

	cinfo_reg = 'var cInfo = (.*?);'
	cinfo = None
	for script in soup.find_all('script'):
		m = re.search(cinfo_reg, script.text)
		if m:
			cinfo = m.group(1)
			break

	# the original raised NameError here when no script matched the regex
	if cinfo is None:
		return []

	# the blob is javascript with single quotes, so normalize to JSON first;
	# NOTE(review): this breaks if any value contains an apostrophe
	cinfo = JSONDecoder(encoding='utf8').decode('%s' % cinfo.replace("'", '"'))
	return cinfo.get('fs', [])
	
def down_image(url, refer, dname):
	headers = {
		'Accept':'image/webp,*/*;q=0.8',
		'Accept-Encoding':'gzip,deflate,sdch',
		'Accept-Language':'zh-CN,zh;q=0.8,en;q=0.6',
		'Connection':'keep-alive',
		'Referer':refer,
	}

	print 'getting img %s ...'  % url
	img = urlopen(url, headers = headers)
	fname = os.path.basename(url)
	fpath = os.path.join(dname, fname)
	f = open(fpath, 'wb')
	f.write(img)
	f.close()
	
def down_chap(chapno, chap_url, img_urls, start_img):
	dname = "%03d" % chapno
	print '%d' % len(img_urls)
	if not os.path.exists(dname):
		os.mkdir(dname);
	for i, url in enumerate(img_urls):
		if i + 1 < start_img:
			print 'skip img %s' % url
			continue
		refer = '%s?p=%d' % (chap_url, i)
		down_image(url, refer, dname)
		start_img = 1

def down_comic(url, img_server, start_chap, start_img):
	chap_info = parse_chapt_list(url)
	print '%d chapters: %s' % (len(chap_info), chap_info)
	for info in chap_info:
		if info[0] < start_chap:
			print 'skip chapter %d' % info[0]
			continue
			
		print 'getting chapter %s ...'  % (str(info))
		chap_url = urljoin(url, info[1])
		#print url, info[1], chap_url
		fs_urls = parse_image_urls(chap_url)
		img_urls = [ urljoin(img_server, fs_url) for fs_url in fs_urls]
		if len(img_urls) != info[2]:
			print "********* may lost %d pages *********" % (info[2] - len(img_urls))
		#print img_urls
		down_chap(info[0], chap_url, img_urls, start_img)

if len(sys.argv) < 2:
	print "get comic from www.manhuadao.com"
	print ""
	print "Usage: %s comic_name [start_chapter] [start_img]" % sys.argv[0]
	print "   eg. %s mix 1 20" % sys.argv[0]
	sys.exit()

start_chap = 1
if len(sys.argv) > 2:
	start_chap = int(sys.argv[2])

start_img = 1
if len(sys.argv) > 3:
	start_img = int(sys.argv[3])

url='http://www.manhuadao.com/book/%s/' % sys.argv[1]

# from http://conf.manhuadao.com/configs.js?v=0731
img_server = 'http://t6.mangafiles.com:88'

down_comic(url, img_server, start_chap, start_img)

