# -*- coding: utf-8 -*-  
# Crawler for imanhua.com ("iManHua" comics site)
import urllib    
import urllib2  
import re
import json
from pyquery import PyQuery as pq
from lxml import etree
import urllib
import os

class IManHuaSpider:
    def __init__(self):
        self.host = "http://www.imanhua.com"
        self.comic_url = "http://www.imanhua.com/comic/440"
        self.comic_name = ""
        self.save_path = "."
        self.chap_url_list = []
        self.chap_name_list = []
        self.chap_save_path = []
        self.bid="440" #漫画id
        self.cid="48471" #章节id
        self.page_sum = 0
        self.page_pic_names = []
        self.pic_servers=("http://t4.mangafiles.com", "http://t5.mangafiles.com", "http://t6.mangafiles.com", "http://c5.mangafiles.com")
        self.opener = urllib2.build_opener() 


    def get_chap_list(self):
        #print unicode(html[0],'utf-8')
        #d = pq(etree.fromstring(rps_html))
        print "============"
        d = pq(url=self.comic_url)
        self.comic_name = d(".bookIntro").find("h2").html()
        p = d("#subBookList").find("li")
        for x in p:
            chap_name = pq(x).find("a").attr("title")
            self.chap_name_list.append(chap_name)
            self.chap_url_list.append(pq(x).find("a").attr("href"))
            self.chap_save_path.append(self.save_path+"/"+self.comic_name+"/"+chap_name)

    def download_comic(self):
        #url = self.host+self.chap_url_list[0]
        #self.resolve_cinfo(url)
        for c in  range(0,len(self.chap_name_list)):
            print "===================================="
            print "开始下载章节："+self.chap_name_list[c]
            chap_url = self.host+self.chap_url_list[c]
            self.resolve_cinfo(chap_url)
            for p in range(0,self.page_sum):
                save_path = self.chap_save_path[c]+"/"+self.page_pic_names[p]
                if not os.path.exists(save_path):
                    print "第%s页下载中..." % (p+1)
                    pic_url = self.pic_servers[0]+"/Files/Images/"+str(self.bid)+"/"+str(self.cid)+"/"+self.page_pic_names[p]
                    req = urllib2.Request(pic_url)  
                    req.add_header('User-Agent', 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:26.0) Gecko/20100101 Firefox/26.0')  
                    #下载文件需要设置请求头的 Referer为当前页面
                    req.add_header('Referer',chap_url+"?p="+str(p+1))  
                    rsp = urllib2.urlopen(req)  
                    f = open(save_path,"wb")
                    f.write(rsp.read())
                    rsp.close
                    f.close
                else :
                    print "第%s页已下载，跳过..." % (p+1)




    def make_dirs(self):
        for d in self.chap_save_path:
            if not os.path.exists(d) : 
                os.makedirs(d)

    def resolve_cinfo(self,chap_url):

        #base32 编码
        def base36encode(number, alphabet='0123456789abcdefghijklmnopqrstuvwxyz'):
            """Converts an integer to a base36 string."""
            if not isinstance(number, (int, long)):
                raise TypeError('number must be an integer')
            base36 = ''
            sign = ''

            if number < 0:
                sign = '-'
                number = -number

            if 0 <= number < len(alphabet):
                return sign + alphabet[number]

            while number != 0:
                number, i = divmod(number, len(alphabet))
                base36 = alphabet[i] + base36

            return sign + base36
        #模仿网站的解析cinfo javascript
        def f1(p, a, c, k, e, d):
            def f2(c):
                x1 = "" if c<a else f2(int(c/a))
                c=c%a
                x2 =  unichr(c+29) if c >35  else base36encode(c)
                return x1+x2
            c=c-1
            while c>=0:
                if k[c] :
                    reg = r"\b"+f2(c)+r"\b"
                    p = re.sub(reg,k[c],p)
                c=c-1
            return p;


        d = pq(url=chap_url)
        js = d("script").html();
        if not re.search(r"function",js) is None:
            js = re.sub(r"function.+;}","",js)
            s =  re.search(r"eval\((.+)\)",js)
            cinfo_b = s.group(1)
            cinfo_b = eval("f1"+cinfo_b)
        else :
            cinfo_b = js
        cinfo_json = re.search(r"{.*}",cinfo_b)
        cinfo = json.loads(cinfo_json.group())

        self.bid = cinfo["bid"]
        self.cid = cinfo["cid"]
        self.comic_name = cinfo["cname"]
        self.page_sum = int(cinfo["len"])
        self.page_pic_names = cinfo["files"]

        return cinfo["files"]







# Run the crawl only when executed as a script, not when imported.
if __name__ == "__main__":
    # Python 2 hack: make implicit str<->unicode conversions of the Chinese
    # chapter names use UTF-8 instead of the default ASCII codec.
    import sys
    reload(sys)
    sys.setdefaultencoding("utf-8")

    spider = IManHuaSpider()
    spider.get_chap_list()
    spider.make_dirs()
    spider.download_comic()


