import requests
import sys
import re
import os
import zipfile
import shutil
import execjs
import json
import urllib.parse
from fileUtils import FileHelper
import array

# Site root, used to build absolute chapter URLs from relative hrefs.
HOST = "https://www.36mh.com"
# Detail page of the one comic this script crawls.
TARGET_HOST = "https://www.36mh.com/manhua/mingrijiangdeshuishoufu/"
# Image CDN host; image paths scraped from chapter pages are appended to it.
# NOTE(review): a per-chapter Referer header is sent with each image request.
IMG_HOST = "https://images.dmzj.com/"

# Local directory under which downloaded comics are saved.
DOWNLOAD_PATH = "./download/"


class ChapterItem():
    """Value object describing one comic chapter and its image URLs."""

    # Attribute names exported by to_dict(), in serialization order.
    FIELDS = ["chapter_number", "title", "image_urls", "source_url","referer"]

    def __init__(self, chapter_number, title, image_urls, source_url,referer=None):
        # Falsy inputs are normalized to empty defaults so consumers
        # never have to deal with None.
        self.chapter_number = chapter_number
        self.title = title if title else ""
        self.image_urls = image_urls if image_urls else []
        self.source_url = source_url if source_url else ""
        self.referer = referer if referer else ""

    def to_dict(self):
        """Return a plain dict of the fields listed in FIELDS."""
        snapshot = {}
        for name in self.FIELDS:
            snapshot[name] = getattr(self, name)
        return snapshot

class Crawler():
    """Crawls one comic from 36mh.com.

    Workflow: fetch the detail page (get_comic_info), collect chapter
    links and their image URL lists (get_comic_chapters), then download
    every image (load_images). Image URLs are extracted by evaluating
    the packed ``eval(...)`` JavaScript embedded in each chapter page.
    """

    def __init__(self):
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
            "Referer": HOST}
        # One Session shares cookies and connection pooling across requests.
        self.session = requests.Session()
        self.chapterList = []

    def parse_js(self, html):
        """Evaluate the packed JS in a chapter page and return its image URL list.

        html: requests Response of a chapter page whose body contains an
        ``eval(function(p,a,c,k,e,d)...)`` packer.
        """
        # Raw string: the old non-raw pattern relied on '\(' being passed
        # through and triggers invalid-escape warnings on modern Python.
        js_str = re.search(r'eval\((.*?)\)\n', html.text).group(1)
        # Name the anonymous packer function so it can be evaluated inline.
        js_str = js_str.replace('function(p,a,c,k,e,d)',
                                'function fun(p, a, c, k, e, d)')
        fun = """
                    function run(){
                            var result = %s;
                            return result;
                        }
                """ % js_str
        pages = execjs.compile(fun).call('run')
        # The unpacked script looks like `var a = b = '[...]';` -- take the
        # quoted JSON array after the second '=' and strip the quotes.
        data = pages.split('=')[2][1:-2]
        return json.loads(data)

    def zipDir(self, dirpath, outFullName):
        """Zip every file under *dirpath* (recursively) into *outFullName*.

        Archive member names are relative to *dirpath*.
        """
        # Context manager closes the archive even if write() raises; the old
        # code leaked the handle on error and shadowed the builtin `zip`.
        with zipfile.ZipFile(outFullName, "w", zipfile.ZIP_DEFLATED) as archive:
            for path, _dirnames, filenames in os.walk(dirpath):
                # relpath is safer than str.replace, which would also clobber
                # any repeated occurrence of dirpath inside the path.
                rel = os.path.relpath(path, dirpath)
                for filename in filenames:
                    arcname = filename if rel == os.curdir else os.path.join(rel, filename)
                    archive.write(os.path.join(path, filename), arcname)

    def load_image(self, url, filePath, imageName):
        """Download one image and persist it through FileHelper."""
        targetPic = self.session.get(url, headers=self.headers, timeout=(5, 30))
        FileHelper.save_file(filePath, imageName, targetPic.content)

    def load_images(self):
        """Download every image of every chapter collected in chapterList."""
        for chapter in self.chapterList:
            # Send the chapter page as Referer for all of its image requests.
            self.headers["Referer"] = chapter.referer
            print("开始下载章节：" + chapter.title + " " + chapter.referer)
            # enumerate replaces the hand-maintained counter; images are still
            # saved as 1.jpg, 2.jpg, ...
            for index, imgs in enumerate(chapter.image_urls, start=1):
                print(chapter.title + "-" + IMG_HOST + imgs + " 开始下载")
                filePath = DOWNLOAD_PATH + self.comicName + "/" + chapter.title + "/"
                filePath = filePath.replace(' ', '')
                self.load_image(IMG_HOST + imgs, filePath, str(index) + ".jpg")

    def filterChapters(self, chapterName):
        """Hook for subclasses/edits: return True to download *chapterName*."""
        return True

    def get_comic_chapters(self):
        """Parse chapter links from the detail page, fetch each chapter's
        image URL list, then start the downloads."""
        print("开始获取章节信息...")
        pageText = self.detailHtml.content.decode('utf-8')
        chapterHtmls = re.compile(r'<div class="cartoon_online_border" (.*?)</div>', re.S).findall(pageText)
        for cPager in chapterHtmls:
            clis = re.compile(r'<li>(.*?)</li>', re.S).findall(cPager)
            chapterIndex = 1
            for cli in clis:
                cName = re.compile(r'<a .*?>\s*(.*?)\s*</a>', re.S).findall(cli)[0]
                cUrl = HOST + re.compile(r'href="\s*(.*?)\s*"', re.S).findall(cli)[0]
                print(cName + ":" + cUrl)
                if self.filterChapters(cName):
                    chapterHtml = self.session.get(cUrl, headers=self.headers, timeout=(5, 30))
                    cImageUrls = self.parse_js(chapterHtml)
                    # chapterHtml.url is the final URL after redirects -- used
                    # as the Referer when downloading this chapter's images.
                    self.chapterList.append(
                        ChapterItem(chapterIndex, cName, cImageUrls, cUrl, chapterHtml.url))
                    chapterIndex += 1
        if len(self.chapterList) > 0:
            self.load_images()
        else:
            print("未找到合适的章节")

    def get_comic_info(self):
        """Fetch the comic detail page, extract the comic title, then crawl."""
        self.detailHtml = self.session.get(TARGET_HOST, headers=self.headers, timeout=(5, 30))
        pageText = self.detailHtml.content.decode('utf-8')
        self.comicName = re.compile(r'<div class="book-title">.*?(.*?)</span></h1>').findall(pageText)[0]
        self.comicName = self.comicName.replace(' ', '')
        # Warn but continue when the comic folder already has content.
        # (The original body was tab-indented inside a space-indented file --
        # an inconsistent-indentation error waiting to happen.)
        targetDir = DOWNLOAD_PATH + self.comicName
        if os.path.exists(targetDir) and len(os.listdir(targetDir)) > 0:
            print("该漫画已下载过！开始覆盖")
        self.get_comic_chapters()

    def run(self):
        """Entry point: crawl the comic configured by TARGET_HOST."""
        self.get_comic_info()

if __name__ == '__main__':
    # Script entry point: build a crawler and download the configured comic.
    Crawler().run()