import requests
import sys
import re
import os
import zipfile
import shutil
import execjs
import json
import urllib.parse
from fileUtils import FileHelper

# Index page of the target comic on manhua.fzdm.com (comic id 106).
TARGET_HOST = "https://manhua.fzdm.com/106/"
# Image CDN host that serves the actual page scans; the relative path is
# extracted from each chapter page's "var mhurl" script variable.
IMG_HOST = "http://p2.manhuapan.com/"
# Comic title; also used as the download sub-directory name and stripped
# from chapter link text to form each chapter's folder name.
COMIC_NAME="出包王女Darkness"

# Local root directory that downloaded chapters are written into.
DOWNLOAD_PATH = "./download/"


class ChapterItem():
    """Lightweight record describing a single comic chapter.

    Holds the chapter's 1-based position, its display title, and the URL
    of its first page. ``None`` title/URL values are normalized to "".
    """

    # Attribute names serialized by to_dict(), in output order.
    FIELDS = ["chapter_number", "title", "source_url"]

    def __init__(self, chapter_number, title, source_url):
        self.chapter_number = chapter_number
        # Fall back to empty strings so downstream string concatenation
        # never has to deal with None.
        self.title = title if title else ""
        self.source_url = source_url if source_url else ""

    def to_dict(self):
        """Return the chapter as a plain dict keyed by FIELDS."""
        result = {}
        for name in self.FIELDS:
            result[name] = getattr(self, name)
        return result

class Crawler():
    """Scrapes every chapter of one comic from manhua.fzdm.com and saves
    the page images under DOWNLOAD_PATH/COMIC_NAME/<chapter title>/.

    Fixes over the original:
    - Catches requests.exceptions.ConnectionError instead of the builtin
      ConnectionError, which requests errors do not derive from.
    - Sends a well-formed User-Agent header (the original value embedded
      the header name: "User-Agent:Mozilla/5.0").
    - Walks chapter pages iteratively instead of recursively, so long
      chapters cannot trigger RecursionError.
    """

    def __init__(self):
        self.headers = {"User-Agent": "Mozilla/5.0"}
        self.session = requests.Session()
        self.chapterList = []

    # Download a single image and save it to disk.
    def load_image(self, url, filePath, imageName):
        """Fetch one image URL and write its bytes via FileHelper.

        Network errors are printed and swallowed so one failed page does
        not abort the whole chapter.
        """
        try:
            print('正在下载：' + url)
            targetPic = self.session.get(url, headers=self.headers, timeout=(60, 180))
            FileHelper.save_file(filePath, imageName, targetPic.content)
        # ConnectTimeout subclasses ConnectionError in requests, so it
        # must be caught first to keep the distinct messages.
        except requests.exceptions.ConnectTimeout:
            print("下载超时" + filePath + imageName)
        except requests.exceptions.ReadTimeout:
            print("读取超时" + filePath + imageName)
        except requests.exceptions.ConnectionError:
            print("下载失败" + filePath + imageName)

    # Chapter filter: only chapters 13..40 (inclusive) are downloaded.
    def filterChapters(self, chapterName, chapterIndex):
        """Return True when the chapter at 1-based chapterIndex should be
        fetched. chapterName is accepted for interface compatibility but
        currently unused."""
        return 13 <= chapterIndex <= 40

    # Image filter: skip pages 1-15 of chapter 13 (presumably already
    # downloaded in an earlier run).
    def filterImgs(self, chapterIndex, imgIndex):
        """Return True when 1-based page imgIndex of the given chapter
        should be fetched."""
        return not (chapterIndex == 13 and imgIndex < 16)

    def get_commic_chapter_imgs(self, chapterItem, pageNum):
        """Download every page image of one chapter, starting at pageNum.

        Pages live at <chapter url>/index_<n>.html. The loop stops on a
        404 response or when the page body contains the '最后一页了'
        (last page) marker; any network error aborts the chapter, which
        matches the original recursive behavior.
        """
        while True:
            try:
                if self.filterImgs(chapterItem.chapter_number, pageNum + 1):
                    chpUrl = chapterItem.source_url + 'index_' + str(pageNum) + '.html'
                    chpHtml = self.session.get(chpUrl, headers=self.headers, timeout=(30, 60))
                    if chpHtml.status_code == 404:
                        print("已经下载完毕..." + chapterItem.title)
                        return
                    # Decode once and reuse (the original decoded twice).
                    pageText = chpHtml.content.decode('utf-8')
                    imgUrls = re.compile(r'var mhurl="(.*?)"', re.S).findall(pageText)
                    if imgUrls:
                        imgUrl = IMG_HOST + imgUrls[0]
                        filePath = DOWNLOAD_PATH + COMIC_NAME + "/" + chapterItem.title + "/"
                        self.load_image(imgUrl, filePath, str(pageNum + 1) + '.jpg')
                    else:
                        print("图片Html解析失败" + chpUrl)
                    if '最后一页了' in pageText:
                        return
                pageNum += 1
            except requests.exceptions.ConnectTimeout:
                print("连接图片详情页超时")
                return
            except requests.exceptions.ReadTimeout:
                print("读取图片详情页超时")
                return
            except requests.exceptions.ConnectionError:
                print("连接图片详情页失败")
                return

    # Parse the chapter list and download each selected chapter.
    def get_comic_chapters(self):
        """Extract the <li> chapter entries from the detail page and
        download every chapter that passes filterChapters.

        Requires self.detailHtml to have been set by get_comic_info.
        """
        print("开始获取章节信息...")
        detailText = self.detailHtml.content.decode('utf-8')
        chapterHtmls = re.compile(r'<div id="mhlistad"></div>(.*?)<div id="content">', re.S).findall(detailText)[0]
        clis = re.compile(r'<li\s*(.*?)\s*</li>', re.S).findall(chapterHtmls)
        # The site lists newest chapters first; iterate reversed so that
        # chapterIndex 1 is the first chapter.
        for chapterIndex, cli in enumerate(reversed(clis), start=1):
            cName = re.compile(r'<a .*?>\s*(.*?)\s*</a>', re.S).findall(cli)[0]
            cUrl = TARGET_HOST + re.compile(r'href="\s*(.*?)\s*"', re.S).findall(cli)[0]
            print(cName + ":" + cUrl)
            if self.filterChapters(cName, chapterIndex):
                # Folder name is the link text minus the comic title.
                title = cName.replace(COMIC_NAME, '').rstrip()
                self.get_commic_chapter_imgs(ChapterItem(chapterIndex, title, cUrl), 0)

    # Fetch the comic's index page and kick off the download.
    def get_comic_info(self):
        """Load the comic detail page, warn if it was downloaded before
        (then overwrite), and download all selected chapters."""
        try:
            self.detailHtml = self.session.get(TARGET_HOST, headers=self.headers, timeout=(15, 30))
            target_dir = DOWNLOAD_PATH + COMIC_NAME
            # Warn but do not abort when the comic already exists locally.
            if os.path.exists(target_dir) and os.listdir(target_dir):
                print("该漫画已下载过！开始覆盖")
            self.get_comic_chapters()
        except requests.exceptions.ConnectTimeout:
            print("连接目标页面超时" + TARGET_HOST)
        except requests.exceptions.ReadTimeout:
            print("读取目标页面超时" + TARGET_HOST)
        except requests.exceptions.ConnectionError:
            print("连接目标页面失败" + TARGET_HOST)

    def run(self):
        """Entry point: download the whole configured comic."""
        self.get_comic_info()

if __name__ == '__main__':
    # Script entry point: crawl and download the configured comic.
    Crawler().run()