import requests
import re
import json
from base import ChapterItem
from base import CrawlerBase

class Crawler(CrawlerBase):
    """Crawler for the '99manhua' comic site.

    Scrapes a comic's chapter list from its detail page, then fetches each
    chapter page to extract the image URL list embedded in its JavaScript.
    Results are cached in ``self.dbase`` (provided by CrawlerBase) so a
    completed crawl is not repeated.
    """

    def __init__(self, host, target_host, img_host, comic_name, commic_id):
        """
        :param host: site root URL, used to absolutize chapter links
        :param target_host: full URL of this comic's detail page
        :param img_host: base URL that serves the chapter images
        :param comic_name: human-readable comic title
        :param commic_id: site-specific comic id, used as the cache key prefix
        """
        super().__init__()
        self.SITE = '99manhua'
        self.HOST = host
        self.TARGET_HOST = target_host
        self.IMG_HOST = img_host
        self.COMIC_NAME = comic_name
        self.COMIC_ID = commic_id

    # Fetch chapter information
    def get_comic_chapters(self):
        """Parse chapters out of the detail page, fetch each chapter's image
        URLs, cache everything in ``self.dbase``, then start the downloads.

        Assumes ``self.detailHtml`` has already been set by
        :meth:`get_comic_info` when the cache is cold.
        """
        if not self.dbase.get(self.COMIC_ID + '_isChaptersLoadFinish', False):
            page = self.detailHtml.content.decode('utf-8')
            # The chapter list sits inside <div class='cVolList'> ... </div></div>.
            chapterHtmls = re.compile(
                r"<div class='cVolList'>\s*(.*?)\s*</div></div>", re.S
            ).findall(page)[0]
            # Re-append the closing tag consumed by the outer match so every
            # per-chapter <div> can be matched.
            clis = re.compile(r'<div>\s*(.*?)\s*</div>', re.S).findall(chapterHtmls + "</div>")
            # True iff every chapter page loaded without error; only then is
            # the cache marked complete.
            isLoadAllChapters = True
            # The site lists newest chapters first; iterate reversed so that
            # index 1 corresponds to chapter 1. The index advances even for
            # failed chapters, preserving each chapter's ordinal position.
            for chapterIndex, cli in enumerate(reversed(clis), start=1):
                cName = re.compile(r'<a .*?>\s*(.*?)\s*</a>', re.S).findall(cli)[0]
                cUrl = self.HOST + re.compile(r"href='\s*(.*?)\s*'", re.S).findall(cli)[0]
                print("开始下拉章节信息...")
                print(cName + ":" + cUrl)
                try:
                    chapterHtml = self.get_html(cUrl)
                    # Image URLs are embedded as a '|'-separated JS string:
                    #   var sFiles="a.jpg|b.jpg|..."
                    cImageUrlStrs = re.compile(
                        r'var sFiles="\s*(.*?)\s*"', re.S
                    ).findall(chapterHtml.content.decode('utf-8'))[0]
                    cImageUrls = cImageUrlStrs.split('|')
                    self.chapterList.append(ChapterItem(chapterIndex, cName, cImageUrls, cUrl))
                except Exception as e:
                    # Best-effort: record the failure (so the cache is not
                    # marked complete) and keep crawling remaining chapters.
                    isLoadAllChapters = False
                    msg = "URL链接访问异常！ url={}".format(cName + cUrl + "\n" + str(e))
                    print(msg)
            self.dbase[self.COMIC_ID + '_isChaptersLoadFinish'] = isLoadAllChapters
            self.dbase[self.COMIC_ID + '_chapters'] = self.chapterList
            self.log(json.dumps(self.chapterList, default=lambda o: o.__dict__, sort_keys=True, indent=4))
        else:
            # A previous run finished cleanly; reuse the cached chapter list.
            self.chapterList = self.dbase[self.COMIC_ID + '_chapters']

        if len(self.chapterList) > 0:
            self.load_chapters()
        else:
            print("未找到合适的章节")

    # Download comic metadata
    def get_comic_info(self):
        """Fetch the comic detail page (skipped when chapters are cached) and
        delegate to :meth:`get_comic_chapters`; on any failure, report it and
        invoke the base-class error hook."""
        try:
            if not self.dbase.get(self.COMIC_ID + '_isChaptersLoadFinish', False):
                self.detailHtml = self.get_html(self.TARGET_HOST)
            self.get_comic_chapters()
        except Exception as e:
            msg = "URL链接访问异常！ url={}".format(self.TARGET_HOST + "\n" + str(e))
            print(msg)
            self.get_comic_info_error()
        


if __name__ == '__main__':
    # Crawl one comic: (site root, detail page URL, image host, title, id).
    site_root = "http://99.hhxxee.com"
    detail_url = "http://99.hhxxee.com/comic/9914874/"
    image_host = "http://99.94201314.net/dm12/"
    crawler = Crawler(site_root, detail_url, image_host, "寄生獸醫", "9914874")
    crawler.run()