import requests
import re
import json
from base import ChapterItem
from base import CrawlerBase

class Crawler(CrawlerBase):
    """Crawler for the 动漫啦 (dongman.la) comic site.

    Fetches the comic detail page, scrapes the chapter list with regexes,
    resolves each chapter's image URLs, caches the results in ``self.dbase``
    and hands the chapters to ``CrawlerBase.load_chapters()`` for download.
    """

    def __init__(self, host, target_host, img_host, comic_name, commic_id):
        # The site display name is fixed for this crawler; everything else
        # is forwarded unchanged to the base class.
        super().__init__('动漫啦', host, target_host, img_host, comic_name, commic_id)

    def get_comic_chapters(self):
        """Extract chapter info from the detail page (or reuse the cache),
        then start downloading.

        Side effects: appends ChapterItem objects to ``self.chapterList``,
        writes the chapter cache and completion flag into ``self.dbase``,
        and finally calls ``self.load_chapters()`` when chapters were found.
        """
        if not self.dbase.get(self.COMIC_ID + '_isChaptersLoadFinish', False):
            # Decode the detail page once and reuse the text below.
            detailText = self.detailHtml.content.decode('utf-8')
            chapterHtmls = re.compile(r'<div class="cy_plist" id="play_0">\s*(.*?)\s*</div>', re.S).findall(detailText)[0]
            clis = re.compile(r'<li>\s*(.*?)\s*</li>', re.S).findall(chapterHtmls)
            # Stays True only if every chapter loads without an error, so a
            # partial run is retried on the next invocation.
            isLoadAllChapters = True
            # The page lists chapters newest-first; reverse for reading order
            # and number them from 1.
            for chapterIndex, cli in enumerate(reversed(clis), start=1):
                cName = re.compile(r'<p>\s*(.*?)\s*</p>', re.S).findall(cli)[0]
                cUrl = re.compile(r"href='\s*(.*?)\s*'", re.S).findall(cli)[0]
                # Chapter id is the last path segment of its URL.
                cId = cUrl.split('/')[-1]
                print("开始下拉章节信息...")
                print(cName + ":" + cUrl)
                try:
                    # "all.html" variant of the chapter page contains every image.
                    chapterHtml = self.get_html(cUrl + "all.html")
                    cImages = re.compile(r'<div class="imgListBox">\s*(.*?)\s*<footer>', re.S).findall(chapterHtml.content.decode('utf-8'))[0]
                    cImageUrls = re.compile(r'data-src="\s*(.*?)\s*" title', re.S).findall(cImages)
                    chapter = ChapterItem(cId, chapterIndex, cName, cImageUrls, cUrl)
                    self.chapterList.append(chapter)
                except Exception as e:
                    # Best-effort: skip the broken chapter but remember the
                    # failure so the cache is not marked complete.
                    isLoadAllChapters = False
                    msg = "URL链接访问异常！ url={}".format(cName + cUrl + "\n" + str(e))
                    print(msg)
            self.dbase[self.COMIC_ID + '_isChaptersLoadFinish'] = isLoadAllChapters
            self.dbase[self.COMIC_ID + '_chapters'] = self.chapterList
            self.log(json.dumps(self.chapterList, default=lambda o: o.__dict__, sort_keys=True, indent=4))
        else:
            # A previous run finished: restore the chapter list from cache.
            self.chapterList = self.dbase[self.COMIC_ID + '_chapters']

        if len(self.chapterList) > 0:
            self.load_chapters()
        else:
            print("未找到合适的章节")

    def get_comic_info(self):
        """Download the comic detail page (unless cached) and drive the
        chapter extraction; on any error, report and fall back to
        ``self.get_comic_info_error()``.
        """
        try:
            if not self.dbase.get(self.COMIC_ID + '_isChaptersLoadFinish', False):
                self.detailHtml = self.get_html(self.TARGET_HOST)
            self.get_comic_chapters()
        except Exception as e:
            msg = "URL链接访问异常！ url={}".format(self.TARGET_HOST + "\n" + str(e))
            print(msg)
            self.get_comic_info_error()
        


if __name__ == '__main__':
    # Crawl comic "军鸡" (id 6740) from dongman.la.
    site_host = "https://www.dongman.la"
    detail_url = "https://www.dongman.la/manhua/detail/6740/"
    image_host = "https://dmlcn.xn--ekro1rmnqwtgnnnca.com/j/jj/1/"
    Crawler(site_host, detail_url, image_host, "军鸡", "6740").run()

    