import requests
import re
import json
from base import ChapterItem
from base import CrawlerBase

class Crawler(CrawlerBase):
    """98comic ("98manhua") crawler.

    Scrapes a comic's detail page for its chapter list, resolves every
    chapter's image URLs, caches the result in ``self.dbase`` and downloads
    the images through the ``CrawlerBase`` helpers.
    """

    # Patterns hoisted out of the parsing loops and compiled once.
    _RE_CHAPTER_LIST = re.compile(
        r'<div class="chapter-list cf mt10"\s*(.*?)\s*</div><div class="chapter-list cf mt10">',
        re.S)
    _RE_LI = re.compile(r'<li>\s*(.*?)\s*</li>', re.S)
    _RE_NAME = re.compile(r'<span>(.*?)</span>', re.S)
    _RE_HREF = re.compile(r'href="\s*(.*?)\s*"', re.S)
    _RE_IMAGES = re.compile(r"'fs':\[\s*(.*?)\s*\],'fc':", re.S)
    # Strips whitespace/punctuation from chapter titles so they are safe as
    # directory names. Deliberately NOT a raw string: the original pattern
    # relied on non-raw escapes (\" -> "), so the bytes must stay identical.
    _RE_SANITIZE = re.compile("[\s+\.\!\/_,$%^*(+\"\']+|[+——！。？、~@#:￥%……&*（）]+")

    def __init__(self, host, target_host, img_host, comic_name, commic_id):
        """Forward the site configuration to CrawlerBase under the key '98manhua'."""
        super().__init__('98manhua', host, target_host, img_host, comic_name, commic_id)

    def load_chapter(self, chapter):
        """Download every image of *chapter* into a per-chapter directory.

        Images already present on disk (as reported by ``filterImgs``) are
        skipped. Files are named ``1.jpg``, ``2.jpg``, ... in page order.
        """
        filePath = (self.DOWNLOAD_PATH + self.COMIC_NAME + "/" + chapter.title
                    + "(" + str(len(chapter.image_urls)) + "p)/")
        # NOTE(review): the original printed the image counter here, which is
        # always 1 before the loop starts — it probably meant the chapter's
        # own index, but that attribute name is defined in base.ChapterItem
        # and not visible here, so the original output is preserved exactly.
        print("开始下载章节" + str(1) + "：" + filePath)
        for index, img in enumerate(chapter.image_urls, start=1):
            imgUrl = self.IMG_HOST + chapter.chapter_id + '/' + img
            if not self.filterImgs(filePath + str(index) + ".jpg"):
                print('正在下载' + str(index) + ":" + imgUrl)
                self.loadImgCount = 0
                self.load_image(imgUrl, filePath, str(index) + ".jpg")

    # Fetch chapter information.
    def get_comic_chapters(self):
        """Parse chapter names/URLs from the detail page, resolve each
        chapter's image list, persist everything to ``self.dbase`` and start
        downloading via ``load_chapters()``.

        When a previous run already loaded all chapters, the cached list is
        used instead of re-parsing the site.
        """
        if not self.dbase.get(self.keyIsLoadedChapters, False):
            detail = self.detailHtml.content.decode('utf-8')
            chapterHtmls = self._RE_CHAPTER_LIST.findall(detail)[0]
            print('chapterHtmls load success....')
            clis = self._RE_LI.findall(chapterHtmls)
            # True only if every chapter resolved successfully; a partial
            # list is never marked as cached, so it is retried next run.
            isLoadAllChapters = True
            for chapterIndex, cli in enumerate(clis, start=1):
                cName = self._RE_NAME.findall(cli)[0]
                # Sanitize the title for use as a directory name. (The
                # original chained .encode('utf-8').decode('utf-8') on both
                # pattern and replacement — a no-op, removed.)
                cName = self._RE_SANITIZE.sub("", cName)
                print('cName load success....')
                cUrl = self.HOST + self._RE_HREF.findall(cli)[0]
                print("开始下拉章节信息...")
                print(cName + ":" + cUrl)
                try:
                    chapterHtml = self.get_html(cUrl)
                    cImageUrlStrs = self._RE_IMAGES.findall(
                        chapterHtml.content.decode('utf-8'))
                    if cImageUrlStrs:
                        cImageUrls = cImageUrlStrs[0].replace("'", '').split(',')
                        chapter = ChapterItem(
                            cUrl.split('/')[-1].replace('.html', ''),
                            chapterIndex, cName, cImageUrls, cUrl)
                        self.chapterList.append(chapter)
                    else:
                        print('解析图片地址出错：' + cUrl)
                except Exception as e:
                    # One failed chapter should not abort the whole scrape,
                    # but it does prevent the list from being cached.
                    isLoadAllChapters = False
                    msg = "2URL链接访问异常！ url={}".format(cName + cUrl + "\n" + str(e))
                    print(msg)
            self.dbase[self.keyIsLoadedChapters] = isLoadAllChapters
            self.dbase[self.keyChapters] = self.chapterList
            dump = json.dumps(self.chapterList,
                              default=lambda o: o.__dict__,
                              sort_keys=True, indent=4)
            self.log(dump)
            print(dump)
        else:
            self.chapterList = self.dbase[self.keyChapters]
            print(json.dumps(self.chapterList,
                             default=lambda o: o.__dict__,
                             sort_keys=True, indent=4))

        if self.chapterList:
            self.load_chapters()
        else:
            print("未找到合适的章节")

    # Download comic metadata, then delegate to the chapter loader.
    def get_comic_info(self):
        """Fetch the comic detail page (skipped when chapters are already
        cached) and hand over to ``get_comic_chapters()``.

        Acts as the top-level error boundary: any failure is reported to
        stdout and swallowed rather than re-raised.
        """
        try:
            if not self.dbase.get(self.keyIsLoadedChapters, False):
                self.detailHtml = self.get_html(self.TARGET_HOST)
            self.get_comic_chapters()
        except Exception as e:
            msg = "1URL链接访问异常！ url={}".format(self.TARGET_HOST + "\n" + str(e))
            print(msg)


if __name__ == '__main__':
    # Target: 98comic site, comic id 29821 ("金瓶梅").
    args = (
        "https://www.98comic.com",
        "https://www.98comic.com/comic/29821/",
        "https://www.98comic.com/g.php?",
        "金瓶梅",
        "29821",
    )
    Crawler(*args).run()

    