import requests
import re
import json
from base import ChapterItem
from base import CrawlerBase

class Crawler(CrawlerBase):
    """Comic crawler for webtoons.com.

    Parses the chapter list out of a series detail page, extracts the
    per-chapter image URLs, and delegates the actual image downloads to
    ``CrawlerBase`` (``get_html``, ``load_image``, ``filterImgs``,
    ``load_chapters`` and the ``dbase`` cache all live in the base class).
    """

    # Patterns compiled once at class-creation time instead of on every
    # get_comic_chapters() call.
    _RE_CHAPTER_LIST = re.compile(r'<div class="episode_lst">\s*(.*?)\s*</ul>', re.S)
    _RE_CHAPTER_ITEM = re.compile(r'<li data-\s*(.*?)\s*</li>', re.S)
    _RE_CHAPTER_NAME = re.compile(r'<span class="subj">(.*?)</span>', re.S)
    _RE_CHAPTER_URL = re.compile(r'href="\s*(.*?)\s*"', re.S)
    _RE_IMAGE_URL = re.compile(r'class="_images" data-url="\s*(.*?)\s*" rel="nofollow"', re.S)

    def __init__(self, host, target_host, img_host, comic_name, commic_id):
        """Configure the base crawler for the 'webtoons' site.

        Parameters mirror CrawlerBase: site host, series detail-page URL,
        image host (unused for webtoons), display name and numeric id.
        """
        super().__init__('webtoons', host, target_host, img_host, comic_name, commic_id)
        # A browser-like UA plus the webtoons referer is required or the
        # image CDN rejects the request.
        self.DEFAULT_HEADERS = {
            "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
            "referer": "https://www.webtoons.com/"}

    def load_chapter(self, chapter):
        """Download every image of *chapter* that is not already on disk.

        Images are numbered 1.jpg, 2.jpg, ... inside a directory named
        "<title>(<count>p)" under the configured download path.
        """
        file_path = (self.DOWNLOAD_PATH + self.COMIC_NAME + "/" + chapter.title
                     + "(" + str(len(chapter.image_urls)) + "p)/")
        # Bug fix: the original printed str(index) here, which was always 1
        # because the message was emitted before the loop; show the chapter
        # title instead.
        print("开始下载章节" + chapter.title + "：" + file_path)
        for index, img_url in enumerate(chapter.image_urls, start=1):
            # filterImgs(...) is True when the target file already exists,
            # so existing images are skipped (resume support).
            if not self.filterImgs(file_path + str(index) + ".jpg"):
                print('正在下载' + str(index) + ":" + img_url)
                self.loadImgCount = 0
                self.load_image(img_url, file_path, str(index) + ".jpg")

    # Parse chapter information, then kick off the downloads.
    def get_comic_chapters(self):
        """Build ``self.chapterList`` from the detail page, then download.

        When the dbase cache says chapters were already loaded, the cached
        list is used; otherwise the detail HTML (fetched by
        ``get_comic_info``) is scraped chapter by chapter.
        """
        if not self.dbase.get(self.keyIsLoadedChapters, False):
            detail_text = self.detailHtml.content.decode('utf-8')
            list_matches = self._RE_CHAPTER_LIST.findall(detail_text)
            if not list_matches:
                # Robustness: the original indexed [0] unguarded and raised
                # IndexError on an unexpected page layout.
                print('未找到章节列表区块')
                chapter_htmls = ''
            else:
                chapter_htmls = list_matches[0]
                print('chapterHtmls load success....')
                print(chapter_htmls)
            clis = self._RE_CHAPTER_ITEM.findall(chapter_htmls)
            # NOTE(review): completeness flag; persisting it to dbase was
            # disabled in the original (commented out) — kept for intent.
            is_load_all_chapters = True
            for chapter_index, cli in enumerate(clis, start=1):
                names = self._RE_CHAPTER_NAME.findall(cli)
                urls = self._RE_CHAPTER_URL.findall(cli)
                if not names or not urls:
                    # Skip one malformed <li> instead of aborting everything.
                    is_load_all_chapters = False
                    print('解析图片地址出错：' + cli[:80])
                    continue
                c_name, c_url = names[0], urls[0]
                print('cName load success....' + c_name)
                print(">>>>>>>>>>>>>>>>>>>>>>>>>开始下拉章节信息>>>>>>>>>>>>>>>>>>>>>>>>>")
                print(c_name + " " + c_url)
                try:
                    chapter_html = self.get_html(c_url)
                    image_urls = self._RE_IMAGE_URL.findall(
                        chapter_html.content.decode('utf-8'))
                    print(image_urls)
                    if image_urls:
                        chapter_id = c_url.split('/')[-1].replace('.html', '')
                        self.chapterList.append(
                            ChapterItem(chapter_id, chapter_index, c_name,
                                        image_urls, c_url))
                    else:
                        print('解析图片地址出错：' + c_url)
                except Exception as e:
                    # Network/parse failure for a single chapter is logged
                    # and the remaining chapters are still attempted.
                    is_load_all_chapters = False
                    print("2URL链接访问异常！ url={}".format(
                        c_name + c_url + "\n" + str(e)))
            print(json.dumps(self.chapterList,
                             default=lambda o: o.__dict__,
                             sort_keys=True, indent=4))
        else:
            self.chapterList = self.dbase[self.keyChapters]
            print(json.dumps(self.chapterList,
                             default=lambda o: o.__dict__,
                             sort_keys=True, indent=4))

        if self.chapterList:
            self.load_chapters()
        else:
            print("未找到合适的章节")

    # Fetch the comic detail page, then parse its chapters.
    def get_comic_info(self):
        """Fetch the series detail page (unless cached) and parse chapters.

        Any exception from the fetch/parse is caught at this top-level
        boundary and logged so the process does not crash.
        """
        try:
            if not self.dbase.get(self.keyIsLoadedChapters, False):
                self.detailHtml = self.get_html(self.TARGET_HOST)
            self.get_comic_chapters()
        except Exception as e:
            print("1URL链接访问异常！ url={}".format(
                self.TARGET_HOST + "\n" + str(e)))


if __name__ == '__main__':
    # Entry point: crawl the "Sweet Home" series from webtoons.com.
    # The viewer URL is split across implicitly-concatenated literals for
    # readability; the resulting string is identical to the original.
    target_url = ("https://www.webtoons.com/zh-hant/thriller/sweethome/"
                  "%E9%87%91%E5%9D%8E%E6%AF%94%E4%BD%9C%E8%80%85%E7%9A%84"
                  "%E6%96%B0%E4%BD%9C%E7%8D%B5%E6%A7%8D%E5%B0%91%E5%B9%B4"
                  "%E9%A0%90%E5%91%8A/viewer?title_no=1289&episode_no=143")
    sweet_home = Crawler("https://www.webtoons.com", target_url,
                         "", "Sweet Home", "1289")
    sweet_home.run()

    