#!/usr/local/bin/python3
#coding=utf-8
import requests
import re
import os
from lxml import etree 
from multiprocessing.pool import ThreadPool as Pool
from multiprocessing import cpu_count


# ANSI-escape prefixes used to color console status messages.
SUCCESS = "\033[1;32m[+]\033[0m"  # bright green "[+]"
FAILED = "\033[1;31m[-]\033[0m"   # bright red "[-]"
WARRING = "\033[1;33m[!]\033[0m"  # bright yellow "[!]" (sic: typo for WARNING)
WARNING = WARRING  # correctly-spelled alias; WARRING kept for existing callers

class Five:
    """Scraper for the manga "五等分的花嫁" hosted on comic2.kukudm.com.

    Chapter URLs are discovered from the comic's index page, every page of
    every chapter is fetched sequentially, and the actual image files are
    downloaded concurrently on a thread pool.
    """

    def __init__(self):
        # Index page listing every chapter of comic #2335.
        self.url = "http://comic2.kukudm.com/comiclist/2335/"
        # Local root directory that downloaded images are stored under.
        self.Path = "/Users/hades_x/Desktop/Cartoon/"
        # Thread pool for concurrent image downloads (I/O-bound work, so a
        # thread pool sized to cpu_count() is adequate).
        self.pool = Pool(cpu_count())

    def Html_Selector(self, url, xpath):
        """Fetch *url* and return the list of nodes matching *xpath*.

        The site serves GBK-encoded pages without declaring the charset, so
        requests decodes them as iso-8859-1; re-encode and decode to recover
        the real text.
        """
        headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.75 Safari/537.36'
        }
        content = requests.get(url, headers=headers).text.encode("iso-8859-1").decode("gbk")
        return etree.HTML(content).xpath(xpath)

    @staticmethod
    def _group_chapter_urls(hrefs):
        """Split the flat href list into per-chapter groups of four URLs.

        The first four hrefs on the index page are navigation links, not
        chapters; after that each chapter contributes [main, mirror1,
        mirror2, mirror3].  Plain slicing replaces the original
        list.index() lookups, which returned the wrong offset whenever the
        same href appeared twice; an incomplete trailing group (fewer than
        four entries) is dropped instead of raising IndexError.
        """
        return [hrefs[i:i + 4] for i in range(4, len(hrefs) - 3, 4)]

    def main(self):
        """Crawl every chapter and queue each page's image for download."""
        hrefs = self.Html_Selector(self.url, "//td[@colspan='2']//dd/a/@href")

        for chapter in self._group_chapter_urls(hrefs):
            # chapter == [main, mirror1, mirror2, mirror3]; the second
            # mirror is the host this scraper targets.  Strip the "1.htm"
            # page suffix so pages can be enumerated below.
            base_url = chapter[2].replace("1.htm", "")
            print(base_url)
            for page_no in range(1, 99):
                current_url = base_url + str(page_no) + '.htm'
                # A non-200 status means we walked past the chapter's last page.
                if requests.get(current_url).status_code != 200:
                    print('已经到底了QAQ')
                    break
                print(current_url)
                self.get_Img_url(current_url)

        # The pool's worker threads are daemonic: without close()/join()
        # the interpreter exits and aborts still-running downloads.
        self.pool.close()
        self.pool.join()

    @staticmethod
    def _real_image_url(script_text):
        """Return the real image URL embedded in a page's inline script.

        The page builds "<IMG SRC='...'>" inside JavaScript, hiding the
        host behind the m201304d variable; substitute the known CDN host.
        """
        src = re.search("IMG SRC='(.*?)'", script_text).group(1)
        return src.replace("\"+m201304d+\"", "http://n5.1whour.com/")

    def get_Img_url(self, img_url):
        """Resolve the image URL on page *img_url* and schedule its download."""
        script = self.Html_Selector(img_url, "//script/text()")
        real_img_url = self._real_image_url(script[0])
        print(real_img_url)
        self.pool.apply_async(self.Download, (real_img_url,))
        # e.g. http://n5.1whour.com/newkuku/2017/10/29/五等分的花嫁/第01话/00017JR.jpg

    def Download(self, RealImg_url):
        """Download one image to <Path>/<comic name>/<chapter>/<file name>.

        Runs on a pool thread.  Existing files are skipped (now *before*
        fetching, so re-running to resume an interrupted crawl does not
        re-download anything).
        """
        # URL tail is .../<comic name>/<chapter>/<file name>.
        cartoon_name, page, img_name = RealImg_url.split("/")[-3:]
        folder = self.Path + '/' + cartoon_name + '/' + page
        # exist_ok guards against the race where two pool threads create
        # the same chapter directory at once (the original exists()-then-
        # makedirs() pair could raise FileExistsError).
        os.makedirs(folder, exist_ok=True)
        target = folder + '/' + img_name
        if os.path.exists(target):
            print(WARRING + "文件" + img_name + "已经存在,跳过下载")
            return
        img_content = requests.get(RealImg_url).content
        with open(target, 'wb') as f:
            f.write(img_content)
        print(SUCCESS + "下载" + img_name + "成功.")
            
            

if __name__ == "__main__":
    # Script entry point: build the scraper and crawl everything.
    Five().main()
    

