﻿
# -*- coding: UTF-8 -*-
#http://www.xeyblog.com/articles/6/article.php
import requests
import sys
import re
import os
import zipfile
import shutil
import execjs
import json
import urllib.parse

HOST = "https://manhua.dmzj.com"
TARGET_HOST = "https://manhua.dmzj.com/renyadao/"

# 执行js获取图片链接
# Unpack the eval()-packed JavaScript on a chapter page and return the
# list of image URLs it encodes.
def parse_js(html):
        """Execute the page's packer blob via execjs and decode the URL array.

        html -- requests Response whose body contains an
                ``eval(function(p,a,c,k,e,d)...)`` packed script.
        Returns the decoded list of image URL paths.
        """
        packed = re.search('eval\((.*?)\)\n', html.text).group(1)
        # Name the anonymous packer function so the wrapper below is valid JS.
        packed = packed.replace('function(p,a,c,k,e,d)', 'function fun(p, a, c, k, e, d)')
        wrapper = """
                     function run(){
                            var result = %s;
                            return result;
                        }
                """ % packed
        unpacked = execjs.compile(wrapper).call('run')
        # The unpacked source contains two '=' signs before the JSON array
        # literal; take the third segment and strip the surrounding
        # quote/terminator characters before decoding.
        payload = unpacked.split('=')[2][1:-2]
        return json.JSONDecoder().decode(payload)

#函数功能：压缩指定路径下全部文件
# Compress every file under a directory tree into a single zip archive.
def zipDir(dirpath, outFullName):
    """Recursively zip all files under *dirpath* into *outFullName*.

    Archive member names are made relative to *dirpath*, so the archive
    does not embed the source directory prefix.

    dirpath     -- root directory whose contents are archived
    outFullName -- path of the .zip file to create (overwritten if present)
    """
    # Context manager guarantees the archive is closed (and its central
    # directory flushed) even if os.walk or write() raises; the original
    # leaked the handle on error.  Also avoids shadowing the builtin `zip`.
    with zipfile.ZipFile(outFullName, "w", zipfile.ZIP_DEFLATED) as archive:
        for root, _dirnames, filenames in os.walk(dirpath):
            # relpath is safer than str.replace(dirpath, ''): replace()
            # would also strip the prefix anywhere it recurs deeper in the
            # path.  ZipFile.write normalizes './name' and leading
            # separators, so the stored arcnames are unchanged.
            rel = os.path.relpath(root, dirpath)
            for filename in filenames:
                archive.write(os.path.join(root, filename),
                              os.path.join(rel, filename))

# The detail page of the comic to download was originally taken as a CLI
# argument (commented out below); it is now hard-coded via TARGET_HOST.
# url=sys.argv[1]PY
# if sys.getdefaultencoding() != 'utf-8':
#     reload(sys)
#     sys.setdefaultencoding('utf-8')
# Fetch the comic's detail page with a browser-like User-Agent and a
# Referer header (the site rejects requests without them).
ses=requests.session()
headers={"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18362",'Referer':HOST}
detailHtml=ses.get(TARGET_HOST,headers=headers,timeout=(5,30))
#print(detailHtml.content)
# Extract the comic name from the page's `g_comic_name = "..."` JS variable.
comicName=re.compile(r"g_comic_name = \"(.*?)\"").findall(detailHtml.content.decode('utf-8'))
comicName=comicName[0]
print(comicName)
# If a non-empty directory for this comic already exists under ./complete/,
# it was downloaded before; exit the script.
if os.path.exists("./complete/"+comicName) and  len(os.listdir("./complete/"+comicName))>0 :
	print ("该漫画已下载过！xxxx")
	sys.exit()

# Extract the chapter-list container blocks from the page HTML.
result=re.compile(r'<div class="cartoon_online_border" (.*?)</div>', re.S).findall(detailHtml.content.decode('utf-8'))  

# print(result)



for row in result :
    chapterList=re.compile(r'<li>(.*?)</li>', re.S).findall(row)
    for li in chapterList:
        # Chapter name (the anchor text inside the <li>).
        chapterName = re.compile(r'<a .*?>\s*(.*?)\s*</a>', re.S).findall(li)

        # Filter: skip chapters whose first number is below 171 — a
        # hard-coded resume point left by the author; adjust as needed.
        if len(chapterName)>0:
            numbers = re.findall(r'\d+', chapterName[0])
            if len(numbers)>0:
                number = int(numbers[0])
                if(number<171):
                    continue
        else:
            continue    

        # Chapter detail-page URL (relative href, prefixed with HOST below).
        chapterDetailUrl = re.compile(r'href="\s*(.*?)\s*" >', re.S).findall(li)
        if len(chapterList)>0 and len(chapterDetailUrl)>0:
            chapterName = chapterName[0]
            chapterDetailUrl = HOST + chapterDetailUrl[0]
            print(chapterName+":"+chapterDetailUrl)
            chapterHtml=ses.get(chapterDetailUrl,headers=headers,timeout=(5,30))
            # Image count for the chapter, read from the page's
            # `g_max_pic_count` JS variable; appended to the folder name.
            chapterCount = re.compile(r"g_max_pic_count = (.*?);").findall(chapterHtml.content.decode('utf-8'))
            if len(chapterCount)>0:
                chapterName = chapterName+"("+chapterCount[0]+")"
            imageUrls = parse_js(chapterHtml)
            pageIndex=1
            for url in imageUrls:
                # URL-decode the path only to derive a readable name for the
                # progress log; files are saved under a numeric name below.
                imagename = urllib.parse.unquote(url)
                imagename = imagename.split('/')[-1]
                url = "http://images.dmzj.com/" + url
                # The image CDN checks the Referer; use the detail page URL.
                headers["Referer"] = detailHtml.url
                targetPic = ses.get(url, headers=headers,timeout=(5,30))
                # with open("-".join(imagename.split("/")[1:]), 'wb') as file:
                #     file.write(r.content)
                # print("%s, 保存成功" % "-".join(imagename.split("/")[1:]))
                filePath = "./download/"+comicName+"/"+chapterName+"/"
                if not os.path.exists(filePath):
                    os.makedirs(filePath)
                # Zero-pad single-digit page numbers so files sort correctly.
                imagename2 = str(pageIndex)+".jpg"
                if pageIndex <10:
                    imagename2 = "0"+imagename2
                f=open(filePath+imagename2,"wb")
                f.write(targetPic.content)
                f.close()
                pageIndex = pageIndex+1
                print(chapterName+"/"+imagename+" 下载成功")
            
print ("下载已完成！")                 










# 	chapter_name=re.compile(r"chapter_name.*?,").findall(row)
# 	chapter_name=re.sub(r"chapter_name\":\"","",re.sub(r"\",","",chapter_name[0])).decode("unicode_escape",errors = 'ignore')	#此处将章节名进行转码
# 	os.makedirs(unicode("../download/"+comicName)+"/"+chapter_name)
# 	ID=re.compile(r"\"id\":.*?,").findall(row)
# 	Id=re.sub(r",","",re.sub(r"\"id\":","",ID[0]))	#获取章节id
# 	comic_ID=re.compile(r"\"comic_id\":.*?,").findall(row)
# 	comic_id=re.sub(r",","",re.sub(r"\"comic_id\":","",comic_ID[0]))		#获取漫画id
# 	comicurl="http://m.dmzj.com/view/"+comic_id+"/"+Id+".html"		#获取章节地址
# 	headers={"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18362",'Referer':comicurl}
# 	tarHtml=ses.get(comicurl,headers=headers)       #下载章节网页
# 	pattern=re.compile(r'\[\"http.*?"\]')
# 	result=pattern.findall(tarHtml.content)
# 	pattern=re.compile(r'https.*?[np]g')
# 	result=pattern.findall(result[1])
# 	print ("下载中："+comicurl+"...")
# 	for picurl in result :
# 		downurl=re.sub(r"u","\\u",re.sub(r"\\","",picurl)).decode("unicode_escape",errors = 'ignore')
# 		targetPic=ses.get(downurl,headers=headers)
# 		pageNum=re.sub(r"/(.*)/","",re.sub(r"https:","",downurl))
# 		f=open("../download/"+comicName+"/"+chapter_name+"/"+pageNum,"w")
# 		f.write(targetPic.content)
# 		f.close()
# #加入已下载列表
# os.makedirs("./complete/"+comicName);
# print ("加入已下载目录:已完成！")
# print ("压缩中...")
# zipDir("../download/"+comicName,"../download/"+comicName+".zip")
# print ("压缩:已完成！")
# shutil.rmtree("../download/"+comicName)
# print ("删除原文件：已完成！")