# 本程序针对快看漫画 (kuaikanmanhua comic downloader)
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
import time
import random
import re
import os
import requests
import sys




# 日志类
class Logger(object):
    """Tee-style logger: everything written to it goes both to the original
    stdout and to a log file, so console output is preserved on disk.

    Intended use: ``sys.stdout = Logger("log.txt")``.
    """

    def __init__(self, fileN="Default.log"):
        # Keep a handle on the real stdout before it gets replaced.
        self.terminal = sys.stdout
        self.log = open(fileN, "a+")

    def write(self, message):
        self.terminal.write(message)
        self.log.write(message)
        # Flush after every write so an unexpected program exit loses nothing.
        self.flush()

    def flush(self):
        # BUG FIX: the original only flushed the log file, leaving the real
        # terminal's buffer stale; flush both sinks.
        self.terminal.flush()
        self.log.flush()
##############################################################################################




# 返回browser对象
def get_selenium_object(url):
    """Launch a headless Firefox, navigate to *url*, and return the driver."""
    driver_path = r'D:\Program\Python\geckodriver.exe'
    opts = Options()
    for argument in (
        '--headless',
        '--disable-gpu',
        "user-agent='Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'",
    ):
        opts.add_argument(argument)
    browser = webdriver.Firefox(executable_path=driver_path, options=opts)
    browser.get(url)
    return browser
##############################################################################################




# 传入browser对象，进行滚动操作，获取每张图片的链接，返回图片链接列表和章节名称
def pulldown(browser):
    """Scroll the chapter page to the bottom so lazy-loaded images appear,
    then parse the page source.

    The injected script scrolls down 100px every 100ms and appends
    "scroll-done" to ``document.title`` once the bottom is reached; we poll
    the title to know when scrolling has finished.

    :param browser: a selenium WebDriver already on the chapter page
                    (closed before returning).
    :return: ``(imglist, chapter_name)`` — the image URLs and a
             whitespace-free chapter title suitable for a directory name.
    """
    browser.execute_script("""
        (function () {
        var y = 0;
        var step = 100;
        window.scroll(0, 0);
        function f() {
        if (y < document.body.scrollHeight) {
        y += step;
        window.scroll(0, y);
        setTimeout(f, 100);
        } else {
        window.scroll(0, 0);
        document.title += "scroll-done";
        }
        }
        setTimeout(f, 1000);
        })();
    """)
    print("下拉中...")
    # BUG FIX: the original attached the `else` to the `while`, so the
    # wait-branch was dead code and the loop busy-spun at 100% CPU.
    while "scroll-done" not in browser.title:
        print("还没有拉到最底端...")
        time.sleep(3)

    page = browser.page_source
    # NOTE(review): close() only closes the window; the geckodriver process
    # may linger — consider quit() if driver processes accumulate.
    browser.close()

    regexp_img = '<img noinit=".*?" class="img" data-src="(.*?)" src=".*?" lazy=".*?">'
    regexp_chapter_name = '<title data-n-head=".*?">(.*?)</title>'

    imglist = re.findall(regexp_img, page, re.S)
    # Last <title> match is the chapter title; keep the part before '|'
    # and strip spaces so it is safe as a folder-name component.
    chapter_name = re.findall(regexp_chapter_name, page, re.S)[-1]
    chapter_name = chapter_name.split('|')[0]
    chapter_name = chapter_name.replace(' ', '')

    return imglist, chapter_name
##############################################################################################    


    
    
# selenium获取到图片链接之后，逐张下载编号保存
# 传入图片链接列表、章节文件夹名称    
def download_img(imglist, chapter_dir):
    """Download every image URL in *imglist* into *chapter_dir*.

    Files are named 001.jpg, 002.jpg, ... so alphabetical order matches
    reading order (see the note at the bottom of the file about numeric
    prefixes avoiding misordering).

    :param imglist: list of image URLs as scraped from the page source
    :param chapter_dir: existing directory to write the .jpg files into
    """
    headers = {
            'User-Agent' : 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:79.0) Gecko/20100101 Firefox/79.0'
            }
    print('本章总共：' + str(len(imglist)) + '张图片')
    for count, url in enumerate(imglist, start=1):
        # Page source HTML-escapes '&' as '&amp;'; dropping 'amp;' restores
        # the real query-string separators.
        url_new = url.replace('amp;', '')
        img_data = requests.get(url_new, headers = headers).content
        # zfill replaces the original nested-if zero padding (001..999).
        image_no = str(count).zfill(3)
        with open(chapter_dir + '/' + image_no + '.jpg', 'wb') as img_file:
            img_file.write(img_data)
        time.sleep(0.1)  # small pause between downloads to be polite
        print('第' + str(count) + '张图片' + '下载完成！')
##############################################################################################    




# 输入开始章节数、结束章节数开启下载，两者相等且大于0时全部下载
def Download(start_chapter, end_chapter):
    """Download chapters ``start_chapter`` through ``end_chapter`` (1-based,
    inclusive) of the hard-coded comic.

    Special case kept from the original: when both arguments are equal,
    the whole comic is downloaded. Negative values, or start > end, abort.
    """
    # Resolve the chapter URL list and comic name (fast; no selenium yet).
    chapter_list, commic_name = get_chapter('https://www.kuaikanmanhua.com/web/topic/720/')
    chapter_num = len(chapter_list)
    print('漫画名：' + commic_name + '      ' + '全部共：' + str(chapter_num) + '章')

    # Guard clauses replace the original nested if/else validation.
    if start_chapter < 0 or end_chapter < 0:
        return
    if start_chapter > end_chapter:
        return
    if start_chapter == end_chapter:
        # Equal bounds mean "download everything".
        start_chapter = 1
        end_chapter = chapter_num

    print(time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(time.time())) + '开始下载《' + commic_name + '》第  ' + str(start_chapter) + '~' + str(end_chapter) + '  章')
    print('********************************************************************************************')

    # Top-level folder named after the comic.
    if not os.path.exists(commic_name):
        os.mkdir(commic_name)

    # Per chapter: selenium scroll-load to collect image URLs, then download.
    # This is the slow part, hence the timestamped log lines.
    count = 1  # 1-based chapter counter
    for chapter in chapter_list:

        if count < start_chapter:
            count = count + 1
            continue
        if count > end_chapter:
            # BUG FIX: was `return`, which skipped the completion log below.
            break

        browser = get_selenium_object(chapter)
        imglist, chapter_name = pulldown(browser)

        # Zero-padded chapter prefix (001..999) keeps folders sorted.
        chapter_dir = commic_name + '/' + str(count).zfill(3) + '_' + chapter_name

        if not os.path.exists(chapter_dir):
            os.mkdir(chapter_dir)

        print(time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(time.time())) + '开始下载《' + commic_name + '》第--' + str(count) + '--章')
        download_img(imglist, chapter_dir)
        print(time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(time.time())) + '完成下载《' + commic_name + '》第--' + str(count) + '--章')

        count = count + 1

    print('********************************************************************************************')
    print(time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(time.time())) + '完成下载《' + commic_name + '》第  ' + str(start_chapter) + '~' + str(end_chapter) + '  章')




    
# 从漫画目录页解析出所有章节的链接，注意章节链接需要倒序并且编号，做好日志
def get_chapter(url_catalog):
    """Parse the comic's catalog page.

    :param url_catalog: URL of the comic's table-of-contents page
    :return: ``(chapter_list, commic_name)`` — absolute chapter URLs ordered
             oldest-first, and the comic title.
    """
    headers = {
            'User-Agent' : 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:79.0) Gecko/20100101 Firefox/79.0'
            }
    html = requests.get(url_catalog, headers = headers).content.decode()

    regexp_div = '<div class="title fl">(.*?)</div>'
    regexp_a   = '<a href="(.*?)">'
    regexp_h3  = '<h3 class="title">(.*?)</h3>'

    div_list = re.findall(regexp_div, html, re.S)
    # BUG FIX: the original used str(div_list), which injects list-repr
    # artifacts (brackets, quotes, escapes) into the text being searched;
    # joining the fragments keeps the HTML intact for the href regex.
    a_list   = re.findall(regexp_a, ''.join(div_list), re.S)
    commic_name = re.findall(regexp_h3, html, re.S)[-1]

    chapter_list = ['https://www.kuaikanmanhua.com' + href for href in a_list]

    # The site lists newest chapters first; reverse so downloading starts
    # from season 1 chapter 1.
    chapter_list.reverse()

    return chapter_list, commic_name
##############################################################################################    

if __name__ == '__main__':
    # Mirror all console output into log.txt so long runs can be audited.
    sys.stdout = Logger("log.txt")

    # Kick off the download for the chosen chapter range.
    Download(63, 152)
        
        
    # 下载的时候不管是章节文件夹还是图片名称，都要加上数字前缀，避免拼接错位   
        
        
        