import requests
from selenium.common import exceptions
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support import expected_conditions as EC
import logging
import threading
import multiprocessing
import re
import os


def down_comic(html_url):
    """Download a comic from mh160.xyz page by page, chapter by chapter.

    Drives a headless Chrome to *html_url*, then repeatedly:
      * reads the page-count ``<select id="k_pageSelect">`` to learn how many
        pages the current chapter has,
      * scrapes the current image URL out of the page source,
      * saves the image to ``./<comic_name>/<chapter>/<pic_name>``,
      * clicks "next page", and "next chapter" at the end of each chapter,
    until an alert (which the site shows on the very last page) stops the loop.

    Failed downloads are retried up to 10 times, then recorded in
    ``./<comic_name>/failed_down/failed.log``.  Unexpected errors are logged
    to ``./log/syserror.log``.  The Chrome driver is always quit on exit.

    :param html_url: URL of a reader page to start from, e.g.
        'https://www.mh160.xyz/kanmanhua/11106/176289.html?p=1'
    """
    option = Options()
    option.add_argument('-headless')
    driver = webdriver.Chrome(options=option)
    driver.get(html_url)
    try:
        while True:  # one iteration per chapter
            ele_select = driver.find_elements_by_xpath('//select[@id="k_pageSelect"]/option')
            print('当前网页url：{}'.format(driver.current_url))
            print('本章页数:{}'.format(len(ele_select) - 1))
            for _ in range(1, len(ele_select)):  # one iteration per page
                # The reader embeds the current image as <img src="..." id="qTcms_pic...
                try:
                    pic_url = re.findall('<img src="(.*?)" id="qTcms_pic', driver.page_source)[0]
                    print('图片url：{}'.format(pic_url))
                except IndexError:
                    # BUG FIX: the original fell through and reused a stale /
                    # unbound pic_url here; skip the download instead (we still
                    # advance to the next page below so the crawl continues).
                    print('未找到图片连接，网页url:{}'.format(html_url))
                    pic_url = None
                if pic_url is not None:
                    _save_pic(pic_url, driver.current_url)
                # Click "next page"; when the button is covered by an overlay,
                # fall back to the arrow element.
                try:
                    driver.find_element_by_id('k_next').click()
                except exceptions.ElementClickInterceptedException:
                    driver.find_element_by_id('k_Pic_nextArr').click()
            print('*' * 80)
            # Click "next chapter".
            driver.find_element_by_id('k_Pic_nextArr').click()
            # The site pops an alert on the final page — that is the stop signal.
            if EC.alert_is_present()(driver):
                print('最后一页')
                break
    except Exception as e:
        # BUG FIX: create ./log first — otherwise basicConfig raises
        # FileNotFoundError here and masks the original exception.
        log_dir = os.getcwd() + os.sep + 'log'
        os.makedirs(log_dir, exist_ok=True)
        logging.basicConfig(filename=log_dir + os.sep + 'syserror.log', level=logging.DEBUG,
                            format='%(asctime)s %(filename)s [line:%(lineno)d] %(levelname)s %(message)s')
        logging.getLogger(__name__).exception(e)
    finally:
        driver.quit()


def _save_pic(pic_url, page_url):
    """Download one image to ./<comic>/<chapter>/<name>, retrying up to 10 times.

    Path components are taken from the last three segments of *pic_url*
    (e.g. .../<comic_name>/<chapter>/<pic_name>.jpg).  Permanent failures are
    appended to ./<comic_name>/failed_down/failed.log together with the
    reader-page URL *page_url*.
    """
    comic_name, comic_chapter, comic_pic_name = pic_url.split('/')[-3:]
    print('动漫名：{}， 动漫章节：{}， 动漫图片名称：{}'.format(comic_name, comic_chapter, comic_pic_name))
    # e.g. <cwd>/斗罗大陆/01 and <cwd>/斗罗大陆/01/0001.jpg
    file_path = os.getcwd() + os.sep + comic_name + os.sep + comic_chapter
    pic_name = file_path + os.sep + comic_pic_name
    failed_path = os.getcwd() + os.sep + comic_name + os.sep + 'failed_down'
    print('文件路径:{}\n图片名:{}'.format(file_path, pic_name))
    os.makedirs(file_path, exist_ok=True)
    os.makedirs(failed_path, exist_ok=True)
    for re_down in range(10):
        try:
            # timeout so a dead link cannot hang the whole crawl forever
            pic_response = requests.get(pic_url, timeout=30)
            print('请求图片状态码: %s' % pic_response.status_code)
            with open(pic_name, 'wb') as f:
                f.write(pic_response.content)
            pic_response.close()
            print('图片下载成功：{}'.format(pic_url))
            return
        except requests.exceptions.RequestException:
            print('图片:{}下载失败,重试:{}次数'.format(pic_url, re_down))
    # BUG FIX: the original called open() on the failed_down *directory*
    # (IsADirectoryError) and its format string dropped the image URL;
    # write one line per permanently-failed image into a log file instead.
    with open(failed_path + os.sep + 'failed.log', 'a', encoding='utf8') as f:
        f.write('网页：{}，图片url：{}\n'.format(page_url, pic_url))


if __name__ == '__main__':
    # Single-URL run: crawl forward from this chapter until the last page.
    start_url = 'https://www.mh160.xyz/kanmanhua/11106/224941.html'
    down_comic(start_url)

    # # Threaded variant — one worker thread per starting chapter:
    # urls = ['https://www.mh160.xyz/kanmanhua/11106/176373.html',
    #         'https://www.mh160.xyz/kanmanhua/11106/376017.html',
    #         'https://www.mh160.xyz/kanmanhua/11106/577312.html']
    # workers = [threading.Thread(target=down_comic, args=(u,)) for u in urls]
    # for w in workers:
    #     w.start()

    # # Process variant — one worker process per starting chapter:
    # urls = ['https://www.mh160.xyz/kanmanhua/11106/176373.html',
    #         'https://www.mh160.xyz/kanmanhua/11106/376017.html',
    #         'https://www.mh160.xyz/kanmanhua/11106/577312.html']
    # workers = [multiprocessing.Process(target=down_comic, args=(u,)) for u in urls]
    # for w in workers:
    #     w.start()
