# _*_ coding: utf-8 _*_
"""
author: lianGang
email: 1542652663@qq.com
工具 用于爬取特定网站 漫画
1.访问 首页
2.搜索 对应漫画名
3.点击漫画页签 进入分卷页 提取分卷信息
4. 遍历分卷
4.1 单卷 下载每一页画面 另存 命名

注意 窗口切换
"""
import os
import warnings
from selenium import webdriver
from selenium.webdriver import Chrome
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
import requests
import time
import re
import traceback

warnings.filterwarnings("ignore")

# Headers sent with the direct image downloads done via `requests`
# (selenium only drives navigation). NOTE(review): the Referer is
# presumably required by the image host's hotlink check — confirm.
headers_set = {
    "Referer": "http://www.mangabz.com/",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36"
}

# Absolute path of the local chromedriver binary used to launch Chrome.
driver_path = "C:\\ET-tools\\chromedriver.exe"
# Comics are stored under <cwd>\download\<comic name>\<chapter name>\.
main_dir = os.getcwd()
download_dir = main_dir + "\\download"


def cutSpecialCharacter(string):
    """Return *string* with characters invalid in Windows names removed.

    Strips < > / \\ | : * ? and the full-width ？ so the result can be
    used as a file or directory name.

    The original body was a no-op (`for ...: pass`) and its list contained
    "\\/" (just "/" again) where a backslash was clearly intended.
    """
    for ch in ["<", ">", "/", "\\", "|", ":", "*", "?", "？"]:
        string = string.replace(ch, "")
    return string


def locateWindowByTitle(driver, title):
    """Switch *driver* to the first window whose title contains *title*.

    Raises LookupError when no window matches.

    Fixes two defects in the original: the title was checked BEFORE
    switching, so the last handle in the list was never actually tested,
    and the failure path was `assert "<non-empty string>"`, which is
    always truthy and never fired.
    """
    for handle in driver.window_handles:
        driver.switch_to.window(handle)
        if title in driver.title:
            return
    raise LookupError("driver cannot find window name contain {}".format(title))


def locateWindowByUrl(driver, url):
    """Switch *driver* to the window whose current url equals *url*.

    Raises LookupError when no window matches.

    Fixes the same two defects as locateWindowByTitle: the url was
    checked before switching (so the last handle was never tested) and
    the `assert "<non-empty string>"` failure path could never fire.
    """
    for handle in driver.window_handles:
        driver.switch_to.window(handle)
        if url == driver.current_url:
            return
    raise LookupError("driver cannot find window url contain {}".format(url))


def getPageByRe(chapter_pages):
    """Extract the first 1-3 digit run from each page label as an int.

    Labels containing no digits default to 1 page.
    """
    digit_run = re.compile(r'[0-9]{1,3}')
    matches = (digit_run.search(label) for label in chapter_pages)
    return [int(m.group()) if m else 1 for m in matches]


class ComicSpider(object):
    """Crawl one comic from the target site with selenium + requests.

    Workflow: open the site index, search the comic name, open its
    chapter menu, then download every page image of every chapter to
    download/<comic_name>/<chapter_name>/<page>.jpg.
    """

    # Class-level switch; set to True before start() to run Chrome headless.
    headless = False

    def __init__(self, comic_name, web_host):
        self.comic_name = comic_name
        self.web_host = web_host

    def _prepare_work(self):
        # Create the comic's download directory and the requests session
        # used for the actual image downloads (selenium only navigates).
        self.comic_dir = download_dir + "\\" + self.comic_name
        if not os.path.exists(self.comic_dir):
            os.makedirs(self.comic_dir)
        self.session = requests.Session()

    def _goWebIndex(self):
        # Open the index page, force simplified-Chinese display when
        # needed, and submit the search form for the comic name.
        opt = Options()
        if self.headless:
            opt.add_argument("headless")
        driver = Chrome(executable_path=driver_path, options=opt)
        driver.implicitly_wait(5)
        driver.get(self.web_host)
        lang_option = driver.find_element_by_xpath('//a[@class="lang-option active"]').text
        if lang_option != "简体":
            driver.find_element_by_xpath('//a[@class="lang-option active"]').click()
            time.sleep(1)
            driver.find_element_by_xpath('//a[@class="lang-option noactive"]').click()
            time.sleep(1)

        # BUG FIX: the original sent the module-global `comic_name`, which
        # only exists when the file runs as a script; use the instance value.
        driver.find_element_by_id("txtKeywords").send_keys(self.comic_name)
        # Submit with ENTER; clicking the search button fails in headless mode.
        driver.find_element_by_id("txtKeywords").send_keys(Keys.ENTER)
        time.sleep(2)
        self.driver = driver
        self.driver_wait = WebDriverWait(self.driver, 3)

    def _goChapterMenu(self):
        # In the search-result window: open the comic's detail page (exact
        # title match first, text-contains fallback), expand the full
        # chapter list, then collect chapter urls / names / page counts.
        goal_xpath = '//a[@title="{}"]'.format(self.comic_name)
        goal_xpath2 = '//a[contains(text(),"{}")]'.format(self.comic_name)
        locateWindowByTitle(self.driver, self.comic_name)
        wait = WebDriverWait(self.driver, 5)
        try:
            goal_comic = wait.until(EC.element_to_be_clickable((By.XPATH, goal_xpath)))
            goal_comic.click()
        except Exception:
            print(traceback.format_exc())
            print("切换模糊匹配漫画")
            goal_comic = wait.until(EC.element_to_be_clickable((By.XPATH, goal_xpath2)))
            goal_comic.click()
        self.menu_wd = self.driver.current_window_handle
        self.menu_url = self.driver.current_url
        try:
            show_more = '//a[text()="展开全部章节"]'
            wait.until(EC.element_to_be_clickable((By.XPATH, show_more))).click()
        except Exception:
            # Best-effort: some comics have no "expand all chapters" link.
            print(traceback.format_exc())
            print("不展开章节")
        comic_list_xpath = '//div[@class="detail-list-form-con"]/a'
        pages_xpath = '//div[@class="detail-list-form-con"]/a/span'
        self.comic_list = self.driver.find_elements_by_xpath(comic_list_xpath)
        chapter_pages = self.driver.find_elements_by_xpath(pages_xpath)
        chapter_pages = [span.text for span in chapter_pages]
        self.chapter_pages = getPageByRe(chapter_pages)
        self.chapter_urls = [a.get_property("href") for a in self.comic_list]
        chapter_names = [a.get_property("text") for a in self.comic_list]
        # Remove plain spaces from chapter names (used as directory names).
        self.chapter_names = [''.join(value.split(" ")) for value in chapter_names]

    def _goChapterDetail(self, i):
        """Download every page image of chapter *i* (index into the chapter lists)."""
        if i == 0:
            # The menu-page click event fails in headless mode; open the
            # first chapter in a new window via javascript instead.
            js = 'window.open("{}")'.format(self.chapter_urls[i])
            self.driver.execute_script(js)

        chapter_url = self.chapter_urls[i]
        chapter_page = self.chapter_pages[i]
        # BUG FIX: the original pattern r'["<", ">", ...]' was a character
        # class that also stripped quotes, commas and spaces by accident;
        # restrict it to characters actually invalid in Windows names.
        patter = r'[<>/\\|:*?"]'
        chapter_name = re.sub(patter, "", self.chapter_names[i])
        chapter_dir = self.comic_dir + "\\" + chapter_name
        self.test_name = chapter_dir
        if not os.path.exists(chapter_dir):
            os.makedirs(chapter_dir)

        if i == 0:
            locateWindowByUrl(self.driver, chapter_url)
        else:
            self.driver.get(chapter_url)
        print("downloading {} {}".format(self.comic_name, self.chapter_names[i]))

        current_url = self.driver.current_url
        # BUG FIX: the page loop also used `i`, shadowing the chapter index,
        # so the "existed" message below named the wrong chapter.
        for page_idx in range(chapter_page):
            try:
                print(self.driver.current_url)
                current_num = str(page_idx + 1)
                page_img_xpath = '//div[@class="readForm"]/div/img'
                page_img_url = self.driver.find_element_by_xpath(page_img_xpath).get_property("src")
                response = self.session.get(page_img_url, headers=headers_set)
                current_file = chapter_dir + "\\" + current_num + ".jpg"
                if page_idx == 0 and os.path.exists(current_file):
                    # First page already on disk: assume the whole chapter
                    # was downloaded previously and skip it.
                    print("{} {} existed".format(self.comic_name, self.chapter_names[i]))
                    break
                with open(current_file, "wb") as f:
                    f.write(response.content)

                # The reader selects the page via the "#ipgN" url fragment;
                # a refresh is needed for the fragment change to take effect.
                new_url = current_url + "#ipg" + str(page_idx + 2)
                self.driver.get(new_url)
                self.driver.refresh()
                time.sleep(0.3)

            except Exception:
                # Best-effort per page: log and continue with the next one.
                errmsg = traceback.format_exc()
                print(errmsg)
        time.sleep(0.5)

    def start(self):
        """Run the full crawl: prepare dirs, search, open menu, download all chapters."""
        self._prepare_work()
        self._goWebIndex()
        self._goChapterMenu()
        chapter_size = len(self.comic_list)
        for i in range(chapter_size):
            self._goChapterDetail(i)
            print("{} 合计 {}话， 待下载 {}话".format(self.comic_name, chapter_size, chapter_size -i -1))
        self.driver.quit()


if __name__ == "__main__":
    web_host = "http://www.mangabz.com/"
    comic_names = ["沙雕转生开无双"]
    for comic_name in comic_names:
        spy = ComicSpider(comic_name, web_host)
        spy.headless = True
        spy.start()
    pass

