import scrapy
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from urllib import request
from urllib.parse import quote
from math import ceil
from lxml import etree
import os
import time

from caomei.items import ImgItem
from caomei.settings import IMAGES_STORE


class CmSpider(scrapy.Spider):
    """Comic spider for ikanhm.xyz.

    Chapter pages lazy-load their images while the user scrolls, so a
    headless Chrome instance (driven by Selenium, presumably consumed by a
    downloader middleware elsewhere in this project) is scrolled to the
    bottom of each page before the image links are extracted.

    Yields:
        ImgItem: one item per chapter image, carrying the comic/chapter
        names, links, target directory, and the image URL.
    """

    name = "cm"
    # Domains the spider is allowed to visit (site + image CDN hosts).
    allowed_domains = ["www.ikanhm.xyz", "r.ikanhm.xyz", "mxs12.cc"]
    # start_urls = ["https://www.ikanhm.xyz/book/479"]  # 寄宿日记
    # start_urls = ["https://www.ikanhm.xyz/book/748"]
    # start_urls = ["https://www.ikanhm.xyz/book/570"]  # 姐姐爱做菜
    # start_urls = ["https://www.ikanhm.xyz/book/488"]  # 朋友的妈妈
    # start_urls = ["https://www.ikanhm.xyz/book/415"]  # 漂亮干姐姐
    # start_urls = ["https://www.ikanhm.xyz/book/411"]  # 弱点
    # start_urls = ["https://www.ikanhm.xyz/book/127"]  # 享乐补习街
    # start_urls = ["https://www.ikanhm.xyz/book/388"]  # 健身教练
    # start_urls = ["https://www.ikanhm.xyz/book/371"]  # 妹妹的义务
    # start_urls = ["https://www.ikanhm.xyz/book/505"]  # 梦幻速食店
    # start_urls = ["https://www.ikanhm.xyz/book/592"]  # 舞蹈系學姊們
    start_urls = ["https://www.ikanhm.xyz/book/7"]  # 朋友, 女朋友
    # start_urls = ["https://www.ikanhm.xyz/book/584"]  # 姊姊們的調教
    # start_urls = ["https://www.ikanhm.xyz/book/605"]  # 幸福小島

    # Single-chapter mode (use with parse_single):
    # start_urls = ["https://www.ikanhm.xyz/chapter/24617"]  # 舞蹈系學姊們 第1话

    common_link = 'https://mxs12.cc'  # base URL prepended to relative chapter hrefs

    # Scroll increment in pixels; the page height is also measured in
    # multiples of this step to decide when the bottom has been reached.
    SCROLL_STEP = 500

    def __init__(self, *args, **kwargs):
        """Configure and launch the headless Chrome instance.

        BUGFIX: the original did not accept or forward ``*args, **kwargs``
        to ``scrapy.Spider.__init__``, which breaks command-line spider
        arguments (``scrapy crawl cm -a key=val``) and skips base-class
        initialization.
        """
        super().__init__(*args, **kwargs)

        # Chromedriver downloads:
        # https://googlechromelabs.github.io/chrome-for-testing/
        # http://chromedriver.storage.googleapis.com/index.html

        chrome_options = Options()

        # Run Chrome without a visible window.
        chrome_options.add_argument('--headless')
        chrome_options.add_argument('--disable-gpu')

        # Path to the Chrome binary itself.
        chrome_options.binary_location = r'D:\software\browser\Google\Chrome\Application\chrome.exe'

        # Work around "handshake failed; returned -1, SSL error code 1, net_error -101".
        chrome_options.add_experimental_option("excludeSwitches", ["enable-logging"])
        # BUGFIX: Chromium switches require the leading "--"; a bare
        # 'ignore-certificate-errors' token is not recognized as a switch.
        chrome_options.add_argument('--ignore-certificate-errors')

        # Disable image and stylesheet loading to speed up rendering.
        prefs = {
            "profile.managed_default_content_settings.images": 2,
            'permissions.default.stylesheet': 2,
        }
        chrome_options.add_experimental_option('prefs', prefs)

        # Chromedriver executable path.
        browser_path = r'D:\browserDriver\chromedriver_107.0.5304.62.exe'
        service = Service(executable_path=browser_path)

        # selenium >= 3.141.0: pass the driver location via `service`,
        # not the removed `executable_path` keyword.
        self.browser = webdriver.Chrome(service=service, options=chrome_options)
        self.browser.set_page_load_timeout(300)

    def start_requests(self):
        # Crawl start_urls in order; suppress 302 redirects so the spider
        # stays on the requested page.
        yield scrapy.Request(self.start_urls[0],
                             meta={
                                 'dont_redirect': True,
                                 'handle_httpstatus_list': [302]
                             },
                             callback=self.parse)

    def parse(self, response):
        """Parse a book page: schedule one request per chapter.

        May miss chapters on some books; combine with the single-chapter
        mode (parse_single) when needed.
        """
        comic_name = response.xpath('//div[@class="info"]/h1/text()').get()
        print("comic name", comic_name)

        # Directory where this comic's images are saved.
        comic_dir = os.path.join(str(IMAGES_STORE), comic_name)

        # No .getall() here: each selector is queried again with xpath below.
        comic_list = response.xpath('//*[@id="detail-list-select"]/li')

        for comic_item in comic_list:
            chapter_name = comic_item.xpath('./a/text()').get()
            part_link = comic_item.xpath('./a/@href').get()
            chapter_link = self.common_link + part_link
            print("chapter name：", chapter_name, "，chapter link：", chapter_link)

            chapter_dir = os.path.join(comic_dir, chapter_name)

            # dont_redirect / handle_httpstatus_list / dont_filter together
            # prevent the request from being redirected or deduplicated.
            yield scrapy.Request(url=chapter_link, meta={
                'dont_redirect': True,
                'handle_httpstatus_list': [302],
                'chapter_dir': chapter_dir,
                'comic_name': comic_name,
                'chapter_name': chapter_name,
                'chapter_link': chapter_link
            }, callback=self.to_chapter, dont_filter=True)

    def _scroll_page_to_bottom(self):
        """Scroll the Selenium browser until the page height stops growing.

        The site appends images while scrolling, which can extend
        ``document.body.scrollHeight``; keep scrolling until two
        consecutive measurements (in SCROLL_STEP units) agree, i.e. the
        true bottom has been reached.

        Returns:
            int: the final scroll offset in pixels.
        """
        height_js = 'return document.body.scrollHeight'
        step = self.SCROLL_STEP

        page_height = self.browser.execute_script(height_js)
        print("initial page_height", page_height)
        num = ceil(page_height / step)

        i = 1  # number of scroll steps performed so far
        while True:
            height = step + i * step
            self.browser.execute_script('window.scrollTo(0,' + str(height) + ')')
            time.sleep(0.5)

            if i >= num:
                # Give lazy-loaded images time to extend the page.
                time.sleep(3)
                new_num = ceil(self.browser.execute_script(height_js) / step)
                if num == new_num:
                    print("to the end！")
                    print("page height：", height)
                    return height
                num = new_num

            i += 1

    def to_chapter(self, response):
        """Parse one chapter page and yield an ImgItem per image."""
        chapter_dir = response.meta["chapter_dir"]

        # NOTE(review): this scrolls self.browser while parsing the scrapy
        # `response` — presumably a downloader middleware renders responses
        # through the same browser; confirm against the middleware.
        self._scroll_page_to_bottom()

        img_links = response.xpath('//*[@id="content"]/div[2]/div/div/img/@data-original')

        for img_sel in img_links:
            img_link = img_sel.get()
            print("original chapter img：", img_link)
            # BUGFIX: build a fresh item per image. The original mutated and
            # re-yielded one shared ImgItem, so any pipeline that defers
            # processing would see only the last img_link.
            item = ImgItem()
            item['comic_name'] = response.meta["comic_name"]
            item['comic_link'] = self.start_urls[0]
            item['chapter_name'] = response.meta["chapter_name"]
            item['chapter_link'] = response.meta["chapter_link"]
            item['chapter_dir'] = chapter_dir
            item['img_link'] = img_link
            yield item

    def close(self, reason):
        """Shut down the Selenium browser when the spider closes."""
        print("spider closed")
        self.browser.quit()

    def parse_single(self, response):
        """Parse a single chapter URL (set start_urls to a /chapter/ page)."""
        # Comic title.
        comic_name = response.xpath("//div[@class='header']//a[@class='comic-name']/text()").get()
        print("comic name", comic_name)

        # Relative link back to the comic's book page.
        comic_part_link = response.xpath("//div[@class='header']//div[@class='crumbs']/a[@class='comic-name']/@href").get()
        comic_link = self.common_link + comic_part_link
        print("comic link", comic_link)

        # Chapter title; strip the comic name prefix when present.
        chapter_name = response.xpath("//div[@class='header']/div[@class='row']/h1/text()").get()
        # BUGFIX: guard against a missing match — len(None) raised TypeError.
        if chapter_name and comic_name and chapter_name.find(comic_name) != -1:
            chapter_name = chapter_name.replace(comic_name, "").strip()
            print("chapter name", chapter_name)
        else:
            print("chapter name is empty!")

        self._scroll_page_to_bottom()

        img_links = response.xpath('//*[@id="content"]/div[2]/div/div/img/@data-original')

        for img_sel in img_links:
            img_link = img_sel.get()
            print("original chapter img：", img_link)
            # Fresh item per image (see to_chapter).
            item = ImgItem()
            item['comic_name'] = comic_name
            item['comic_link'] = comic_link
            item['chapter_name'] = chapter_name
            item['chapter_link'] = self.start_urls[0]
            item['img_link'] = img_link
            yield item