"""
爬虫思路：
（一）数据来源分析
    1、需求分析
        获取每一话的 “标题” 与 "链接"
        每一话的图片 img  src=""  ==> img_url
    2、接口分析
        用selenium  ==> 所见即所得
        img  src=""  ==> img_url

（二）爬虫代码实现
    1、发送请求
    2、获取数据
    3、解析数据
    4、保存数据

selenium库的使用：
    0、配置浏览器驱动（Python根目录）
    1、实例化webdriver对象
    2、配置options参数
    3、发送请求
    4、元素定位
    5、内容获取
        - 获取文本内容
        - 获取元素属性
    6、定义行为链
        - 实例化ActionChain对象
        - 定义行为
        - 执行行为链
"""
# 第一步：导入webdriver模块
import math
import os
import time

import httpx
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
# 显示等待库
from selenium.webdriver.support.ui import WebDriverWait             # 等待对象
from selenium.webdriver.support import expected_conditions as EC    # 预期的条件


class ChapterCrawler:
    """Crawl every chapter of a comic on www.mkzhan.com.

    Drives a real Chrome browser with Selenium (WYSIWYG: lazily loaded
    images only appear after scrolling), collects each chapter's image
    URLs, and downloads them with httpx into
    ``comic/<comic title>/<chapter title>/<page>.jpg``.
    """

    def __init__(self, url):
        """
        :param url: comic detail-page URL, e.g. https://www.mkzhan.com/207622/
        """
        # Selenium 4 removed the `executable_path` keyword; the driver
        # binary is now passed through a Service object.
        self.chrome = webdriver.Chrome(service=Service("chromedriver.exe"))
        self.url = url
        self.li_a = []  # <a> elements of the chapter list (filled by get_list)
        # Cookies for a logged-in session.  Selenium requires each cookie
        # as a dict with at least "name", "value" and "domain" keys, and
        # they can only be added after a page on that domain was loaded.
        self.cookies = [
            {
                "name": "__login_his_sync",
                "domain": "www.mkzhan.com",
                'value': '0',
            },
            {
                "name": "tourist_expires",
                "domain": 'www.mkzhan.com',
                'value': '1',
            },
            {
                "name": "LOGINSIGN",
                "domain": "www.mkzhan.com",
                'value': '64174088%3A8fd0f8ca8873b4e3463fb3c9061f6492',
            },
            {
                "name": "uInfo",
                "domain": "www.mkzhan.com",
                'value': '%7B%22uid%22%3A%2264174088%22%2C%22sign%22%3A%228fd0f8ca8873b4e3463fb3c9061f6492%22%2C%22mobile%22%3A%220%22%2C%22email%22%3A%22%22%2C%22username%22%3A%22mk_647dd308332c4%22%2C%22nickname%22%3A%22%E6%BC%AB%E5%8F%8B301284%22%2C%22sex%22%3A%220%22%2C%22citycode%22%3A%220%22%2C%22birthday%22%3A%220%22%2C%22username_x%22%3A%221%22%2C%22password_x%22%3A%221%22%2C%22avatar%22%3A%22http%3A%2F%2Foss.mkzcdn.com%2Fdefault%2Fmember%2Favatar%2F10.png%22%2C%22avatar_pendant_id%22%3A%220%22%2C%22avatar_pendant%22%3A%22%22%2C%22role%22%3A%220%22%2C%22is_author%22%3A%220%22%2C%22identify%22%3A%220%22%2C%22is_first_vip%22%3A%221%22%2C%22is_first_sign_pay%22%3A%221%22%2C%22is_first_sign_pay_apple%22%3A%221%22%2C%22register_time%22%3A%221685967624%22%2C%22signd_type%22%3A%220%22%2C%22signd_type_apple%22%3A%220%22%2C%22theme_id%22%3A%220%22%2C%22status%22%3A%221%22%2C%22is_young%22%3A%220%22%2C%22comic_read_time_length%22%3A%220%22%2C%22cityname%22%3A%22%E5%8C%97%E4%BA%AC%E5%B8%82%20%E4%B8%9C%E5%9F%8E%E5%8C%BA%22%2C%22gold%22%3A%220%22%2C%22integral%22%3A%220%22%2C%22integral_expire%22%3A%220%22%2C%22card_count%22%3A%220%22%2C%22coupon_count%22%3A%220%22%2C%22ticket%22%3A%220%22%2C%22vip_end_time%22%3A%221687263624%22%2C%22read_card_end_time%22%3A%220%22%2C%22is_vip%22%3A%221%22%2C%22vip_type%22%3A%222%22%2C%22vip_baron_time%22%3A%221687263624%22%2C%22vip_viscount_time%22%3A%221687263624%22%2C%22diamond%22%3A%220%22%2C%22honour_time_knight%22%3A%220%22%2C%22honour_time_duke%22%3A%220%22%2C%22honour_time_king%22%3A%220%22%2C%22lucky%22%3A%220%22%7D',
            },
            {
                "name": "redirect_url",
                "domain": "www.mkzhan.com",
                'value': '%2Fuser%2F',
            },
        ]
        self.url_list = []       # absolute chapter URLs (filled by parse_list)
        self.title = ""          # comic title, used as a directory name
        self.chapter_title = ""  # current chapter title, used as a directory name

    def login(self):
        """Open the comic page, install the session cookies, and dismiss
        the SVIP login pop-up if it shows up."""
        self.chrome.get(self.url)

        # add_cookie only works after the target site has been requested;
        # drop whatever default cookies the first load set, then install ours.
        self.chrome.delete_all_cookies()
        for cookie in self.cookies:
            self.chrome.add_cookie(cookie)

        # Best-effort: close the SVIP login dialog.  It is not always shown,
        # so a timeout (or any other failure) here is logged and ignored.
        try:
            wait = WebDriverWait(self.chrome, 10)
            element = wait.until(
                EC.presence_of_element_located((By.XPATH, '//span[@class="layui-layer-setwin"]/a'))
            )
            element.click()
        except Exception as e:
            print(e)

    def get_list(self):
        """Expand the full chapter list and grab its links and the comic title."""
        # Click the "expand" arrow so every chapter <li> is present in the DOM.
        self.chrome.find_element(By.XPATH, '//span[@class="down"]').click()

        # The <li> elements carry several class values (e.g.
        # class="j-chapter-item chapter__item ...").  A CSS selector matches
        # the combination directly; the equivalent XPath would need contains():
        #   //li[contains(@class, "j-chapter-item")]/a
        self.li_a = self.chrome.find_elements(By.CSS_SELECTOR, '.j-chapter-item.chapter__item a')  # . is class, # is id
        self.title = self.chrome.find_element(By.CSS_SELECTOR, '.comic-title.j-comic-title').text

    def parse_list(self):
        """Convert the chapter <a> elements into absolute chapter URLs."""
        # Each link stores its relative path in the data-hreflink attribute.
        self.url_list.extend(
            'https://www.mkzhan.com' + element.get_attribute('data-hreflink')
            for element in self.li_a
        )

    def get_comic(self):
        """Visit every chapter, scroll it to the bottom so the lazily
        loaded images appear, then download each page image.

        Requires login()/get_list()/parse_list() to have run first.
        """
        # One httpx client (and its connection pool) reused for every image,
        # instead of opening and closing a client per download.
        with httpx.Client() as client:
            for url in self.url_list:
                self.chrome.get(url)
                # Wait until at least one page image exists in the DOM.
                WebDriverWait(self.chrome, 10).until(
                    EC.presence_of_element_located((By.XPATH, '//div[@id="pages-tpl"]//img'))
                )

                # Scroll down in steps of ~20% of the viewport height so the
                # lazy-loaded images get fetched.  The page grows while images
                # load, so the total height is re-read on every step.
                view_height = self.chrome.execute_script("return window.screen.availHeight;")
                step = math.ceil(view_height * 0.2)
                scroll_pos = step
                while True:
                    self.chrome.execute_script(f"window.scrollTo(0, {scroll_pos});")
                    total_height = self.chrome.execute_script("return document.body.scrollHeight")
                    scroll_pos += step
                    if scroll_pos > total_height:
                        # Final jump to the very bottom, then stop.
                        self.chrome.execute_script("window.scrollTo(0, document.body.scrollHeight);")
                        break
                    time.sleep(0.2)

                img_list = self.chrome.find_elements(By.XPATH, '//div[@id="pages-tpl"]//img')
                self.chapter_title = self.chrome.find_element(By.XPATH, '//a[@class="last-crumb"]').text
                print(f"正在获取【{self.chapter_title}】...")
                self.make_dir()  # create the directory for this chapter

                for i, img in enumerate(img_list, start=1):
                    src = img.get_attribute("src")
                    response = client.get(url=src)
                    # response.content is the raw binary image data
                    with open(f'comic/{self.title}/{self.chapter_title}/{i}.jpg', 'wb') as f:
                        f.write(response.content)

    def make_dir(self):
        """Ensure comic/<title>/<chapter_title>/ exists.

        makedirs creates the whole chain (comic, the comic's folder, the
        chapter's folder) in one call and is a no-op when it already exists.
        """
        os.makedirs(f'comic/{self.title}/{self.chapter_title}', exist_ok=True)

    def run(self):
        """Execute the full pipeline, then quit the browser on <Enter>."""
        self.login()       # log in with the prepared cookies
        self.get_list()    # collect the chapter list
        self.parse_list()  # build absolute chapter URLs
        self.get_comic()   # download every chapter's images

        # Keep the browser open until the user confirms, then quit.
        input('>>>')
        self.chrome.quit()


if __name__ == '__main__':
    # Show an example URL, then ask the user which comic to crawl.
    print('例如：https://www.mkzhan.com/207622/')
    url = input("请输入你要获取的漫画详情页URL\n>>>:")
    crawler = ChapterCrawler(url)
    crawler.run()
