# _*_ coding: utf-8 _*_
# @Time: 2023/3/28 23:25
# @Author: ??
# @File: 91hanman

import requests
from urllib.parse import quote
from lxml import etree
import os
from re import sub

base_url = 'https://9y03.xyz'  # site root, prepended to the relative links scraped below
kw = input('请输入要下载的查询词:')  # search keyword; also used as the top-level download folder name

# 通用请求
def get_page(url):
    """Fetch *url* and return the response body as text.

    Sends a browser-like user-agent/cookie/referer so the site serves the
    normal page.

    :param url: absolute URL to request.
    :return: the page text on HTTP 200, or None on a non-200 status or on
             any request failure (connection error, timeout). Callers must
             check for None before parsing.
    """
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
        'cookie': 'SL_G_WPT_TO=zh; SL_GWPT_Show_Hide_tmp=1; SL_wptGlobTipTmp=1',
        'referer': 'https://9y03.xyz/',
    }
    try:
        # timeout=(connect, read)
        with requests.get(url, headers=headers, timeout=(20, 50)) as response:
            if response.status_code == 200:
                return response.text
            # Previously returned the string 'code error!', which callers
            # then parsed as HTML; None makes the failure explicit.
            return None
    except requests.RequestException:
        # The original caught the *builtin* ConnectionError, which does not
        # match requests' own ConnectionError/Timeout exceptions.
        return None

def parse(html):
    """Parse the search-results page and yield the comic's chapters.

    Follows the first search hit to its detail page, then yields one
    (chapter_name, chapter_url) pair per chapter listed there.

    :param html: text of the search-results page, or None on fetch failure.
    :return: generator of (chapter_name, chapter_url) tuples; empty when the
             search failed or returned no hits.
    """
    if not html:
        return  # fetch failed upstream; nothing to parse
    dom_tree = etree.HTML(html)
    hits = dom_tree.xpath('//p[@class="comic__title"]/a/@href')
    if not hits:
        return  # no search results for the keyword
    detail_url = base_url + hits[0]
    # detail_url example: https://9y03.xyz/comic/mimijiaohua

    detail_page = get_page(detail_url)
    if not detail_page:
        return  # detail page could not be fetched
    detail_page_tree = etree.HTML(detail_page)
    lis = detail_page_tree.xpath('//div[@class="chapter__list clearfix"]/ul/li')
    for li in lis:
        chapter_link = li.xpath('./a/@href')[0]
        chapter_name = li.xpath('./a/text()')[0].strip()
        # chapter URL example: https://9y03.xyz/chapter/3683 (first chapter)
        yield chapter_name, base_url + chapter_link

def get_chapter(pics_url):
    """Collect every image URL/name on one chapter page.

    :param pics_url: absolute URL of a chapter page.
    :return: generator; each item is a list of (img_url, img_name) pairs for
             one container div. img_url comes from the lazy-load
             ``data-original`` attribute, img_name from the ``alt`` attribute.
    """
    pic_page_html = get_page(pics_url)
    if not pic_page_html:
        return  # fetch failed; etree.HTML(None) would raise otherwise
    pic_page_tree = etree.HTML(pic_page_html)
    divs = pic_page_tree.xpath('/html/body/div[2]/div[5]')
    for div in divs:
        img_url = div.xpath('./div/img/@data-original')
        img_name = div.xpath('./div/img/@alt')
        # Pair each image URL with its display name so the downloader
        # receives both in one item.
        yield list(zip(img_url, img_name))

def download(title, img_data):
    """Save every image of one chapter under ``<kw>/<title>/``.

    :param title: chapter name; used as the folder name after stripping
                  characters that are illegal in Windows file names.
    :param img_data: generator of [(img_url, img_name), ...] lists as
                     produced by get_chapter.
    """
    # Sanitize BEFORE building the path: the original called sub() only
    # after os.makedirs, so a title containing : * ? " < > | crashed the
    # directory creation (and the printed name disagreed with the folder).
    illegal = r'[\\/:\*\?"<>\|]'
    title = sub(illegal, '', title)
    folder_path = os.path.join(kw, title)
    os.makedirs(folder_path, exist_ok=True)

    print(f'正在下载 >>> {title} <<<')
    for data in img_data:
        for url, name in data:
            # get_chapter yields [(url, name), ...] per div, hence two loops.
            img_content = requests.get(url).content
            # Image names come from the page's alt text, so sanitize them too.
            file_path = os.path.join(folder_path, sub(illegal, '', name))
            with open(file_path, mode='wb') as f:
                f.write(img_content)

def main():
    """Entry point: search for the keyword, then download every chapter found."""
    search_url = f'https://9y03.xyz/index.php/search?key={quote(kw)}'
    search_html = get_page(search_url)
    for chapter_title, chapter_url in parse(search_html):
        # Stream each chapter's image data straight into the downloader.
        download(chapter_title, get_chapter(chapter_url))

if __name__ == '__main__':
    main()