'''
import requests
import parsel
import os

url = 'https://www.mkzhan.com/207622/'

hesders = {
    'cookie': 'UM_distinctid=182aa4bf14d7ab-00ada9aeafeaef-45410429-100200-182aa4bf14e678; CNZZDATA1262045698=1404179754-1660711535-%7C1660711535; __login_his_sync=0; tourist_expires=1; CNZZDATA1261814609=1122735279-1660714011-https%253A%252F%252Fwww.mkzhan.com%252F%7C1660714011; redirect_url=%2F207622%2F476064.html; readMode=scroll; cn_1262045698_dplus=%7B%22distinct_id%22%3A%20%22182aa4bf14d7ab-00ada9aeafeaef-45410429-100200-182aa4bf14e678%22%2C%22%24_sessionid%22%3A%200%2C%22%24_sessionTime%22%3A%201660714591%2C%22%24dp%22%3A%200%2C%22%24_sessionPVTime%22%3A%201660714591%7D',
    'referer': 'https://www.mkzhan.com/207622/',
    'sec-fetch-dest': 'document',
    'sec-fetch-mode': 'navigate',
    'sec-fetch-site': 'same-origin',
    'sec-fetch-user': '?1',
    'upgrade-insecure-requests': '1',
    'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36',
}
response = requests.get(url=url, headers=hesders)
#print(response.text)

selector = parsel.Selector(response.text)
#第一次解析，提前li标签
#attr()属性选择器，选择标签里面的属性
#get（） 获取第一个标签数据内容＞＞＞字符串
#getall（）获取所有标签数据内容＞＞＞列表
lis = selector.css('.chapter__list-box li')
for li in list(reversed(lis[2:])):
    chapter_id = li.css('a::attr(data-chapterid)').get()
    title = li.css('a::text').getall()[-1].strip()#strip()取出左右空格

    index_url = 'https://comic.mkzcdn.com/chapter/content/v1/'
    data = {
        'chapter_id': chapter_id,
        'comic_id': '207622',
        'format': '1',
        'quality': '1',
        'sign': '706674ae2ff6b6afa060e3325afdf198',
        'type': '1',
        'uid': '57216438',
    }
    json_data = requests.get(url=index_url, params=data, headers=hesders).json()
    img_url_list = json_data['data']['page']
    filename = f'{title}\\'
    if not os.path.exists(filename):
        os.mkdir(filename)
    page = 1
    for img in img_url_list:
        img_url = img['image']
        img_content = requests.get(url=img_url, headers=hesders).content
        print(img_url)
        with open(filename + str(page) + '.jpg', mode='wb') as f:
            f.write(img_content)
        page += 1
    break

'''



# 爬取动漫之家----Yaoshenji

# 导入库
import re, os, time, requests
from bs4 import BeautifulSoup

# Directory that all downloaded chapters are saved under.
save_dir = '妖神记'
# os.makedirs with exist_ok=True replaces the fragile
# `save_dir not in os.listdir('./')` check: it is race-free and does not
# scan the whole working directory just to test for one name.
os.makedirs(save_dir, exist_ok=True)

# 1. Collect every chapter name and chapter URL from the comic's index page.

# Index page of the comic.
url = 'https://www.dmzj.com/info/yaoshenji.html'

# Fetch and parse the page.
response = requests.get(url)
soup = BeautifulSoup(response.text, 'lxml')

# Chapter links are <a> tags inside the chapter <ul>; the class selector
# picks the chapter list out of the page.
list_con_li = soup.find('ul', class_="list_con_li autoHeight")
cartoon_list = list_con_li.find_all('a')

# Chapter names and their URLs.  The page lists chapters newest-first, so
# collect with append (O(1) each) and reverse once at the end — the
# original insert(0, ...) per item was accidentally O(n^2).
charpter_names = []
charpter_urls = []

for cartoon in cartoon_list:
    charpter_names.append(cartoon.text)
    charpter_urls.append(cartoon.get('href'))

charpter_names.reverse()
charpter_urls.reverse()


# 2. For each chapter, fetch its page and download every image in it.
for i, url in enumerate(charpter_urls):
    name = charpter_names[i]

    # '.' in a directory name can confuse path handling, so strip every
    # occurrence from the chapter name (str.replace removes them all in
    # one pass — the original while-loop was unnecessary).
    name = name.replace('.', '')

    # One sub-directory per chapter; a chapter whose directory already
    # exists is treated as already downloaded and skipped (crude resume).
    charpter_save_dir = os.path.join(save_dir, name)
    if name not in os.listdir(save_dir):
        os.mkdir(charpter_save_dir)

        response = requests.get(url)
        html = BeautifulSoup(response.text, 'lxml')

        # The image ids are packed into the page's first <script> tag.
        script_info = html.script

        # Image ids are 13- or 14-digit numbers separated by '|'.
        # Raw string so the regex escapes are not interpreted by Python.
        pics = re.findall(r'\|(\d{13,14})\|', str(script_info))

        # Pad 13-digit ids with a trailing '0' so every id is 14 digits
        # and they compare consistently as integers when sorted below.
        # BUG FIX: the original tested len(pics) (the list length)
        # instead of len(pic) (the id string), so no id was ever padded.
        for j, pic in enumerate(pics):
            if len(pic) == 13:
                pics[j] = pic + '0'

        # Chapter pages are ordered by the numeric id.
        pics = sorted(pics, key=lambda x: int(x))

        # The two path components of the image URL are a 5-digit and a
        # 4-digit number, also '|'-separated in the script text.
        charpter_hou = re.findall(r'\|(\d{5})\|', str(script_info))[0]
        charpter_qian = re.findall(r'\|(\d{4})\|', str(script_info))[0]

        headers = {
            'Referer': url,  # basic anti-scraping: image host checks the referer
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'
        }

        for idx, pic in enumerate(pics):
            # BUG FIX: the original compared pic[:-1] (all but the last
            # digit) with '0'; the intent is to test the LAST digit and,
            # when it is the padding '0' added above, drop it to recover
            # the original 13-digit id.
            # NOTE(review): a genuine 14-digit id ending in 0 would also
            # be truncated here — confirm against the site's URLs.
            if pic[-1] == '0':
                pic_id = pic[:-1]
            else:
                pic_id = pic
            # Renamed from `url` to `img_url` so the chapter URL bound by
            # the outer loop (and used in headers['Referer']) is not
            # clobbered.
            img_url = ('https://images.dmzj.com/img/chapterpic/'
                       + charpter_qian + '/' + charpter_hou + '/' + pic_id + '.jpg')
            print(img_url)

            # Zero-padded sequence number: 001.jpg, 002.jpg, ...
            pic_name = '%03d.jpg' % (idx + 1)
            pic_save_path = os.path.join(charpter_save_dir, pic_name)

            # Download and save the image; only write the file on a 200.
            resp = requests.get(img_url, headers=headers)
            if resp.status_code == 200:
                with open(pic_save_path, 'wb') as f:
                    f.write(resp.content)
            else:
                print('链接异常')






