# https://www.mkzhan.com/211692/
# /211692/644916.html
import requests  # third-party HTTP client used for all page/image requests
import os  # stdlib: filesystem checks and directory creation (was a broken `import OS模块`)
from pyquery import PyQuery as pq  # third-party HTML parsing via CSS selectors
from fake_useragent import UserAgent  # third-party random browser User-Agent generator

# Generator object for random browser User-Agent strings.
ua = UserAgent()

# Request headers that mimic a real browser session for every request below.
headers = {
    # Content types the "browser" accepts
    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    # Session cookies captured from a real browser visit to the site
    'cookie': 'UM_distinctid=1716941d927652-0fb3ea40be025c-335e4e71-144000-1716941d929197; __login_his_sync=0; tourist_expires=1; Hm_lvt_407473d433e871de861cf818aa1405a1=1586608862; _GUID=d8eec002-35da-1ad7-ac12-9ca1c3cb4309; CNZZDATA1261814609=674853875-1586608471-https%253A%252F%252Fwww.mkzhan.com%252F%7C1586608471; readMode=scroll; CNZZDATA1262045698=1069876723-1586605088-https%253A%252F%252Fwww.baidu.com%252F%7C1586607119; redirect_url=%2F211692%2F; Hm_lpvt_407473d433e871de861cf818aa1405a1=1586610847; cn_1262045698_dplus=%7B%22distinct_id%22%3A%20%221716941d927652-0fb3ea40be025c-335e4e71-144000-1716941d929197%22%2C%22%24_sessionid%22%3A%200%2C%22%24_sessionTime%22%3A%201586610857%2C%22%24dp%22%3A%200%2C%22%24_sessionPVTime%22%3A%201586610857%7D',
    # Fresh random User-Agent each run
    'user-agent': ua.random,
    # Anti-hotlink referer: the search page this crawl "comes from"
    'referer': 'https://www.mkzhan.com/search/?keyword=%E6%96%97%E7%BD%97%E5%A4%A7%E9%99%86'
}

def Index():
    """Fetch the comic index page and collect every chapter's detail-page URL.

    Scrapes https://www.mkzhan.com/211692/ (Douluo Dalu), extracts the
    chapter links from the hidden chapter list, reverses the list so the
    oldest chapter comes first, then hands it to Images_data() to download.
    """
    url = 'https://www.mkzhan.com/211692/'
    response = requests.get(url, headers=headers, timeout=5).text
    # Initialize the CSS-selector document
    doc = pq(response)
    # Each <li><a> in the hidden chapter list carries the chapter path
    # in its data-hreflink attribute.
    chapter = doc('.chapter__list-box.clearfix.hide li a').items()
    list_url = []
    for anchor in chapter:
        suffix = anchor.attr('data-hreflink')
        if not suffix:
            # attr() returns None when the attribute is absent; skip it
            # instead of building a bogus ".../None" URL.
            continue
        details_url = 'https://www.mkzhan.com' + suffix
        print(details_url)
        list_url.append(details_url)
    # The site lists newest chapters first; reverse so downloads start at chapter 1.
    manhua = list_url[::-1]
    Images_data(manhua)
import os  # stdlib: directory checks/creation (safe even if already imported above)

# Chapter counter, shared via `global` so folder numbering continues
# sequentially across the whole download run.
count = 1

def Images_data(manhua):
    """Download every page image of every chapter in *manhua*.

    Args:
        manhua: list of chapter detail-page URLs, oldest chapter first.

    Side effects: creates one folder per chapter under D:/漫画/ and writes
    the page images into it as 1.jpg, 2.jpg, ...
    """
    # Declare the counter global so numbering is not reset per call.
    global count
    for chapter_url in manhua:
        response = requests.get(chapter_url, headers=headers, timeout=5).text
        doc = pq(response)
        name_path = 'D:/漫画/第{}话/'.format(count)
        # Create the chapter folder if needed. makedirs (vs. mkdir) also
        # creates the missing parent D:/漫画/ on a fresh machine, and
        # exist_ok avoids a race between the check and the creation.
        if os.path.exists(name_path):
            print('文件夹已存在')
        else:
            os.makedirs(name_path, exist_ok=True)
        # Page images sit in the hidden article container; the real image
        # URL is lazy-loaded through the data-src attribute.
        img_url = doc('.rd-article__pic.hide img').items()
        num = 1
        for src in img_url:
            tupian = src.attr('data-src')
            if not tupian:
                # Missing data-src would make requests.get(None) blow up; skip.
                continue
            print('正在下载第{}话第{}张...请稍等'.format(count, num))
            img_response = requests.get(tupian, headers=headers, timeout=5)
            # 'wb': create/overwrite the file and write raw image bytes.
            with open(name_path + '{}.jpg'.format(num), 'wb') as f:
                f.write(img_response.content)
            num += 1
        count += 1

if __name__ == '__main__':
    # Entry point: crawl the index page, which cascades into downloading
    # every chapter via Images_data().
    Index()