import requests
import os # 操作系统提供的接口函数
import re # 正则表达式对象
from bs4 import BeautifulSoup
from contextlib import closing
from tqdm import tqdm
import time
#from urllib.request import urlretrieve



# target = 'https://www.dmzj.com/info/yaoshenji.html'
# r = requests.get(url=target)
# bs = BeautifulSoup(r.text)
# list_con_li = bs.find('ul',class_='list_con_li')
# comic_list = list_con_li.find_all('a')
# chapter_names = []
# chapter_urls = []
# for comic in comic_list:
    # href = comic.get('href')
    # name = comic.text
    # chapter_names.insert(0,name)
    # chapter_urls.insert(0,href)
    
# print(chapter_names)
# print(chapter_urls)

# 下载本页面的图片试试
# url = 'https://www.dmzj.com/view/yaoshenji/41917.html'
# r = requests.get(url=url)
# html = BeautifulSoup(r.text,'lxml')
# script_info = html.script
# pics = re.findall('\d{13,14}',str(script_info))
# chapterpic_hou = re.findall('\|(\d{5})\|',str(script_info))[0]
# chapterpic_qian = re.findall('\|(\d{4})\|',str(script_info))[0]
# for pic in pics:
    # url = 'https://images.dmzj1.com/img/chapterpic/'+chapterpic_qian+'/'+chapterpic_hou+'/'+pic+'.jpg'
    # print(url)
    # urlretrieve(url, pic+'.jpg')

# 最简单的下载方法
# from urllib.request import urlretrieve
# test = 'https://images.dmzj1.com/img/chapterpic/3059/93328/1523074484507.jpg'
# urlretrieve(test, '1523074484507.jpg')

# os.path.join(path1[, path2[, ...]])	把目录和文件名合成一个路径
# os.listdir(path)
# 返回path指定的文件夹包含的文件或文件夹的名字的列表。

# Output directory for the comic (named after the comic's title).
save_div = '妖神记'
# makedirs with exist_ok avoids the racy listdir-then-mkdir check.
os.makedirs(save_div, exist_ok=True)

# Index page listing every chapter of the comic.
target = 'https://www.dmzj.com/info/yaoshenji.html'

# Fetch the chapter list: each <a> inside <ul class="list_con_li"> is one
# chapter link. The page lists newest-first, so insert(0, ...) reverses the
# lists to oldest-first.
r = requests.get(url=target)
bs = BeautifulSoup(r.text, 'lxml')
list_con_li = bs.find('ul', class_='list_con_li')
cartoon_list = list_con_li.find_all('a')
chapter_names = []
chapter_urls = []
# Only the first 10 entries of the index are taken in this run.
for cartoon in cartoon_list[:10]:
    href = cartoon.get('href')
    name = cartoon.text
    chapter_names.insert(0, name)
    chapter_urls.insert(0, href)

# Download each chapter, oldest first.
for i, url in enumerate(tqdm(chapter_urls)):
    # The image server rejects hotlinked requests; send the chapter page as
    # the Referer (the original code built this dict but never used it).
    download_header = {
        'Referer': url
    }
    name = chapter_names[i]
    # Strip every '.' so the chapter title is a safe directory name.
    name = name.replace('.', '')
    chapter_save_dir = os.path.join(save_div, name)
    # Skip chapters whose directory already exists (assumed downloaded).
    if name not in os.listdir(save_div):
        os.mkdir(chapter_save_dir)
        r = requests.get(url=url)
        html = BeautifulSoup(r.text, 'lxml')
        script_info = html.script
        # Image file names are 13- or 14-digit millisecond timestamps embedded
        # in the page's first <script> block.
        pics = re.findall(r'\d{13,14}', str(script_info))
        # Sort into page order in place. A 13-digit id is a timestamp whose
        # trailing digit was lost, so compare it as if right-padded with one
        # zero -- via the sort key only. (The original appended '0' to the id
        # and later stripped a trailing '0', which both discarded the result
        # of sorted() and corrupted genuine 14-digit ids ending in '0'.)
        pics.sort(key=lambda p: int(p) * 10 if len(p) == 13 else int(p))
        # The URL path also needs a 4-digit and a 5-digit id, '|'-delimited
        # in the same script block.
        chapterpic_hou = re.findall(r'\|(\d{5})\|', str(script_info))[0]
        chapterpic_qian = re.findall(r'\|(\d{4})\|', str(script_info))[0]

        for idx, pic in enumerate(pics):
            # distinct name: do not clobber the outer loop variable `url`
            pic_url = ('https://images.dmzj1.com/img/chapterpic/'
                       + chapterpic_qian + '/' + chapterpic_hou + '/' + pic + '.jpg')
            pic_name = '%03d.jpg' % (idx + 1)
            pic_save_path = os.path.join(chapter_save_dir, pic_name)
            print(pic_url)
            # Stream the image to disk in chunks; closing() guarantees the
            # connection is released even if the transfer fails midway.
            with closing(requests.get(pic_url, headers=download_header, stream=True)) as response:
                if response.status_code == 200:
                    with open(pic_save_path, "wb") as file:
                        for data in response.iter_content(chunk_size=1024):
                            file.write(data)
                else:
                    print('链接异常')
        # Be polite to the server between chapters.
        time.sleep(10)
            


















