import requests
import os
import time
import threading
import math
from bs4 import BeautifulSoup


# Create a directory (and any missing parents)
def create_dir(name):
    """Create directory *name* if it does not already exist.

    :param name: path of the directory to create
    """
    # exist_ok avoids the check-then-create race of os.path.exists + makedirs.
    os.makedirs(name, exist_ok=True)


# Download a page's HTML
def download_page(url):
    """Fetch *url* and return its body decoded as UTF-8 text.

    :param url: absolute URL of the page to fetch
    :return: response body as a unicode string
    """
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0"}
    # Bug fix: the second positional argument of requests.get() is `params`,
    # so the headers dict was being sent as query parameters and the
    # User-Agent was never applied.  Pass it via the `headers` keyword.
    r = requests.get(url, headers=headers)
    r.encoding = 'utf-8'
    return r.text


# Scrape the index page: map each gallery title to its list of page URLs
def get_pic_link_map(url):
    """Parse the gallery index page and build a title -> page-URL-list map.

    :param url: URL of the gallery index page
    :return: dict mapping gallery title to the list of its paginated URLs
    """
    html = download_page(url)  # fetch the index page
    soup = BeautifulSoup(html, 'html.parser')
    pic_list = soup.find('div', "boxs").find_all('li')  # one <li> per gallery
    title_pic_map = {}
    for item in pic_list:
        pic_link = item.find('a').get('href')
        # The <p> text contains the picture count; slice out the digits
        # (assumes the fixed "...：NN 张"-style text — TODO confirm).
        pic_num_str = item.find('p').contents[0]
        pic_num = pic_num_str[4: -2]
        pic_title = item.find('p', 'p_title').find('a').contents[0]
        # Directory portion of the gallery link (up to and incl. last '/').
        sub_link = pic_link[0: pic_link.rfind('/', 1) + 1]
        src = item.find('img').get('src')  # thumbnail URL of the gallery
        # Gallery id: the 5 characters just before the last '/' in src
        # (site-specific layout — verify if the site changes).
        page_num = src[src.rfind('/', 1) - 5: src.rfind('/', 1)]

        # Four pictures per page; page 1 is "<id>.html" and page j (j >= 2)
        # is "<id>_j.html".  Bug fix: the old loop iterated range(pages) and
        # skipped j == 1, which dropped the FINAL page of every gallery.
        pages = math.ceil(int(pic_num) / 4)
        sub_list = []
        for j in range(1, pages + 1):
            if j == 1:
                sub_list.append(sub_link + page_num + '.html')
            else:
                sub_list.append(sub_link + page_num + '_%d' % j + '.html')
        title_pic_map[pic_title] = sub_list
    return title_pic_map


# Extract the full-size images from one gallery page and download them
def get_pic_list(title, html):
    """Parse one gallery page and download every full-size image on it.

    :param title: gallery title, used as the target directory name
    :param html: HTML source of the gallery page
    """
    soup = BeautifulSoup(html, 'html.parser')
    # Full-size images live in <div class="content"><center>...</center>.
    pic_list = soup.find('div', 'content').find('center').find_all('img')
    create_dir('picture/{}'.format(title))
    get_pic(pic_list, title)  # download and save each image found


# Download every image in the list and save it to disk
def get_pic(pic_list, text):
    """Download each image tag in *pic_list* into ``picture/<text>/``.

    :param pic_list: iterable of <img> tags carrying a 'src' attribute
    :param text: subdirectory name (gallery title) to save files into
    """
    # Hoisted out of the loop: the headers never change between requests.
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0"}
    for img in pic_list:
        link = img.get('src')
        # Bug fix: requests.get()'s second positional argument is `params`;
        # headers must be passed by keyword to be sent as HTTP headers.
        r = requests.get(link, headers=headers)
        with open('picture/{}/{}'.format(text, link.split('/')[-1]), 'wb') as f:
            f.write(r.content)
        time.sleep(1)  # be polite to the server and avoid getting banned


# Worker-thread entry point
def execute(title, url):
    """Fetch the gallery page at *url* and save all of its images."""
    get_pic_list(title, download_page(url))


# Main entry point
def main():
    """Crawl the index page, then download every gallery using up to 5 worker threads."""
    create_dir('picture')
    link = 'https://www.meitulu.com/t/meizi/'
    html_map = get_pic_link_map(link)
    for title, html_list in html_map.items():
        print(str(title) + ': ' + str(html_list))
        queue = list(html_list)  # pages of this gallery still to download
        threads = []
        while len(queue) > 0:
            # Bug fix: the old code removed items from `threads` while
            # iterating over it, which skips elements; rebuild the list.
            threads = [t for t in threads if t.is_alive()]
            while len(threads) < 5 and len(queue) > 0:  # cap the pool at 5 workers
                url = queue.pop(0)
                thread = threading.Thread(target=execute, args=(str(title).rstrip(), url))
                # setDaemon() is deprecated since Python 3.10; set the attribute.
                thread.daemon = True
                thread.start()
                # Log the worker's own name (current_thread() here is always
                # the main thread, which made the old log line misleading).
                print('{}正在下载{}页'.format(thread.name, url))
                threads.append(thread)
            time.sleep(0.1)  # avoid busy-waiting while the pool is full
        # Bug fix: the workers are daemon threads, so without joining them the
        # interpreter could exit while downloads are in flight, truncating files.
        for t in threads:
            t.join()



# Run the crawler only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
