import requests
import os
import time
import threading
import math
from bs4 import BeautifulSoup


# 创建文件夹
# Create a directory tree if it does not already exist.
def create_dir(name):
    """Create directory *name* (and any missing parents).

    Uses ``exist_ok=True`` instead of the original check-then-create
    pattern: that pattern races under the thread pool in main() — two
    workers could both see the directory missing and one would crash
    with FileExistsError.
    """
    os.makedirs(name, exist_ok=True)


# 用于下载页面
# Fetch a page and return its text decoded as UTF-8.
def download_page(url):
    """Download *url* and return the response body as UTF-8 text.

    BUG FIX: the headers dict was passed positionally, so requests bound
    it to the ``params`` argument (query string) and the User-Agent
    header was never actually sent. It must be passed as ``headers=``.
    """
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0"}
    r = requests.get(url, headers=headers)
    r.encoding = 'utf-8'
    return r.text


# 获取首页的图片列表路径
# Collect the detail-page URL of every gallery listed on the index page.
def get_pic_link(url):
    """Parse the forum index at *url* and return a list of per-thread
    detail-page URLs.

    Each centered table row inside the 't z' div of #main holds one
    thread; its <h3><a href> tail (between the last '/' and the '.htm'
    suffix) is grafted onto the html_data base URL.
    """
    soup = BeautifulSoup(download_page(url), 'html.parser')
    rows = (soup.find('div', id='main')
                .find('div', 't z')
                .find_all('tr', {'align': 'center'}))
    links = []
    for row in rows:
        href = row.find('h3').find('a').get('href')
        # keep from the last '/' up to (but excluding) the final 4 chars,
        # then re-append 'html' to form the data-page filename
        tail = href[href.rfind('/', 1): -4]
        # alternate board: 'https://a1.0e8c2c0f.rocks/pw/html_data/14/2004'
        links.append('http://a1.0e8c2c0f.rocks/pw/html_data/16/2004' + tail + 'html')
    return links


# 获取每个页面的大图
# Extract the full-size images from one gallery page and save them.
def get_pic_list(html):
    """Parse a single gallery page's *html*: read its title, create a
    matching subfolder under picture/, and download every <img> found
    in the 'f14' div of the post content.
    """
    soup = BeautifulSoup(html, 'html.parser')
    title = soup.find_all('span', id='subject_tpc')[0].get_text()
    images = (soup.find('div', 'tpc_content')
                  .find('div', 'f14')
                  .find_all('img'))
    create_dir('picture/{}'.format(title))
    get_pic(images, title)  # save each image into the title folder


# 循环获取当前页面的图片,并保存
# Download every image tag in pic_list and save it under picture/<text>/.
def get_pic(pic_list, text):
    """Download each <img> in *pic_list* into ``picture/<text>/``,
    naming each file after the last path segment of its src URL.

    BUG FIX: the headers dict was passed positionally to requests.get,
    which bound it to ``params`` instead of ``headers`` — the
    User-Agent was never sent. Also hoisted the constant dict out of
    the loop.
    """
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0"}
    for img in pic_list:
        link = img.get('src')
        r = requests.get(link, headers=headers)
        filename = link.split('/')[-1]
        with open('picture/{}/{}'.format(text, filename), 'wb') as f:
            f.write(r.content)



# 线程方法
# Worker-thread entry point.
def execute(url):
    """Fetch one gallery page at *url* and download all its pictures."""
    get_pic_list(download_page(url))


# 主方法
# Entry point: scrape the index page and download each gallery with a
# small pool of worker threads.
def main():
    """Build the work queue from the index page, then drain it with at
    most ``max_workers`` concurrent download threads.

    Fixes over the original:
    - pruning finished threads removed items from ``threads`` while
      iterating it, which skips elements; rebuild the list instead
    - ``thread.setDaemon(True)`` is deprecated → ``thread.daemon``
    - the log printed ``threading.current_thread().name`` which is
      always "MainThread"; print the worker's own name
    - the scheduling loop busy-spun while the pool was full; sleep
      briefly between passes
    - workers were daemons and were never joined, so in-flight
      downloads were killed the moment the queue emptied; join them
    """
    create_dir('picture')
    link = 'http://a1.0e8c2c0f.rocks/pw/thread.php?fid=16'
    # link = 'https://a1.0e8c2c0f.rocks/pw/thread.php?fid=14'
    queue = list(get_pic_link(link))
    max_workers = 6  # cap on concurrent download threads
    threads = []
    while queue:
        # drop finished workers (rebuild, never mutate while iterating)
        threads = [t for t in threads if t.is_alive()]
        while len(threads) < max_workers and queue:
            url = queue.pop(0)
            if url.find('.html') != -1:
                worker = threading.Thread(target=execute, args=(url,))
                worker.daemon = True
                worker.start()
                print('{}正在下载{}页'.format(worker.name, url))
                threads.append(worker)
        time.sleep(0.1)  # don't busy-spin while the pool is full
    # wait for the remaining daemon workers before the process exits
    for t in threads:
        t.join()


if __name__ == '__main__':
    main()
