import os
import threading
import time
from urllib import request

from bs4 import BeautifulSoup


# 创建文件夹
def create_dir(name):
    if not os.path.exists(name):
        os.makedirs(name)


# 用于下载页面
def download_page(url):
    """Fetch *url* through a hard-coded HTTP proxy and return the body as text.

    A desktop-Chrome User-Agent header is sent because many sites reject
    urllib's default one. The response body is decoded as UTF-8.

    Fixes over the original: the response object is now closed
    deterministically (it was leaked before), and dead commented-out code
    was removed.
    """
    # Candidate proxy servers; only proxies[2] is actually used below.
    # NOTE(review): these free proxies may well be dead — confirm before use.
    proxies = [{'http': '211.94.69.74:8080'},
               {'http': '113.128.90.252:48888'},
               {'http': '113.128.91.92:48888'}]
    proxy_support = request.ProxyHandler(proxies[2])
    opener = request.build_opener(proxy_support)
    # Spoof a browser User-Agent so the site serves its normal HTML.
    opener.addheaders = [('User-Agent',
                          'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36')]
    # Use the opener directly rather than installing it globally; the
    # context manager closes the connection even if decoding raises.
    with opener.open(url) as response:
        return response.read().decode("utf-8")


def download_img(url):
    """Download one image *url* into the local ``bing/`` directory.

    Installs a global opener (proxy + browser User-Agent) so that the
    ``request.urlretrieve`` call below also goes through the proxy, then
    sleeps one second to stay polite to the server.
    """
    # Candidate proxy servers; only the third entry is used.
    proxies = [{'http': '211.94.69.74:8080'},
               {'http': '113.128.90.252:48888'},
               {'http': '113.128.91.92:48888'}]
    handler = request.ProxyHandler(proxies[2])
    opener = request.build_opener(handler)
    # Spoof a browser User-Agent so the site serves the image normally.
    opener.addheaders = [('User-Agent',
                          'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36')]
    # urlretrieve only honours a globally installed opener.
    request.install_opener(opener)

    # Filename = last URL path segment with any query string stripped, + ".jpg".
    last_segment = url.split('/')[-1]
    title = last_segment.split('?')[0] + '.jpg'
    request.urlretrieve(url, filename='bing/{}'.format(title))
    time.sleep(1)  # don't hammer the site; avoid getting banned


# 获取页面路径
def get_pic_link(url):
    """Return the list of gallery page URLs: the base page plus ?p=2..?p=19.

    Page 1 of the site is just the bare base URL, so there is deliberately
    no ``?p=1`` entry — 19 URLs in total.
    """
    pages = [url]
    pages.extend('{}/?p={}'.format(url, page) for page in range(2, 20))
    return pages


# 获取每个页面的大图
def get_pic_list(html):
    """Parse one gallery page *html* and hand every image entry to get_pic().

    Looks for ``div.container`` wrappers and, inside each, the ``div.item``
    tags that represent individual wallpapers.
    """
    soup = BeautifulSoup(html, 'html.parser')
    for container in soup.find_all('div', 'container'):
        items = container.find_all('div', 'item')
        if items:
            get_pic(items)  # download every picture found in this container


# 循环获取当前页面的图片,并保存
def get_pic(pic_list):
    """Download every image in *pic_list* (BeautifulSoup ``div.item`` tags).

    For each item, rewrites the photo href so everything after the last
    ``=`` becomes ``download`` (the site's full-resolution endpoint), then
    fetches it via download_img().

    Fix over the original: the ``title`` local was computed with two extra
    ``.find`` calls but never used (and would crash on items without a
    ``description`` div), so it has been removed.
    """
    for item in pic_list:
        href = item.find('div', 'progressive').find('a').get('href')
        # Swap the last query value for 'download' to get the direct link.
        href = href[0: href.rfind('=') + 1] + 'download'
        link = 'https://bing.ioliu.cn' + href
        download_img(link)


# 线程方法
def execute(url):
    """Worker-thread body: fetch one gallery page and download its images."""
    get_pic_list(download_page(url))


# 主方法
def main():
    """Entry point: create the output dir, enumerate gallery pages, and
    download them with at most 5 worker threads.

    Fixes over the original:
    - pruning dead threads no longer removes items from ``threads`` while
      iterating it (which skips elements);
    - ``thread.daemon = True`` replaces the deprecated ``setDaemon``;
    - worker threads are joined at the end — previously ``main`` returned
      as soon as the queue emptied, killing the daemon workers mid-download;
    - a short sleep avoids busy-spinning while all workers are busy;
    - the progress line prints the new worker's name instead of
      ``current_thread()`` (which is always the main thread here).
    """
    create_dir('bing')
    link = 'https://bing.ioliu.cn'
    queue = list(get_pic_link(link))  # all pages that contain full-size images
    threads = []
    while queue:
        # Drop finished workers; rebuild the list instead of removing in-place.
        threads = [t for t in threads if t.is_alive()]
        while len(threads) < 5 and queue:  # cap concurrency at 5 threads
            url = queue.pop(0)
            thread = threading.Thread(target=execute, args=(url,))
            thread.daemon = True
            thread.start()
            print('{}正在下载{}页'.format(thread.name, url))
            threads.append(thread)
        time.sleep(0.1)  # yield instead of spinning while workers are busy
    # Wait for in-flight downloads; daemon threads die when main exits.
    for thread in threads:
        thread.join()


# Run only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
