import os
import re
from queue import Queue
from threading import Thread
from time import time
from urllib.parse import urlsplit

import requests
from bs4 import BeautifulSoup

# Browser-like request headers captured from a real Edge/Chromium session.
# Shared by the listing-page fetches and the image downloads; the 'referer'
# entry matters because fabiaoqing.com checks it on image requests
# (hotlink protection).
headers = {
'accept': 'image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
'cache-control': 'no-cache',
'pragma': 'no-cache',
'referer': 'https://fabiaoqing.com/',
'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'sec-fetch-dest': 'image',
'sec-fetch-mode': 'no-cors',
'sec-fetch-site': 'cross-site',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.',
'Cookie': 'H_PS_PSSID=36547_38106_38271_38130_37910_37862_38173_38289_38226_38035_37923_38314_26350_22157_37881; ZFY=:AiKUSBMLrq:Bgu3cNSpZRJybPKK7YZItd4NvBQZelvDY:C; BAIDUID_BFESS=461CD7DA37D87D2CBFB259DCB774C46D:FG=1'
    }
class DownloadBiaoqingbao(Thread):
    """Worker thread: repeatedly takes a listing-page URL from the shared
    queue and downloads every meme image found on that page."""

    def __init__(self, queue):
        """Store the shared task queue and make sure the output directory exists.

        queue -- queue.Queue of listing-page URL strings to process.
        """
        super().__init__()
        self.queue = queue
        self.path = 'biaoqingbao'
        if not os.path.exists(self.path):
            os.makedirs(self.path)

    def run(self):
        """Consume URLs forever.

        task_done() is signalled in a finally-clause so the main thread's
        queue.join() completes even when a page download raises.
        """
        while True:
            page_url = self.queue.get()
            try:
                download_biaoqingbaos(page_url, self.path)
            finally:
                self.queue.task_done()


def download_biaoqingbaos(url, path):
    """Download every meme image linked from one fabiaoqing.com listing page.

    Parameters
    ----------
    url : str
        Listing-page URL, e.g. https://fabiaoqing.com/biaoqing/lists/page/1.html
    path : str
        Existing directory the image files are written into.

    Images are saved as ``<sanitized title><extension>``.  A failure on one
    image is logged and skipped; it no longer aborts the rest of the page.
    """
    response = requests.get(url=url, headers=headers, timeout=30)
    soup = BeautifulSoup(response.content, 'lxml')
    img_list = soup.find_all('img', class_='ui image lazy')

    for img in img_list:
        image = img.get('data-original')
        title = img.get('title')
        # Some <img> tags lack the lazy-load URL or a title; skip them
        # instead of crashing on None.
        if not image or not title:
            continue
        print('下载图片： ', title)

        # Titles can contain characters that are illegal in filenames
        # (/, \, :, *, ?, ", <, >, |).  The old code caught the resulting
        # OSError and `break`-ed, abandoning the remaining images of the
        # page; sanitize instead and keep going.
        safe_title = re.sub(r'[\\/:*?"<>|]', '_', title).strip()
        # Take the extension from the URL path only, so a query string
        # (?x=y) does not end up in the filename; default to .jpg.
        ext = os.path.splitext(urlsplit(image).path)[-1] or '.jpg'

        try:
            # Pass the browser headers here too: the site validates the
            # referer on image requests, and a timeout stops a stalled
            # download from hanging the worker thread forever.
            img_response = requests.get(image, headers=headers, timeout=30)
            if img_response.status_code == 200:
                with open(os.path.join(path, safe_title + ext), 'wb') as f:
                    f.write(img_response.content)
            else:
                print('下载失败：{}  {}'.format(img_response.url, img_response.status_code))
        except (OSError, requests.RequestException) as exc:
            # Log and continue with the next image on this page.
            print('下载失败：', image, exc)


if __name__ == '__main__':

    start_time = time()

    # Build every listing-page URL up front (pages 1..4328 inclusive).
    url_template = 'https://fabiaoqing.com/biaoqing/lists/page/{page}.html'
    page_urls = [url_template.format(page=n) for n in range(1, 4329)]

    task_queue = Queue()

    # Start ten daemon workers; daemon=True lets the process exit once the
    # main thread finishes, without joining the (infinite-loop) workers.
    for _ in range(10):
        worker = DownloadBiaoqingbao(task_queue)
        worker.daemon = True
        worker.start()

    # Feed every page URL to the workers.
    for page_url in page_urls:
        task_queue.put(page_url)

    # Block until each queued page has been marked task_done().
    task_queue.join()

    print('下载完毕耗时：  ', time() - start_time)