import requests
from bs4 import BeautifulSoup  # HTML parsing
import threading  # multithreading
import re  # regex matching
import time  # timestamps

all_urls = []  # every assembled listing-page URL, consumed by Producer threads
all_img_urls = []  # every extracted full-size image URL, consumed by DownPic threads
g_lock = threading.Lock()  # guards all access to the two shared lists above


class Spider():
    """Builds the full list of paginated listing URLs and stores them in all_urls."""

    def __init__(self, target_url, headers):
        # target_url: printf-style template with a %d slot for the page number
        self.target_url = target_url
        # headers: HTTP headers (User-Agent) sent with the request
        self.headers = headers

    # Collect every listing-page URL to scrape.
    def getUrls(self):
        """Fetch page 1, read the last-page link to learn the page count,
        then append every page URL to the shared all_urls list."""
        # BUG FIX: the original read the module globals `target_url`/`headers`
        # instead of the instance attributes, so the class only worked when
        # driven from this script's __main__ scope.
        response = requests.get(self.target_url % 1, headers=self.headers).text
        html = BeautifulSoup(response, 'html.parser')
        # The 'wrap no_a' element is the "last page" control; its href carries
        # the final page number. NOTE(review): find() returns None if the site
        # markup changes — this would then raise AttributeError.
        res = html.find(class_='wrap no_a').attrs['href']
        # Raw string avoids the invalid-escape-sequence warning for \d.
        page_num = int(re.findall(r'(\d+)', res)[0])
        global all_urls
        # No lock needed here: this runs before any worker threads start.
        for i in range(1, page_num + 1):
            all_urls.append(self.target_url % i)


# 负责提取图片链接
class Producer(threading.Thread):

    def run(self):
        headers = {
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0'
        }
        global all_urls
        while len(all_urls) > 0:
            g_lock.acquire()  # 在访问all_urls的时候，需要使用锁机制
            page_url = all_urls.pop(0)  # 通过pop方法移除第一个元素，并且返回该值
            g_lock.release()  # 使用完成之后及时把锁给释放，方便其他线程使用
            try:
                print("分析" + page_url)
                response = requests.get(page_url, headers=headers, timeout=3).text
                html = BeautifulSoup(response, 'html.parser')
                pic_link = html.find_all(class_='egeli_pic_li')[:-1]
                global all_img_urls
                g_lock.acquire()  # 这里还有一个锁
                for i in pic_link:
                    link = i.find('img')['src'].replace('edpic_360_360', 'edpic_source')
                    all_img_urls.append(link)
                g_lock.release()  # 释放锁
                # time.sleep(0.1)
            except:
                pass


class DownPic(threading.Thread):
    """Worker thread: pops image URLs from all_img_urls and saves each image
    under D:\\test\\ with a timestamp-based filename."""

    def run(self):
        headers = {
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0'
        }
        global all_img_urls
        while True:
            # Atomic check-and-pop under the lock; the lock is always
            # released because `with` handles every exit path.
            with g_lock:
                if not all_img_urls:
                    # Producers have finished before these threads start, so
                    # an empty list means all work is done.
                    break
                pic = all_img_urls.pop(0)
                down_time = str(round(time.time() * 1000))  # millisecond timestamp
            # BUG FIX: a millisecond timestamp alone collides when several of
            # the 20 threads name files in the same millisecond, silently
            # overwriting downloads; the thread id makes the name unique.
            pic_name = 'D:\\test\\' + down_time + '_' + str(threading.get_ident()) + '.jpg'
            try:
                # BUG FIX: added a timeout and error handling so one bad URL
                # neither hangs nor kills the worker thread.
                response = requests.get(pic, headers=headers, timeout=10)
                # `with` closes the file; the original's explicit f.close()
                # inside the with-block was redundant.
                with open(pic_name, 'wb') as f:
                    f.write(response.content)
                print(pic_name + '   已下载完成！！！')
            except Exception as e:
                print(pic_name + ' 下载失败: ' + repr(e))


if __name__ == "__main__":

    headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0'
    }

    target_url = 'https://tu.enterdesk.com/chongwu/%d.html'  # listing-page URL template (%d = page number)

    # Phase 1: single-threaded — build the list of listing-page URLs.
    print('开始获取所有图片页链接！！！')
    spider = Spider(target_url, headers)
    spider.getUrls()
    print('完成获取所有图片页，开始分析图片链接！！！')

    # Phase 2: 10 producer threads extract image URLs from the listing pages.
    threads = []
    for x in range(10):
        gain_link = Producer()
        gain_link.start()
        threads.append(gain_link)

    # Block until every producer finishes so all_img_urls is fully populated
    # before the download phase begins.
    for tt in threads:
        tt.join()

    # Phase 3: 20 downloader threads fetch and save the images.
    # NOTE(review): these threads are not join()ed; the process still waits
    # for them to finish because they are non-daemon threads.
    print('分析图片链接完成，开始多线程下载！！！')
    for x in range(20):
        download = DownPic()
        download.start()
