#https://www.cnblogs.com/nikecode/p/11130801.html
import requests

import threading   #多线程模块
from lxml import etree #xpath方式爬取
import time #时间模块

import os

# Shared mutable state. Every cross-thread access below is (or should be)
# guarded by g_lock; plain reads/writes before the threads start are safe.
all_img_urls = []       # detail-page URLs harvested from each list page (filled by Producer)
g_lock = threading.Lock()  # single global lock guarding all three shared lists

pic_links = []            # {title: image_src} dicts ready to download (filled by Consumer)

all_urls = []  # paginated list-page URLs built by Spider.getUrls before threads start

class Spider():
    """Builds the set of paginated list-page URLs that the Producer threads crawl."""

    def __init__(self, target_url, headers):
        # target_url is a %-style template with a single integer placeholder
        # for the page number; headers is kept for callers that want it.
        self.target_url = target_url
        self.headers = headers

    def getUrls(self, start_page, page_num):
        """Append the URLs for pages start_page..page_num (inclusive) to the
        module-global all_urls list. Runs before any worker thread starts, so
        no locking is required here."""
        global all_urls
        all_urls += [self.target_url % page
                     for page in range(start_page, page_num + 1)]


# 生产者，负责从每个页面提取图片列表链接
# Producer: drains the list-page queue and collects image-set (detail page) links.
class Producer(threading.Thread):
    """Worker thread that pops list-page URLs from the global all_urls queue,
    fetches each page, and appends every image-set link found
    (//a[@class='TypeBigPics']/@href) to the global all_img_urls list.

    The thread exits when all_urls is empty.
    """

    def run(self):
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',
            'HOST': 'www.umei.cc'
        }
        global all_urls
        global all_img_urls
        while True:
            # The emptiness check and the pop must happen under the SAME lock
            # acquisition: checking len() outside the lock races with the other
            # Producer thread and could make pop() raise IndexError on an
            # empty list, silently killing this thread.
            with g_lock:
                if not all_urls:
                    break
                page_url = all_urls.pop()  # take the last queued URL
            try:
                print("分析" + page_url)
                response = requests.get(page_url, headers=headers, timeout=2)
                html_data = etree.HTML(response.text)
                all_pic_link = html_data.xpath("//a[@class='TypeBigPics']/@href")
                print(all_pic_link)
                with g_lock:
                    # += extends the shared list in place under the lock
                    all_img_urls += all_pic_link
                time.sleep(0.5)  # be polite to the server
            except Exception as exc:
                # Best-effort: one bad page must not kill the thread, but do
                # log it instead of swallowing the error silently.
                print("Producer failed on", page_url, ":", exc)


# 消费者
# Consumer: turns detail-page URLs into {title: first-image-src} download jobs.
class Consumer(threading.Thread):
    """Worker thread that pops detail-page URLs from the global all_img_urls
    queue, extracts the article title and the first image source from each
    page, and appends a {title: src} dict to the global pic_links list.

    The thread exits when all_img_urls is empty.
    """

    def run(self):
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',
            'HOST': 'www.umei.cc'
        }
        global all_img_urls
        global pic_links
        # current_thread must be CALLED: the original printed the function
        # object itself instead of the running thread.
        print("%s is running " % threading.current_thread())
        while True:
            # Check-and-pop atomically under the lock; a len() check outside
            # the lock races with the other 9 Consumer threads and pop() could
            # raise IndexError outside the try block, killing this thread.
            with g_lock:
                if not all_img_urls:
                    break
                img_url = all_img_urls.pop()
            try:
                # timeout prevents a stalled server from hanging the thread forever
                response = requests.get(img_url, headers=headers, timeout=5)
                html_data = etree.HTML(response.content.decode())
                title = html_data.xpath("//div[@class='ArticleTitle']/strong/text()")
                all_pic_src = html_data.xpath("//div[@class='ImageBody']/p/a/img/@src")
                # IndexError here (caught below) means the page layout changed
                pic_dict = {title[0]: all_pic_src[0]}
                with g_lock:
                    pic_links.append(pic_dict)
                    print(pic_links)
            except Exception:
                print("有问题")
            time.sleep(0.5)  # be polite to the server


class DownPic(threading.Thread):
    """Worker thread that pops {title: image_url} dicts from the global
    pic_links queue and downloads each image into a directory named after
    the (stripped) title.

    NOTE: by design this loop never terminates — it keeps polling so it can
    pick up jobs the Consumer threads are still producing; the process only
    ends when killed externally.
    """

    def run(self):
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',
        }
        global pic_links
        while True:
            # Atomically take a job (or None) under the lock.
            with g_lock:
                pic = pic_links.pop() if pic_links else None
            if pic is None:
                # Queue momentarily empty: sleep briefly instead of
                # busy-spinning at 100% CPU like the original `continue`.
                time.sleep(0.2)
                continue
            # Each job dict maps one title to one image URL.
            for key, value in pic.items():
                print("==================", key, value)
                path = key.strip()  # title doubles as the target directory name
                if not os.path.exists(path):
                    os.makedirs(path)
                    print(path + '目录创建成功')
                else:
                    print(path + '目录已存在')
                filename = path + "/" + key + ".jpg"
                if os.path.exists(filename):
                    continue  # already downloaded, skip
                try:
                    # timeout so one dead server can't hang the thread;
                    # `with` closes the file even on a write error (the
                    # original's explicit f.close() inside `with` was redundant).
                    response = requests.get(url=value, headers=headers, timeout=5)
                    with open(filename, 'wb') as f:
                        f.write(response.content)
                except Exception as exc:
                    # One failed download must not kill the whole thread.
                    print("下载失败", value, exc)



if __name__ == "__main__":
    # Default request headers for the crawl target.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',
        'HOST': 'www.umei.cc',
    }
    # %d is replaced with the page number for each paginated list page.
    target_url = "http://www.umei.cc/bizhitupian/meinvbizhi/%d.htm"

    # Build the list-page URL queue (pages 1..16) before any thread starts.
    spider = Spider(target_url, headers)
    spider.getUrls(1, 16)

    # Stage 1: two producer threads scan the list pages; wait for them all.
    producers = [Producer() for _ in range(2)]
    for worker in producers:
        worker.start()
    for worker in producers:
        worker.join()

    print("进行到我这里了")

    # Stage 2: ten consumer threads resolve detail pages into download jobs.
    for _ in range(10):
        Consumer().start()

    # Stage 3: ten downloader threads fetch the images concurrently.
    for _ in range(10):
        DownPic().start()