import os

import requests
import threading  # worker threads
import re  # regular-expression extraction of links/titles
import time  # timing utilities

# Shared work queues (plain lists guarded by g_lock below)
# Page-index URLs to crawl: filled by First_get, drained by Producer threads
imgs_url = []
# Gallery-page links: filled by Producer threads, drained by Consumer threads
all_info = []
# Per-gallery {title: [image urls]} dicts: filled by Consumer, drained by Img_Down
pic_links = []
# Single global lock guarding all three shared lists
g_lock = threading.Lock()


class First_get(object):
    """Builds the list of paginated index URLs that the crawler will visit."""

    def __init__(self, target_url, headers):
        # target_url is a template containing a %d page-number placeholder.
        self.target_url = target_url
        self.headers = headers

    def getUrls(self, start_page, page_num):
        """Append the URLs for pages start_page..page_num (inclusive) to the
        module-level imgs_url list."""
        global imgs_url
        page = start_page
        while page <= page_num:
            imgs_url.append(self.target_url % page)
            page += 1


# 生产者，负责从每个页面提取图片列表链接
class Producer(threading.Thread):
    def run(self):
        headers = {
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0',
            'HOST': 'www.meizitu.com'
        }
        global imgs_url
        while len(imgs_url) > 0:
            # 再访问的时候上锁
            g_lock.acquire()
            # 通过pop移除最后一个元素，并且返回这个值
            page_url = imgs_url.pop()
            # 释放锁
            g_lock.release()
            try:
                # 异常捕获
                print("分析" + page_url)
                response = requests.get(page_url, headers=headers, timeout=3)
                all_pic_link = re.findall('<a target=\'_blank\' href="(.*?)">', response.text, re.S)
                global all_info
                g_lock.acquire()
                all_info += all_pic_link
                print(all_info)
                g_lock.release()
            except:
                print("出错了")
                # time.sleep(0.5)


# 消费者
class Consumer(threading.Thread):
    def run(self):
        headers = {
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0',
            'HOST': 'www.meizitu.com'
        }
        global all_info
        print("%s is running" % threading.current_thread)
        while len(all_info) > 0:
            g_lock.acquire()
            img_url = all_info.pop()
            g_lock.release()
            try:
                response = requests.get(img_url, headers=headers)
                # 获取的网站编码是gb2312
                response.encoding = "gb2312"
                title = re.search('<title>(.*?) | 妹子图</title>', response.text).group(1)
                all_pic_src = re.findall('<img alt=.*?src="(.*?)" /><br />', response.text, re.S)
                pic_dict = {title: all_pic_src}  # 以字典存贮
                global pic_links
                g_lock.acquire()
                pic_links.append(pic_dict)
                print(title + "成功获取")
                g_lock.release()
            except:
                print("有毛病")
                # time.sleep(0.5)


# 存放图片
class Img_Down(threading.Thread):
    def run(self):
        headers = {
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0',
            'HOST': 'mm.chinasareview.com',
        }
        while True:
            global pic_links
            g_lock.acquire()
            if len(pic_links) == 0:
                g_lock.release()
                continue
            else:
                pic = pic_links.pop()
                g_lock.release()
                # 迭代数据
                for key, values in pic.items():
                    path = key.rstrip("\\")
                    is_exists = os.path.exists(path)
                    # 判断结果
                    if not is_exists:
                        # 如果不存再目录就创建目录
                        os.makedirs(path)
                        print(path + "目录创建成功")
                    else:
                        # 如果目录存在就不创建
                        print("目录已存在")
                    for pic in values:
                        filename = path + "/" + pic.split("/")[-1]
                        if os.path.exists(filename):
                            continue
                        else:
                            try:
                                response = requests.get(pic, headers=headers)
                                with open(filename, "wb") as f:
                                    f.write(response.content)
                            except Exception as e:
                                print(e)
                                pass


def main():
    """Entry point: build the page list, then run the scrape/download stages."""
    headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0',
        'HOST': 'www.meizitu.com'
    }
    # Pagination pattern of the target site:
    #   http://www.meizitu.com/a/pure_1.html
    #   http://www.meizitu.com/a/pure_2.html
    #   http://www.meizitu.com/a/pure_3.html
    #   http://www.meizitu.com/a/pure_4.html
    target_url = 'http://www.meizitu.com/a/pure_%d.html'
    # Collect the index-page URLs for pages 5..10.
    spider = First_get(target_url, headers)
    spider.getUrls(5, 10)

    # Stage 1: two producer threads extract gallery links; wait for them all.
    producers = [Producer() for _ in range(2)]
    for producer in producers:
        producer.start()
    for producer in producers:
        producer.join()

    # Stage 2: consumers scrape image URLs per gallery (not joined).
    for _ in range(10):
        Consumer().start()
    # Stage 3: downloaders save the images (not joined).
    for _ in range(10):
        Img_Down().start()
    print("到这里了")


if __name__ == '__main__':
    main()
