# -*- coding:utf-8 -*-
# 多线程写法
"""
第一页：https://sc.chinaz.com/tupian/index.html
第二页：https://sc.chinaz.com/tupian/index_2.html
第三页：https://sc.chinaz.com/tupian/index_3.html

1. 创建两个队列对象
    -- 第一个队列存放前十页的url地址
    -- 第二个队列存放图片的地址和名称
2.
"""
import os
from queue import Empty, Queue
from threading import Thread

import requests
from bs4 import BeautifulSoup

# Browser-like User-Agent sent with every request so the site does not
# reject the scraper as an obvious bot client.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36"
}


# 生产者类
# Producer thread: fetches listing pages from url_queue and feeds
# (title, image_url) pairs into img_queue for the consumers.
class Put_Thread(Thread):
    def __init__(self, url_queue, img_queue):
        """url_queue: input queue of listing-page URLs.
        img_queue: output queue receiving (title, image_url) tuples."""
        super().__init__()
        self.url_queue = url_queue
        self.img_queue = img_queue

    def run(self):
        # Drain url_queue without blocking. The original pattern —
        # blocking get() followed by an empty() check — deadlocks when
        # another producer thread takes the last URL between this
        # thread's empty() check and its next blocking get().
        while True:
            try:
                url = self.url_queue.get_nowait()
            except Empty:
                break
            html = self.get_data_index(url)
            # get_data_index returns None on failure; don't hand None
            # to BeautifulSoup (it would raise).
            if html is not None:
                self.parse_data(html)

    def get_data_index(self, url):
        """Request one listing page; return its HTML text, or None on a
        non-200 response."""
        response = requests.get(url, headers=headers)
        response.encoding = "utf-8"
        if response.status_code == 200:
            return response.text
        return None

    def parse_data(self, response):
        """Parse a listing page and enqueue every (title, full-size image
        URL) pair found in its lazy-loaded <img> tags."""
        soup = BeautifulSoup(response, "lxml")
        for img in soup.find_all("img", class_="lazy"):
            title = img.get("alt")
            # "_s" marks the thumbnail variant; stripping it yields the
            # full-size image URL.
            href = "https:" + img.get("data-original").replace("_s", "")
            self.img_queue.put((title, href))


# 消费者类
# Consumer thread: takes (title, image_url) pairs from img_queue and
# saves each image under ./image/<title>.jpg.
class Get_Thread(Thread):
    def __init__(self, url_queue, img_queue):
        """url_queue is kept for interface symmetry with Put_Thread;
        only img_queue is consumed here."""
        super().__init__()
        self.url_queue = url_queue
        self.img_queue = img_queue

    def run(self):
        # Make sure the target directory exists; the original crashed
        # (and silently exited via the bare except) when it was missing.
        os.makedirs("./image", exist_ok=True)
        while True:
            try:
                # Stop once the queue has been idle for 3 seconds — the
                # producers have finished and everything is drained.
                title, href = self.img_queue.get(timeout=3)
            except Empty:
                break
            try:
                content = requests.get(href, headers=headers).content
                with open("./image/" + title + ".jpg", "wb") as f:
                    f.write(content)
                print(title, "保存成功")
            except (requests.RequestException, OSError) as e:
                # One failed download must not kill the whole consumer;
                # the original bare `except: break` hid such errors and
                # stopped all remaining downloads.
                print(title, "下载失败", e)


def main():
    """Queue up listing pages 1-10, start producer/consumer threads, and
    wait for all downloads to finish."""
    url_queue = Queue()   # listing-page URLs for the producers
    img_queue = Queue()   # (title, image_url) pairs for the consumers

    # Page 1 has no numeric suffix; pages 2-10 use index_N.html.
    url_queue.put("https://sc.chinaz.com/tupian/index.html")
    for num in range(2, 11):
        url_queue.put("https://sc.chinaz.com/tupian/index_{}.html".format(num))

    # Create the output directory once, up front, so no consumer thread
    # fails on a missing ./image directory.
    os.makedirs("./image", exist_ok=True)

    threads = []
    for _ in range(10):
        producer = Put_Thread(url_queue, img_queue)
        consumer = Get_Thread(url_queue, img_queue)
        producer.start()
        consumer.start()
        threads.extend((producer, consumer))

    # Join so main() only returns after all pages are parsed and all
    # images are saved (rather than relying on non-daemon thread exit).
    for t in threads:
        t.join()


if __name__ == '__main__':
    main()

