# -*- coding:utf-8 -*-
# 爬虫：
#     目标网站：https://www.duitang.com/search/?kw=%E7%BE%8E%E5%A5%B3&type=feed
#     任务：使用生产者消费者模式抓取前三页图片（不是前三组）

import os
import re
from queue import Empty
from queue import Queue
from threading import Thread
from urllib.parse import urlencode

import requests

# Browser-like User-Agent so duitang.com serves the search API to this
# script instead of rejecting it as an automated client.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36"
}


class PutThread(Thread):
    """Producer thread: takes search-page URLs from ``url_queue``, fetches
    each page, and pushes every image found as ``(filename, url)`` tuples
    onto ``img_queue``."""

    def __init__(self, url_queue, img_queue):
        """
        :param url_queue: Queue of search API URLs to fetch.
        :param img_queue: Queue receiving (title, image_url) tuples.
        """
        super().__init__()
        self.url_queue = url_queue
        self.img_queue = img_queue

    def run(self):
        while True:
            try:
                # Once the queue stays empty for 3 seconds, all pages have
                # been handed out and this producer can stop.
                url = self.url_queue.get(timeout=3)
            except Empty:
                break
            html = self.get_data_index(url)
            # A non-200 response yields None; skip it instead of crashing
            # re.findall (the old bare `except:` silently hid that crash).
            if html is not None:
                self.parse_data(html)

    def get_data_index(self, url):
        """Fetch one search page; return its text, or None on a non-200 status."""
        response = requests.get(url, headers=headers)
        response.encoding = "utf-8"
        if response.status_code == 200:
            return response.text
        else:
            return None

    def parse_data(self, resp):
        """Extract every ``"path":"..."`` image URL from the JSON-ish page text
        and enqueue it with its basename as the file title."""
        url_list = re.findall(r'"path":"(.*?)"', resp)
        for url in url_list:
            title = url.split("/")[-1]
            self.img_queue.put((title, url))


class GetThread(Thread):
    """Consumer thread: takes ``(title, url)`` tuples from ``img_queue``,
    downloads each image, and saves it under ``./image/``."""

    def __init__(self, url_queue, img_queue):
        """
        :param url_queue: Queue of page URLs (kept for a symmetric interface
                          with PutThread; not read by this thread).
        :param img_queue: Queue of (title, image_url) tuples to download.
        """
        super().__init__()
        self.url_queue = url_queue
        self.img_queue = img_queue

    def run(self):
        while True:
            try:
                # Once the queue stays empty for 3 seconds (producers are done
                # and everything has been consumed), this consumer stops.
                title, href = self.img_queue.get(timeout=3)
            except Empty:
                break
            try:
                # Download BEFORE opening the file, so a failed request does
                # not leave an empty/corrupt file behind (the original opened
                # the file first and swallowed the error with a bare except).
                content = requests.get(href, headers=headers).content
            except requests.RequestException:
                # One bad image must not kill the whole consumer thread.
                continue
            with open("./image/" + title + ".jpg", "wb") as f:
                f.write(content)
            print(title, "保存成功")


def main():
    """Build the queue of search-API page URLs, then start producer threads
    (page fetch + parse) and consumer threads (image download)."""
    # Search-page URLs waiting to be fetched (one entry per page).
    url_queue = Queue()
    # (filename, image-url) tuples produced by the parsers.
    img_queue = Queue()
    # The consumers write into ./image/; create it up front so the very
    # first save does not crash on a missing directory.
    os.makedirs("./image", exist_ok=True)

    base_url = "https://www.duitang.com/napi/blogv2/list/by_search/?"
    # NOTE(review): the file header says "first three pages" but this loop
    # enqueues 16 pages (after_id advances by 24 per page) — confirm the
    # intended page count.
    for num in range(0, 16):
        params = {
            "kw": "美女",
            "after_id": "{}".format(num * 24),
            "type": "feed",
            "include_fields": "top_comments,is_root,source_link,item,buyable,root_id,status,like_count,like_id,sender,album,reply_count,favorite_blog_id",
            "_type": "",
            "_": "1695453939175"
        }
        url_queue.put(base_url + urlencode(params))

    # Five producer/consumer pairs; join them all so the script only exits
    # after every queued image has been handled.
    workers = []
    for _ in range(5):
        producer = PutThread(url_queue, img_queue)
        producer.start()
        consumer = GetThread(url_queue, img_queue)
        consumer.start()
        workers.extend((producer, consumer))
    for worker in workers:
        worker.join()


# Run the scraper only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
