# -*- coding: utf-8 -*-
# @Author: yqbao
# @GiteeURL: https://gitee.com/yqbao
# @Date: 2019/8/26 12:33
# @Version: v.0.0
"""
多线程爬取豆瓣图书 TOP 250
"""
import time
from queue import Empty
from queue import Queue
from threading import Lock
from threading import Thread

import pymongo
import requests
from pyquery import PyQuery as Pq

client = pymongo.MongoClient(host='localhost', port=27017)  # initialize the MongoDB client (local default port)
db = client['douban']  # select the 'douban' database
collection = db['book_top250']  # collection that will hold the scraped TOP 250 book records


class Producer(Thread):
    """Producer thread: fetches Douban Book TOP 250 pages and parses books.

    Pulls page URLs from ``page_queue``, downloads each page, extracts the
    per-book fields with PyQuery, and pushes one dict per book onto
    ``book_queue`` for the consumers to store.
    """
    headers = {
        'Cookie': '',
        'Host': 'book.douban.com',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36',
    }

    def __init__(self, page_queue, book_queue, *args, **kwargs):
        super(Producer, self).__init__(*args, **kwargs)
        self.page_queue = page_queue  # queue of page URLs still to crawl
        self.book_queue = book_queue  # queue receiving parsed book dicts

    def run(self):
        while True:
            if self.page_queue.empty():  # no pages left -> this worker is done
                break
            url = self.page_queue.get()  # take one page URL off the queue
            try:
                # timeout so a stalled connection cannot hang this worker forever
                req = requests.get(url=url, headers=self.headers, timeout=10)
                if req.status_code == 200:  # parse only successful responses
                    doc = Pq(req.content.decode('utf-8'))  # build the PyQuery document
                    items = doc('table tr.item')  # one <tr class="item"> per book
                    for item in items.items():  # extract the fields we store
                        book_dict = {
                            'title': item.find('td div.pl2 a').text(),
                            'image': item.find('td a.nbg img').attr('src'),
                            'writer': item.find('td p.pl').text(),
                            # key was 'evaluate ' (trailing space) in the original,
                            # which polluted every stored document -- fixed here
                            'evaluate': item.find('div.star span.pl').text(),
                            'quote': item.find('p.quote span.inq').text(),
                        }
                        self.book_queue.put(book_dict)  # hand off to consumers
                time.sleep(2)  # be polite: pause 2s after each page
            except requests.exceptions.RequestException:
                print('页面获取错误')


class Consumer(Thread):
    """Consumer thread: drains parsed book dicts and stores them in MongoDB.

    Exits once both the page queue and the book queue are empty, i.e. the
    producers have finished and every parsed book has been stored.
    """

    # One lock shared by ALL Consumer instances. The original code did
    # Lock().acquire() / Lock().release(), creating a fresh Lock each call,
    # which synchronized nothing and leaked an acquired lock on exception.
    lock = Lock()

    def __init__(self, page_queue, book_queue, *args, **kwargs):
        super(Consumer, self).__init__(*args, **kwargs)
        self.page_queue = page_queue  # watched only to decide when to stop
        self.book_queue = book_queue  # queue of book dicts to persist

    def run(self):
        while True:
            if self.book_queue.empty() and self.page_queue.empty():
                break  # producers done and nothing left to store
            try:
                # Bounded get: if producers finish between the empty() check
                # above and this call, a plain blocking get() would deadlock.
                result = self.book_queue.get(timeout=5)
            except Empty:
                continue  # re-check the exit condition
            try:
                # `with` guarantees the lock is released even if insert raises
                with self.lock:
                    if collection.insert_one(result):  # store one book document
                        print('存储到MongoDB成功')
            except Exception as e:
                print(e.args)


def main():
    """Fill the URL queue, start producer/consumer threads, wait for them.

    The original started the threads and returned immediately, relying on
    non-daemon threads to keep the process alive; joining makes completion
    explicit and lets callers know when the crawl has finished.
    """
    page_queue = Queue(10)   # page URLs: 10 pages x 25 books each
    book_queue = Queue(100)  # parsed book dicts awaiting storage
    for page in range(10):  # enqueue every TOP 250 page URL
        page_queue.put('https://book.douban.com/top250?start={}'.format(page * 25))
    workers = []
    for _ in range(5):  # start 5 producer threads
        producer = Producer(page_queue, book_queue)
        producer.start()
        workers.append(producer)
    for _ in range(5):  # start 5 consumer threads
        consumer = Consumer(page_queue, book_queue)
        consumer.start()
        workers.append(consumer)
    for worker in workers:  # block until crawling and storage are done
        worker.join()


if __name__ == '__main__':
    main()  # run the crawler only when executed as a script, not on import
