import scrapy

from rankinglistspider.db import db


class BaseSpider(scrapy.Spider):
    """Base spider for ranking-list sites.

    Subclasses must define ``site_id`` and override :meth:`parse`.  When a
    crawl finishes, :meth:`close` rebuilds the ``ranking_data_latest``
    collection for that site from the freshest ``ranking_data`` documents.
    """

    def __init__(self, name=None, **kwargs):
        super().__init__(name, **kwargs)

    def parse(self, response):
        # Intentionally a no-op: concrete spiders provide the parsing logic.
        pass

    @staticmethod
    def close(spider, reason):
        """Spider-closed hook: refresh the ``latest`` table for this site.

        :param spider: the spider instance that finished (must expose
            ``site_id``)
        :param reason: close reason string passed through to
            ``scrapy.Spider.close``
        :returns: whatever the base ``Spider.close`` returns
        """
        # spider.log() is deprecated; use the per-spider logger with lazy
        # %-style args.
        spider.logger.info('%s crawl completed', spider.name)
        site_id = spider.site_id

        # Drop every existing "latest" item for this site in one server-side
        # call.  This replaces the old find()-then-remove()-per-document loop
        # (Collection.remove/insert were removed in pymongo 4) and also makes
        # the "copy the cursor into a list first" workaround unnecessary.
        db.ranking_data_latest.delete_many({'site_id': site_id})

        # For this site: sort ranking_data newest-first, group by category_id,
        # and keep the 10 most recent item ids per category.
        new_news_items = db.ranking_data.aggregate(
            [
                {'$match': {'site_id': site_id}},
                {'$sort': {'create_time': -1}},
                {'$group': {'_id': '$category_id', 'items': {'$push': '$_id'}}},
                {'$project': {'items': {'$slice': ["$items", 10]}}}
            ]
        )

        # Copy the selected documents into the latest table.
        for new_news_items_group in new_news_items:
            for new_news_item_id in new_news_items_group['items']:
                doc = db.ranking_data.find_one({'_id': new_news_item_id})
                # Guard: the source document may have been deleted between
                # the aggregation and this lookup.
                if doc is not None:
                    db.ranking_data_latest.insert_one(doc)

        return super().close(spider, reason)
