# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import random
from copy import deepcopy

from apps.tax_news.tax_news.spiders import DEFAULT_TAX_NEWS_IMAGE_COVER, DEFAULT_INDUSTRY_NEWS_IMAGE_COVER
from components.pipelines.public.mysql_pipeline import MysqlPipeline


class TaxNewsPipeline(MysqlPipeline):
    """MySQL pipeline that back-fills missing cover images after a crawl.

    When a spider finishes, rows written today whose ``image_list`` is empty
    or NULL are updated with a randomly chosen default cover image.
    """

    # Class-level default only. get_images() replaces it with a per-instance
    # deepcopy before the first pop, so this shared list is never mutated.
    tmp_image_list = []

    # spider-name suffix -> (target table, image pool selector).
    # NOTE: order matters for endswith() matching; "tax_news" is checked first,
    # mirroring the original if/elif chain.
    _CLEANUP_TARGETS = (
        ("tax_news", "net_tax_news", "tax"),
        ("industry_news", "net_industry_news", "industry"),
    )

    def close_spider(self, spider):
        """Close DB resources, then assign default covers to image-less rows.

        Args:
            spider: the spider that just finished; its name suffix decides
                which news table is cleaned.
        """
        super().close_spider(spider)
        spider.logger.info(f"{spider.name} 采集执行完毕 开始清洗数据")
        for suffix, table, image_type in self._CLEANUP_TARGETS:
            if spider.name.endswith(suffix):
                self._fill_missing_images(spider, table, image_type)
                break
        spider.logger.info(f"{spider.name} 采集执行完毕 清洗数据完成")

    def _fill_missing_images(self, spider, table, image_type):
        """Give today's rows without an image_list a random default cover.

        Selects rows updated since midnight whose ``image_list`` is an empty
        JSON array or NULL, then writes them back in batches of 1000 with a
        one-element image list.

        Args:
            spider: spider whose logger receives progress messages.
            table: target table name (``net_tax_news`` or ``net_industry_news``).
            image_type: pool selector forwarded to :meth:`get_images`.
        """
        # `table` comes from the hard-coded _CLEANUP_TARGETS tuple, never from
        # user input, so interpolating it into the SQL is safe here.
        sql = f"""select *
        from {table}
        where update_time > current_date()
        and (JSON_LENGTH(image_list) = 0 or image_list is null)
        order by publish_time desc """
        datas = self.to_db.find(sql, to_json=True)
        spider.logger.info(f"{spider.name} 清洗数据数量 {len(datas)}")
        batch_size = 1000
        for start in range(0, len(datas), batch_size):
            sub_data = datas[start:start + batch_size]
            for row in sub_data:
                row["image_list"] = [self.get_images(image_type)]
            self.to_db.add_batch_smart(table, sub_data, update_columns=["image_list"])

    def get_images(self, image_type):
        """Return one image URL drawn from the shuffled default-cover pool.

        The pool is refilled from the matching module-level default list
        whenever it runs dry, so every cover is handed out once before any
        repeats.

        Args:
            image_type: ``"tax"`` selects the tax-news defaults; any other
                value selects the industry-news defaults.

        Returns:
            A single image URL popped from the pool.
        """
        if not self.tmp_image_list:
            if image_type == "tax":
                source = DEFAULT_TAX_NEWS_IMAGE_COVER
            else:
                source = DEFAULT_INDUSTRY_NEWS_IMAGE_COVER
            # deepcopy so popping never mutates the shared module-level list.
            self.tmp_image_list = deepcopy(source)
        random.shuffle(self.tmp_image_list)
        return self.tmp_image_list.pop()
