# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
import logging

import requests
import json
from datetime import datetime

from bson import ObjectId
from itemadapter import ItemAdapter
from pymongo.collection import Collection

from .utils import date, hash_md5, pubdate_common, push_data, prettify_xpath_html
from .package.database import *
from .package.database.db_mongo import db
from .package.bloom_redis import BloomFilter
from .settings import KEY_DUPLICATE

# Maps a spider's site_type to the reporting-API endpoint its items are
# pushed to (used by NewsSaveMongodbPipeline via push_data).
api_url_map = {
    "post": 'http://120.79.55.91:80/api/crawler/posts',
    "info": 'http://120.79.55.91:80/api/crawler/information',
    "quick": 'http://120.79.55.91:80/api/crawler/newsflash'
}


class DB_MAP:
    '''
    Shared MongoDB collection handles for the pipelines.

    The class attributes are pymongo Collection objects bound at import
    time.  Instantiating the class (done once, right below) ensures the
    unique 'uuid' indexes exist so duplicate items are rejected on insert.
    '''
    db_info = Collection(db, 'info')
    db_post = Collection(db, 'post')
    db_quick = Collection(db, 'quick')

    def __init__(self):
        self.create_index()

    def create_index(self, *args, **kwargs):
        '''Create a unique index on 'uuid' in every collection to
        prevent duplicate records from being stored.'''
        # One loop instead of three copy-pasted create_index calls.
        for collection in (self.db_info, self.db_post, self.db_quick):
            collection.create_index([('uuid', 1)], unique=True)


# Build the indexes once at module import time.
DB_MAP()

def post_spider_data(url, items, timeout=30):
    '''
    Submit one crawled item to the reporting API.

    :param url: endpoint URL (one of the ``api_url_map`` values)
    :param items: a single item (dict); it is wrapped as {"list": [items]}
    :param timeout: request timeout in seconds.  The original call passed
        no timeout, so a stalled server could hang the crawler forever.
    :return: the decoded JSON response body
    '''
    json_data = {
        "list": [items]
    }
    # NOTE(review): hard-coded API token — should be loaded from
    # settings / environment rather than committed to source.
    headers = {
        'Authorization': '4e866ec420b78f3b8b6e65491cdbc149',
        'Content-Type': 'application/json'
    }
    response = requests.post(url, headers=headers, json=json_data, timeout=timeout)
    return response.json()


class NewsPreFixPipeline:
    '''
    Pre-process items before they are stored.

    Adds:
      * ``uuid``        - MD5(originalUrl + content), the dedup key matched
        by the unique index created in DB_MAP.  (The previous docstring
        wrongly claimed MD5(site name + title).)
      * ``spider_time`` - crawl timestamp, "%Y-%m-%d %H:%M:%S".
      * ``images``      - defaulted to [] when absent.

    Items from spiders without a ``site_type`` attribute pass through
    unchanged.
    '''
    def process_item(self, item, spider):
        site_type = spider.__dict__.get('site_type')
        if not site_type:
            # Not one of our typed spiders — leave the item untouched.
            return item
        # Unique content key: source URL + body text, hashed.
        raw_key = item.get('originalUrl', '') + item['content']
        item['uuid'] = hash_md5(raw_key)
        item['spider_time'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

        item.setdefault('images', [])  # default value
        return item


class NewsSaveMongodbPipeline:
    '''
    Persist items to MongoDB and report them to the crawler API.

    Flow: validate site_type -> translate/clean the HTML body (uploading
    images to OSS) -> insert into the matching collection -> push the
    stored record to the reporting endpoint.
    '''
    # site_type -> MongoDB collection handle; keys deliberately mirror
    # api_url_map so the membership check below covers both lookups.
    _collection_map = {
        'post': DB_MAP.db_post,
        'quick': DB_MAP.db_quick,
        'info': DB_MAP.db_info,
    }

    def open_spider(self, spider):
        pass

    def process_item(self, item, spider):
        site_type = spider.__dict__.get('site_type')

        if site_type not in api_url_map:
            # Fixed log typo: previously printed "size_type".
            logging.info(f'错误的site_type类型: {site_type}')
            return item

        # Translate and upload embedded images to OSS.
        new_body, images = prettify_xpath_html(item['content'], spider.language, 'ko')
        item['images'] = images
        item['content'] = new_body

        # Store in the collection matching the item's site_type
        # (dispatch dict replaces the old if/elif chain).
        inserted_id = self._collection_map[site_type].insert_one(item).inserted_id

        item['id'] = str(inserted_id)
        item['_id'] = str(inserted_id)

        # Report the stored item upstream.
        code, result = push_data.push_data_api(api_url_map[site_type], dict(item))
        # Fixed log typo: previously printed "size_type".
        logging.info(f'site_type: {site_type} insert_one: {inserted_id}, api_result: {result}')
        return item

    def close_spider(self, spider):
        pass


class NewsSaveRedisPipeline:
    '''
    Record each item's source URL (originalUrl) in Redis for dedup.

    The MD5 of the URL is added to the KEY_DUPLICATE set; 'quick'
    (newsflash) items are never recorded.
    '''
    def __init__(self):
        pass

    def process_item(self, item, spider):
        source_url = item.get('originalUrl', '')
        site_type = spider.__dict__.get('site_type')
        if not site_type:
            logging.info('site_type error：{} {}'.format(spider.name, site_type))
            return item
        # Hash once instead of twice; check the cheap site_type condition
        # before making a Redis round-trip.
        url_hash = hash_md5(source_url)
        # Fix: the original called redis_client.sysmenber(), an apparent
        # typo for sismember() that would raise AttributeError at runtime
        # (TODO confirm redis_client is the standard redis-py client).
        if site_type != 'quick' and not redis_client.sismember(KEY_DUPLICATE, url_hash):
            redis_client.sadd(KEY_DUPLICATE, url_hash)
            logging.info('add redis success：{}'.format(source_url))
        # Fix: a Scrapy pipeline must return the item so later pipelines
        # and the engine receive it; the original implicitly returned None.
        return item
