# -*- coding: utf-8 -*-
# Created on 2018-02-27 19:15:01
# Project: abc_base_spider

import datetime
import uuid

from plugin.configs import configs
from plugin.utils.common_utils import get_today_str, get_time, dumps_resault
from pyspider.libs.base_handler import BaseHandler


class AbcBaseHandler(BaseHandler):
    """Shared base handler for ABC pyspider crawlers.

    Provides Redis-backed de-duplication (``<filter_prefix>_<source_identity>``
    sets), per-task harvest counters (``<log_prefix>_<day>`` hashes) and a
    common :meth:`log_new_data` pipeline that validates, enriches and pushes
    crawled records onto the downloader queue.

    NOTE(review): methods assume ``self.r_db`` is a redis-py style client set
    up by a subclass or the runtime — confirm against the concrete handlers.
    """

    source_identity = 'abc_base'   # crawler source name, overridden by subclasses
    ds_id = '0'                    # data-source id, overridden by subclasses
    filter_prefix = 'cr_filter'    # prefix of the Redis de-dup set name
    log_prefix = 'cr_log'          # prefix of the Redis counter hash name
    # Common Content-Type values, see http://www.runoob.com/http/http-content-type.html
    CONTENT_TYPE_PDF = 'application/pdf'
    CONTENT_TYPE_HTML = 'text/html'
    CONTENT_TYPE_XLS = 'application/vnd.ms-excel'
    CONTENT_TYPE_XLSX = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
    CONTENT_TYPE_DOC = 'application/msword'
    CONTENT_TYPE_DOCX = 'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
    CONTENT_TYPE_JPG = 'application/x-jpg'
    CONTENT_TYPE_PNG = 'application/x-png'

    # save_type values understood by the downstream consumer
    SAVE_FILE = 'file'   # download the file, then upload it
    SAVE_TEXT = 'text'   # upload the text payload directly
    is_china_timezone = True

    # Global crawl configuration; applied to every generated task.
    crawl_config = {
        "itag": 'v1',
        "headers": {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
        },
        "retries": 5,
        "timeout": 120,
        "connect_timeout": 120,
        "validate_cert": False,
    }

    def gen_task_seq(self):
        """Return a task sequence id of the form ``<source_identity>_<YYYYmmddHHMMSS>``."""
        return self.source_identity + '_' + datetime.datetime.now().strftime('%Y%m%d%H%M%S')

    def get_filter_name(self):
        """Return the Redis set name used for de-duplicating this source."""
        return self.filter_prefix + '_' + self.source_identity

    def get_log_name(self, day=None):
        """Return the Redis hash name holding harvest counters for *day*.

        :param day: day string; defaults to today (``get_today_str()``)
        """
        if day is None:
            day = get_today_str()
        return self.log_prefix + '_' + day

    def init_crawler_log(self, task_seq):
        """Initialize today's harvest counter for *task_seq* to zero."""
        self.r_db.hset(self.get_log_name(), task_seq, 0)

    def is_data_existed(self, *args):
        """Return True if ANY of the given keys is already in the de-dup set."""
        for key in args:
            if self.r_db.sismember(self.get_filter_name(), key):
                return True
        return False

    def add_new_data(self, key):
        """Register *key* in the de-dup set without queueing any data."""
        self.r_db.sadd(self.get_filter_name(), key)

    def log_new_data(self, redis_data, task_seq, key, queue=configs['redis_table'], log_name=None, save_type=None,
                     extra_filter=None):
        """Validate, enrich and enqueue a newly harvested record.

        :param redis_data: dict to be serialized onto the queue; must contain
            ``item`` and ``requests_data``
        :param task_seq: task sequence id (see :meth:`gen_task_seq`)
        :param key: de-dup key — either a ``file_url`` string, or a dict
            (which must contain ``file_url``) for compound de-duplication
        :param queue: target queue name; override for special pipelines
        :param log_name: counter hash name; defaults to today's (see
            :meth:`get_log_name`)
        :param save_type: ``file`` to download and upload, ``text`` to upload
            the text directly; defaults by source (``notice_law*`` -> text,
            everything else -> file)
        :param extra_filter: list/tuple of extra de-dup keys, e.g. law titles
        :raises Exception: when :meth:`check_data` rejects the record; the
            record is pushed onto the ``abc_error_data`` list for inspection
        """
        # Sanity-check the record before touching any shared state.
        is_correct, check_msg = self.check_data(redis_data)

        if not is_correct:
            redis_data['error_msg'] = check_msg
            self.r_db.rpush('abc_error_data', dumps_resault(redis_data))
            raise Exception('redis_data is error %s' % check_msg)

        trace_id = redis_data.get('trace_id', None)
        if trace_id is None:
            trace_id = str(uuid.uuid1()).replace('-', '')
        if log_name is None:
            log_name = self.get_log_name()

        redis_data['task_seq'] = task_seq
        redis_data['source_identity'] = self.source_identity
        redis_data['crawl_time'] = get_time()
        redis_data['trace_id'] = trace_id

        # Default save_type: notice_law sources carry inline text, everything
        # else is a downloadable file.
        if save_type is None:
            if self.source_identity.startswith('notice_law'):
                save_type = self.SAVE_TEXT
            else:
                save_type = self.SAVE_FILE
        redis_data['save_type'] = save_type

        redis_data['key'] = key
        redis_data['source_id'] = self.ds_id
        redis_data['is_china_timezone'] = self.is_china_timezone

        item = redis_data['item']
        self._correct_item(item)
        # Propagate the trace id into the item for end-to-end tracing.
        item['trace_id'] = trace_id
        item['source_id'] = self.ds_id

        # callback_mongo: the downloader updates this MongoDB record after
        # the download/upload finishes. Expected shape:
        # {
        #     'mongo_url': configs['mongo_cr']['mongo_uri'],  # required
        #     'mongo_db': configs['mongo_cr']['mongo_db'],    # required
        #     'mongo_col': 'hb_files_new',                    # required
        #     'fields': fields_utils.DOWNLOADED_FIELDS,  # defaults to all item fields
        #     'key': {'file_url': file_url}              # defaults to file_url
        # }
        # Default callback target: the original_data collection of this source.
        if redis_data.get('callback_mongo') is None:
            # Unlike the legacy file_url-only de-dup, a dict key supports
            # compound de-duplication.
            if isinstance(key, dict):
                redis_data['callback_mongo'] = {
                    'mongo_url': configs['mongo_or']['mongo_uri'],
                    'mongo_db': configs['mongo_or']['mongo_db'],
                    'mongo_col': self.source_identity,
                    'key': key,
                }
                # From here on, de-dup and find_dict fall back to the file_url.
                key = key['file_url']
            else:
                redis_data['callback_mongo'] = {
                    'mongo_url': configs['mongo_or']['mongo_uri'],
                    'mongo_db': configs['mongo_or']['mongo_db'],
                    'mongo_col': self.source_identity,
                    'key': {'file_url': key},
                }

        if self.source_identity.startswith('industry_'):
            # Fill in the lookup key for industry data.
            if redis_data.get('find_dict') is None:
                redis_data['find_dict'] = {'file_url': key}
        elif self.source_identity.startswith('notice_'):
            if redis_data.get('find_dict') is None:
                redis_data['find_dict'] = {'file_url': key}
        elif self.source_identity.startswith('edb'):
            # Fill in the lookup key for EDB data.
            if redis_data.get('find_dict') is None:
                redis_data['find_dict'] = {'oss_path': key}
            # EDB records are always written back to the edb_source collection.
            redis_data['callback_mongo'] = {
                'mongo_url': configs['mongo_or']['mongo_uri'],
                'mongo_db': configs['mongo_or']['mongo_db'],
                'mongo_col': 'edb_source', 'key': {'file_url': key},
            }

            if item.get('oss_path') is None:
                item['oss_path'] = key
            if item.get('code') is None:
                item['code'] = self.source_identity

        # For text payloads, normalize requests_data to str (decode bytes,
        # replacing undecodable sequences) so it serializes safely.
        req_data = redis_data['requests_data']
        if save_type == self.SAVE_TEXT:
            redis_data['requests_data'] = req_data.decode(errors='replace') if isinstance(req_data, bytes) else req_data

        redis_data['item'] = item

        # De-dup set, harvest counter and queue updates. Note: key may have
        # been reduced from a dict to its file_url above.
        self.r_db.sadd(self.get_filter_name(), key)
        if extra_filter:
            for filter_key in extra_filter:
                self.r_db.sadd(self.get_filter_name(), filter_key)
        self.r_db.hincrby(log_name, task_seq)
        self.r_db.rpush(queue, dumps_resault(redis_data))

    @staticmethod
    def _correct_item(item):
        """Normalize ``item['file_type']`` in place to a lowercase, dot-prefixed suffix."""
        file_type = item.get('file_type')
        # Truthiness (not `is not None`) also skips the empty string, which
        # previously raised IndexError on file_type[0].
        if file_type:
            if not file_type.startswith('.'):
                file_type = '.' + file_type
            item['file_type'] = file_type.lower()

    def check_data(self, redis_data):
        """Check a record for common defects before it is queued.

        :param redis_data: dict with ``requests_data`` and ``item`` entries
        :return: tuple ``(is_correct, check_msg)``; check_msg joins all
            detected problems with newlines
        """
        msg = []
        requests_data = redis_data['requests_data']
        item = redis_data['item']
        file_type = item.get('file_type')
        if file_type is None:
            msg.append('file_type is None')
        # elif guard: the original indexed file_type[0] even when file_type
        # was None, raising TypeError instead of reporting the problem.
        elif file_type[0] != '.':
            msg.append('file_type not startswith dot, %s' % file_type)
        time_input = item.get('time')
        if time_input and not isinstance(time_input, datetime.datetime):
            msg.append('time not datetime type# %s' % time_input)
        source_url = item.get('source_url')
        file_url = item.get('file_url')
        if source_url and not source_url.startswith('http'):
            msg.append('source_url not http type#%s' % source_url)
        if file_url and not file_url.startswith('http'):
            msg.append('file_url not http type#%s' % file_url)
        if isinstance(requests_data, dict):
            # .get avoids a KeyError when the request dict carries no url;
            # the original requests_data['url'] crashed in that case.
            request_url = requests_data.get('url')
            if request_url and not request_url.startswith('http'):
                msg.append('request_url not http type#%s' % request_url)
        # Correct iff no problem was recorded.
        return not msg, '\n'.join(msg)
