import json
import random
import re
import time
import requests
from config import config
from crawler.exceptions import UrlRuleNotExistException
from crawler.items import *
from crawler.logger import monitor_logger
from crawler.utils.func_tools import get_url_uuid, get_website_domain_new
from scrapy.utils.serialize import ScrapyJSONEncoder

# JSON encoder that understands Scrapy-specific objects (e.g. datetimes, Items);
# used to serialize items before publishing them downstream.
default_serialize = ScrapyJSONEncoder().encode


class DatabasePipeline:
    """Scrapy item pipeline: cleans item URLs via remote rules and publishes
    items (or seeds) to a message channel for database insertion."""

    # Message channel, taken from the spider on each processed item.
    server = None
    # domain -> list of {'matchRule', 'replaceRule'} dicts pulled from CLEAN_HOST.
    site_domain_map = {}
    # 0 = rules are (re)pulled on this pass, 1 = cached rules are reused.
    url_clean_flag = 0
    # Unix timestamp of the last rule pull (0 = never pulled).
    lasted_clean_time = 0

    @staticmethod
    def _send_send_item_log(item, status, response_text=''):
        """Emit a monitoring log entry for an item delivery attempt.

        Args:
            item: item dict, or its JSON-serialized string form.
            status: HTTP-like status code (200 success, 500 failure).
            response_text: optional extra text appended to the log line.
        """
        if isinstance(item, str):
            item = json.loads(item)
        task = item['origin']

        monitor_logger.info({
            **task,
            'service': item['origin']['kernelCode'],
            'step': 'send_item',
            'state': status,
            'other': task.get('spiderTraceId', ''),
            'text': '-'.join([
                str(item.get('uuid')),
                str(item.get('type')),
                str(item['origin'].get('mediaId', 0)),
                response_text
            ]),
            'logTime': int(time.time() * 1000),
            'cost': 0
        })

    @staticmethod
    def _get_item_key():
        """Return a routing key with a random shard suffix, e.g. 'spider.items-3.spider'."""
        return f'spider.items-{random.randint(1, 5)}.spider'

    def _clean_url(self, item, spider):
        """Normalize item['url'] using remotely configured regex rules.

        Rules are refreshed at most once per 60 seconds; on a rewrite the
        item's url and its derived uuid are updated in place.

        Args:
            item: item mapping with at least a 'url' key.
            spider: spider instance (used for rule fetching / logging).

        Returns:
            The JSON-serialized item string.

        Raises:
            UrlRuleNotExistException: if no rules are available on first use.
        """
        # Assume cached rules will be reused on this pass.
        self.url_clean_flag = 1

        # Refresh the rules if the cache is older than 60 seconds; record the
        # attempt time *before* pulling.
        now_clean_time = time.time()
        if now_clean_time - self.lasted_clean_time > 60:
            self.url_clean_flag = 0
            self.lasted_clean_time = now_clean_time

        if self.url_clean_flag == 0:
            try:
                self.site_domain_map = self.get_url_rule(spider)
            except Exception:
                # Best-effort refresh: keep previously cached rules on failure.
                pass

        # NOTE(review): lasted_clean_time is assigned before the first pull
        # above, so this "first pull failed" guard can never fire once a pull
        # has been attempted — preserved as-is to avoid changing behavior.
        if not self.site_domain_map and self.lasted_clean_time == 0:
            raise UrlRuleNotExistException

        # Full-domain match against the rule table.
        url = item['url']
        domain = get_website_domain_new(url)
        domain_rules = self.site_domain_map.get(domain, None)

        # Apply the rules in order. NOTE(review): each re.sub runs against the
        # ORIGINAL url (not the previous rewrite), so with multiple rules the
        # last one applied wins — confirm this is intended.
        if domain_rules:
            for domain_rule in domain_rules:
                match_rule = domain_rule['matchRule']
                replace_rule = domain_rule['replaceRule']
                replace_url = re.sub(r'{}'.format(match_rule), r'{}'.format(replace_rule), url)
                if replace_url:
                    item['url'] = replace_url
                    item['uuid'] = get_url_uuid(item['url'])

        return default_serialize(item)

    @staticmethod
    def get_url_rule(spider):
        """Fetch the url-cleaning rule table from the cleaning service.

        Returns:
            dict mapping domain -> list of match/replace rule dicts.
        """
        url = config.CLEAN_HOST + '?signature=1data_crawler_data_cleaning'
        response = requests.get(url, timeout=3)
        site_domain_map = json.loads(response.text)['site_domain_map']
        spider.logger.info('Pull Url Rule Success!')
        return site_domain_map

    def _publish_seed(self, body, spider):
        """Publish a seed to the message channel.

        Restored: process_item calls this method, but it had been commented
        out, so every SeedItem raised AttributeError.
        """
        body = json.dumps(dict(body), ensure_ascii=False)
        spider.logger.info('Send Item, Item: ' + body)
        connection.send_seed(body, channel=self.server)

    def _insert(self, task):
        """Publish an insert task to a randomly sharded routing key."""
        connection.publish_insert_task(task, channel=self.server, routing_key=self._get_item_key())

    def process_item(self, item, spider):
        """Pipeline entry point: seeds go to the seed channel; all other items
        are size-checked, url-cleaned, and published as insert tasks."""
        self.server = spider.server

        if isinstance(item, SeedItem):
            self._publish_seed(item, spider)
        else:
            try:
                # Drop over-long items instead of sending them downstream.
                if len(item['title']) > 1000 or len(item['content']) > 1000 * 1000:
                    self._send_send_item_log(item, 500, response_text='Text To Long')
                    spider.logger.info('Content To Long: ' + json.dumps(item))
                    return item
            except Exception:
                # Items missing title/content fall through to normal handling.
                pass

            # Clean the url (this also JSON-serializes the item).
            item = self._clean_url(item, spider)
            spider.logger.info('Publish Insert Task, Task: ' + item)
            try:
                self._insert(json.loads(item))
                self._send_send_item_log(item, 200)
            except Exception as e:
                spider.logger.error(e)
                self._send_send_item_log(item, 500)
        return item
