import ast
import json
import os
import re
from urllib.parse import urlparse

import scrapy

from config_spider.items import Item


class BaseSpiser(scrapy.Spider):
    """Base spider for Crawlab-managed scrapers.

    Responsibilities visible here:
    - Coerce string-typed spider arguments (``-a key=value``) into Python values.
    - Merge framework-level settings from ``CRAWLAB_SETTING_*`` environment
      variables into Scrapy's settings, below ``custom_settings`` priority.
    - Resolve relative / protocol-relative URLs against a response.

    NOTE(review): the class name ``BaseSpiser`` looks like a typo for
    ``BaseSpider`` but is kept unchanged — subclasses elsewhere inherit it.
    """

    # Item class used by subclasses; re-exported on the class for convenience.
    Item = Item

    # Framework-level default settings; Scrapy's `custom_settings` overrides these.
    custom_settings_frame = None

    def __init__(self, **kwargs):
        """Initialize the spider, parsing string kwargs into Python values.

        Each keyword value arrives as a string (e.g. from the Scrapy CLI) and
        is interpreted as a Python literal when possible; otherwise it is kept
        as the raw string.

        SECURITY: the previous implementation used ``eval()``, which executes
        arbitrary code from untrusted input and raised NameError on plain
        strings like ``name=foo``. ``ast.literal_eval`` accepts the same
        literal values safely and we fall back to the raw string otherwise.
        """
        kwargs = {k: self._coerce_literal(v) for k, v in kwargs.items()}
        super().__init__(**kwargs)

    @staticmethod
    def _coerce_literal(value):
        """Return *value* parsed as a Python literal, or unchanged if it isn't one."""
        try:
            return ast.literal_eval(value)
        except (ValueError, SyntaxError):
            return value

    @classmethod
    def update_settings(cls, settings):
        """Merge framework settings from the environment into *settings*.

        Reads every ``CRAWLAB_SETTING_<NAME>`` environment variable, coercing
        its string value to bool / int / JSON object / JSON list where the text
        matches, and stores it under ``<NAME>``. If ``FILTER_URL`` is enabled
        (either via the environment or existing settings), a Redis-backed
        bloom-filter scheduler/dupefilter configuration is added.

        Finally applies the merged dict at 'spider' priority, with the class's
        own ``custom_settings`` taking precedence over the framework values.
        """
        if not cls.custom_settings_frame:
            cls.custom_settings_frame = {}
        for setting_env_name in [x for x in os.environ.keys() if x.startswith('CRAWLAB_SETTING_')]:
            setting_name = setting_env_name.replace('CRAWLAB_SETTING_', '')
            setting_value = os.environ.get(setting_env_name)
            # Coerce the string from the environment into a typed value.
            if setting_value.lower() == 'true':
                setting_value = True
            elif setting_value.lower() == 'false':
                setting_value = False
            elif re.search(r'^\d+$', setting_value) is not None:
                setting_value = int(setting_value)
            elif re.search(r'^\{.*\}$', setting_value.strip()) is not None:
                setting_value = json.loads(setting_value)
            elif re.search(r'^\[.*\]$', setting_value.strip()) is not None:
                setting_value = json.loads(setting_value)
            else:
                # Anything else stays a plain string.
                pass
            cls.custom_settings_frame.update({setting_name: setting_value})
        if cls.custom_settings_frame.get("FILTER_URL") or settings.get("FILTER_URL"):
            print("开启url过滤")
            # Redis/bloom-filter based request deduplication (scrapy-redis-bloomfilter).
            cls.custom_settings_frame.update({
                "SCHEDULER": "scrapy_redis_bloomfilter.scheduler.Scheduler",
                "SCHEDULER_DUPEFILTER_KEY": "crawlab_spider:bloomfilter",
                "DUPEFILTER_CLASS": "scrapy_redis_bloomfilter.dupefilter.RFPDupeFilter",
                "REDIS_URL": f"redis://:{os.environ.get('CRAWLAB_REDIS_PASSWORD')}@{os.environ.get('CRAWLAB_REDIS_ADDRESS')}:{os.environ.get('CRAWLAB_REDIS_PORT') or 6379}/{os.environ.get('CRAWLAB_REDIS_DB') or 1}",
                "DUPEFILTER_DEBUG": True,
                "SCHEDULER_PERSIST": True
            })
        # `custom_settings` (right-hand side) overrides the framework defaults.
        settings.setdict({**(cls.custom_settings_frame or {}), **(cls.custom_settings or {})}, priority='spider')

    def get_real_url(self, response, url):
        """Resolve *url* to an absolute URL relative to *response*.

        - Absolute http(s) URLs are returned as-is.
        - Protocol-relative URLs (``//host/path``) get the response's scheme.
        - Everything else is resolved with ``response.urljoin``.
        """
        if isinstance(url, str):
            if re.search(r'^https?', url):
                return url
            elif re.search(r'^\/\/', url):
                u = urlparse(response.url)
                return u.scheme + ":" + url
        return response.urljoin(url)