import json

import arrow
import requests
import logging

from wass import settings

from models.user import User
from utilities.errors import (
    APIScannerCreateTaskFail,
    APIScannerException,
    APIScannerNotOk,
    ServerError,
    TaskExistingError
)
from utilities.utils import DateTimeEncoder, trans_textbox_to_dict
from utilities.enums import (
    TaskType,
    EXTaskType,
    SecurityEventPlugins,
    ScanJobName,
    NetWorkType,
    ScanEventName,
)


# Module-wide application logger.
logger = logging.getLogger('app')
# Base URL of the scanner service and the dedicated queue for availability jobs.
SCANNER_SERVER = settings.SCANNER_SERVER
AVAILABLE_JOB_QUEUE = settings.AVAILABLE_JOB_QUEUE


class ScannerRequest(object):
    """Thin HTTP client for the scanner service.

    All requests are routed through :meth:`ajax`, which normalizes the URL,
    serializes the body, logs both sides of the exchange and maps scanner
    responses to project exceptions.
    """

    def ajax(self, **kwargs):
        """Send a request to the scanner and return its parsed JSON body.

        Keyword Args:
            method, url, params, data: forwarded to ``requests.request``;
                ``url`` is prefixed with ``SCANNER_SERVER`` and ``data`` is
                JSON-encoded with :class:`DateTimeEncoder`.

        Raises:
            TaskExistingError: scanner answered 409/code 6 for a duplicate task.
            APIScannerNotOk: scanner answered with any other error status.
            APIScannerException: network/parsing failure or any other error.
        """
        kwargs['url'] = SCANNER_SERVER + kwargs['url']
        info = {'label': 'scanner'}
        info.update(kwargs)
        logger.info(info)

        try:
            data = kwargs.get('data')
            if data:
                kwargs['data'] = json.dumps(data, cls=DateTimeEncoder)
            resp = requests.request(**kwargs)
            # Parse the body once; every branch below needs it.
            payload = resp.json()
            # A 404 with code 5 means the task was lost by the dispatcher and
            # must be recreated -- treated as success, not as an error.
            if resp.ok or (resp.status_code == 404 and payload.get('code') == 5):
                logger.info(
                    {'label': 'scanner', 'code': resp.status_code, 'resp': payload}
                )
                return payload
            if (
                resp.status_code == 409
                and payload.get('code') == 6
                and 'TaskExistingError' in payload.get('message', '')
            ):
                raise TaskExistingError()
            logger.error(
                {'label': 'scanner', 'code': resp.status_code, 'resp': payload}
            )
            raise APIScannerNotOk()
        except TaskExistingError:
            # Duplicate-task errors are part of the API contract; re-raise as-is.
            raise
        except Exception as e:
            logger.exception(e)
            raise APIScannerException() from e

    def get(self, url: str, params: dict = None):
        """GET *url* (relative to the scanner server) with optional *params*."""
        return self.ajax(method='GET', url=url, params=params)

    def post(self, url: str, body: dict):
        """POST *body* to *url* (relative to the scanner server)."""
        return self.ajax(method='POST', url=url, data=body)


class SchedulerServer:
    """Base class exposing the scanner endpoint and a POST helper."""

    server = SCANNER_SERVER

    def _action(self, uri, body):
        """POST *body* to *uri* on the scanner service and return the reply."""
        return ScannerRequest().post(uri, body)


class Scheduler(SchedulerServer):
    """Base builder/manager for scanner tasks.

    Subclasses declare ``job_names``, ``task_type``, ``priority`` and
    (optionally) ``allow_job_config_keys``, then customize the ``_init_*``
    hooks.  :meth:`init_config` wires each ``_init_<name>`` hook to the
    same-named instance attribute via reflection.
    """

    server = SCANNER_SERVER
    # Scan job names for this task type; overridden by subclasses.
    job_names = ['']
    # Scheduling priority placed into task_config (semantics defined by the
    # scanner service -- TODO confirm ordering convention).
    priority = 2
    # Task type identifier sent to the scanner; overridden by subclasses.
    task_type = ''

    def __init__(
        self,
        task_id='',
        target_url='',
        origin_ip='',
        filter_events=None,
        task_config=None,
        job_config_map=None,
        flex_job_config_map=None,
        target_id='',
        user_id='',
        addition=None,
    ):

        self.task_id = task_id
        self.target_url = target_url
        self.origin_ip = origin_ip
        # Fresh containers instead of shared mutable defaults.
        self.filter_events = filter_events if filter_events else []
        self.task_config = task_config if task_config else {}
        self.job_config_map = job_config_map if job_config_map else {}
        self.flex_job_config_map = flex_job_config_map if flex_job_config_map else {}
        self.target_id = target_id
        self.user_id = user_id
        self.addition = addition if addition else {}
        # Copy the class-level list so per-instance mutation cannot leak into
        # the class or sibling instances.
        self.job_names = [job_name for job_name in self.job_names]

        # Configs may arrive as document objects exposing to_dict()
        # (presumably ODM documents -- verify against callers); normalize
        # them to plain dicts before use.
        if not isinstance(self.job_config_map, dict):
            self.job_config_map = self.job_config_map.to_dict()

        if not isinstance(self.flex_job_config_map, dict):
            self.flex_job_config_map = self.flex_job_config_map.to_dict()

        if not isinstance(self.task_config, dict):
            self.task_config = self.task_config.to_dict()

        # Capture before _init_job_config_map clears flex_job_config_map.
        self.is_deep_malscan = self.flex_job_config_map.get("is_deep_malscan", False)

        self.init_config()

    def get_task_id_body(self):
        """Return a request body containing only the task id.

        Returns:
            dict: ``{"task_id": ...}``
        """
        return {"task_id": self.task_id}

    def get_task_id_with_force_body(self):
        """Return a task-id request body with the ``is_force`` flag set.

        Returns:
            dict: ``{"task_id": ..., "is_force": True}``
        """
        return {"task_id": self.task_id, "is_force": True}

    def get_all_body(self):
        """Return the full request body used by create/modify calls.

        Returns:
            dict: all task fields the scanner needs to (re)build the task.
        """
        return {
            "task_id": self.task_id,
            "target_url": self.target_url,
            "origin_ip": self.origin_ip,
            "job_names": self._get_cleaned_job_names(),
            "filter_events": self.filter_events,
            "task_config": self.task_config,
            "job_config_map": self.job_config_map,
            "flex_job_config_map": self.flex_job_config_map,
            "addition": self.addition,
            'target_id': self.target_id,
            'user_id': self.user_id,
        }

    def _init_filter_events(self):
        """Hook: events to filter; the base implementation filters nothing."""
        return []

    def _init_task_config(self):
        """Hook: build the scheduling section (period, priority, time window)."""
        base_config = self.task_config.get(self.task_type, {})

        start_at = self.task_config.get('start_at')
        end_at = self.task_config.get('end_at')
        scan_period_start_time = self.task_config.get('scan_period_start_time')
        scan_period_end_time = self.task_config.get('scan_period_end_time')

        # A one-off task marked "promptly" starts immediately.
        if self.task_config.get('promptly', False) and not self.task_config.get(
            'is_periodic', True
        ):
            start_at = arrow.utcnow().isoformat()
        scan_task_config = {
            'is_periodic': self.task_config.get('is_periodic', True),
            'priority': self.priority,
            'seconds': base_config.get('seconds'),
            'is_disabled': base_config.get('is_disabled', True),
            'scan_period_start_time': scan_period_start_time,
            'scan_period_end_time': scan_period_end_time,
            'start_at': start_at,
            'end_at': end_at,
        }

        return scan_task_config

    def _init_job_config_map(self):
        """Hook: build the per-job config map.

        Folds the flexible (user-facing) options into each job's config,
        copies the subclass whitelist (``allow_job_config_keys``) from the
        task-type section, and enables incremental-scan caching on request.
        NOTE: ``self.flex_job_config_map`` is consumed and cleared here.
        """
        config = self.job_config_map.get(self.task_type, {})
        ex_job_config_map = self._ex_init_flex_job_config_map()
        self.flex_job_config_map = {}

        for job_name in self.job_names:
            # Copy only the whitelisted keys into every job's config.
            ex_job_config_map.setdefault(job_name, {}).update(
                {
                    k: config.get(k, '')
                    for k in getattr(self, 'allow_job_config_keys', [])
                }
            )

        if self.task_config.get('use_increment', False):
            # Incremental scans share a per-target/type/job cache id.
            for k in (
                ScanJobName.kscan.value,
                ScanJobName.appscan.value,
                ScanJobName.nscan.value,
            ):
                if ex_job_config_map.get(k):
                    ex_job_config_map[k].update(
                        {
                            "is_write_incremental_cache": True,
                            "is_filter_incremental_cache": True,
                            "incremental_cache_id": f"{self.target_id}_{self.task_type}_{k}",
                        }
                    )

        return ex_job_config_map

    def _ex_init_flex_job_config_map(self):
        """Expand the flat flex config into a per-job mapping.

        Handles pre-login flag toggling, textbox header parsing and crawler
        scope, then copies the remaining options (minus internal keys) to
        every job except ``site_info``.
        """
        # Internal keys that must not reach individual job configs.
        exclude_keys_tuple = (
            'is_solo',
            'site_login',
            'login_method',
            'is_deep_malscan',
        )
        exclude_scan_job_name_tuple = (ScanJobName.site_info.value,)

        # Request headers arrive as textbox text; parse into a dict.
        self.flex_job_config_map['request_http_headers'] = trans_textbox_to_dict(
            self.flex_job_config_map.get('request_http_headers', '')
        )

        if self.flex_job_config_map.get('site_login', False):
            # Dispatch to _get_prelogin_<method> to toggle the login flags.
            login_method = self.flex_job_config_map.get('login_method')
            func = getattr(self, f'_get_prelogin_{login_method}')
            self.flex_job_config_map.update(func())

        if self.flex_job_config_map.get(
            'crawler_included_urls'
        ) or self.flex_job_config_map.get('crawler_excluded_urls'):
            # Explicit URL lists imply restricted crawl scope (3).
            self.flex_job_config_map['crawler_scope'] = 3

        ex_flex_job_config_map = {}
        for job_name in self.job_names:
            if job_name in exclude_scan_job_name_tuple:
                continue
            ex_flex_job_config_map.setdefault(job_name, {}).update(
                {
                    k: v
                    for k, v in self.flex_job_config_map.items()
                    if k not in exclude_keys_tuple
                }
            )
            # Included/excluded URLs are stored as paths; prefix the target URL.
            for k in ('crawler_included_urls', 'crawler_excluded_urls'):
                if ex_flex_job_config_map[job_name].get(k):
                    ex_flex_job_config_map[job_name][k] = [
                        f"{self.target_url}{item}"
                        for item in ex_flex_job_config_map[job_name][k]
                    ]

        return ex_flex_job_config_map

    def _init_addition(self):
        """Hook: extra metadata attached to the task (always has task_type)."""
        scan_addition = {'task_type': self.task_type, **self.addition}
        return scan_addition

    @staticmethod
    def _get_prelogin_form():
        """Flex-config overrides for form-based site login."""
        return {
            'is_login_form_enabled': True,
            'is_login_json_enabled': False,
            'request_cookie': None,
            'request_http_headers': None,
        }

    @staticmethod
    def _get_prelogin_json():
        """Flex-config overrides for JSON-based site login."""
        return {
            'is_login_form_enabled': False,
            'is_login_json_enabled': True,
            'request_cookie': None,
            'request_http_headers': None,
        }

    @staticmethod
    def _get_prelogin_cookie():
        """Flex-config overrides for cookie-based site login."""
        return {
            'is_login_form_enabled': False,
            'is_login_json_enabled': False,
            'request_http_headers': None,
        }

    @staticmethod
    def _get_prelogin_headers():
        """Flex-config overrides for header-based site login."""
        return {
            'is_login_form_enabled': False,
            'is_login_json_enabled': False,
            'request_cookie': None,
        }

    def _get_cleaned_job_names(self) -> list:
        """Drop ``appscan`` from the job list when it would do no real work
        (no include_plugins, or only the statistics plugin)."""
        if ScanJobName.appscan.value in self.job_names:
            include_plugins = self.job_config_map.get(
                ScanJobName.appscan.value, {}
            ).get('include_plugins', [])
            if not include_plugins or include_plugins == [
                ScanEventName.statistics.value
            ]:
                return [
                    item for item in self.job_names if item != ScanJobName.appscan.value
                ]

        return self.job_names

    def create(self):
        """Create the task on the scanner and record its id.

        Raises:
            APIScannerCreateTaskFail: scanner did not return a task id.
        """
        resp = self._action('/v1/tasks:create', self.get_all_body())
        task_id = resp.get('task_id')
        if task_id:
            self.task_id = task_id
        else:
            raise APIScannerCreateTaskFail()

    def modify(self):
        """Modify an existing task (requires ``task_id``).

        Falls back to re-creating the task when the scanner reports code 5
        (task lost by the dispatcher).

        Raises:
            APIScannerCreateTaskFail: scanner did not return a task id.
        """
        resp = self._action('/v1/tasks:modify', self.get_all_body())
        if resp.get('code') == 5:
            resp = self._action('/v1/tasks:create', self.get_all_body())

        task_id = resp.get('task_id')
        if task_id:
            self.task_id = task_id
        else:
            raise APIScannerCreateTaskFail()

    def auto_schedule(self):
        """Modify when a ``task_id`` exists, otherwise create."""
        if self.task_id:
            self.modify()
        else:
            self.create()

    def remove(self):
        """Remove the task on the scanner and clear the local id."""
        if not self.task_id:
            raise ServerError(detail="task id not exist.")
        self._action('/v1/tasks:remove', self.get_task_id_body())
        self.task_id = ''

    def stop(self):
        """Stop the task on the scanner and clear the local id."""
        if not self.task_id:
            raise ServerError(detail="task id not exist.")
        self._action('/v1/tasks:stop', self.get_task_id_body())
        self.task_id = ''

    def disable(self):
        """Disable the task on the scanner and clear the local id."""
        if not self.task_id:
            raise ServerError(detail="task id not exist.")
        self._action('/v1/tasks:disable', self.get_task_id_body())
        self.task_id = ''

    def rescan(self, is_force=False):
        """Trigger a rescan of the task, optionally forcing it."""
        if not self.task_id:
            raise ServerError(detail="task id not exist.")
        body = (
            self.get_task_id_with_force_body() if is_force else self.get_task_id_body()
        )
        self._action('/v1/tasks:rescan', body)

    def init_config(self):
        """Run every ``_init_<name>`` hook and store its result as ``self.<name>``.

        NOTE(review): relies on ``__dir__`` enumeration, so hook execution
        order is not guaranteed -- hooks should not depend on one another.
        """
        for attr in self.__dir__():
            if attr.startswith('_init_'):
                key = attr.split('_init_')[-1]
                if hasattr(self, key):
                    setattr(self, key, getattr(self, attr)())


class AvailabilityScheduler(Scheduler):
    """Scheduler for site-availability probing via the site_info job."""

    job_names = [ScanJobName.site_info.value]
    priority = 3
    task_type = TaskType.availability.value
    allow_job_config_keys = ('include_plugins', 'use_cloud')

    def _init_task_config(self):
        """Route availability tasks onto the dedicated job queue."""
        task_config = super(AvailabilityScheduler, self)._init_task_config()
        task_config['job_queue'] = AVAILABLE_JOB_QUEUE
        return task_config

    def _init_job_config_map(self):
        """Translate 'use_cloud'/'include_plugins' into site_info modules."""
        config_map = super(AvailabilityScheduler, self)._init_job_config_map()
        site_cfg = config_map['site_info']
        site_cfg.update({"runtime_limit": 240})
        # todo: support http and ping

        modules = []
        if site_cfg.pop('use_cloud', False):
            modules.append('cloud')
            site_cfg['cloud'] = {
                'probe_types': ['HTTP', 'PING'],
                'network_type': NetWorkType.v4.value,
            }

        # Only ping/http_get are recognized; http_get pulls in dns and the
        # full-time variant as well.
        for plugin in site_cfg.pop('include_plugins', []):
            if plugin == 'http_get':
                modules.extend([plugin, 'dns', 'http_get_full_time'])
            elif plugin == 'ping':
                modules.append(plugin)

        site_cfg['modules'] = modules

        return config_map


class TargetInfoScheduler(AvailabilityScheduler):
    """High-priority scheduler collecting target meta information."""

    priority = 1
    task_type = EXTaskType.target_info.value
    allow_job_config_keys = ('enable',)  # type: ignore

    def _init_task_config(self):
        """Target-info tasks run on the default queue, not the availability one."""
        task_config = super(TargetInfoScheduler, self)._init_task_config()
        task_config.pop("job_queue", None)
        return task_config

    def _init_job_config_map(self):
        """Enable the full info-gathering module set when 'enable' is set."""
        # Deliberately bypass AvailabilityScheduler's module handling and
        # start from the generic Scheduler implementation instead.
        config_map = super(AvailabilityScheduler, self)._init_job_config_map()
        site_info = config_map.get('site_info', {})
        if site_info.pop('enable', False):
            site_info['modules'] = [
                'alexa',
                'basic_info',
                'http_method',
                'icp',
                'ipdb',
                'port',
                'sslcheck',
                'subdomain',
                'wafdetector',
            ]
            site_info['wafdetector'] = {"mode": "normal"}

        return config_map


class SecurityEventScheduler(Scheduler):
    """Scheduler for security-event detection (appscan + nscan jobs)."""

    job_names = [ScanJobName.appscan.value, ScanJobName.nscan.value]
    task_type = TaskType.securityEvent.value
    # Keys copied from the task-type section of job_config_map into each job.
    allow_job_config_keys = (
        'crawler_depth',
        'crawler_max_page',
        'crawler_scope',
        'include_url',
    )
    # Plugin types whose evidence flag is user-configurable.
    allow_evidence = (
        SecurityEventPlugins.cryptojacking.value,
        SecurityEventPlugins.malscan.value,
        SecurityEventPlugins.seo_hijack.value,
    )
    # Which job executes each security-event plugin.
    plugins_appscan = (
        SecurityEventPlugins.black_links.value,
        SecurityEventPlugins.malscan.value,
        SecurityEventPlugins.cryptojacking.value,
        SecurityEventPlugins.seo_hijack.value,
    )
    plugins_nscan = (
        SecurityEventPlugins.keyword.value,
        SecurityEventPlugins.privacy_disclosure.value,
    )

    def _init_job_config_map(self):
        """Distribute selected plugins to appscan/nscan and set their options."""
        scan_job_config_map = super(SecurityEventScheduler, self)._init_job_config_map()
        config = self.job_config_map.get('securityEvent', {})
        evidence = config.get('evidence', False)
        # crawler_scope 2: full-site crawl -> drop any explicit URL list.
        if config.get('crawler_scope', 2) == 2:
            for job_name in self.job_names:
                scan_job_config_map[job_name]['include_url'] = []

        # crawler_scope 3: fixed page list -> nscan skips crawling.
        if config.get('crawler_scope') == 3:
            scan_job_config_map['nscan']['is_without_crawl'] = True

        for plugin in self.job_config_map.get(TaskType.securityEvent.value, {}).get(
            'include_plugins', []
        ):
            if plugin in self.plugins_appscan:
                scan_job_config_map['appscan'].setdefault('include_plugins', []).append(
                    plugin
                )
            elif plugin in self.plugins_nscan:
                scan_job_config_map['nscan'].setdefault('plugins', []).append(plugin)

            # Optional per-plugin option hook, e.g. _get_keyword_option.
            func = getattr(self, f"_get_{plugin}_option", None)
            if func:
                func(scan_job_config_map)

        # set evidence
        # NOTE(review): appscan 'plugins' is used as a dict here while nscan
        # 'plugins' above is a list -- confirm the scanner expects this shape.
        for plugin_type in self.allow_evidence:
            scan_job_config_map['appscan'].setdefault('plugins', {}).setdefault(
                plugin_type, {}
            )['evidence'] = evidence

        if scan_job_config_map.get('nscan'):
            scan_job_config_map['nscan']['mode'] = 2
            scan_job_config_map['nscan']['crawler_scope'] = 3

        if scan_job_config_map.get('appscan'):
            scan_job_config_map['appscan']['crawler_scope'] = 3

        return scan_job_config_map

    def _get_keyword_option(self, scan_job_config_map):
        """Configure nscan keyword scanning; sub-accounts inherit the custom
        keyword database of their parent account."""
        custom_id = self.user_id
        obj_user = User.objects.filter(user_id=custom_id).first()
        if obj_user and not obj_user.is_superuser:
            obj_puser = User.objects.filter(user_id=obj_user.parent_id).first()
            if obj_puser:
                custom_id = obj_puser.user_id

        scan_job_config_map.setdefault('nscan', {})['keyworddb_options'] = {
            'is_disable_system_keywords': False,
            "is_disable_custom_keywords": False,
            "custom_id": custom_id,
            "system_keyword_types": [],
            "custom_keyword_types": [],
        }

    def _get_malscan_option(self, scan_job_config_map):
        """Select deep or fast malware-scan mode for appscan."""
        mode = "deep" if self.is_deep_malscan else "fast"
        scan_job_config_map.setdefault('appscan', {}).setdefault('plugins', {})[
            'malscan'
        ] = {'mode': mode}


class VulScheduler(Scheduler):
    """Scheduler for vulnerability scanning via the kscan job."""

    job_names = [ScanJobName.kscan.value]
    task_type = TaskType.vul.value
    allow_job_config_keys = (
        'crawler_depth',
        'crawler_max_page',
        'scan_profiles',
    )

    def _init_filter_events(self):
        """Extend the base filter list with vuln/statistics events."""
        scan_filter_events = super(VulScheduler, self)._init_filter_events()
        scan_filter_events.extend(['vuln', 'statistics'])
        return scan_filter_events

    def _init_job_config_map(self):
        """Split 'scan_profiles' into numeric system profiles and custom ones."""
        job_config_map = super(VulScheduler, self)._init_job_config_map()
        for job_name in self.job_names:
            s_profiles = []
            c_profiles = []
            for profile in job_config_map.setdefault(job_name, {}).get(
                'scan_profiles', []
            ):
                # Numeric ids identify built-in profiles; anything else is a
                # user-defined (custom) profile.
                if isinstance(profile, (int, float)):
                    s_profiles.append(profile)
                else:
                    c_profiles.append(profile)
            job_config_map[job_name]['scan_profiles'] = s_profiles
            job_config_map[job_name]['scan_custom_profiles'] = c_profiles

        return job_config_map


class RiskLinkScheduler(Scheduler):
    """Scheduler for risk-link detection (nscan risk_link plugin)."""

    job_names = [ScanJobName.nscan.value]
    task_type = TaskType.risk_link.value
    allow_job_config_keys = ('crawler_depth', 'crawler_max_page')

    def _init_job_config_map(self):
        """Force nscan into mode 2 with only the risk_link plugin."""
        config_map = super(RiskLinkScheduler, self)._init_job_config_map()
        nscan_cfg = config_map['nscan']
        nscan_cfg['mode'] = 2
        nscan_cfg['plugins'] = ['risk_link']
        return config_map


class ChangeCheckScheduler(Scheduler):
    """Scheduler for web-page change detection (nscan change_check plugin)."""

    job_names = [ScanJobName.nscan.value]
    task_type = TaskType.change_check.value
    allow_job_config_keys = (
        'crawler_depth',
        'crawler_max_page',
        'crawler_scope',
        'include_url',
    )

    def _init_job_config_map(self):
        """Build the nscan change_check configuration from the task config."""
        scan_job_config_map = super(ChangeCheckScheduler, self)._init_job_config_map()
        scan_job_config_map['nscan'].update({'mode': 2, 'plugins': ['change_check']})

        config = self.job_config_map.get('change_check', {})
        if config.get('crawler_scope', 2) == 2:
            # Full-site crawl: clear the explicit URL list and push any
            # excluded URLs into the flex config.
            for job_name in self.job_names:
                scan_job_config_map[job_name]['include_url'] = []

                # NOTE(review): 'exclude_url' is popped inside this loop, so
                # only the first job would see it; harmless while job_names
                # has a single entry, but fragile if more jobs are added.
                exclude_url = config.pop('exclude_url', [])
                crawler_excluded_urls = self.flex_job_config_map.get(
                    'crawler_excluded_urls', []
                )
                crawler_excluded_urls.extend(exclude_url)
                if crawler_excluded_urls:
                    # NOTE(review): the base class may already have merged and
                    # cleared flex_job_config_map by the time this hook runs --
                    # confirm this write still has an effect.
                    self.flex_job_config_map[
                        'crawler_excluded_urls'
                    ] = crawler_excluded_urls

        if config.get('crawler_scope') == 3:
            # Fixed page list: skip crawling entirely.
            scan_job_config_map['nscan']['is_without_crawl'] = True

        data = {
            'automatic_update_baseline_interval': config.get(
                'automatic_update_baseline_interval'
            ),
            'is_automatic_update_baseline': bool(
                config.get('is_automatic_update_baseline')
            ),
            'is_update_baseline': False,
            'is_delete_baseline': False,
            'is_check_text': bool(config.get('is_check_text')),
            'is_check_structure': bool(config.get('is_check_structure')),
            'is_check_resource': bool(config.get('is_check_resource')),
            'text_change_ratio': float(config.get('text_change_ratio', 0)),
            'structure_change_ratio': float(config.get('structure_change_ratio', 0)),
            'resource_change_ratio': float(config.get('resource_change_ratio', 0)),
        }
        scan_job_config_map.setdefault('nscan', {}).setdefault('plugin_config', {})[
            'change_check'
        ] = data

        return scan_job_config_map


class ChangeCheckConfigScheduler(Scheduler):
    """One-off task that updates or deletes the change-check baseline."""

    job_names = [ScanJobName.nscan.value]
    task_type = EXTaskType.change_check_config.value
    priority = 3

    def __init__(
        self,
        user_id,
        target_id,
        target_url,
        base_task_id,
        is_update_baseline=False,
        is_delete_baseline=False,
        include_urls=None,
        exclude_urls=None,
        crawler_depth=0,
        crawler_max_page=0,
        crawler_scope=3,
    ):
        # base_task_id points at the change-check task whose baseline is
        # being maintained.
        self.base_task_id = base_task_id
        self.is_update_baseline = is_update_baseline
        self.is_delete_baseline = is_delete_baseline
        self.include_urls = include_urls
        self.exclude_urls = exclude_urls
        self.crawler_depth = crawler_depth
        self.crawler_max_page = crawler_max_page
        self.crawler_scope = crawler_scope
        super(ChangeCheckConfigScheduler, self).__init__(
            target_id=target_id, target_url=target_url, user_id=user_id
        )

    def _init_task_config(self):
        """Baseline maintenance is always a one-off, enabled task."""
        return {'priority': self.priority, "is_periodic": False, "is_disabled": False}

    def _init_job_config_map(self):
        """Build the nscan change_check config for a baseline update/delete.

        Returns:
            dict: the job config map. When neither flag is set the existing
            map is returned unchanged (previously this returned None, which
            clobbered ``self.job_config_map`` via ``init_config``).
        """
        if self.is_update_baseline:
            # crawler_scope 3 means "scan only the given pages" -> no crawl.
            is_without_crawl = self.crawler_scope == 3
            self.job_config_map = {
                "nscan": {
                    "plugins": ["change_check"],
                    "mode": 2,
                    "is_without_crawl": is_without_crawl,
                    "include_urls": self.include_urls,
                    "plugin_config": {
                        "change_check": {
                            "task_id": self.base_task_id,
                            "is_update_baseline": True,
                            "is_delete_baseline": False,
                        }
                    },
                    "crawler_depth": self.crawler_depth,
                    "crawler_max_page": self.crawler_max_page,
                    "crawler_excluded_urls": self.exclude_urls,
                }
            }
        elif self.is_delete_baseline:
            # Deleting a baseline never needs crawling.
            self.job_config_map = {
                "nscan": {
                    "plugins": ["change_check"],
                    "mode": 2,
                    "is_without_crawl": True,
                    "plugin_config": {
                        "change_check": {
                            "task_id": self.base_task_id,
                            "is_update_baseline": False,
                            "is_delete_baseline": True,
                        }
                    },
                }
            }
        return self.job_config_map


class BatchRemoveTask(SchedulerServer):
    """Batch removal of scanner tasks by id."""

    # Name of the id field in each request group; subclasses may override.
    key = 'task_id'

    def __init__(self, ids=None) -> None:
        # De-duplicate; a missing/empty argument becomes an empty set.
        self.ids = set(ids) if ids else set()

    def get_all_body(self):
        """Build the request body: one ``{key: id}`` group per non-empty id.

        Returns:
            dict: ``{"task_id_groups": [...]}``
        """
        groups = [{self.key: _id} for _id in self.ids if _id]
        return {"task_id_groups": groups}

    def batch_remove(self):
        """Remove all collected tasks; no-op when there is nothing to remove."""
        if self.ids:
            self._action('/v1/tasks:batchRemove', self.get_all_body())


class BatchRemoveTaskSession(BatchRemoveTask):
    """Batch removal keyed by task_session_id instead of task_id."""

    key = 'task_session_id'


class BatchStopTask(BatchRemoveTask):
    """Batch stop of scanner tasks by id."""

    key = 'task_id'

    def batch_stop(self):
        """Stop all collected tasks; no-op when the id set is empty."""
        if self.ids:
            self._action('/v1/tasks:batchStop', self.get_all_body())


class BatchStopTaskSession(BatchStopTask):
    """Batch stop keyed by task_session_id instead of task_id."""

    key = 'task_session_id'


class BatchDisableTask(BatchRemoveTask):
    """Batch disable of scanner tasks by id."""

    key = 'task_id'

    def batch_disable(self):
        """Disable all collected tasks; no-op when the id set is empty."""
        if self.ids:
            self._action('/v1/tasks:batchDisable', self.get_all_body())


class BatchCreateTask(Scheduler):
    """Create many scanner tasks in a single call."""

    request_url = '/v1/tasks:batchCreate'

    def __init__(self, scheduler_list: list):
        self.scheduler_list = scheduler_list
        super(BatchCreateTask, self).__init__()

    def batch_create(self):
        """Send the full body of every wrapped scheduler as one batch request."""
        bodies = [scheduler.get_all_body() for scheduler in self.scheduler_list]
        return self._action(self.request_url, {'custom_tasks': bodies})


class BatchModifyTask(BatchCreateTask):
    """Same batch payload as BatchCreateTask, sent to the modify endpoint."""

    request_url = '/v1/tasks:batchModify'


class BatchRescanTask(ScannerRequest):
    """Trigger rescans for many tasks in a single call."""

    request_url = '/v1/tasks:batchRescan'

    def __init__(self, task_id_list, is_force=False):
        self.task_id_list = task_id_list
        self.is_force = is_force
        super(BatchRescanTask, self).__init__()

    def send(self):
        """POST one rescan entry per task id, all sharing the force flag."""
        payload = [
            {'task_id': task_id, 'is_force': self.is_force}
            for task_id in self.task_id_list
        ]
        self.post(self.request_url, {'rescan_requests': payload})


class GetPagesUrlList(object):
    """Fetch the crawled page URL list for a task or task session."""

    request_url = "/v1/pages/url:list"

    def __init__(self, task_id=None, task_session_id=None):
        self.task_id = task_id
        self.task_session_id = task_session_id

    def get(self):
        """Query the scanner, sending only the identifiers that are set."""
        params = {}
        if self.task_id:
            params['task_id'] = self.task_id
        if self.task_session_id:
            params['task_session_id'] = self.task_session_id

        return ScannerRequest().get(self.request_url, params)


class VulVerificationScheduler(VulScheduler):
    """One-off scheduler that re-runs a single check to verify a vulnerability."""

    task_type = EXTaskType.vul_verification_task.value
    priority = 3

    def __init__(self, event, task_session):
        # target_id/user_id are pre-set so init_config hooks can read them
        # before the base __init__ assigns the real values.
        self.target_id = None
        self.user_id = None
        self.event = event
        self.task_session = task_session
        super(VulVerificationScheduler, self).__init__(
            target_id=task_session.get('target_id'),
            user_id=task_session.get('user_id'),
            target_url=event.get('detail', {}).get('affect') or event.get('target_url'),
            job_config_map=task_session.get('job_config_map', {}),
            flex_job_config_map=task_session.get('flex_job_config_map', {}),
        )

    def _init_job_config_map(self):
        """Limit kscan to the plugin(s) named by the triggering event."""
        detail = self.event.get('detail', {})
        plugin_name = detail.get('plugin_name', []) or detail.get('key', '')
        # Normalize to a list of plugin names; anything unusable becomes [].
        if plugin_name and isinstance(plugin_name, str):
            plugin_name = [plugin_name]
        elif not isinstance(plugin_name, list):
            plugin_name = []
        self.job_config_map.setdefault('kscan', {}).update(
            {
                'plugins': plugin_name,
                'crawler_depth': 1,
            }
        )
        return self.job_config_map

    def _init_addition(self):
        """Attach the originating event and ownership ids to the task."""
        addition = super(VulVerificationScheduler, self)._init_addition()
        addition.update(
            {
                'event_id': self.event.get('event_id'),
                'target_id': self.target_id,
                'user_id': self.user_id,
            }
        )

        return addition

    def _init_task_config(self):
        """Verification tasks only carry a priority; they are not scheduled."""
        return {'priority': self.priority}

    def _init_flex_job_config_map(self):
        """Replicate the flat flex config for every job."""
        return {job_name: self.flex_job_config_map for job_name in self.job_names}


# All task-type schedulers that can be looked up by their task_type value.
SCHEDULE_TUPLE = (
    AvailabilityScheduler,
    VulScheduler,
    SecurityEventScheduler,
    TargetInfoScheduler,
    RiskLinkScheduler,
    ChangeCheckScheduler,
)

# task_type -> scheduler class lookup table.
SCHEDULE_MAP = {schedule.task_type: schedule for schedule in SCHEDULE_TUPLE}
