import json
import requests
import arrow
import logging
import traceback

from typing import Tuple

from app.config.settings import SCANNER_SERVER
from app.errors import ScannerAPICreateTaskFail, ScannerAPIException, ScannerAPINotOk, ServerError
from app.libs import enums
from app.libs.json_helper import DateTimeEncoder
from app.libs.utility import trans_textbox_to_dict, random_one_area
from app.config.settings import AVAILABLE_JOB_QUEUE, HOSTVUL_JOB_QUEUE, API_USER_LIST
from app.libs.enums import (
    VulType,
    ScanJobName,
    TaskType,
    NetWorkType,
    NotificationTarget,
    ExTaskType,
    EX_TASK_TYPES
)

logger = logging.getLogger('app')


class ScannerRequest(object):
    """Thin HTTP client for the scanner service.

    Prefixes every url with SCANNER_SERVER, JSON-encodes request bodies
    with DateTimeEncoder and normalizes failures into the project's
    scanner exceptions.
    """

    def ajax(self, **kwargs):
        """Send one HTTP request to the scanner and return the decoded JSON.

        Raises:
            ScannerAPINotOk: scanner answered with a non-ok status, except
                the 404/code-5 case (task lost by the scheduler and needs
                recreation), which is not treated as an error.
            ScannerAPIException: any other failure (network error,
                non-JSON body, ...).
        """
        kwargs['url'] = SCANNER_SERVER + kwargs['url']
        info = {'label': 'scanner'}
        info.update(kwargs)
        logger.info(info)

        try:
            if data := kwargs.get('data'):
                kwargs['data'] = json.dumps(data, cls=DateTimeEncoder)
            resp = requests.request(**kwargs)
            # Decode once instead of calling resp.json() repeatedly.
            payload = resp.json()
            # The second condition means the task was lost by the scheduler
            # and must be recreated; that is not an error.
            if resp.ok or (resp.status_code == 404 and payload.get('code') == 5):
                logger.info({
                    'label': 'scanner',
                    'code': resp.status_code,
                    'resp': payload
                })
                return payload
            logger.error({
                'label': 'scanner',
                'code': resp.status_code,
                'resp': payload
            })
            raise ScannerAPINotOk()
        except ScannerAPINotOk:
            # Bug fix: previously this raise happened inside the generic
            # handler's try-scope and was re-raised as ScannerAPIException,
            # so callers never saw the specific error.
            raise
        except Exception as e:
            logger.exception(e)
            raise ScannerAPIException()

    def get(self, url: str, params: dict = None):
        """GET helper; *params* becomes the query string."""
        return self.ajax(method='GET', url=url, params=params)

    def post(self, url: str, body: dict):
        """POST helper; *body* is JSON-encoded by ajax()."""
        return self.ajax(method='POST', url=url, data=body)


class SchedulerServer:
    """Base class holding the scanner endpoint and a POST helper."""

    server = SCANNER_SERVER

    def _action(self, uri, body):
        # Delegate the actual HTTP round-trip to ScannerRequest.
        return ScannerRequest().post(uri, body)


class Scheduler(SchedulerServer):
    """Base scheduler for scanner tasks.

    Builds the /v1/tasks request body from a monitor "setting" object
    (namedtuple-like — its ``_fields`` attribute is consulted; confirm
    against callers) and drives create / modify / remove / rescan calls.
    Subclasses customise the class attributes and the ``_init_*`` hooks.
    """

    server = SCANNER_SERVER
    # Scan job names submitted with the task.
    job_names = []
    # Base priority; bumped by one for non-API users, capped at 3.
    priority = 1
    # Business task type recorded in the task's `addition` payload.
    task_type = ''

    def __init__(
            self,
            task_id='',
            target_url='',
            origin_ip='',
            filter_events=None,
            task_config=None,
            job_config_map=None,
            flex_job_config_map=None,
            target_id='',
            user_id='',
            addition=None,
    ):
        self.task_id = task_id
        self.target_url = target_url
        self.origin_ip = origin_ip
        self.filter_events = filter_events if filter_events else []
        self.task_config = task_config if task_config else {}
        self.job_config_map = job_config_map if job_config_map else {}
        self.flex_job_config_map = flex_job_config_map if flex_job_config_map else {}
        self.target_id = target_id
        self.user_id = user_id
        self.addition = addition if addition else {}
        # Copy the class-level list so instances never share a mutable list.
        self.job_names = list(self.job_names)

    def get_task_id_body(self):
        """Return a request body containing only the task id.

        Returns:
            dict: {"task_id": str}
        """
        return {
            "task_id": self.task_id
        }

    def get_task_id_with_force_body(self):
        """Return the task-id body with ``is_force`` set (forced rescan).

        Returns:
            dict: {"task_id": str, "is_force": True}
        """
        return {
            "task_id": self.task_id,
            "is_force": True
        }

    def get_all_body(self):
        """Return the full request body for task create/modify calls.

        Returns:
            dict: every field the scanner's /v1/tasks API expects.
        """
        return {
            "task_id": self.task_id,
            "target_url": self.target_url,
            "origin_ip": self.origin_ip,
            "job_names": self.job_names,
            "filter_events": self.filter_events,
            "task_config": self.task_config,
            "job_config_map": self.job_config_map,
            "flex_job_config_map": self.flex_job_config_map,
            "target_id": self.target_id,
            "user_id": self.user_id,
            "addition": self.addition,
        }

    def init_from_setting(self, setting, **kwargs):
        """Populate every task field from *setting* / *kwargs*; returns self.

        Note: each hook receives its own copy of kwargs (re-packed by
        ``**kwargs``), so pop() inside one hook does not affect the others.
        """
        self._init_task_id(setting, **kwargs)
        self._init_origin_ip(setting, **kwargs)
        self._init_target_url(setting, **kwargs)
        self._init_addition(setting, **kwargs)
        self._init_task_config(setting, **kwargs)
        self._init_filter_events(setting, **kwargs)
        self._init_job_config_map(setting, **kwargs)
        self._init_flex_job_config_map(setting, **kwargs)
        self._job_config_map_set_crawler_urls(**kwargs)
        self._job_config_map_set_prelogin(**kwargs)
        # Setting request_cookie on kscan for all tasks is currently disabled.
        # self._job_config_map_set_request_cookie(**kwargs)

        return self

    # Set the request_cookie parameter for every scan job (currently unused).
    def _job_config_map_set_request_cookie(self, **kwargs):
        scan_types = ["nscan", "kscan"]
        request_cookie = kwargs.pop("cookie", "")
        for scan_type in scan_types:
            if self.job_config_map.get(scan_type):
                self.job_config_map[scan_type].update({'request_cookie': request_cookie})

    def _job_config_map_set_crawler_urls(self, **kwargs):
        """Apply custom include/exclude crawl urls to the nscan/kscan configs."""
        scan_types = ["nscan", "kscan"]
        custom_monitor_urls = kwargs.pop("custom_monitor_urls", {})
        crawler_excluded_urls = custom_monitor_urls.get("excludeUrl", [])
        crawler_included_urls = custom_monitor_urls.get("includeUrl", [])
        for scan_type in scan_types:
            if self.job_config_map.get(scan_type):
                # Defacement (change_check) tasks do not take
                # crawler_excluded_urls / crawler_included_urls.
                if self.job_config_map[scan_type].get("plugins") == ["change_check"]:
                    continue
                self.job_config_map[scan_type].update({
                    'crawler_excluded_urls': crawler_excluded_urls,
                    # Included urls arrive as paths relative to the target url.
                    'crawler_included_urls': ["".join([self.target_url, item]) for item in crawler_included_urls]
                })

    def _job_config_map_set_prelogin(self, **kwargs):
        """Merge pre-login settings into every job config except site_info."""
        exclude_keys_tuple = ('site_login', 'login_method')

        prelogin = kwargs.get('prelogin', {})
        prelogin = prelogin.to_dict() if hasattr(prelogin, 'to_dict') else prelogin

        d0 = {k: v for k, v in prelogin.items() if k not in exclude_keys_tuple}

        if prelogin.get('site_login', False):
            # Dispatch to _get_prelogin_<method> for the per-method flag set.
            login_method = prelogin.get('login_method')
            func = getattr(self, f'_get_prelogin_{login_method}')
            d0.update(func())

        if (request_http_headers := d0.get('request_http_headers')) is not None:
            # Headers arrive as textarea text; convert them to a dict.
            d0['request_http_headers'] = trans_textbox_to_dict(request_http_headers)

        for k in self.job_config_map.keys():
            if k != ScanJobName.site_info.value:
                # k is an existing key, so the former setdefault was a no-op.
                self.job_config_map[k].update(d0)

    @staticmethod
    def _get_prelogin_form():
        """Flags for form-based login; clears cookie/header auth."""
        return {
            'is_login_form_enabled': True,
            'is_login_json_enabled': False,
            'request_cookie': None,
            'request_http_headers': None
        }

    @staticmethod
    def _get_prelogin_json():
        """Flags for JSON-based login; clears cookie/header auth."""
        return {
            'is_login_form_enabled': False,
            'is_login_json_enabled': True,
            'request_cookie': None,
            'request_http_headers': None
        }

    @staticmethod
    def _get_prelogin_cookie():
        """Flags for cookie-based login (keeps request_cookie from prelogin)."""
        return {
            'is_login_form_enabled': False,
            'is_login_json_enabled': False,
            'request_http_headers': None
        }

    @staticmethod
    def _get_prelogin_headers():
        """Flags for header-based login (keeps request_http_headers)."""
        return {
            'is_login_form_enabled': False,
            'is_login_json_enabled': False,
            'request_cookie': None,
        }

    def _init_task_id(self, setting, **kwargs):
        # Ex (auxiliary) task types always create a fresh task.
        if self.task_type in EX_TASK_TYPES:
            return
        if task_id := kwargs.pop('task_id', ''):
            self.task_id = task_id
        elif 'task_id' in setting._fields:
            # NOTE(review): the guard checks 'task_id' but reads `taskId` —
            # confirm the setting's field naming before changing this.
            self.task_id = setting.taskId

    def _init_origin_ip(self, setting, **kwargs):
        self.origin_ip = kwargs.pop('origin_ip', '')

    def _init_target_url(self, setting, **kwargs):
        if target_url := kwargs.pop('target_url', ''):
            self.target_url = target_url
        elif 'target' in setting._fields:
            self.target_url = setting.target

    def _init_filter_events(self, setting, **kwargs):
        """Hook: subclasses set self.filter_events here."""
        pass

    def _init_task_config(self, setting, **kwargs):
        immediate_exec = kwargs.pop('immediate_exec', False)
        if immediate_exec and not self.task_config.get("start_at"):
            self.task_config['start_at'] = arrow.utcnow().for_json()
        self.task_config['is_periodic'] = kwargs.pop('is_periodic', True)
        self.task_config['is_disabled'] = kwargs.pop('is_disabled', False)
        # API users keep the base priority; everyone else is demoted by one.
        # The scanner accepts at most priority 3.
        # (Bug fix: the value was previously assigned twice on one line.)
        base_priority = self.priority if self.user_id in API_USER_LIST else self.priority + 1
        self.task_config['priority'] = min(base_priority, 3)
        self.task_config['is_enable_crawling_task'] = True
        self.task_config['is_enable_concurrency_limit'] = kwargs.get('is_enable_concurrency_limit', False)
        self.task_config['task_concurrency'] = kwargs.get('task_concurrency', 0)
        # Only forward the proxy switch when the caller supplied it.
        if kwargs.get("enable_proxy_ip", None) is not None:
            self.task_config["is_enable_proxy"] = kwargs.pop("enable_proxy_ip", False)

    def _init_job_config_map(self, setting, **kwargs):
        """Hook: subclasses build self.job_config_map here."""
        pass

    def _init_flex_job_config_map(self, setting, **kwargs):
        """Hook: subclasses build self.flex_job_config_map here."""
        pass

    def _init_addition(self, setting, **kwargs):
        addition = kwargs.pop('addition', {})
        addition.update({
            'notificationTarget': NotificationTarget.SCANV.value,
            'taskType': self.task_type
        })
        self.addition = addition

    def create(self):
        """Create the task on the scanner and store the returned task id.

        Raises:
            ScannerAPICreateTaskFail: scanner returned no task_id.
        """
        resp = self._action('/v1/tasks:create', self.get_all_body())
        if task_id := resp.get('task_id'):
            self.task_id = task_id
        else:
            raise ScannerAPICreateTaskFail()

    def modify(self, insert=True):
        """Modify an existing task; a task_id must already be set.

        When the scanner reports code 5 (task lost from scheduling) and
        *insert* is true, the task is re-created instead.

        Raises:
            ScannerAPICreateTaskFail: no task_id came back.
        """
        resp = self._action('/v1/tasks:modify', self.get_all_body())
        if resp.get('code') == 5 and insert:
            resp = self._action('/v1/tasks:create', self.get_all_body())
        if task_id := resp.get('task_id'):
            self.task_id = task_id
        else:
            raise ScannerAPICreateTaskFail()

    def auto_schedule(self, insert=True):
        """Modify when a task_id exists, otherwise create."""
        if self.task_id:
            self.modify(insert=insert)
        else:
            self.create()

    def remove(self):
        """Remove the task; clears the stored task_id on success.

        Raises:
            ServerError: no task_id to remove.
        """
        if not self.task_id:
            raise ServerError(msg="task id not exist.")
        self._action('/v1/tasks:remove', self.get_task_id_body())
        self.task_id = ''

    def rescan(self, is_force=False):
        """Trigger an immediate rescan of the task.

        Raises:
            ServerError: no task_id to rescan.
        """
        if not self.task_id:
            raise ServerError(msg="task id not exist.")
        body = self.get_task_id_with_force_body() if is_force else self.get_task_id_body()
        self._action('/v1/tasks:rescan', body)


class HttpScheduler(Scheduler):
    """Availability monitoring via distributed HTTP probes (site_info job)."""

    job_names = [ScanJobName.site_info.value]
    probe_type = 'HTTP'
    priority = 3
    task_type = 'http'

    def _init_task_config(self, setting, **kwargs):
        # Monitor interval arrives in milliseconds.
        if 'interval' in setting._fields:
            self.task_config['seconds'] = setting.interval / 1000
        self.task_config['job_queue'] = AVAILABLE_JOB_QUEUE
        return super()._init_task_config(setting, **kwargs)

    def _init_job_config_map(self, setting, **kwargs):
        header = setting.collect.header or {}
        area, ipv4_area, ipv6_area, network_type = self._get_merged_area(setting)
        # An empty dict or one containing an empty key means "no header".
        if not header or "" in header.keys():
            header = None
        http_args = {
            'area': area,
            'method': setting.collect.method,
            'header': header,
            'payload': setting.collect.payload,
            'redirect': setting.collect.redirect,
            'is_disabled_redirect': False
        }
        if setting.collect.redirect == 0:
            http_args["is_disabled_redirect"] = True
        self.job_config_map = {
            "site_info": {
                "runtime_limit": 360,
                "modules": ["cloud"],
                "cloud": {
                    'min_nodes': len(ipv4_area),
                    'min_nodes_v6': len(ipv6_area),
                    "probe_types": [self.probe_type],
                    # NOTE(review): "sever_ip" looks like a typo for
                    # "server_ip" but may be what the scanner expects —
                    # confirm before renaming.
                    "sever_ip": setting.sourceIp,
                    'network_type': network_type,
                    "http_args": http_args
                }
            },
        }
        if include := setting.alert.include:
            # Bug fix: the 'args' key was never created above, so updating
            # ['cloud']['args'] raised KeyError whenever alert.include was
            # set; setdefault creates it on first use.
            self.job_config_map[ScanJobName.site_info.value]['cloud'].setdefault('args', {}).update({
                "inner_type": 'keyword',
                "include": include,
                'logic': 'and'
            })

    def _get_network_type(self, setting):
        """Network type derived from which address families have probe nodes."""
        v4_node_list = setting.collect.area
        v6_node_list = setting.collect.area_ipv6

        if v4_node_list and not v6_node_list:
            return NetWorkType.v4.value
        if not v4_node_list and v6_node_list:
            return NetWorkType.v6.value
        return NetWorkType.v4v6.value

    @staticmethod
    def _get_merged_area(setting) -> Tuple[list, list, list, int]:
        """Merge v4/v6 probe areas and compute the network_type value.

        Returns:
            Tuple: (merged unique areas, unique v4 areas, unique v6 areas,
            network_type). network_type falls back to v4 when neither
            family has configured nodes.
        """
        # Bug fix: the walrus form bound the raw value, so a None area
        # crashed set() below; normalize to lists up front.
        area_ipv4 = setting.collect.area or []
        area_ipv6 = setting.collect.area_ipv6 or []
        area = []
        network_type = 0
        if area_ipv4:
            area.extend(area_ipv4)
            network_type += NetWorkType.v4.value
        if area_ipv6:
            area.extend(area_ipv6)
            network_type += NetWorkType.v6.value
        if not network_type:
            network_type = NetWorkType.v4.value

        return list(set(area)), list(set(area_ipv4)), list(set(area_ipv6)), network_type


class PingScheduler(HttpScheduler):
    """Availability monitoring via distributed Ping probes."""

    probe_type = 'Ping'
    task_type = 'ping'

    def _init_job_config_map(self, setting, **kwargs):
        merged_area, v4_nodes, v6_nodes, net_type = self._get_merged_area(setting)
        cloud_cfg = {
            'min_nodes': len(v4_nodes),
            'min_nodes_v6': len(v6_nodes),
            "probe_types": [self.probe_type],
            "sever_ip": setting.sourceIp,
            'network_type': net_type,
            "ping_args": {'area': merged_area},
        }
        self.job_config_map = {
            "site_info": {
                "runtime_limit": 360,
                "modules": ["cloud"],
                "cloud": cloud_cfg,
            }
        }


class SecurityEventScheduler(Scheduler):
    """Web security-event scan (hidden/risk links, mining scripts, malware,
    broken links) plus a diagnostic site_info job."""

    job_names = [ScanJobName.site_info.value, ScanJobName.nscan.value]
    task_type = 'securityEvent'

    def _web_security_site_info(self):
        """Build the site_info job config shared by the web-security tasks."""
        probe_area = random_one_area(self.target_url, self.origin_ip)
        return {
            "modules": ["wafdetector", "http_get_sequence", "detail_ping", "cloud", "tracepath"],
            "wafdetector": {"mode": "normal"},
            "cloud": {
                "probe_types": ["http", "ping"],
                "http_args": {"area": [probe_area]},
                "ping_args": {"area": [probe_area]},
            },
            "detail_ping": {
                "count": 5,
                "is_with_traffics": True
            },
            "tracepath": {
                "count": 20,
                "is_with_traffics": True
            },
            "http_get_sequence": {
                "count": 5,
                "interval": 15,
                "max_content_size": 5000,
                "is_with_traffics": True,
                "network_type": 1
            },
        }

    def _init_task_config(self, setting, **kwargs):
        # Monitor interval arrives in milliseconds.
        if 'interval' in setting._fields:
            self.task_config['seconds'] = setting.interval / 1000
        task_settings = self.addition.get("taskSettings", {})
        if start_at := task_settings.get("startAt", ""):
            self.task_config['start_at'] = start_at
        else:
            period_start = task_settings.get("periodStartTime", "")
            period_end = task_settings.get("periodEndTime", "")
            if period_start and period_end:
                self.task_config["scan_period_start_time"] = period_start
                self.task_config["scan_period_end_time"] = period_end
        return super()._init_task_config(setting, **kwargs)

    def _init_job_config_map(self, setting, **kwargs):
        # Front-end plugin name -> scanner nscan plugin name (exact matches).
        exact = {
            "black_links": "black_links",
            "cryjack": "cryptojacking",
            "malscan": "malscan",
            "broken_links": "broken_link",
        }
        nscan_plugins = set()
        for plugin in setting.collect.plugins:
            if plugin in exact:
                nscan_plugins.add(exact[plugin])
            elif "foreign_links" in plugin:
                # Any foreign-link variant maps to the risk_link plugin.
                nscan_plugins.add("risk_link")

        self.job_config_map = {
            "site_info": self._web_security_site_info()
        }

        if not nscan_plugins:
            return
        nscan_cfg = {
            "crawler_depth": setting.collect.depth,
            "crawler_max_page": setting.collect.maxPage,
            "is_with_urldb": True,
            'mode': 2,
            'plugins': list(nscan_plugins),
            'is_without_crawl': False
        }
        if "cryptojacking" in nscan_plugins:
            # Extra per-plugin configuration for mining-script detection.
            nscan_cfg["plugin_config"] = {"cryptojacking": {"level": 2}}
        self.job_config_map["nscan"] = nscan_cfg


class ContentScheduler(SecurityEventScheduler):
    """Content-compliance scan (keywords, privacy disclosure, statistics)."""

    job_names = [ScanJobName.nscan.value, ScanJobName.site_info.value]
    task_type = 'content'
    priority = 1

    def _init_job_config_map(self, setting, **kwargs):
        collect = setting.collect
        # Copy so the statistics plugin is not appended to the setting's list.
        scan_plugins = collect.plugins.copy()
        scan_plugins.append('statistics')

        # Extra per-plugin configuration.
        plugin_config = {}
        if collect.privacyDisclosureTypes:
            plugin_config["privacy_disclosure"] = {
                "types": collect.privacyDisclosureTypes
            }

        nscan_cfg = {
            "crawler_depth": collect.depth,
            "crawler_max_page": collect.maxPage,
            "is_with_urldb": True,
            'plugin_config': plugin_config,
            "crawler_scope": 0,
            "plugins": scan_plugins,
            'mode': 2,
            "keyworddb_options": {
                "is_disable_system_keywords": False,
                "is_disable_custom_keywords": False,
                "custom_id": collect.customId,
                "system_keyword_types": collect.systemKeywordTypes,
                "custom_keyword_types": collect.customKeywordTypes
            },
        }
        self.job_config_map = {
            "nscan": nscan_cfg,
            "site_info": self._web_security_site_info()
        }

        if include_url := collect.includeUrl:
            nscan_cfg['include_url'] = include_url
            nscan_cfg['is_include_url_only'] = True


class VulScheduler(SecurityEventScheduler):
    """Web vulnerability scan (kscan job + the web-security site_info job)."""

    job_names = [ScanJobName.kscan.value, ScanJobName.site_info.value]
    task_type = 'vul'

    def _init_filter_events(self, setting, **kwargs):
        self.filter_events = ['vuln', 'statistics', 'site_info']

    def _init_job_config_map(self, setting, **kwargs):
        self.job_config_map = {
            "site_info": self._web_security_site_info(),
            "kscan": {}
        }
        # Site portrait passes in the exact urls to scan — no crawling.
        if setting.enableSitePortraitTriggerMonitor:
            self.job_config_map["kscan"].update({
                "include_url": setting.collect.includeUrl,
                "is_without_crawl": True
            })
        # Full scan
        elif setting.collect.type == VulType.full.value:
            self.job_config_map["kscan"].update({
                "crawler_max_page": setting.collect.maxPage,
                "crawler_depth": setting.collect.depth,
                "is_with_urldb": True,
            })
        # Incremental scan
        elif setting.collect.type == VulType.increment.value:
            self.job_config_map["kscan"].update({
                "crawler_max_page": 1000,
                "crawler_depth": 3,
                "is_with_urldb": True,
                "is_write_incremental_cache": True,
                "is_filter_incremental_cache": True,
                "incremental_cache_id": kwargs.pop('job_id', '')
            })
        if speed := setting.collect.speed:
            self.job_config_map["kscan"].update({"scan_profile": enums.ScanProfile[speed].value})

        plugins = []
        # Custom plugin list
        if setting.vulType == 'plugIn':
            # Bug fix: copy before remove() so the caller's
            # setting.collect.vul list is not mutated in place.
            plugins = list(setting.collect.vul or [])
            if 'ssl_check' in plugins:
                plugins.remove('ssl_check')
        # Custom vulnerability template types
        else:
            system_profiles = []
            custom_profiles = []
            all_profiles = setting.collect.vul or []
            for profile in all_profiles:
                # Numeric ids are built-in system profiles; the rest custom.
                if profile.isdigit():
                    system_profiles.append(int(profile))
                else:
                    custom_profiles.append(profile)
            self.job_config_map["kscan"].update({"scan_profiles": system_profiles})
            self.job_config_map["kscan"].update({"scan_custom_profiles": custom_profiles})
        # ssl_check is always disabled here — presumably the dedicated SSL
        # task covers it; confirm before changing.
        self.job_config_map[ScanJobName.kscan.value].update({'plugins': plugins, "disabled_plugins": ['ssl_check']})


class SpecialVulScheduler(VulScheduler):
    """Vulnerability scan with an explicitly supplied plugin list."""

    def _init_job_config_map(self, setting, **kwargs):
        kscan_cfg = {
            "crawler_max_page": 1000,
            "crawler_depth": 3,
            "is_with_urldb": True,
            'plugins': setting.collect.vul
        }
        self.job_config_map = {"kscan": kscan_cfg}


class SslScheduler(SecurityEventScheduler):
    """SSL check via nscan's ssl_check plugin on port 443."""

    job_names = [ScanJobName.nscan.value, ScanJobName.site_info.value]
    task_type = 'ssl'

    def _init_job_config_map(self, setting, **kwargs):
        nscan_cfg = {
            "plugins": ["ssl_check", ],
            "plugin_config": {"ssl_check": {"check_port": 443}},
        }
        self.job_config_map = {
            "site_info": self._web_security_site_info(),
            "nscan": nscan_cfg,
        }


class ChangeCheckScheduler(SecurityEventScheduler):
    """
    Dispatch the periodic web-defacement (change check) monitoring task.
    """
    job_names = [ScanJobName.nscan.value, ScanJobName.site_info.value]
    task_type = TaskType.change_check.value

    def _init_job_config_map(self, setting, **kwargs):
        collect = setting.collect

        # Which kinds of change to watch for, and baseline behaviour.
        change_check = {
            "is_automatic_update_baseline": collect.enableBaseChange,
            "is_check_text": "text" in collect.changeType,
            "is_check_structure": "structure" in collect.changeType,
            "is_check_resource": "resource" in collect.changeType
        }
        if collect.enableBaseChange:
            change_check["automatic_update_baseline_interval"] = collect.baseChangeCount
        if collect.keywords:
            change_check["words"] = collect.keywords
        if collect.resources:
            change_check["focus_resource_urls"] = collect.resources

        nscan_config = {
            "plugins": ["change_check"],
            "mode": 2,
            "crawler_max_page": collect.maxPage,
            "crawler_depth": collect.depth,
            "plugin_config": {"change_check": change_check}
        }
        # Partial monitoring: only the listed urls, no crawling.
        if collect.type == "part":
            if include_url := collect.includeUrl:
                nscan_config['include_url'] = include_url
                nscan_config['crawler_included_urls'] = []
                nscan_config['is_without_crawl'] = True

        self.job_config_map = {
            "site_info": self._web_security_site_info(),
        }
        self.job_config_map[ScanJobName.nscan.value] = nscan_config


class UpdateChangeCheckBaseScheduler(SecurityEventScheduler):
    """
    Manually refresh (or delete) the change-check baseline sample.
    """
    job_names = [ScanJobName.nscan.value, ]
    priority = 3
    task_type = ExTaskType.change_check_config.value

    def _init_job_config_map(self, setting, **kwargs):
        collect = setting.collect
        change_check_cfg = {
            "task_id": setting.taskId,
            "is_update_baseline": kwargs.get("is_update_baseline", False),
            "is_delete_baseline": kwargs.get("is_delete_baseline", False),
        }
        nscan_config = {
            "plugins": ["change_check"],
            "mode": 2,
            "crawler_max_page": collect.maxPage,
            "crawler_depth": collect.depth,
            "plugin_config": {"change_check": change_check_cfg}
        }
        # Partial monitoring: only the listed urls, no crawling.
        include_url = collect.includeUrl if collect.type == "part" else None
        if include_url:
            nscan_config['include_url'] = include_url
            nscan_config['crawler_included_urls'] = []
            nscan_config['is_without_crawl'] = True
        self.job_config_map = {"nscan": nscan_config}


class HostVulScheduler(SecurityEventScheduler):
    """Host vulnerability scan dispatched to the dedicated host-vul queue."""

    # NOTE(review): job_names says pinpin while the config key below is
    # "network_scan" — confirm the scanner accepts this pairing.
    job_names = [ScanJobName.pinpin.value]
    task_type = 'hostVul'

    def _init_task_config(self, setting, **kwargs):
        # Host scans run on their own job queue.
        self.task_config['job_queue'] = HOSTVUL_JOB_QUEUE
        return super()._init_task_config(setting, **kwargs)

    def _init_job_config_map(self, setting, **kwargs):
        network_scan_cfg = {
            "timeout": setting.timeout,
            "network_scan_profile": setting.network_scan_profile,
            "is_detect_os_enabled": setting.is_detect_os_enabled,
            "is_tcp_enabled": setting.is_tcp_enabled,
            "is_udp_enabled": setting.is_udp_enabled,
        }
        self.job_config_map = {"network_scan": network_scan_cfg}


class IPv6Scheduler(HttpScheduler):
    """IPv6 support / reachability monitoring via the site_info job."""

    job_names = [ScanJobName.site_info.value]
    task_type = TaskType.ipv6.value

    def _init_job_config_map(self, settings, **kwargs):
        modules = [
            'dns', 'http_get_full_time', 'tcp_time', 'http_content_check',
            'http_structure_check', 'ipv6_dns_check', 'http_get_sequence',
            'ipv6_website_support_check',
        ]
        site_info_cfg = {
            'modules': modules,
            # Resolve both address families.
            'dns': {"types": ["A", "AAAA"]},
            'http_get_full_time': {"network_type": 3},
            'tcp_time': {'network_type': 3},
            'http_get_sequence': {'network_type': 3},
            'runtime_limit': 86400,
            'ipv6_website_support_check': {'max_urls': 5000},
        }
        self.job_config_map = {'site_info': site_info_cfg}


class AssetScheduler(SslScheduler):
    """Asset discovery (ports, subdomains, ICP, WAF, basic info)."""

    job_names = [ScanJobName.site_info.value, ]
    task_type = 'asset'

    def _init_job_config_map(self, setting, **kwargs):
        site_info_cfg = {
            "modules": ['alexa', 'basic_info', 'http_method', 'http_get', 'icp', 'port', 'subdomain', 'wafdetector'],
            "subdomain": {"is_enable_alive": False},
            "port": {"scan_all_ports": False},
            "wafdetector": {"mode": "normal"},
        }
        self.job_config_map = {"site_info": site_info_cfg}


class BatchRemoveTask(SchedulerServer):
    """Remove a batch of scanner tasks by id."""

    # Body key each id is wrapped under; subclasses override.
    key = 'task_id'

    def __init__(self, ids=None) -> None:
        """Args:
            ids: iterable of task ids; duplicates are collapsed.
        """
        self.ids = set(ids) if ids else set()

    def get_all_body(self):
        """Build the batch request body, skipping falsy ids.

        Returns:
            dict: {"task_id_groups": [{self.key: id}, ...]}
        """
        return {
            "task_id_groups": [{self.key: _id} for _id in self.ids if _id]
        }

    def batch_remove(self):
        """Call the batch-remove endpoint; no-op when there are no ids."""
        if not self.ids:
            return
        self._action('/v1/tasks:batchRemove', self.get_all_body())


class BatchRemoveTaskSession(BatchRemoveTask):
    """Batch-remove variant keyed by task_session_id instead of task_id."""
    key = 'task_session_id'


class BatchStopTask(BatchRemoveTask):
    """Stop (rather than remove) a batch of scanner tasks by id."""

    key = 'task_id'

    def batch_stop(self):
        """POST the accumulated ids to the batch-stop endpoint (no-op if empty)."""
        if self.ids:
            self._action('/v1/tasks:batchStop', self.get_all_body())


class BatchStopTaskSession(BatchStopTask):
    """Batch-stop variant keyed by task_session_id instead of task_id."""
    key = 'task_session_id'


class GetAllProxyIPs(object):
    """Fetch the scanner's proxy IP pool."""

    @staticmethod
    def get():
        """Return {"ips": [...], "count": int}; empty defaults on failure.

        Best-effort: any error is logged and swallowed so callers always
        receive a well-formed dict.
        """
        data = {"ips": [], "count": 0}
        try:
            resp = ScannerRequest().get(url="/v1/proxy/ips:list")
            data.update({"ips": resp.get("ips") or [], "count": resp.get("count") or 0})
        except Exception:
            # Bug fix: format_exc()'s return value was previously discarded,
            # silently losing the traceback — log it instead.
            logger.error(traceback.format_exc())
        return data
