# -*- coding: utf-8 -*-
# @Organization  : asiainfo
# @Author        : 周伟东
# @Time          : 2021/1/7 16:39
# @Function      : 资产探测模块，资产探测模块,使用一个接口
import json
import logging
from concurrent.futures.thread import ThreadPoolExecutor
from app.constants import TASK_THREAD_NUMS, IP_STATE_DOWN, IP_STATE_UP, ASSET_JOB_COMPLETE_EXPLORE_IPS_WEIGHT, \
    ASSET_JOB_COMPLETE_EXPLORE_SERVICE_WEIGHT, ASSET_JOB_COMPLETE_WRITE_RESULT_TO_TABLE_WEIGHT, \
    ASSET_JOB_COMPLETE_WRITE_RESULT_TO_KAFKA_WEIGHT, ASSET_JOB_COMPLETE_WRITE_JOB_TO_TABLE_WEIGHT
from app.core.asset_so_caller import assetSoCaller
from app.core.kafka_producer import kafkaProducer
from app.dao.job_dao import jobDao
from app.models import CyberspaceAssetResult, CyberspaceAssetService
from app.utils.ip_helper import ips_split
from app.utils.port_helper import ports_split


class JobAssetService:
    """
    Asset-exploration job service (used as a module-level singleton).
    """

    def __init__(self):
        # Shared worker pool on which all ip/service probe tasks are scheduled.
        self.threadPool = ThreadPoolExecutor(thread_name_prefix="job_",
                                             max_workers=TASK_THREAD_NUMS)

    def process(self, job):
        """
        Run one asset-exploration job end to end.

        Steps: probe host liveness / OS info, probe services on the live
        hosts, bulk-insert the results, publish them to Kafka, then mark the
        job successful.  Progress is persisted after every unit of work, and
        any failure marks the job as failed.

        param: job  job record; ``job.job_params`` is a JSON string holding
                    the ``ips`` and ``ports`` expressions to scan
        return: None
        """
        data = json.loads(job.job_params)
        ip_list = ips_split(data['ips'])
        port_list = ports_split(data['ports'])

        job_id = job.job_id

        # Mutable progress-accounting dict shared by every phase of this job.
        task_complete_info = {"task_nums": 0,
                              "complete_task_nums": 0,
                              "complete_percent": 0,
                              "complete_weight": 0,
                              "expect_achieve_percent": 0
                              }

        try:
            # Mark the job as running before any work starts.
            jobDao.update_job_status_start(job)
            logging.info("开始检测任务:{job_id}，任务参数为：{job_param}".format(job_id=job_id, job_param=job.job_params))

            ex_result_dict = {}
            # step1: liveness check; collect OS info for hosts that are up.
            logging.info("开始检测ip是否存活，存活的检测操作系统信息")
            self.submit_ips_explore_task(ip_list, task_complete_info, job, ex_result_dict)

            # step2: probe port/service information on the surviving hosts.
            logging.info("开始探测服务")
            self.submit_service_explore_task(ip_list, port_list, task_complete_info, job, ex_result_dict)

            # step3: collect rows for the DB and the Kafka payload in one pass.
            result_list = []
            kafka_message_list = []
            for result in ex_result_dict.values():
                result_list.append(result)
                kafka_message_list.append(self.result2dict(result))

            # Bulk-insert every result into the result table.
            self.init_task_complete_info(task_complete_info, len(ex_result_dict),
                                         ASSET_JOB_COMPLETE_WRITE_RESULT_TO_TABLE_WEIGHT)
            jobDao.insert_bulk(result_list)
            logging.info("结果已经写入了结果表中")
            self.update_complete_percent(job, task_complete_info)

            # step4: publish the same results to Kafka as one JSON document.
            self.init_task_complete_info(task_complete_info, len(ex_result_dict),
                                         ASSET_JOB_COMPLETE_WRITE_RESULT_TO_KAFKA_WEIGHT)
            kafkaProducer.send_result_message(json.dumps(kafka_message_list))
            kafkaProducer.flush()
            logging.info("结果已经写入了kafka中")
            self.update_complete_percent(job, task_complete_info)

            # step5: mark the job successful and drive the percent to its max.
            self.init_task_complete_info(task_complete_info, 1,
                                         ASSET_JOB_COMPLETE_WRITE_JOB_TO_TABLE_WEIGHT)
            jobDao.update_job_status_success(job)
            self.update_complete_percent(job, task_complete_info)
            logging.info("任务解析成功")
        except Exception:
            # logging.exception records the full traceback; the previous
            # ``logging.error(msg, e)`` treated ``e`` as an unused %-format
            # argument and lost it.
            logging.exception("任务提交失败")
            # Mark the job as failed.
            jobDao.update_job_status_failed(job.job_id)

    def result2dict(self, result):
        """
        Flatten a result record into a plain dict ready for JSON output.

        ``os_info`` (stored as a JSON string on the record) is parsed back
        into a dict, and every attached service row is converted through
        ``service2dict``.
        """
        os_details = json.loads(result.os_info)
        services = [self.service2dict(item) for item in result.service2result]
        return {
            'ip': result.ip,
            'hostname': result.hostname,
            'mac': result.mac,
            'mac_vender': result.mac_vender,
            'os_version': result.os_version,
            'device_type': result.device_type,
            'port_nums': result.port_nums,
            'os_info': os_details,
            'server_info': services,
            'job_id': result.job_id,
            'job_name': result.job_name
        }

    def service2dict(self, service):
        """
        Map a service record onto a plain dict of its exported fields.
        """
        # Tuple order mirrors the original literal, preserving key order.
        exported = ('port', 'protocol', 'state', 'service', 'version')
        return {name: getattr(service, name) for name in exported}

    def compare_result(self, result, history):
        """
        Compare the freshly scanned result with the stored one.

        param: result  current scan result (``port_nums`` int,
                       ``server_info`` JSON list string)
        param: history result previously stored in the DB, or None/falsy
        return: True when there is no history or the results are equivalent,
                False when they differ
        """
        if not history:
            # Nothing stored yet -> treat as unchanged.
            return True

        # Different open-port counts means the host changed.
        # (The original performed this exact comparison twice; the duplicate
        # was dead code and has been removed.)
        if result.port_nums != history.port_nums:
            return False

        if result.port_nums != 0:
            # Identity of one service entry.  Note: service and version are
            # intentionally concatenated with no separator between them,
            # matching the originally stored format.
            def _server_key(server):
                return (server["port"] + "|" + server["state"] + "|"
                        + server["service"] + server["version"])

            current_keys = {_server_key(server)
                            for server in json.loads(result.server_info)}
            for old_server in json.loads(history.server_info):
                if _server_key(old_server) not in current_keys:
                    return False
        return True

    @staticmethod
    def wrap_result(result):
        """
        desc: 解析主机信息 os_info、alive_info，并且丰富result字段
        param: result 结果对象
        return:
        """
        if result.os_info:
            os_info = json.loads(result.os_info)
            if "hostName" in os_info.keys():
                result.hostname = os_info["hostName"]
            else:
                result.hostname = ""

            if "osVersion" in os_info.keys():
                result.os_version = os_info["osVersion"]
            else:
                result.os_version = ""

            if "deviceType" in os_info.keys():
                result.device_type = os_info["deviceType"]
            else:
                result.device_type = ""

            if "mac" in os_info.keys():
                result.mac = os_info["mac"]
            else:
                result.mac = ""

            if "macVendor" in os_info.keys():
                result.mac_vender = os_info["macVendor"]
            else:
                result.mac_vender = ""

    def init_task_complete_info(self, task_complete_info, task_nums, complete_weight):
        """
        Reset the shared progress dict for a new phase of the job.

        param: task_complete_info shared progress-accounting dict (mutated)
        param: task_nums number of sub-tasks in the upcoming phase
        param: complete_weight percentage weight this phase contributes
        """
        achieved = task_complete_info.get("complete_percent", 0)
        task_complete_info.update(
            complete_task_nums=0,
            task_nums=task_nums,
            complete_weight=complete_weight,
            expect_achieve_percent=achieved + complete_weight,
        )

    def update_complete_percent(self, job, task_complete_info):
        """
        Record one finished sub-task and persist the new completion percent.

        param: job job record whose ``job_complete_percent`` is updated
        param: task_complete_info shared progress-accounting dict (mutated)
        """
        info = task_complete_info
        info["complete_task_nums"] = info.get("complete_task_nums", 0) + 1

        if info["complete_task_nums"] == info.get("task_nums"):
            # Last sub-task of the phase: snap to the phase target so the
            # incremental float additions below cannot drift past it.
            info["complete_percent"] = info["expect_achieve_percent"]
        else:
            # Each sub-task advances the percent by weight/task_nums.
            info["complete_percent"] += 1 / info.get("task_nums") * info.get("complete_weight")

        job.job_complete_percent = info["complete_percent"]
        jobDao.update_job_complete_percent(job)

    def submit_ips_explore_task(self, ip_list, task_complete_info, job, ex_result_dict):
        """
        Probe host liveness and, for live hosts, operating-system info.

        param: ip_list ip list to probe; NOTE: ips found to be down are
               removed from this list in place, so the caller's subsequent
               service scan only covers live hosts
        param: task_complete_info shared progress-accounting dict
        param: job job record; ``job.job_result`` is overwritten with a
               scan summary
        param: ex_result_dict out-param: ip -> result object for every
               probed ip (both up and down)
        return: None
        """
        task_list = []
        # Fan the liveness/OS probe tasks out onto the shared thread pool.
        for ip in ip_list:
            task = self.threadPool.submit(assetSoCaller.get_alive_info, ip)
            task_list.append(task)

        up_ip_count = 0
        down_ip_count = 0

        # Initialise progress for this phase: one sub-task per ip.
        self.init_task_complete_info(task_complete_info, len(ip_list),
                                     ASSET_JOB_COMPLETE_EXPLORE_IPS_WEIGHT)

        # Collect results in submission order; t.result() blocks until done.
        for t in task_list:
            flag, ip, os_info = t.result()
            if not flag:
                result = CyberspaceAssetResult(ip=ip, state=IP_STATE_DOWN, job_id=job.job_id, job_name=job.job_name, os_info="{}")
                ex_result_dict[ip] = result
                # Safe removal: we iterate task_list here, not ip_list.
                # Shrinking ip_list limits the later service scan to live hosts.
                ip_list.remove(ip)
                down_ip_count += 1
            else:
                result = CyberspaceAssetResult(ip=ip, state=IP_STATE_UP, job_id=job.job_id, job_name=job.job_name, os_info=os_info)
                # Parse os_info into hostname/os/device/mac fields.
                self.wrap_result(result)
                ex_result_dict[ip] = result
                up_ip_count += 1
            # Persist progress after every probed ip.
            self.update_complete_percent(job, task_complete_info)

        job.job_result = "一共扫描%d 个ip, 其中存活的 %d 个,非存活的%d 个" % (down_ip_count + up_ip_count, up_ip_count, down_ip_count)

    def submit_service_explore_task(self, ip_list, port_list, task_complete_info, job, ex_result_list):
        """
        Probe every port on every (live) host and attach the discovered
        services to the corresponding result objects.

        param: ip_list ip list to probe (already filtered to live hosts by
               submit_ips_explore_task)
        param: port_list port list probed on every ip
        param: task_complete_info shared progress-accounting dict
        param: job job record; a live-service count is appended to
               ``job.job_result``
        param: ex_result_list dict of ip -> result object (despite the name
               it is a dict, keyed by ip)
        return: None
        """
        task_list = []
        # One probe task per (ip, port) pair on the shared thread pool.
        for ip in ip_list:
            for port in port_list:
                task = self.threadPool.submit(assetSoCaller.get_server_info, ip, port)
                task_list.append(task)

        # ip -> list of discovered service rows
        ip_servers = {}
        # ip -> number of services found on that ip
        ip_server_count = {}
        # total number of live services over all ips
        total_up_server_count = 0

        # Initialise progress for this phase: one sub-task per (ip, port).
        self.init_task_complete_info(task_complete_info, len(ip_list) * len(port_list),
                                     ASSET_JOB_COMPLETE_EXPLORE_SERVICE_WEIGHT)

        for t in task_list:
            flag, ip, port, server_info_str = t.result()
            # NOTE(review): parsed even when flag is False — assumes
            # get_server_info always returns valid JSON (e.g. "{}") on
            # failure; confirm against assetSoCaller.
            server_json = json.loads(server_info_str)
            if flag:
                total_up_server_count += 1
                ip_server_count[ip] = ip_server_count.get(ip, 0) + 1
                if ip not in ip_servers.keys():
                    ip_servers[ip] = [CyberspaceAssetService(job_id=job.job_id, ip=ip,
                                                             port=server_json["port"],
                                                             state=server_json['state'],
                                                             protocol=server_json['protocol'],
                                                             service=server_json['service'],
                                                             version=server_json['version'])]
                else:
                    ip_servers[ip].append(CyberspaceAssetService(job_id=job.job_id, ip=ip,
                                                                 port=server_json["port"],
                                                                 state=server_json['state'],
                                                                 protocol=server_json['protocol'],
                                                                 service=server_json['service'],
                                                                 version=server_json['version']))
            # Persist progress after every probed (ip, port).
            self.update_complete_percent(job, task_complete_info)

        for (ip, result) in ex_result_list.items():
            if ip in ip_servers.keys():
                result.service2result = ip_servers[ip]
                result.port_nums = ip_server_count[ip]
            else:
                # No service found on this ip.
                # NOTE(review): this branch sets ``server_info`` while the
                # branch above sets ``service2result``; result2dict reads
                # ``service2result`` — verify the model defaults it to [].
                result.server_info = []
                result.port_nums = 0
        job.job_result += ", 存活的服务共 %d 个" % total_up_server_count
        # Reached only on full success: snap the percent to this phase's max.
        task_complete_info[
            "complete_percent"] = ASSET_JOB_COMPLETE_EXPLORE_SERVICE_WEIGHT + ASSET_JOB_COMPLETE_EXPLORE_IPS_WEIGHT


# Module-level singleton instance shared by importers of this module.
jobAssetService = JobAssetService()
