import base64
import io
import os.path
import shutil
import zipfile
from datetime import datetime, timedelta
from io import BytesIO
from collections import defaultdict

import arrow

from urllib.parse import urlparse

import pytz
from bson import ObjectId, code
from docx.shared import Mm
from docxtpl import DocxTemplate, InlineImage
import plotly.graph_objects as go
import jinja2
from app.db.models.vulndb import KbVuln
from app.db.models.tasks import Task
from app.db.models.assets import Asset
from app.db.models.jobs import Job
from app.db.models.celery_task import CeleryTask
from app.db.models.wass import ScanEvents
from app.db.models.asset_tasks import get_monitor_urls
from app.config.settings import DATETIME_FMT, REPORT_FILE_DIR, DATETIME_FMT_S, SPIDER_URL_TIME
from app.libs.enums import SecurityEventPlugin
from app.libs.display import FOREIGN_LINKS_TYPE_DISPLAY_NAME, TASK_TARGET_STATUS_MAP
from app.libs.score import Score
from app.libs.utility import list_protect_get
from app.libs.public_funcs import get_ipv6_ava_results
from app.libs.ipv6_check import parse_ipv6_event

# Content-compliance keyword categories: category id -> display name.
keyword_category_mapping = {
    1: '政治',
    2: '涉黄',
    3: '反动',
    4: '涉黑',
    5: '暴恐',
    6: '博彩',
    7: '民生',
    8: '其他',
    9: 'ChatGPT',
}

# Privacy-leak categories: category id -> display name.
privacy_category_mapping = {
    1: '手机号',
    2: '银行卡号',
    3: '邮箱号',
    4: '身份证号'
}


class ExportDocxHandler:
    """Render collected data into a Word document through a docxtpl template.

    Keyword data passed to the constructor is dispatched to matching
    ``context_*`` handler methods; their results (plus any generated chart
    images) are merged into a single jinja2 context used to render the
    template. Conventions:

    1. Handler methods must be named ``context_<key>``; a subclass only needs
       to define ``context_xx`` and the matching data must be passed under the
       keyword ``xx``. E.g. if ``context_replace_trend_data`` exists it is
       called automatically with the data passed as ``replace_trend_data`` and
       its return value is stored in the context under ``replace_trend_data``.
    2. Custom jinja2 filters must be named ``*_filter`` to be auto-registered;
       otherwise register them manually.
    3. This base class provides helpers that draw line, bar and pie charts as
       in-memory image files.
    4. ``export()`` returns the rendered Word document as a BytesIO object,
       ready to be served for download.
    """
    def __init__(self, template, base_context=None, multiple=False, **kwargs):
        """
        Args:
            template: path to the docx template file
            base_context: base mapping used when rendering the document
            multiple: when True, skip the automatic context build (the caller
                drives context creation itself)
            **kwargs: every keyword is processed by the matching
                ``context_<key>`` method and the result is merged into the
                final render context
        """
        self.tpl = DocxTemplate(template)
        self.context = base_context if base_context else {}
        self.jinja_env = jinja2.Environment()
        self._register_filter(self.jinja_env)

        if not multiple:
            for k, v in kwargs.items():
                setattr(self, k, v)
            self.make_context()
            self.after_make_context()

    # Line charts drawn with plotly; preferred where the environment allows it.
    def get_linecharts_image(self, x, y1, y2=None, y3=None, y1name="", y2name="", y3name="",
                             title="", xaxis_title="", yaxis_title=""):
        """Build a one- to three-series line chart for monitoring data.

        Args:
            x: x-axis values (a list of dates here)
            y1: first data series (required)
            y2: optional second data series
            y3: optional third data series
            y1name: display name of the first series
            y2name: display name of the second series
            y3name: display name of the third series
            title: chart title
            xaxis_title: x-axis title
            yaxis_title: y-axis title

        Returns:
            BytesIO holding the chart as a JPEG image.
        """
        if not x or not y1:
            # No data: draw a single dummy point so the template still renders.
            fig = go.Figure(data=go.Scatter(x=[0], y=[0]))
        else:
            fig = go.Figure()
            fig.add_trace(go.Scatter(x=x, y=y1, text=y1, textposition="top center",
                          mode='lines+markers+text', name=y1name))
            if y2:
                fig.add_trace(go.Scatter(x=x, y=y2, text=y2, textposition="top center",
                              mode='lines+markers+text', name=y2name))
            if y3:
                fig.add_trace(go.Scatter(x=x, y=y3, text=y3, textposition="top center",
                                         mode='lines+markers+text', name=y3name))
        fig.update_layout(title=title, xaxis_title=xaxis_title, yaxis_title=yaxis_title)
        image_io = BytesIO()
        fig.write_image(image_io, format="jpeg", scale=2)
        return image_io

    def get_barcharts_image(self, x: list, y: list, name=None, title="", xaxis_title="", yaxis_title="", multi=False):
        """Build a (possibly grouped) vertical bar chart.

        Args:
            x: x-axis categories
            y: y-axis values; a list of series when ``multi`` is True
            name: series name, or a list of series names when ``multi`` is True
            title: chart title
            xaxis_title: x-axis title
            yaxis_title: y-axis title
            multi: whether ``y`` holds several series

        Returns:
            BytesIO holding the chart as a JPEG image.
        """
        color_list = ['rgba(91,170,249,100)', 'rgba(97,221,170,100)', 'rgba(101,120,155,100)']
        if not x or not y:
            # No data: draw a single dummy bar so the template still renders.
            fig = go.Figure(data=go.Bar(x=[0], y=[0]))
        else:
            fig = go.Figure()
            if multi:
                for i, _ in enumerate(y):
                    if name:
                        item_name = name[i]
                    else:
                        item_name = ""
                    marker = None
                    # list_protect_get guards against fewer colors than series
                    if color := list_protect_get(color_list, i):
                        marker = {'color': color}
                    fig.add_trace(go.Bar(x=x, y=y[i], name=item_name, text=y[i],
                                         textfont={'size': 18, 'color': 'rgba(45,53,67,65)'},
                                         textposition='outside', marker=marker))
            else:
                fig.add_trace(go.Bar(x=x, y=y, name=name, textposition='outside', text=y,
                                     textfont={'size': 18, 'color': 'rgba(45,53,67,65)'}, marker={'color': color_list[0]}))
        legend = {
            'title_font_family': 'ABeeZee',
            'font': {'family': 'ABeeZee', 'size': 12, 'color': 'rgba(45,53,67,65)'},
            'traceorder': 'reversed',
            'y': 1.04,
            'x': 0,
            'bgcolor': 'rgba(0,0,0,0)',
            'orientation': 'h'
        }
        xaxis = {
            "tickfont": {"size": 16},
        }
        yaxis = {
            'gridcolor': 'rgba(65,97,128,15)',
            'tick0': 0,
            'zeroline': True,
            'zerolinewidth': 1,
            'zerolinecolor': 'rgba(65,97,128,15)'
        }
        # Widen the gaps when there are only a few categories, so single bars
        # do not fill the whole plot.
        if len(x) <= 2:
            fig.update_layout(bargap=0.8)
        elif len(x) <= 5:
            fig.update_layout(bargap=0.5)
        fig.update_layout(
            paper_bgcolor='rgba(255,255,255,100)',
            plot_bgcolor='rgba(255,255,255,100)',
            margin={'l': 5, 'r': 5, 't': 35, 'b': 5},
            xaxis=xaxis,
            yaxis=yaxis,
            title=title,
            xaxis_title=xaxis_title,
            yaxis_title=yaxis_title,
            legend=legend
        )
        image_io = BytesIO()
        fig.write_image(image_io, format="jpeg", scale=2)
        return image_io

    def get_tiaoxin_barcharts_image(self, x: list, y: list, title="", xaxis_title="", yaxis_title=""):
        """Build a horizontal bar chart ("tiaoxin" = 条形, horizontal bars).

        Args:
            x: bar lengths (also used as the bar labels)
            y: bar categories
            title: chart title
            xaxis_title: x-axis title
            yaxis_title: y-axis title

        Returns:
            BytesIO holding the chart as a JPEG image.
        """
        data = [go.Bar(
            x=x,
            y=y,
            orientation='h',
            marker={'color': 'rgba(240,125,49,100)'},
            width=[0.4]*len(y),
            text=x,
            textfont={'size': 18, 'color': 'rgb(0,0,0)'},
            textposition="auto",
        )]
        layout = go.Layout(
            title=title
        )

        fig = go.Figure(data=data, layout=layout)

        legend = {
            'title_font_family': 'ABeeZee',
            'font': {'family': 'ABeeZee', 'size': 24, 'color': 'rgba(255,255,255,0)'},
            'traceorder': 'reversed',
            'y': 0,
            'x': 0,
            'bgcolor': 'rgba(255,255,255,0)',
            'orientation': 'h'
        }
        # The value axis is hidden: the values are printed on the bars instead.
        xaxis = {
            'gridcolor': 'rgba(0,0,0,0)',
            'tickmode': 'linear',
            'tick0': 0,
            'zeroline': True,
            'zerolinewidth': 0,
            'zerolinecolor': 'rgba(255,255,255,0)',
            'visible': False,
        }
        yaxis = {
            'gridcolor': 'rgba(0,0,0,0)',
            'tickmode': 'linear',
            'tick0': 0,
            'zeroline': True,
            'zerolinewidth': 0,
            'zerolinecolor': 'rgba(255,255,255,0)',
            "tickfont": {"size": 16},
        }

        if len(x) <= 2:
            fig.update_layout(bargap=0.8)
        elif len(x) <= 5:
            fig.update_layout(bargap=0.5)
        fig.update_layout(
            paper_bgcolor='rgba(255,255,255,100)',
            plot_bgcolor='rgba(255,255,255,100)',
            margin={'l': 110, 'r': 5, 't': 35, 'b': 5},
            xaxis=xaxis,
            yaxis=yaxis,
            title=title,
            xaxis_title=xaxis_title,
            yaxis_title=yaxis_title,
            legend=legend
        )
        image_io = BytesIO()
        fig.write_image(image_io, format="jpeg", scale=2)
        return image_io

    def get_piecahrts_image(self, labels, values, title=None, hole=0.5, color_list=None):
        """Build a pie (or donut) chart.

        NOTE(review): the method name typo ("piecahrts") is kept — callers
        throughout the project use it.

        Args:
            labels: slice labels
            values: slice values
            title: chart title
            hole: donut-hole ratio; 0 draws a solid pie
            color_list: optional slice colors

        Returns:
            BytesIO holding the chart as a JPEG image.
        """
        if not color_list:
            color_list = ['rgba(91,143,249,100)', 'rgba(97,221,170,100)', 'rgba(246, 189, 22, 100)', 'rgba(101,120,155,100)']

        fig = go.Figure(data=[go.Pie(
            labels=labels,
            values=values,
            hole=hole,
            textinfo='label+percent',
            textposition='inside',
            marker={'colors': color_list}
        )])
        fig.update_layout(
            legend=dict(x=0.9, y=1),
            title={
                'text': title,
            }
        )
        image_io = BytesIO()
        fig.write_image(image_io, format="jpeg", scale=2)
        return image_io

    def make_context(self):
        """Build the render context by dispatching data to ``context_*`` handlers.

        Every attribute set from the constructor kwargs whose name matches a
        ``context_<name>`` method is processed and the result stored in
        ``self.context`` under that name, e.g. the output of
        ``context_keyword_inttrend_data`` is stored as
        ``{"keyword_inttrend_data": final_data}``.
        """
        for key, data in self.__dict__.items():
            context_method = "context_{}".format(key)
            if hasattr(self, context_method):
                # Call context_<key> to process the data passed under this key.
                this_context = getattr(self, context_method)(data)
                # Merge the result into the final render context.
                self.context.update({key: this_context})

    def after_make_context(self):
        """Hook run after make_context(); subclasses may post-process self.context."""
        pass

    def _register_filter(self, jinja_env):
        """Register every ``*_filter`` attribute as a jinja2 filter (called at init)."""
        for attr in self.__dir__():
            if attr.endswith("_filter"):
                jinja_env.filters[attr] = self.__getattribute__(attr)

    def export(self):
        """Render the template with the built context and return it as BytesIO.

        ``autoescape=True`` is required: scraped content may embed script
        fragments such as
        'textinputs[0] = decodeURIComponent("'");alert(45767499);</script>'
        which would otherwise corrupt the Word tags.
        """
        self.tpl.render(context=self.context, jinja_env=self.jinja_env, autoescape=True)
        # Save the rendered document into an in-memory file object.
        file_io = BytesIO()
        self.tpl.save(file_io)
        file_io.seek(0)
        return file_io

    @staticmethod
    def utc2asia_filter(utime, sep='-'):
        """jinja2 filter: display a UTC time as Asia (UTC+8) time.

        Accepts a ``datetime`` or a '%Y-%m-%d %H:%M:%S' string. Any value that
        cannot be interpreted is returned unchanged (best effort for display).

        Args:
            utime: the UTC timestamp (datetime, string, or falsy)
            sep: '-' renders '%Y-%m-%d ...', anything else '%Y/%m/%d ...'
        """
        try:
            if not utime:
                return '暂无数据'

            if isinstance(utime, str):
                utime = datetime.strptime(utime, '%Y-%m-%d %H:%M:%S')
            if isinstance(utime, datetime):
                atime = utime + timedelta(hours=8)
                if sep == '-':
                    return atime.strftime('%Y-%m-%d %H:%M:%S')
                return atime.strftime('%Y/%m/%d %H:%M:%S')
            # Unsupported type (e.g. int): show the raw value instead of None.
            return utime
        except Exception:  # unparseable data is displayed as-is
            return utime

    @staticmethod
    def sum_filter(table_list, field):
        """jinja2 filter: sum ``field`` over a list of mappings (table totals)."""
        return sum(ele[field] for ele in table_list)

    @staticmethod
    def type_mapping_filter(keyword_type):
        """jinja2 filter: map a keyword type code to its display name.

        Unknown codes fall back to the raw code; empty values render as '-'.
        """
        category_mapping = {
            'zhengzhi': "政治",
            'baokong': "暴恐",
            'shehuang': "涉黄",
            'shehei': "涉黑",
            'qita': "其他",
            'minsheng': "民生",
            'bocai': "博彩",
            'fandong': "反动",
            'phone_number': "手机号隐私",
            'bank_account': "银行账户隐私",
            'id_number': "身份证隐私",
            'email_account': "邮箱隐私"
        }
        final_category = category_mapping.get(keyword_type, keyword_type)
        return final_category or "-"

    @staticmethod
    def date_filter(date_str):
        """jinja2 filter: normalize a date string to '%Y/%m/%d'.

        Recognizes '-', '/' and bare '%Y%m%d' formats; anything else is
        returned unchanged.
        """
        if "-" in date_str:
            date_format = "%Y-%m-%d"
        elif "/" in date_str:
            date_format = "%Y/%m/%d"
        else:
            date_format = "%Y%m%d"
        try:
            parsed = datetime.strptime(date_str, date_format)
        except ValueError:
            # None of the three formats matched: return the original string.
            return date_str
        else:
            return parsed.strftime("%Y/%m/%d")

    @staticmethod
    def join_filter(data_list: list, split='\r\n'):
        """jinja2 filter: join a list of strings with ``split``."""
        return split.join(data_list)

    @staticmethod
    def distinct_filter(item: list) -> list:
        """jinja2 filter: de-duplicate a list (order not preserved)."""
        return list(set(item))

    @staticmethod
    def datetime_s_filter(dt):
        """jinja2 filter: shift to UTC+8 and format with DATETIME_FMT_S;
        unparseable values are returned unchanged."""
        try:
            return arrow.get(dt).shift(hours=8).strftime(DATETIME_FMT_S)
        except Exception:
            return dt

    @staticmethod
    def round2_filter(flt):
        """jinja2 filter: render a number with exactly two decimal places."""
        return format(round(flt, 2), ".2f")

class ExportMonitorDocxHandler(ExportDocxHandler):
    """
    资产监测word报告导出，基础类型数据字段共7个 ('ssl', 'vul', 'securityEvent', 'http', 'ping', 'content')
    """
    # Vulnerability level id -> Chinese display name.
    level_mapping = {
        2: "低危",
        3: "中危",
        4: "高危",
        5: "严重",
    }
    # Display name -> numeric severity, used for sorting (larger = more severe).
    order_dict = {
        '严重': 5,
        '高危': 4,
        '中危': 3,
        '低危': 2,
    }

    def get_task_session_ids(self, job_id=None):
        """Collect task session ids from the context plus the per-job mapping.

        Args:
            job_id: when given, only that job's session ids are merged;
                otherwise every job's ids are merged.

        Returns:
            De-duplicated list of task session ids. Note: pops
            'task_session_ids' out of ``self.context`` as a side effect.
        """
        collected = self.context.pop("task_session_ids", [])
        per_job = getattr(self, "task_session_ids", {})
        if per_job:
            if job_id:
                collected.extend(per_job.get(job_id, []))
            else:
                for session_ids in per_job.values():
                    collected.extend(session_ids)
        return list(set(collected))

    def is_display_url(self, job_id=None):
        """Whether spider URLs may be displayed.

        True only when every relevant task started after SPIDER_URL_TIME.
        Note: pops 'task_start_times' out of ``self.context`` as a side effect.
        """
        start_times = self.context.pop("task_start_times", [])
        per_job = getattr(self, "task_start_times", {})
        if per_job:
            if job_id:
                start_times.extend(per_job.get(job_id, []))
            else:
                for times in per_job.values():
                    start_times.extend(times)
        if not start_times:
            return False
        return SPIDER_URL_TIME < min(start_times)

    def after_make_context(self):
        """Aggregate the per-module counters into an overall 'overview' entry."""
        scan_count = 0
        monitor_urls_count = 0
        # get_task_session_ids() pops from self.context, so run it before
        # iterating the context below.
        if task_session_ids := self.get_task_session_ids():
            scan_count = get_monitor_urls(task_session_id=task_session_ids, is_scan_count=True)
            monitor_urls_count = len(get_monitor_urls(task_session_id=task_session_ids))

        totals = {"taskCount": 0, "warningCount": 0, "alertCount": 0}
        for detail in self.context.values():
            if not isinstance(detail, dict):
                continue
            for field in totals:
                totals[field] += detail.get(field, 0)

        overview = dict(totals)
        overview["monitorUrlsCount"] = monitor_urls_count
        overview["scanCount"] = scan_count
        overview["is_display_url"] = self.is_display_url()
        self.context["overview"] = overview

    def context_asset(self, asset):
        """Asset-change monitoring data.

        Args:
            asset: list of monitor-run records (each with 'result'/'endTime').

        Returns:
            dict of counters, detail lists, percentages and a bar-chart image
            ready to be rendered into the template.
        """
        total_stat = {
            "taskCount": len(asset),  # number of monitor runs
            "warningCount": 0,  # number of change alerts
            "resolvedWarningCount": 0,  # confirmed changes
            "unresolvedWarningCount": 0,  # unconfirmed changes
            "subdomainAddCount": 0,  # subdomains brought online
            "subdomainRemoveCount": 0,  # subdomains taken offline
            "portAddCount": 0,  # ports opened
            "portRemoveCount": 0,  # ports closed
            "portServiceCount": 0,  # port service adjustments
            "subdomainAdd": [],  # details: subdomains brought online
            "subdomainRemove": [],  # details: subdomains taken offline
            "portAdd": [],  # details: ports opened
            "portRemove": [],  # details: ports closed
            "portService": []  # details: port service adjustments
        }
        total_stat.update(self._get_base_stat(asset))
        # Percentage of each asset-change type, and the type with the largest share.
        percent_fields_mapping = {
            "subdomainAddCount": "子域名上线变动",
            "subdomainRemoveCount": "子域名下线变动",
            "portAddCount": "端口服务开放变动",
            "portRemoveCount": "端口服务关闭变动",
            "portServiceCount": "端口服务调整变动"
        }

        # Collect the change warnings of every run, stamping each with the
        # run's end time as its discovery time.
        has_warning_count = 0
        all_warnings = []
        for asset_item in asset:
            if _warnings := asset_item.get("result", {}).get("warnings", []):
                end_time = asset_item.get('endTime')
                for _warning in _warnings:
                    _warning['foundAt'] = end_time
                has_warning_count += 1
                all_warnings.extend(_warnings)

        total_stat['warningPercent'] = round(has_warning_count/len(asset)*100, 2) if asset else 0
        total_stat['hasWarningCount'] = has_warning_count

        detail_mapping, ex_detail_mapping = self._get_asset_change_detail(warnings=all_warnings)
        total_stat.update(detail_mapping)

        base = defaultdict(int)
        # Per-type counts and confirmation-status roll-up.
        for _type, detail in detail_mapping.items():
            count_field = f"{_type}Count"
            total_stat[count_field] = len(detail)
            base['unresolvedWarningCount'] += len([i for i in detail if i.get('status') == '待确认'])
            base['resolvedWarningCount'] += len([i for i in detail if i.get('status') == '已确认'])
            base['warningCount'] += len(detail)
        total_stat.update(base)
        total_stat.update(ex_detail_mapping)

        most_count = 0
        # Percentage fields per change type.
        for field, field_name in percent_fields_mapping.items():
            percent_field = f"{field}Percent"
            field_count = total_stat[field]
            total_stat[percent_field] = format(field_count / total_stat["warningCount"], ".2%") if \
                total_stat["warningCount"] else "-"
            # Track the change type with the largest count (>= keeps the last
            # maximum on ties).
            if field_count >= most_count:
                most_count = field_count
                total_stat["mostType"] = field_name
                total_stat["mostTypeCount"] = most_count
                total_stat["mostTypePercent"] = total_stat[percent_field]

        # charts
        # labels = ["域名上线", "域名下线", "端口开放", "端口关闭", "端口服务调整"]
        # values = [total_stat["subdomainAddCount"], total_stat["subdomainRemoveCount"], total_stat["portAddCount"],
        #           total_stat["portRemoveCount"], total_stat["portServiceCount"]]
        #
        # pie_data = {k: v for k, v in zip(labels, values) if v}
        # labels, values = list(pie_data.keys()), list(pie_data.values())
        #
        # chart_image = self.get_piecahrts_image(labels=labels, values=values, title="资产变动类型个数分布")
        # total_stat["warningTypeChart"] = InlineImage(self.tpl, chart_image, width=Mm(132), height=Mm(93))

        # bar chart
        bar_labels = ['子域名上线', '子域名下线', '端口开放', '端口关闭']
        bar_data = [
            total_stat["subdomainAddCount"],
            total_stat["subdomainRemoveCount"],
            total_stat["portAddCount"],
            total_stat["portRemoveCount"]
        ]
        bar_charts = self.get_barcharts_image(
            x=bar_labels,
            y=bar_data,
            title="资产变动数量")
        total_stat['barChart'] = InlineImage(self.tpl, bar_charts, width=Mm(93), height=Mm(66))

        return total_stat

    def context_vul(self, vul):
        """Weakness monitoring data: web vulnerability scan results."""
        # Base counters shared by every monitor type.
        stats = self._get_base_stat(vul)

        # Flatten the warnings of every run and merge the detail statistics.
        flattened_warnings = [
            warning
            for run in vul
            for warning in run.get("result", {}).get("warnings", [])
        ]
        stats.update(self._get_vul_detail(warnings=flattened_warnings))

        # Bar chart of warning counts per severity.
        severity_counts = [
            stats['warningCriticalVulCount'],
            stats['warningHighVulCount'],
            stats['warningMediumVulCount'],
            stats['warningLowVulCount'],
        ]
        chart_io = self.get_barcharts_image(
            x=['严重', '高危', '中危', '低危'],
            y=severity_counts,
            title="Web漏洞监测",
        )
        stats['barChart'] = InlineImage(self.tpl, chart_io, width=Mm(93), height=Mm(66))

        return stats

    def context_ssl(self, ssl):
        """Weakness monitoring data: SSL security checks.

        Merges protocol/certificate warning details, per-status counters and a
        grouped bar chart into one stats mapping.
        """
        total_stat = self._get_base_stat(ssl)
        all_warnings = []
        for ssl_item in ssl:
            result = ssl_item.get("result", {})
            warnings = result.get("warnings", [])
            addition = result.get("addition", [])
            # Attach the run-level addition info to every warning of that run.
            # (Was a list comprehension used only for its side effect.)
            for warning in warnings:
                warning["addition"] = addition
            all_warnings.extend(warnings)
        ssl_details = self._get_ssl_detail(warnings=all_warnings)
        total_stat.update(ssl_details)

        # Per-status counters for protocol and certificate findings.
        status_field_fmt = {
            '待修复': 'unresolvedWarning{}Count',
            '已修复': 'resolvedWarning{}Count',
            '已忽略': 'ignoredWarning{}Count',
        }
        base = {
            fmt.format(kind): 0
            for fmt in status_field_fmt.values()
            for kind in ('Protocol', 'Certificate')
        }
        detail_groups = (
            ('Protocol', ssl_details.get('protocolDetails', [])),
            ('Certificate', ssl_details.get('certificateDetails', [])),
        )
        for kind, details in detail_groups:
            for item in details:
                field_fmt = status_field_fmt.get(item.get('status'))
                if field_fmt:
                    base[field_fmt.format(kind)] += 1

        total_stat.update(base)
        # Grouped bar chart: protocol findings per severity, plus the
        # certificate findings (always rendered in the 高危 column).
        bar_data = [
            [total_stat['warningCriticalProtocolCount'], total_stat['warningHighProtocolCount'],
             total_stat['warningMediumProtocolCount'], total_stat['warningLowProtocolCount']],
            [0, len(total_stat['certificateDetails']), 0, 0],
        ]
        bar_labels = ['严重', '高危', '中危', '低危']
        bar_charts = self.get_barcharts_image(
            x=bar_labels,
            y=bar_data,
            name=['SSL协议漏洞', 'SSL证书配置'],
            title="SSL安全数量",
            multi=True
        )
        total_stat['barChart'] = InlineImage(self.tpl, bar_charts, width=Mm(93), height=Mm(66))
        return total_stat

    def context_securityEvent(self, security):
        """Security-event monitoring data (dark links, mining, malware, etc.)."""
        total_stat = self._get_base_stat(security)
        all_warnings = self.get_all_warnings(security)
        security_detail = self._get_security_detail(all_warnings)
        total_stat.update(security_detail)

        # Per-status counters across all event types.
        base = {
            'unresolvedWarningCount': 0,
            'resolvedWarningCount': 0,
            'ignoredWarningCount': 0,
        }
        for k, v in security_detail['typesDetail'].items():
            for item in v:
                if item.get('status') == '待处理':
                    base['unresolvedWarningCount'] += 1
                elif item.get('status') == '已处理':
                    base['resolvedWarningCount'] += 1
                elif item.get('status') == '已忽略':
                    base['ignoredWarningCount'] += 1
        total_stat.update(base)

        # Pie chart: share of each security-event type.
        labels = []
        values = []
        for item in total_stat.get("titleCountDetails", []):
            labels.append(item.get("title", ""))
            values.append(item.get("count", 0))

        pie_data = {k: v for k, v in zip(labels, values) if v}
        # Fixed label order; missing types render as None slices.
        labels = ['暗链', '坏链', '挂马', '挖矿', '风险外链']
        values = [pie_data.get(item) for item in labels]

        chart_image = self.get_piecahrts_image(labels=labels, values=values, title="安全事件个数占比")
        total_stat["CategoryChart"] = InlineImage(self.tpl, chart_image, width=Mm(132), height=Mm(93))

        # Bar chart of event counts per type.
        bar_data = [
            total_stat['warningBlackLinkCount'],
            total_stat['warningCryjackCount'],
            total_stat['warningMalscanCount'],
            total_stat['warningBrokenLinkCount'],
            total_stat['warningForeignLinksCount']
        ]

        bar_labels = ['暗链', '挖矿', '挂马', '坏链', '风险外链']
        bar_charts = self.get_barcharts_image(
            x=bar_labels,
            y=bar_data,
            title="安全事件数量")
        total_stat['barChart'] = InlineImage(self.tpl, bar_charts, width=Mm(93), height=Mm(66))

        return total_stat

    def context_http(self, http):
        """Availability monitoring data: HTTP checks.

        Adds 'errorTargets': every target with at least one failing node.
        """
        stats = self.get_http_ping_detail(http, "http")
        stats['errorTargets'] = [
            target
            for target, node_detail in stats['errorNodeDetail'].items()
            if node_detail.get('errorNodeCount', 0) > 0
        ]
        return stats

    def context_ping(self, ping):
        """Availability monitoring data: PING checks (a single monitored address).

        NOTE(review): reads self.context['http'] for the comparison chart, so
        it relies on context_http having run first.
        """
        stats = self.get_http_ping_detail(ping, "ping")
        # PING monitors exactly one address: unwrap the single entry.
        node_details = stats["errorNodeDetail"]
        first_target = next(iter(node_details), None)
        stats["errorNodeDetail"] = node_details[first_target] if node_details else {}
        if stats["errorNodeDetail"].get('errorNodeCount', 0) > 0:
            stats['errorTargets'] = [first_target]
        else:
            stats['errorTargets'] = []

        # Bar chart comparing HTTP vs PING anomaly counts.
        http_stats = self.context['http']
        anomaly_counts = [
            len(http_stats["errorTargets"]),
            len(stats["errorTargets"]),
        ]
        chart_io = self.get_barcharts_image(
            x=['HTTP', 'PING'],
            y=anomaly_counts,
            title="可用性异常数量")
        stats['barChart'] = InlineImage(self.tpl, chart_io, width=Mm(93), height=Mm(66))

        return stats

    def context_content(self, content):
        """Content-compliance monitoring data (privacy, changes, keywords)."""
        total_stat = self._get_base_stat(content)
        all_warnings = self.get_all_warnings(content)
        content_detail = self._get_content_detail(all_warnings)
        total_stat.update(content_detail)

        # Per-status counters across all content warning types.
        base = {
            'unresolvedWarningCount': 0,
            'resolvedWarningCount': 0,
            'ignoredWarningCount': 0,
        }
        for k, v in content_detail['typesDetail'].items():
            for item in v:
                if item.get('status') == '待处理':
                    base['unresolvedWarningCount'] += 1
                elif item.get('status') == '已处理':
                    base['resolvedWarningCount'] += 1
                elif item.get('status') == '已忽略':
                    base['ignoredWarningCount'] += 1
        total_stat.update(base)

        # Pie chart: share of each content-threat category.
        labels = ["隐私信息", "变更信息", "敏感词"]
        values = [
            total_stat["warningPrivacyCount"],
            total_stat["warningChangeCount"],
            total_stat["warningKeywordCount"]]

        pie_data = {k: v for k, v in zip(labels, values) if v}
        # Keep the fixed label order; zero-count categories become None slices.
        values = [pie_data.get(item) for item in labels]

        chart_image = self.get_piecahrts_image(labels=labels, values=values, title="内容合规威胁占比", hole=0)
        total_stat["CategoryChart"] = InlineImage(self.tpl, chart_image, width=Mm(132), height=Mm(93))

        # bar chart
        bar_labels = ['敏感词', '隐私信息']
        bar_data = [
            total_stat["warningKeywordCount"],
            total_stat["warningPrivacyCount"]
        ]
        bar_charts = self.get_barcharts_image(
            x=bar_labels,
            y=bar_data,
            title="内容违规数量")
        total_stat['barChart'] = InlineImage(self.tpl, bar_charts, width=Mm(93), height=Mm(66))

        return total_stat

    @staticmethod
    def get_all_warnings(data_list: list) -> list:
        """Flatten the warnings of every monitor run, stamping each with a
        'foundAt' time.

        The discovery time is taken, in order of preference, from the warning
        itself, from its 'detail' payload ('found_at'/'foundAt', converted to a
        datetime when needed), and finally from the run's 'endTime'.
        """
        flattened = []
        for run in data_list:
            fallback_time = run.get('endTime')
            for warning in run.get("result", {}).get("warnings", []):
                found_at = warning.get('foundAt')
                detail = warning.get('detail', {})
                if not found_at and isinstance(detail, dict):
                    found_at = detail.get('found_at') or detail.get('foundAt', '')
                    if found_at and not isinstance(found_at, datetime):
                        found_at = arrow.get(found_at).datetime

                warning['foundAt'] = found_at or fallback_time
                flattened.append(warning)

        return flattened

    @staticmethod
    def _get_base_stat(_type_data):
        """Build the base counters shared by every monitor type.

        Args:
            _type_data: list of monitor-run records, each with a 'result' dict.

        Returns:
            dict with run/alert/warning counters and the monitored-URL count.
        """
        base = {
            "taskCount": len(_type_data),  # number of monitor runs
            "alertCount": 0,  # number of alert notifications sent
            "warningCount": 0,  # number of warnings
            "resolvedWarningCount": 0,  # resolved warnings
            "unresolvedWarningCount": 0,  # unresolved warnings
            "monitorUrlsCount": 0,  # number of monitored urls
        }
        for item in _type_data:
            result = item.get("result", {})
            # Count runs that sent an alert notification.
            if item.get("isSendAlert", False):
                base["alertCount"] += 1
            if result.get("addition"):
                # Best effort: a malformed 'addition' payload (non-dict levels,
                # non-numeric urls) must not abort the whole stats build.
                # (Was a bare except: swallowing everything.)
                try:
                    base["monitorUrlsCount"] += result.get("addition", {}).get("statistics", {}).get("urls", 0)
                except (AttributeError, TypeError):
                    pass
            base["resolvedWarningCount"] += result.get("warningResolvedCount", 0)
            base["unresolvedWarningCount"] += result.get("warningUnresolvedCount", 0)
            base["warningCount"] += result.get("warningCount", 0)

        return base

    def _get_vul_detail(self, warnings: list) -> dict:
        """漏洞详情统计 — aggregate vulnerability warnings into report context.

        Builds per-category counts, the top-10 title table, deduplicated
        per-(title, affects) detail rows and resolution statistics.

        Args:
            warnings: list of warning dicts. Mutated in place: a "status"
                field is added and description/impact/recommendation/reference
                are overridden by the latest KbVuln database records.

        Returns:
            dict of context keys consumed by the word template.
        """
        category_titles = defaultdict(set)
        title_affects = defaultdict(set)
        title_type_mapping = {}
        title_level_mapping = {}
        title_details_mapping = defaultdict(list)
        resolve_stat = defaultdict(int)

        # Prefer the latest knowledge-base records for the text fields.
        warning_titles = list({i['title'] for i in warnings if i.get('title')})
        vuln = KbVuln.objects.find({'is_deleted': False, 'name.zh_cn': {'$in': warning_titles}})
        vuln_info = {item.name.zh_cn: item.to_mongo().to_dict() for item in vuln}

        # Show the most severe warnings first.
        # BUG FIX: warnings are dicts, so getattr(x, 'level', 5) always
        # returned the default and the sort was a no-op; use dict access.
        warnings.sort(key=lambda x: (x.get('level') or 5) * -1)
        for warning in warnings:
            level = ExportMonitorDocxHandler.level_mapping.get(warning.get("level"))  # severity label
            category = warning.get("category")  # vulnerability category
            title = warning.get("title")  # vulnerability name

            # Database text takes precedence over what the warning carries.
            db_item = vuln_info.get(title, {})
            for field in ("description", "impact", "recommendation"):
                zh_text = db_item.get(field, {}).get('zh_cn')
                if zh_text:
                    warning[field] = zh_text
            if db_item.get('reference'):
                warning['reference'] = db_item.get('reference')

            affects = warning.get("affects")
            category_titles[category].add(title)
            title_affects[title].add(affects)
            title_type_mapping[title] = category
            title_level_mapping[title] = level
            status = "已处理" if warning.get("isResolved") else "已忽略" if warning.get("ignoredTime") else "未处理"
            warning["status"] = status
            title_details_mapping[title].append(warning)
            resolve_stat[status] += 1

        # 3.1.1 vulnerability category share and distribution.
        sum_vul_count = sum(len(affects) for affects in title_affects.values())
        category_count_detail = {}
        for category, titles in category_titles.items():
            cat_data = category_count_detail.setdefault(category, {
                'level_list': [],
                'count': 0,
                "percent": "-"
            })
            category_vul_count = sum(len(title_affects.get(t, set())) for t in titles)
            level_list = list({ExportMonitorDocxHandler.order_dict[title_level_mapping[t]] for t in titles})
            cat_data['level_list'] = sorted(level_list, reverse=True)
            cat_data['count'] += category_vul_count
            cat_data['percent'] = format(cat_data['count'] / sum_vul_count, ".2%") if sum_vul_count else "-"

        # 3.1.2 web vulnerability top 10.
        sum_title_count = sum(len(affects) for affects in title_affects.values())
        title_count_detail = self._get_title_count_detail(title_affects, title_level_mapping, title_type_mapping)
        title_count_detail.sort(key=lambda x: x["count"], reverse=True)
        title_count_detail_top10 = title_count_detail[:10]
        for top_item in title_count_detail_top10:
            top_item["times"] = len(title_details_mapping.get(top_item["title"], []))

        # 3.1.3 detail rows: aggregated per (title, affects) with occurrence counts.
        title_times_detail = self.get_title_times_detail(title_details_mapping, title_level_mapping)
        title_times_detail.sort(key=lambda x: self.order_dict.get(x['level'], -1), reverse=True)

        # Finding counts per severity label.
        level_title_count = defaultdict(int)
        for item in title_count_detail:
            level_title_count[item["level"]] += item["count"]

        most_vul_title_item = max(title_count_detail, key=lambda x: x["count"]) if title_count_detail else {}
        most_vul_title = most_vul_title_item.get("title", "-")
        most_vul_count = most_vul_title_item.get("count", 0)
        most_vul_title_percent = most_vul_title_item.get("percent", "-")

        detail_result = {
            "categoryDetails": category_count_detail,
            "titleTop10Details": title_count_detail_top10,
            "itemDetails": title_times_detail,

            "warningVulCount": sum_title_count,  # findings, deduplicated by title+affects
            "warningCriticalVulCount": level_title_count.get("严重", 0),  # critical findings
            "warningHighVulCount": level_title_count.get("高危", 0),  # high-risk findings
            "warningMediumVulCount": level_title_count.get("中危", 0),  # medium-risk findings
            "warningLowVulCount": level_title_count.get("低危", 0),  # low-risk findings
            "mostVul": most_vul_title,  # title with the most findings
            "mostVulCount": most_vul_count,  # its finding count
            "mostVulPercent": most_vul_title_percent,  # its share of all findings

            "resolvedWarningCount": resolve_stat.get("已处理", 0),  # resolved
            "unresolvedWarningCount": resolve_stat.get("未处理", 0),  # unresolved
            "ignoredWarningCount": resolve_stat.get("已忽略", 0),  # ignored
        }
        return detail_result

    def _get_ssl_detail(self, warnings: list) -> dict:
        """Aggregate SSL warnings (protocol + certificate) into report context.

        Args:
            warnings: SSL warning dicts. Mutated in place ("status" added).

        Returns:
            dict with overview rows, per-category details and counters.
        """
        title_affects = defaultdict(set)
        title_protocol_affects = defaultdict(set)
        title_level_mapping = {}
        title_details_mapping = {
            "protocol": defaultdict(list),  # protocol-vulnerability details
            "certificate": defaultdict(list)  # certificate-anomaly details
        }
        title_type_mapping = {}

        certificate_titles = set()

        # Show the most severe warnings first.
        # BUG FIX: warnings are dicts, so getattr(x, 'level', 5) always
        # returned the default and the sort was a no-op; use dict access.
        warnings.sort(key=lambda x: (x.get('level') or 5) * -1)
        for warning in warnings:
            title = warning.get("title")
            level = ExportMonitorDocxHandler.level_mapping.get(warning.get("level"))
            affects = warning.get("affects")
            category = warning.get("category")

            title_level_mapping[title] = level
            title_type_mapping[title] = category
            title_affects[title].add(affects)
            status = "已修复" if warning.get("isResolved") else "已忽略" if warning.get("ignoredTime") else "待修复"
            if category == "protocol":
                title_protocol_affects[title].add(affects)
            else:
                certificate_titles.add(title)
            warning["status"] = status
            title_details_mapping[category][title].append(warning)
        # NOTE: a pre-deduplication protocol status counter used to be
        # accumulated in the loop above; it was dead code — the counters are
        # rebuilt below from the deduplicated detail rows.

        sum_title_count = sum(len(affects) for affects in title_affects.values())
        # 3.2 SSL monitoring overview.
        title_count_detail = [
            {
                "title": title,
                "count": len(affects),
                "category": title_type_mapping.get(title),
                "level": title_level_mapping.get(title),
                "percent": format(len(affects) / sum_title_count, ".2%") if sum_title_count else "-"
            } for title, affects in title_affects.items()
        ]

        # 3.2.1 SSL protocol vulnerability details: one row per title, not per affected address.
        protocol_detail = [item.get("detail")[0] for item in self.get_title_times_detail(title_details_mapping["protocol"], title_level_mapping)]
        protocol_detail.sort(key=lambda x: self.order_dict.get(x['level'], -1), reverse=True)
        # 3.3.2 SSL certificate anomaly details: one row per title, not per affected address.
        certificate_detail = [item.get("detail")[0] for item in self.get_title_times_detail(title_details_mapping["certificate"], title_level_mapping)]
        certificate_detail.sort(key=lambda x: self.order_dict.get(x['level'], -1), reverse=True)

        # Finding counts per severity label (protocol category only).
        level_title_count = defaultdict(int)
        for item in title_count_detail:
            if item["category"] == "protocol":
                level_title_count[item["level"]] += item["count"]

        sum_protocol_title_count = sum(len(affects) for affects in title_protocol_affects.values())

        # Status counters over the deduplicated rows.
        protocol_resolve_stat = defaultdict(int)
        for warning in protocol_detail:
            status = "已修复" if warning.get("isResolved") else "已忽略" if warning.get("ignoredTime") else "待修复"
            protocol_resolve_stat[status] += 1

        certificate_resolve_stat = defaultdict(int)
        for warning in certificate_detail:
            status = "已修复" if warning.get("isResolved") else "已忽略" if warning.get("ignoredTime") else "待修复"
            certificate_resolve_stat[status] += 1

        detail_result = {
            "titleDetails": title_count_detail,
            "protocolDetails": protocol_detail,
            "certificateDetails": certificate_detail,
            "certificateTitles": "、".join(list(certificate_titles)),  # certificate anomaly titles

            "warningProtocolCount": sum_protocol_title_count,  # protocol findings, deduplicated by title+affects
            "warningSslAllCount": sum_protocol_title_count + len(certificate_titles),  # all SSL findings
            "warningCriticalProtocolCount": level_title_count.get("严重", 0),  # critical findings
            "warningHighProtocolCount": level_title_count.get("高危", 0),  # high-risk findings
            "warningMediumProtocolCount": level_title_count.get("中危", 0),  # medium-risk findings
            "warningLowProtocolCount": level_title_count.get("低危", 0),  # low-risk findings

            "resolvedWarningCertificateCount": certificate_resolve_stat.get("已修复", 0),  # fixed
            "unresolvedWarningCertificateCount": certificate_resolve_stat.get("待修复", 0),  # pending
            "ignoredWarningCertificateCount": certificate_resolve_stat.get("已忽略", 0),  # ignored

            "resolvedWarningProtocolCount": protocol_resolve_stat.get("已修复", 0),  # fixed
            "unresolvedWarningProtocolCount": protocol_resolve_stat.get("待修复", 0),  # pending
            "ignoredWarningProtocolCount": protocol_resolve_stat.get("已忽略", 0),  # ignored
        }
        return detail_result

    def _get_asset_change_detail(self, warnings: list):
        """告警详情次数统计"""
        result_count = defaultdict(int)
        result_status = {}
        result_type = {}
        subdomain_dict = {}
        port_dict = {}

        for warning in warnings:
            # 获取类型：如portAdd, portRemove, portService, subdomainAdd等
            asset_type = warning.get("category") + warning.get("detail", {}).get("op", "").title()
            title = warning["title"]
            status = "已确认" if warning.get("isResolved") else "已忽略" if warning.get("ignoredTime") else "待确认"
            result_count[title] += 1
            result_status[title] = status
            result_type[title] = asset_type

            affects = warning.get('affects')
            if (_category := warning.get('category')) == 'subdomain':
                _tmp_dict = subdomain_dict
            elif _category == 'port':
                _tmp_dict = port_dict
            else:
                continue
            _tmp_dict.setdefault(affects, []).append({
                'foundAt': warning.get('foundAt') or '暂无数据',
                'op': warning.get('detail', {}).get('op'),
                'status': status
            })

        result = []
        for title, count in result_count.items():
            result.append({
                "title": title,
                "times": count,
                "status": result_status.get(title),
                "type": result_type.get(title),
            })
        type_result = defaultdict(list)
        for item in result:
            type_result[item["type"]].append(item)

        return type_result, {'subdomainDict': subdomain_dict, 'portDict': port_dict}

    def _get_security_detail(self, warnings: list) -> dict:
        """Aggregate security-event warnings (dark links, mining, malware,
        broken links, risky outbound links) into report context data.

        Note: `warnings` is mutated in place (a "status" field is added).
        """
        title_affects = defaultdict(set)
        title_level_mapping = {}
        title_type_mapping = {}
        title_details_mapping = defaultdict(list)

        resolve_stat = defaultdict(int)

        for warning in warnings:
            title = warning.get("title")
            level = self.level_mapping.get(warning.get("level"))
            affects = warning.get("affects")
            category = warning.get("category")

            title_level_mapping[title] = level
            title_type_mapping[title] = category
            title_affects[title].add(affects)
            status = "已处理" if warning.get("isResolved") else "已忽略" if warning.get("ignoredTime") else "待处理"
            resolve_stat[status] += 1
            warning["status"] = status
            title_details_mapping[title].append(warning)

        # Per-title counts (each distinct affects counts as one finding).
        title_count_detail = self._get_title_count_detail(title_affects, title_level_mapping, title_type_mapping, )

        # Final detail rows for display, bucketed by event type.
        detail_result = self.get_security_title_times_detail(title_details_mapping, title_level_mapping)
        categories = {
            "暗链": 'black_links',
            "坏链": 'broken_links',
            "挖矿": 'cryjack',
            "网页挂马": 'malscan',
            "风险外链": "foreign_links"
        }
        types_detail = {t: [] for t in categories.values()}
        for detail in detail_result:
            title = detail.get("title")
            category = categories.get(title)
            if category:
                types_detail[category].extend(detail.get("detail", []))

        # Finding counts per severity label.
        level_title_count = defaultdict(int)
        for item in title_count_detail:
            level_title_count[item["level"]] += item["count"]

        # Category with the most findings.
        most_title, most_count, most_title_percent = self.get_most_category(title_count_detail)

        detail_result = {
            "titleCountDetails": title_count_detail,
            "typesDetail": types_detail,

            "warningSecurityCount": self.get_sum_title_count(title_affects),  # findings, deduplicated by title+affects
            "warningBlackLinkCount": len(title_affects.get("暗链", [])),  # dark links
            "warningCryjackCount": len(title_affects.get("挖矿", [])),  # crypto mining
            # NOTE(review): the categories mapping above keys malware pages as
            # "网页挂马" but this lookup uses "挂马" — confirm which title the
            # warnings actually carry; one of them may always be zero.
            "warningMalscanCount": len(title_affects.get("挂马", [])),  # malware pages
            "warningBrokenLinkCount": len(title_affects.get("坏链", [])),  # broken links
            "warningForeignLinksCount": len(title_affects.get("风险外链", [])),  # risky outbound links

            "mostType": most_title,  # most frequent event type
            "mostTypeCount": most_count,  # its count
            "mostTypePercent": most_title_percent,  # its share

            "resolvedWarningCount": resolve_stat.get("已处理", 0),  # resolved
            "unresolvedWarningCount": resolve_stat.get("待处理", 0),  # pending
            "ignoredWarningCount": resolve_stat.get("已忽略", 0),  # ignored
        }
        return detail_result

    def _get_content_detail(self, warnings: list) -> dict:
        """Aggregate content-compliance warnings (sensitive words, privacy
        leaks, page-change info) into report context data.

        Note: `warnings` is mutated in place (a "status" field is added).
        """
        affects_by_title = defaultdict(set)
        level_by_title = {}
        details_by_title = defaultdict(list)
        status_counter = defaultdict(int)

        for warning in warnings:
            title = warning.get("title")
            affects_by_title[title].add(warning.get("affects"))
            level_by_title[title] = self.level_mapping.get(warning.get("level"))
            if warning.get("isResolved"):
                status = "已处理"
            elif warning.get("ignoredTime"):
                status = "已忽略"
            else:
                status = "待处理"
            warning["status"] = status
            details_by_title[title].append(warning)
            status_counter[status] += 1

        title_count_detail = self._get_title_count_detail(affects_by_title, level_by_title)
        title_times_detail = self.get_content_title_times_detail(details_by_title, level_by_title)

        # Map the warning title to its template bucket.
        categories = {
            "敏感词": 'keyword',
            "隐私信息": 'privacy_disclosure',
            "变更信息": 'statistics',
        }
        types_detail = {bucket: [] for bucket in categories.values()}
        for title, rows in title_times_detail.items():
            bucket = categories.get(title)
            if bucket:
                types_detail[bucket].extend(rows)

        # Aggregate keyword/privacy hits per keyword category.
        types_detail["keyword"] = self.group_keyword(types_detail["keyword"], keyword_category_mapping)
        types_detail["privacy_disclosure"] = self.group_keyword(types_detail["privacy_disclosure"], privacy_category_mapping)

        return {
            "titleCountDetails": title_count_detail,
            "typesDetail": types_detail,

            "warningContentCount": self.get_sum_title_count(affects_by_title),  # findings, deduplicated by title+affects
            "warningKeywordCount": len(affects_by_title.get("敏感词", [])),  # sensitive words
            "warningPrivacyCount": len(affects_by_title.get("隐私信息", [])),  # privacy leaks
            "warningChangeCount": len(affects_by_title.get("变更信息", [])),  # change info

            "resolvedWarningCount": status_counter.get("已处理", 0),  # resolved
            "unresolvedWarningCount": status_counter.get("待处理", 0),  # pending
            "ignoredWarningCount": status_counter.get("已忽略", 0),  # ignored
        }

    def get_http_ping_detail(self, data, task_type):
        """Build availability (HTTP/PING) monitoring statistics for the report.

        Args:
            data: list of monitoring run dicts for this task type.
            task_type: prefix used to pop pre-aggregated node data from
                ``self.context`` under the key ``f"{task_type}_node_data"``.

        Returns:
            dict of summary counters, per-target detail rows and embedded
            chart images (InlineImage bound to ``self.tpl``).

        Side effects: pops ``f"{task_type}_node_data"`` from ``self.context``.
        """

        total_stat = {
            "taskCount": len(data),  # number of monitoring runs
            "alertCount": 0,  # runs that triggered an alert
            "warningCount": 0,  # total warning count
            "errorTaskCount": 0,  # runs flagged abnormal
            "monitorUrlsCount": 0,  # monitored URL total

            "errorNodeCount": 0,  # distinct abnormal nodes
            "mostErrorNode": "-",  # node with the most anomalies
            "mostErrorNodeCount": 0,  # its anomaly count
            "mostErrorNodeCountPercent": "-",  # its share of all anomalies

            "nodeErrorCountDetail": [],  # per-node anomaly detail
            "errorNodeDetail": [],  # per-target anomaly detail
        }
        node_times = defaultdict(int)  # anomaly occurrences per node area
        target_node_times = defaultdict(int)
        target_node_counts = {}
        target_area_data = defaultdict(dict)
        error_nodes_data = self.context.pop(f"{task_type}_node_data")
        error_nodes = error_nodes_data.pop("node_detail")
        # NOTE(review): assumes each node_item["_id"] carries "target"/"area"
        # and "additions" is a list — shaped by the upstream aggregation.
        for node_item in error_nodes:
            this_target = node_item["_id"]["target"]
            this_area = node_item["_id"]["area"]
            node_items_length = len(node_item["additions"])
            node_times[this_area] += node_items_length
            t = target_node_counts.setdefault(this_target, {})
            t.setdefault(this_area, 0)
            t[this_area] += node_items_length
            target_node_times[this_target] += node_items_length
            target_area_data[this_target][this_area] = node_item["additions"]

        affects_times = defaultdict(int)  # monitoring runs per affected address
        target_node_stat = {}  # per-target statistics
        for item in data:
            target = item.get("target")
            warning_count = item.get("result", {}).get("warningCount", 0)
            total_stat["warningCount"] += warning_count
            send_alert = item.get("isSendAlert", False)
            if send_alert:
                total_stat["alertCount"] += 1
            total_stat["monitorUrlsCount"] += item.get("result", {}).get("addition", {}).get("statistics", {}).get("urls", 0)
            security_status = True if item.get("securityStatus") == "warning" else False
            if security_status:
                total_stat["errorTaskCount"] += 1
            affects = item.get("target")
            # Aggregate run counts per affected address.
            affects_times[affects] += 1

            # Aggregate per target.
            if target not in target_node_stat:
                target_stat = {
                    "taskCount": 1,
                    "errorTaskCount": 1 if security_status else 0,
                    "note": item.get('name')

                }
                target_node_stat[target] = target_stat

            else:
                target_node_stat[target]["taskCount"] += 1
                target_node_stat[target]["errorTaskCount"] += 1 if security_status else 0
        # Enrich each target row with continuance (outage duration) details.
        node_continuance = error_nodes_data.pop("node_continuance")

        for target, item in target_node_stat.items():
            item_error_detail = []

            for res_item in node_continuance:
                if res_item.get("_id", {}).get("target") == target:
                    item_error_detail.extend(res_item.get("result", []))

            item["errorNodeCount"] = len(target_node_counts.get(target, {}).keys())
            item["errorNodeTime"] = target_node_times.get(target, 0)
            item["errorItemContinueDetail"] = item_error_detail
            max_continuance_item = max(item_error_detail, key=lambda x: x["continuanceTime"]) if item_error_detail else {}
            item["mostErrorTimeNode"] = max_continuance_item.get("area", "-")
            item["mostErrorTimeNodeContinuance"] = max_continuance_item.get("continuanceTime", "-")
            # The [] default is only hit when the target has no node data; the
            # chart helper treats any falsy value as "no data".
            chart_image = self.get_node_response_time_chart(target_area_data.get(target, []))
            item["nodeResponseTimeChart"] = InlineImage(self.tpl, chart_image, width=Mm(136), height=Mm(94))

            item["target"] = target
        total_stat["errorNodeDetail"] = target_node_stat

        # Overall node statistics and pie chart.
        if node_times:
            sum_node_count = sum(node_times.values())
            most_error_node = max(node_times, key=lambda x: node_times[x])
            most_error_node_count = node_times.get(most_error_node)
            total_stat["errorNodeCount"] = len(node_times.keys())
            total_stat["mostErrorNode"] = most_error_node
            total_stat["mostErrorNodeCount"] = most_error_node_count
            total_stat["mostErrorNodeCountPercent"] = format(most_error_node_count / sum_node_count, ".2%")
            total_stat["nodeErrorCountDetail"] = [{
                "node": node,
                "count": count,
                "percent": format(count / sum_node_count, ".2%")
            } for node, count in node_times.items()]

            # Build the pie chart, dropping zero-count nodes.
            labels = list(node_times.keys())
            values = [node_times.get(label, 0) for label in labels]

            pie_data = {k: v for k, v in zip(labels, values) if v}
            labels, values = list(pie_data.keys()), list(pie_data.values())

            chart_image = self.get_piecahrts_image(labels=labels, values=values, title="异常节点占比")
            total_stat["CategoryChart"] = InlineImage(self.tpl, chart_image, width=Mm(132), height=Mm(93))

        return total_stat

    def _get_title_count_detail(self, title_affects, title_level_mapping, title_type_mapping=None):
        sum_title_count = self.get_sum_title_count(title_affects)
        title_count_detail = [
            {
                "title": title,
                "count": len(affects),
                "category": title_type_mapping.get(title) if title_type_mapping else title,
                "level": title_level_mapping.get(title),
                "percent": format(len(affects) / sum_title_count, ".2%") if sum_title_count else "-"
            } for title, affects in title_affects.items()
        ]
        return title_count_detail

    @staticmethod
    def get_sum_title_count(title_affects: dict) -> int:
        return sum(len(affects) for title, affects in title_affects.items())

    @staticmethod
    def get_most_category(title_count_detail):
        # 最大类型
        most_title_item = max(title_count_detail, key=lambda x: x["count"]) if title_count_detail else {}
        most_title = most_title_item.get("category", "-")
        most_count = most_title_item.get("count", "-")
        most_title_percent = most_title_item.get("percent", "-")
        return most_title, most_count, most_title_percent

    def get_title_times_detail(self, title_details_mapping, title_level_mapping):
        """Deduplicate warnings per (title, affects) and count occurrences.

        The first occurrence of each affects keeps the warning dict itself,
        enriched with request/response text, a "times" counter and formatted
        timestamps; repeats only bump "times" and refresh status/timestamps.

        NOTE(review): "foundAt" is assumed to be a datetime (it is passed to
        strftime). After formatting, later comparisons parse the stored string
        back via arrow — this relies on DATETIME_FMT being arrow-parseable;
        TODO confirm.
        """
        title_times_detail = []
        for title, detail in title_details_mapping.items():
            title_affects = set()
            title_result = {}
            for item in detail:
                item_affects = item.get("affects")
                found_at = item.get("foundAt")

                if item_affects not in title_affects:
                    # First occurrence of this affects: keep and enrich it.
                    title_affects.add(item_affects)
                    traffics = item.get("traffics") or [{}]
                    request_data, response_data = self.request_response_info(request=traffics[-1].get("request", {}),
                                                                             response=traffics[-1].get("response", {}))

                    item["times"] = 1
                    item["foundAt"] = found_at.strftime(DATETIME_FMT) if found_at else "暂无数据"
                    item['firstFoundAt'] = found_at.strftime(DATETIME_FMT) if found_at else "暂无数据"

                    item["request"] = request_data or "暂无数据"
                    item["response"] = response_data or "暂无数据"
                    item["payload"] = item.get("payload") or "暂无数据"
                    item["reference"] = "\n".join(item.get("reference", [])) or "暂无数据"
                    item["recommendation"] = item.get("recommendation", "暂无数据")
                    item["level"] = title_level_mapping.get(title)
                    title_result[item_affects] = item
                else:
                    # Repeat occurrence: bump the counter, keep latest status,
                    # track newest "foundAt" and oldest "firstFoundAt".
                    title_result[item_affects]["times"] += 1
                    title_result[item_affects]["status"] = item["status"]
                    if found_at:
                        if title_result[item_affects]["foundAt"] == '暂无数据' \
                                or arrow.get(found_at) > arrow.get(title_result[item_affects]["foundAt"]):
                            title_result[item_affects]["foundAt"] = found_at.strftime(DATETIME_FMT)
                        if title_result[item_affects]['firstFoundAt'] == '暂无数据' or \
                                arrow.get(found_at) < arrow.get(title_result[item_affects]["firstFoundAt"]):
                            title_result[item_affects]['firstFoundAt'] = found_at.strftime(DATETIME_FMT)

            title_times_detail.append({
                "title": title,
                "level": title_level_mapping.get(title),
                "detail": list(title_result.values())
            })
        return title_times_detail

    @staticmethod
    def get_content_title_times_detail(title_details_mapping, title_level_mapping):
        """Deduplicate content warnings per (title, affects).

        Like get_title_times_detail but returns {title: rows} and, for
        keyword/privacy categories, merges the per-occurrence keyword detail
        dicts into one list for later aggregation by group_keyword.

        NOTE(review): "foundAt" is assumed to be a datetime; after formatting,
        later comparisons re-parse the stored string with arrow (requires
        DATETIME_FMT to be arrow-parseable) — TODO confirm.
        """
        title_times_detail = defaultdict(list)
        for title, detail in title_details_mapping.items():
            title_affects = set()
            title_result = {}
            for item in detail:
                item_affects = item.get("affects")
                found_at = item.get("foundAt")
                category = item.get("category")
                if item_affects not in title_affects:
                    # First occurrence of this affects: keep and enrich it.
                    title_affects.add(item_affects)
                    item["times"] = 1
                    item["level"] = title_level_mapping.get(title)
                    item["foundAt"] = found_at.strftime(DATETIME_FMT) if found_at else "暂无数据"
                    item['firstFoundAt'] = found_at.strftime(DATETIME_FMT) if found_at else "暂无数据"

                    if category in ["keyword", "privacy_disclosure"]:
                        item["detail"] = [{key: value} for key, value in item.get("detail", {}).items()]
                    title_result[item_affects] = item
                else:
                    title_result[item_affects]["times"] += 1
                    title_result[item_affects]["status"] = item["status"]
                    # Append this occurrence's keyword category/detail data.
                    if category in ["keyword", "privacy_disclosure"]:
                        title_result[item_affects]["detail"].extend([{key: value} for key, value in item.get("detail", {}).items()])
                    if found_at:
                        if title_result[item_affects]["foundAt"] == '暂无数据' \
                                or arrow.get(found_at) > arrow.get(title_result[item_affects]["foundAt"]):
                            title_result[item_affects]["foundAt"] = found_at.strftime(DATETIME_FMT)
                        if title_result[item_affects]['firstFoundAt'] == '暂无数据' or \
                                arrow.get(found_at) < arrow.get(title_result[item_affects]["firstFoundAt"]):
                            title_result[item_affects]['firstFoundAt'] = found_at.strftime(DATETIME_FMT)
            title_times_detail[title] = list(title_result.values())

        return title_times_detail

    def get_security_title_times_detail(self, title_details_mapping, title_level_mapping):
        """Deduplicate security-event warnings per (title, affects).

        Returns a list of {"title", "level", "detail": rows}; each row carries
        an occurrence counter, formatted first/latest found times and — for
        foreign-link events — the per-URL detail aggregated across occurrences.
        """
        title_times_detail = []
        for title, detail in title_details_mapping.items():
            title_affects = set()
            title_result = {}
            for item in detail:
                item_affects = item.get("affects")
                found_at = item.get('foundAt')
                if item_affects not in title_affects:
                    if item["category"] == SecurityEventPlugin.foreign_links.value:
                        # Seed the per-URL aggregate for this affects.
                        base_foreign_links_detail = {}
                        this_foreign_links_detail = item.get("detail", {})
                        self.group_foreign_links_to_base(base_foreign_links_detail, this_foreign_links_detail)
                        item["foreign_links_detail"] = base_foreign_links_detail
                        item['foreign_links_summary'] = self.get_foreign_links_summary(base_foreign_links_detail)
                    title_affects.add(item_affects)
                    item["times"] = 1
                    item["foundAt"] = found_at.strftime(DATETIME_FMT) if found_at else "暂无数据"
                    item['firstFoundAt'] = found_at.strftime(DATETIME_FMT) if found_at else "暂无数据"

                    item["level"] = self.level_mapping.get(item.get("level"))
                    title_result[item_affects] = item
                else:
                    title_result[item_affects]["times"] += 1
                    title_result[item_affects]["status"] = item["status"]
                    if item["category"] == SecurityEventPlugin.foreign_links.value:
                        # BUG FIX: read "detail" like the first-occurrence
                        # branch does; the previous "details" key never exists,
                        # so repeat occurrences were silently dropped from the
                        # foreign-link aggregate.
                        this_foreign_links_detail = item.get("detail", {})
                        self.group_foreign_links_to_base(base_mapping=title_result[item_affects]["foreign_links_detail"],
                                                         foreign_links=this_foreign_links_detail)
                    if found_at:
                        # Keep the newest "foundAt" and the oldest "firstFoundAt".
                        if title_result[item_affects]["foundAt"] == '暂无数据' \
                                or arrow.get(found_at) > arrow.get(title_result[item_affects]["foundAt"]):
                            title_result[item_affects]["foundAt"] = found_at.strftime(DATETIME_FMT)
                        if title_result[item_affects]['firstFoundAt'] == '暂无数据' or \
                                arrow.get(found_at) < arrow.get(title_result[item_affects]["firstFoundAt"]):
                            title_result[item_affects]['firstFoundAt'] = found_at.strftime(DATETIME_FMT)
            title_times_detail.append({
                "title": title,
                "level": title_level_mapping.get(title),
                "detail": list(title_result.values())
            })
        return title_times_detail

    @staticmethod
    def group_foreign_links_to_base(base_mapping, foreign_links):

        for link, detail in foreign_links.items():
            if link not in base_mapping:
                base_foreign_links_detail = {
                    'black_links': [],
                    'cryjack': [],
                    'malscan': [],
                    'icp': [],
                    'keyword': [],
                    'domains': []
                }
                base_foreign_links_detail.update(detail)
                base_mapping[link] = base_foreign_links_detail
            else:
                for link_type, type_data in detail.items():
                    base_mapping[link][link_type].extend(type_data)
        return base_mapping

    @staticmethod
    def get_foreign_links_summary(base_foreign_links_detail):
        """Map each external URL to the display names of the link types that
        actually have hits (empty categories skipped, names deduplicated)."""
        summary = {}
        for url, type_map in base_foreign_links_detail.items():
            display_names = {
                FOREIGN_LINKS_TYPE_DISPLAY_NAME.get(link_type, link_type)
                for link_type, hits in type_map.items() if hits
            }
            summary[url] = list(display_names)
        return summary

    def get_node_response_time_chart(self, data):
        """Render a per-node response-time line chart and return it as an
        in-memory JPEG (BytesIO) for docx embedding.

        Args:
            data: mapping of node area -> list of samples; each sample is
                expected to carry "monitor_time" and "totalTime".
        """
        fig = go.Figure()
        if not data:
            # Placeholder single-point trace when there is no node data.
            fig.add_trace(go.Scatter(x=[0], y=[0]))
        else:
            for node_area, samples in data.items():
                monitor_times = [sample.get("monitor_time") for sample in samples]
                totals = [sample.get("totalTime") for sample in samples]
                fig.add_trace(go.Scatter(x=monitor_times, y=totals, text=totals,
                                         textposition="top center",
                                         mode='lines+markers', name=node_area))

        fig.update_layout(title="异常节点平均响应时间", xaxis_title="监测日期", yaxis_title="平均响应时间（毫秒）")
        image_io = BytesIO()
        fig.write_image(image_io, format="jpeg", scale=2)
        return image_io

    @staticmethod
    def group_keyword(details, keyword_category_mapping):

        # 敏感词种类出现次数
        for item in details:
            keyword_category = defaultdict(int)
            keyword_self = defaultdict(int)
            keyword_detail = defaultdict(set)
            keywords = item.get("detail")
            for category_mapping in keywords:
                for category_index, words in category_mapping.items():
                    category = keyword_category_mapping.get(int(float(category_index)))
                    keyword_category[category] += 1
                    for word in words:
                        keyword_self[word] += 1
                        keyword_detail[category].add(word)
            item["keywordCategoryCount"] = keyword_category
            item["keywordCount"] = keyword_self
            item["detail"] = keyword_detail
        return details

    @staticmethod
    def _decode_b64str(b64str):
        try:
            # 把"\x00" 替换为空， 否则模板加载出错
            result = base64.b64decode(b64str.encode()).decode("utf-8").replace("\x00", "")
            return result
        except:
            return ""

    def request_response_info(self, request, response):
        """Build printable raw-HTTP request and response texts.

        Args:
            request: dict with "method", "url", "headers", "b64content".
            response: dict with "version", "status_code", "headers", "b64content".

        Returns:
            (req_data, res_data): plain-text renderings; an empty string when
            the corresponding side carries no content.
        """
        # -- request side --
        body = self._decode_b64str(request.get("b64content", ""))
        method = request.get('method', "")
        u_parse = urlparse(request.get("url", ""))
        url = f"{u_parse.path}?{u_parse.query}"
        http = response.get("version", "")
        req_headers_list = [f"{key}:{value}" for key, value in request.get("headers", {}).items()]
        req_headers = '\n'.join(req_headers_list)
        req_joins = [method, url, http]
        req_data = " ".join(req_joins) if request else ""
        req_data = f"{req_data}\n{req_headers}"
        if body:
            # BUG FIX: previously appended req_data to itself instead of the
            # decoded request body.
            req_data = f"{req_data}\n\n{body}"

        # -- response side --
        body_resp = self._decode_b64str(response.get("b64content", ""))
        status_code = str(int(response.get("status_code", -1)))
        res_headers_list = [f"{key}:{value}" for key, value in response.get("headers", {}).items()]
        res_joins = [http, status_code]
        res_headers = '\n'.join(res_headers_list)
        res_data = " ".join(res_joins) if response else ""
        res_data = f"{res_data}\n{res_headers}"
        if body_resp:
            # PDF payloads are dropped — binary-ish content breaks the report.
            if body_resp.startswith('%PDF') and body_resp.endswith('%%EOF'):
                body_resp = ''
            res_data = f"{res_data}\n\n{body_resp}"

        # Collapse "no start line, no headers" renderings to empty strings.
        if req_data == '\n':
            req_data = ''
        if res_data == '\n':
            res_data = ''
        return req_data, res_data

    @staticmethod
    def node_time_sum(node_dict):
        detail = list(node_dict.values())[0]
        return sum([item["continuanceTime"] for item in detail])

    @staticmethod
    def keyword_filter(arr, count_mapping):
        result = []
        for i in arr:
            result.append(f"{i}")
        return "\r\n".join(result)

    @staticmethod
    def keyword_count_filter(arr, count_mapping):
        result = []
        for i in arr:
            i_count = count_mapping.get(i)
            result.append(f"{i}({i_count})")
        return "\r\n".join(result)

    @staticmethod
    def category_count_filter(category, count_mapping):
        return f"{category}({count_mapping.get(category)})"

    @staticmethod
    def node_count_filter(count_dict):
        return sum(count_dict.values())

    @staticmethod
    def dict_keys_filter(d: dict):
        """输出字典键组成的\n字符串"""
        return "\r\n".join(list(d.keys()))

    @staticmethod
    def rule_key_filter(keywords: list):
        """输出外链违规内容详情"""
        return "\r\n".join([key.get("risk_rule", "") for key in keywords])

    @staticmethod
    def icp_filter(icps: list):
        """输出未备案域名详情"""
        # result = {icp.get("risk_rule", "") for icp in icps}
        result = {icp.get("risk_domain", "") for icp in icps}
        return "\r\n".join(result)

    @staticmethod
    def domains_filter(domains: list):
        """输出风险域名详情"""
        result = {domain.get("extra_details", "") for domain in domains}
        return "\r\n".join(result)

    @staticmethod
    def black_links_filter(black_links: list):
        """输出暗链风险地址下的所有恶意链接"""
        risk_links = []
        for item in black_links:
            links = item.get("links", [])
            risk_links.extend(links)
        return list(set(risk_links))

    @staticmethod
    def cryjack_filter(cryjack: list):
        """输出挖矿关键词：链接资源详情"""
        item_result = []
        for c in cryjack:
            detail = c.get("results", [])
            item_result.extend(detail)
        return item_result

    @staticmethod
    def cert_info_filter(addition: dict):
        if not addition:
            return ""
        cert_info = addition.get("cert_info", {})
        result = {
            "颁发者": cert_info.get("issuer", "暂无数据"),
            "开始时间": arrow.get(cert_info.get("not_valid_before")).format("YYYY-MM-DD HH:mm:ss") if cert_info.get("not_valid_before") else "暂无数据",
            "结束时间": arrow.get(cert_info.get("not_valid_after")).format("YYYY-MM-DD HH:mm:ss") if cert_info.get("not_valid_after") else "暂无数据",
        }
        return "\r\n".join([f"{key}:{value}" for key, value in result.items()])

    @staticmethod
    def range_time_filter(range_time_dict: dict):
        """Render a "start~end" range, both ends converted via utc2asia_filter."""
        converted = [
            ExportDocxHandler.utc2asia_filter(range_time_dict.get(field))
            for field in ("start_time", "end_time")
        ]
        return "{}~{}".format(*converted)

    @staticmethod
    def trans_hms_filter(seconds):
        m, s = divmod(seconds, 60)
        h, m = divmod(m, 60)
        return "%02d:%02d:%02d" % (h, m, s)

    @staticmethod
    def strtime_filter(strtime):
        """Parse *strtime* with arrow and render as 'YYYY/MM/DD HH:MM:SS'."""
        parsed = arrow.get(strtime).datetime
        return parsed.strftime('%Y/%m/%d %H:%M:%S')


class OneTaskExportMonitorDocxHandler(ExportMonitorDocxHandler):
    """Docx export handler for a single monitoring job's report.

    Extends the shared monitor handler: adds per-section bar charts rendered
    into the template and single-task context tweaks (previous asset run,
    IPv6 support details).
    """

    def context_vul(self, vul):
        """Vulnerability monitoring context (web vulnerability scan results)."""
        # Update the basic statistics for this section.
        total_stat = self._get_base_stat(vul)

        # Gather warning details across every monitoring run.
        all_warnings = []
        for vul_item in vul:
            all_warnings.extend(vul_item.get("result", {}).get("warnings", []))
        vul_details = self._get_vul_detail(warnings=all_warnings)
        total_stat.update(vul_details)
        # bar chart: warning counts per severity (critical/high/medium/low).
        bar_data = [total_stat['warningCriticalVulCount'], total_stat['warningHighVulCount'], total_stat['warningMediumVulCount'], total_stat['warningLowVulCount']]
        bar_labels = ['严重', '高危', '中危', '低危']
        bar_charts = self.get_barcharts_image(
            x=bar_labels,
            y=bar_data,
            title="Web漏洞"
        )
        total_stat['barChart'] = InlineImage(self.tpl, bar_charts, width=Mm(93), height=Mm(66))
        return total_stat

    def context_ssl(self, ssl):
        """SSL security monitoring context; adds a grouped bar chart."""
        total_stat = super(OneTaskExportMonitorDocxHandler, self).context_ssl(ssl)
        # bar chart: first series is protocol warnings by severity, second
        # series counts certificate issues in the "high" slot only.
        bar_data = [
            [total_stat['warningCriticalProtocolCount'], total_stat['warningHighProtocolCount'],
             total_stat['warningMediumProtocolCount'], total_stat['warningLowProtocolCount']],
            [0, len(total_stat['certificateDetails']), 0, 0]
        ]
        bar_labels = ['严重', '高危', '中危', '低危']
        bar_charts = self.get_barcharts_image(
            x=bar_labels,
            y=bar_data,
            name=['SSL协议漏洞', 'SSL证书配置'],
            title="SSL安全数量",
            multi=True
        )
        total_stat['barChart'] = InlineImage(self.tpl, bar_charts, width=Mm(93), height=Mm(66))
        return total_stat

    def context_asset(self, asset):
        """Asset-change monitoring context; adds the previous run's end time."""
        this_task = asset[0] if asset else {}
        total_stat = super(OneTaskExportMonitorDocxHandler, self).context_asset(asset)

        # Look up the end time of the previous completed asset task for the
        # same user, strictly earlier than this task's end time.
        total_stat["lastTime"] = ""
        uid = this_task.get('uid', '')
        this_end_time = this_task.get('endTime', '')
        if uid and this_end_time:
            tmp_last_task = list(Task._get_collection().find(
                {"uid": uid, "taskType": "asset", "status": "completed", "endTime": {"$lt": this_end_time}},
                {"endTime": 1}).sort([("endTime", -1), ]).limit(1))
            last_task = tmp_last_task[0] if tmp_last_task else {}
            if last_time := last_task.get("endTime", ""):
                # Shift by +8 hours for Asia/Shanghai display.
                total_stat["lastTime"] = arrow.get(last_time).shift(hours=8).datetime
        return total_stat

    def context_ipv6(self, ipv6):
        """IPv6 support monitoring context built from the latest scan event."""
        if not ipv6:
            return {}
        dict_task = ipv6[0]
        is_support_ipv6 = dict_task.get('result', {}).get('addition', {}).get('is_support_ipv6', False)
        event = ScanEvents.objects.filter(task_session_id=dict_task.get('taskSessionId')).first()
        event_detail = event.detail if event else {}
        event_detail, total_score = parse_ipv6_event(event_detail)
        event_detail.setdefault('ipv6_website_support_check', {})

        # Availability stats are only queried when no source IPs were recorded
        # in the render context; -1 marks "not queried".
        ava_data = {'http_rate': -1}
        if not self.context.get('sourceIps'):
            ava_data = get_ipv6_ava_results(dict_task.get('uid'), dict_task.get('jobId'))

        total_stat = {
            'str_end_time': arrow.get(dict_task.get('endTime')).shift(hours=8).strftime(DATETIME_FMT_S),
            'name': dict_task.get('name'),
            'detail': event_detail,
            'is_support_ipv6': is_support_ipv6,
            'ava_data': {
                'total': ava_data.get('total', 0),
                'abnormal': ava_data.get('abnormal', 0),
                'http_rate': ava_data.get('availability', 0),
            },
            'has_http_structure_check': False,
            'has_http_content_check': False,
            'score': total_score,
        }

        # Flag v4/v6 diff sections that actually have content to show; a
        # similarity of exactly 1 means identical, so nothing to display.
        for key in ('http_structure_check', 'http_content_check'):
            check_dict = event_detail.get(key, {})
            if int(check_dict.get('similarity', -1)) == 1:
                continue
            text = check_dict.get('text', "").splitlines(keepends=True)
            text_v6 = check_dict.get('text_v6', "").splitlines(keepends=True)
            # total_stat[f"{key}_count"] = len(list(difflib.Differ().compare(text, text_v6)))
            if text or text_v6:
                total_stat[f"has_{key}"] = True

        return total_stat


class ExportMonitorsDocxHandler(ExportMonitorDocxHandler):

    def __init__(self, base_template, single_template, **kwargs):
        """Multi-job monitor report exporter.

        Args:
            base_template: docx template path for the combined (summary) report.
            single_template: docx template used per job when one document per
                asset is exported.
            **kwargs: forwarded to ExportMonitorDocxHandler.
        """
        super().__init__(base_template, base_context={}, multiple=True, **kwargs)
        self.single_template = single_template
        # {job_id: {task_type: [task documents]}} raw data per job.
        self.job_context_maps = dict()
        # {job_id: base render context (host, times, score, node data, ...)}.
        self.job_base_context_maps = dict()
        # {job_id: final merged context used for rendering}.
        self.final_context_maps = dict()
        # [{"note": ..., "target": ...}] for every exported job.
        self.job_list = []
        # Overall report time window (set to datetimes once resolved).
        self.start_time = ''
        self.end_time = ''
        # {job_id: [taskSessionId]} for session-bearing task types.
        self.task_session_ids = dict()
        # {job_id: [startTime]} for the same task set.
        self.task_start_times = dict()

    def get_job_task_type_mapping(self, user_id, report_type, job_id_list, start_date, end_date) -> dict:
        """
        Collect tasks for the three report types ["latest", "previous", "range"]
        (latest run, previous run, and time-range filtering), group them per job
        and per task type, and fill the per-job base render context (score,
        times, node data, source IPs, ...).

        Returns: {job_id: task data keyed by task type}:
        {"ssl": [mongoObject], "vul": [], "securityEvent": [], "http": [], "ping": [], "content": [], "asset": []}

        """

        normal_task_type_tuple = ('ssl', 'vul', 'securityEvent', 'http', 'ping', 'content')
        special_task_type_tuple = ("asset",)
        # Projection: strip heavy/unused fields from the task documents.
        project = {
            "taskSettings": 0,
            "alertSettings": 0,
            "triggerType": 0,
            "isSpecialTask": 0,
            "progress": 0,
            "isLastTask": 0,
            "taskIdx": 0,
            "refType": 0,
            "refId": 0,
            "taskId": 0,
            "nextExecDate": 0,
            "result.addition.detail": 0
        }

        if start_date:
            self.start_time = datetime.strptime(start_date, DATETIME_FMT)
        if end_date:
            self.end_time = datetime.strptime(end_date, DATETIME_FMT)

        # Query the jobs being exported and seed each job's base context.
        job_ids = []
        job_list = Job.objects.filter(uid=user_id, id__in=job_id_list)
        now = datetime.now().strftime("%Y/%m/%d")
        default_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        for job in job_list:
            self.job_list.append({"note": job.note, "target": job.targetUrl})
            job_ids.append(job.id)
            self.job_base_context_maps[job.id] = {
                "host": job.target.host,
                "startTime": default_time,
                "endTime": default_time,
                "note": job.note,
                "target": job.targetUrl,
                "score": {'score': 0, 'level': '未知'},
                "targetStatusDisplay": "",
                "sourceIps": [],
                "reportDate": now,
                "http_node_data": {
                    "node_continuance": [],
                    "node_detail": [],
                },
                "ping_node_data": {
                    "node_continuance": [],
                    "node_detail": [],
                },
                "taskCountMap": {"web_sec_count": 0}
            }

            if start_date:
                self.job_base_context_maps[job.id]['startTime'] = self.start_time
            if end_date:
                self.job_base_context_maps[job.id]['endTime'] = self.end_time

        # For "latest" reports the task ids live in the job's
        # <taskType>ResultId fields.
        if report_type in ["latest", ]:
            task_type_id_maps, tasks = self.get_tasks_by_report_type(job_list, normal_task_type_tuple, project)
            for job_id, task_type_ids in task_type_id_maps.items():
                self.job_base_context_maps[job_id]['http_node_data'] = self.get_http_node_data(task_type_ids, job_id)
                self.job_base_context_maps[job_id]['ping_node_data'] = self.get_ping_node_data(task_type_ids, job_id)
        # Otherwise filter tasks directly by their end time within the
        # requested date range.
        else:
            start_date = datetime.strptime(start_date, DATETIME_FMT)
            end_date = datetime.strptime(end_date, DATETIME_FMT)
            query_start_date = start_date.astimezone(pytz.timezone('Asia/Shanghai'))
            query_end_date = end_date.astimezone(pytz.timezone('Asia/Shanghai'))
            tasks = self.get_task_by_datetime(project, job_ids, query_start_date, query_end_date)

            for job_id in job_ids:
                self.job_base_context_maps[job_id]['http_node_data'] = self.get_http_node_data([], job_id,
                                                                                               query_start_date,
                                                                                               query_end_date)
                self.job_base_context_maps[job_id]['ping_node_data'] = self.get_ping_node_data([], job_id,
                                                                                               query_start_date,
                                                                                               query_end_date)

        # Group tasks per job, and remember session ids / start times for the
        # scan-bearing task types. only_http_maps tracks jobs whose tasks are
        # exclusively "http" (their score level is reset to unknown below).
        job_task_maps = dict()
        only_http_maps = {_job_id: True for _job_id in job_id_list}
        for task in tasks:
            if only_http_maps.get(task.get('jobId')) and task.get('taskType') != 'http':
                only_http_maps[task.get('jobId')] = False
            job_task_maps.setdefault(task.get('jobId'), []).append(task)
            if (task.get("taskType") in ["content", "securityEvent", "ssl", "vul"]
                    and (task_session_id := task.get("taskSessionId"))):
                self.task_session_ids.setdefault(task.get("jobId"), []).append(task_session_id)
            if (task.get("taskType") in ["content", "securityEvent", "ssl", "vul"]
                    and (start_time := task.get("startTime"))):
                self.task_start_times.setdefault(task.get("jobId"), []).append(start_time)

        all_task_type = normal_task_type_tuple + special_task_type_tuple
        for job_id, tasks in job_task_maps.items():
            task_type_mapping = {_type: [] for _type in all_task_type}
            for task in tasks:
                if task.get('taskType') not in all_task_type:  # fix for issue websoc_6851: skip unexpected task types
                    continue
                task_type_mapping[task.get("taskType")].append(task)
            self.job_context_maps[job_id] = task_type_mapping

            # Compute each asset's score and derived display fields.
            score, last_status = self.get_monitor_score(task_type_mapping)
            self.job_base_context_maps[job_id]['score'] = score
            self.job_base_context_maps[job_id]['targetStatusDisplay'] = TASK_TARGET_STATUS_MAP.get(last_status, "")
            self.job_base_context_maps[job_id]['sourceIps'] = self.get_source_ips(task_type_mapping)
            self.job_base_context_maps[job_id]['taskCountMap'] = self.get_job_task_count_map(task_type_mapping)

            # Update the report's monitoring time window from the task times.
            if report_type in ["latest", ]:
                start, end = self.get_monitor_times(task_type_mapping)
                _start = datetime.strptime(datetime.strftime(start, DATETIME_FMT), DATETIME_FMT)
                _end = datetime.strptime(datetime.strftime(end, DATETIME_FMT), DATETIME_FMT)
                if not self.start_time or self.start_time > _start:
                    self.start_time = _start
                if not self.end_time or self.end_time < _end:
                    self.end_time = _end

                self.job_base_context_maps[job_id]['startTime'] = start
                self.job_base_context_maps[job_id]['endTime'] = end
        # Jobs with only http tasks and an all-zero score get an unknown level.
        for job_id, bool_status in only_http_maps.items():
            if not bool_status:
                continue
            if sum([v for v in self.job_base_context_maps[job_id]['score'].values() if isinstance(v, int)]) == 0:
                self.job_base_context_maps[job_id]['score']['level'] = '未知'
        return self.job_context_maps

    def get_tasks_by_report_type(self, job_list, normal_task_type_tuple, project):
        """Resolve each job's latest result ids and fetch the matching tasks.

        Args:
            job_list: mongoengine Job queryset/list to inspect.
            normal_task_type_tuple: task types whose ids live in the job's
                <taskType>ResultId fields.
            project: mongo projection applied to the task query.

        Returns:
            (task_type_id_maps, tasks): {job_id: [task ObjectIds]} plus a
            pymongo cursor over the completed tasks with those ids.
        """
        task_type_ids = []
        asset_id_list = []
        task_type_id_maps = dict()
        # Collect result ids for every monitored type except "asset".
        for job in job_list:
            asset_id_list.append(job.assetId)
            for task_type in normal_task_type_tuple:
                task_id = job.to_mongo().get(f"{task_type}ResultId")
                # The field may hold a single ObjectId or a list of them.
                if isinstance(task_id, ObjectId):
                    task_type_ids.append(task_id)
                    task_type_id_maps.setdefault(job.id, list()).append(task_id)
                elif isinstance(task_id, list):
                    if task_id and task_id[0]:
                        task_type_ids.extend(task_id)
                        task_type_id_maps.setdefault(job.id, list()).extend(task_id)

        # "asset" result ids are stored on the Asset document itself, so they
        # are resolved separately from the other task types.
        asset_list = Asset.objects.filter(id__in=asset_id_list)
        for asset in asset_list:
            asset_task_id = asset.to_mongo().get("resultId")
            if asset_task_id:
                task_type_ids.append(asset_task_id)
                task_type_id_maps.setdefault(asset.jobId, list()).append(asset_task_id)

        query = {"_id": {"$in": task_type_ids}, 'status': 'completed'}
        tasks = Task._get_collection().find(query, project)
        return task_type_id_maps, tasks

    def get_task_by_datetime(self, project, job_id_list, start_date, end_date):
        """Fetch completed tasks for the given jobs inside a time window.

        Args:
            project: mongo projection dict.
            job_id_list: job ids to match.
            start_date: inclusive lower bound on startTime.
            end_date: inclusive upper bound on endTime.

        Returns:
            pymongo cursor over the matching task documents.
        """
        query = {
            "jobId": {"$in": job_id_list},
            "status": "completed",
            "startTime": {"$gte": start_date},
            "endTime": {"$lte": end_date},
        }
        return Task._get_collection().find(query, project)

    def get_monitor_score(self, task_type_mapping):
        """Compute the asset score and latest target status from task warnings.

        Args:
            task_type_mapping: {task_type: [task documents]}.

        Returns:
            (score_dict, last_status): score info from Score.score_info, plus
            the targetStatus of the most recently finished scored task.
        """
        # Only these task types contribute to the score.
        score_types = ['vul', 'ssl', 'securityEvent', 'content']
        all_warnings = []
        target_status_list = []
        for score_type in score_types:
            # Guard against a missing type: .get() could return None, which
            # previously raised TypeError when iterated.
            type_data = task_type_mapping.get(score_type) or []
            for data in type_data:
                this_warnings = data.get("result", {}).get("warnings", [])
                target_status_list.append({
                    "end_time": data.get("endTime", datetime.now()),
                    "target_status": data.get("result", {}).get("targetStatus", {}).get("status", "good")
                })
                if score_type == 'content':
                    # Content-compliance warnings carry no level; score as level 2.
                    for warning in this_warnings:
                        warning['level'] = 2
                        all_warnings.append(warning)
                else:
                    all_warnings.extend(this_warnings)
        score_dict = Score.score_info(event_map={}, warnings=all_warnings)
        # No reachable target and no warnings at all -> score level is unknown.
        if "good" not in [t["target_status"] for t in target_status_list] and not all_warnings:
            score_dict.update({"level": "未知"})
        last_status = "good"
        if target_status_list:
            # Status of the task that finished last.
            last_status = list(sorted(target_status_list, key=lambda t: t["end_time"]))[-1]["target_status"]
        return score_dict, last_status

    def get_source_ips(self, task_type_mapping):
        """Collect the distinct truthy sourceIp values across every task."""
        unique_ips = {
            task.get('sourceIp')
            for task_list in task_type_mapping.values()
            for task in task_list
            if task.get('sourceIp')
        }
        return list(unique_ips)

    def get_job_task_count_map(self, task_type_mapping):
        """Count tasks per type, plus an aggregate web-security counter."""
        counts = {"web_sec_count": 0}
        web_sec_types = ('vul', 'ssl', 'securityEvent', 'content')
        for task_type, task_list in task_type_mapping.items():
            size = len(task_list)
            if task_type in web_sec_types:
                counts["web_sec_count"] += size
            counts[task_type] = size
        return counts

    def get_monitor_times(self, task_type_mapping):
        """Return the (earliest, latest) task times shifted to UTC+8.

        Falls back to the current UTC time when no task carries any time.
        """
        times = [
            stamp
            for tasks in task_type_mapping.values()
            for task in tasks
            for stamp in (task.get("startTime"), task.get('endTime'))
            if stamp
        ]
        start = arrow.get(min(times, default=datetime.utcnow())).shift(hours=8).datetime
        end = arrow.get(max(times, default=datetime.utcnow())).shift(hours=8).datetime
        return start, end

    def get_http_node_data(self, task_type_ids, job_id, query_start_date=None, query_end_date=None):
        """Build abnormal-node data (continuance + detail) for http tasks."""
        common_args = (task_type_ids, job_id, query_start_date, query_end_date)
        continuance = self.aggregate_http_ping_node_error_continuance(*common_args, task_type="http")
        detail = self.aggregate_error_node_detail(*common_args, task_type="http")
        return {
            "node_continuance": continuance,
            "node_detail": detail,
        }

    def get_ping_node_data(self, task_type_ids, job_id, query_start_date=None, query_end_date=None):
        """Build abnormal-node data (continuance + detail) for ping tasks."""
        common_args = (task_type_ids, job_id, query_start_date, query_end_date)
        continuance = self.aggregate_http_ping_node_error_continuance(*common_args, task_type="ping")
        detail = self.aggregate_error_node_detail(*common_args, task_type="ping")
        return {
            "node_continuance": continuance,
            "node_detail": detail,
        }

    def aggregate_http_ping_node_error_continuance(self, task_type_ids, job_id, query_start_date=None,
                                                   query_end_date=None, task_type=None):
        """Aggregate per-(area, target) warning spans for http/ping nodes.

        Matches either explicit task ids or a (job, task_type, time-window)
        filter, then groups node detail records and runs a server-side JS
        $function that turns consecutive "warning" records into outage spans
        (area, ip, avgTime, continuanceTime, timeRange).

        Returns:
            list of aggregation result documents, one per (area, target).
        """

        # JS executed on the MongoDB server via $function: computes the
        # duration of abnormal-node streaks within the pushed detail array.
        js_code = code.Code(
            """
      function get_node_error_time (node_time_list) {
          const error_list = []
          let start_index = -1
          let end_index = -1
          let end_time = null
          let start_time = null
          const final_elem_index = node_time_list.length - 1
          node_time_list.sort((a, b) => {
            return new Date(b.monitor_time) - new Date(a.monitor_time)
          }).reverse().map((item, index) => {
            const has_error = item.securityStatus === "warning"

            if (has_error && start_index === -1) {

              start_index = index
              start_time = item.monitor_time
            }
            if (!has_error && start_index !== -1) {
              end_index = index
            } else if (has_error && start_index !== -1 && index === final_elem_index) {
              end_index = index
            }
            if (end_index !== -1) {
              end_time = item.monitor_time
            }

            if (start_time && end_time) {

              error_item = {
                area: item.area,
                ip: item.ip,
                avgTime: (node_time_list.slice(start_index, end_index).reduce((total, i) => {
                  total += Number(i.totalTime)
                  return total
                }, 0) / (end_index - start_index)).toFixed(2),
                continuanceTime: (end_time - start_time) / 1000,
                timeRange: { start_time, end_time }
              }
              start_index = -1
              end_index = -1
              start_time = null
              end_time = null
              error_list.push(error_item)
            }
          })
          return error_list
        }
            """
        )
        # Two match modes: explicit task ids, or a job/type/time window.
        if task_type_ids:
            match = {
                "$match": {
                    "_id": {"$in": task_type_ids}
                }
            }
        else:
            match = {
                "$match": {
                    "jobId": job_id,
                    "taskType": task_type,
                    "startTime": {
                        "$gte": query_start_date,
                        "$lte": query_end_date
                    }
                }
            }

        pipeline = [
            match,
            {
                # Keep only the node-detail fields; stamp each detail with the
                # task's startTime and target for later grouping.
                "$project": {
                    "result.addition.detail.area": 1,
                    "result.addition.detail.securityStatus": 1,
                    "result.addition.detail.ip": 1,
                    "result.addition.detail.totalTime": 1,
                    "result.addition.detail.monitor_time": "$startTime",
                    "result.addition.detail.target": "$target"

                }
            },
            {
                "$unwind": "$result.addition.detail"
            },
            {
                "$group": {
                    # "_id": "$result.addition.detail.area",
                    "_id": {"area": "$result.addition.detail.area", "target": "$result.addition.detail.target"},
                    "additions": {
                        "$push": "$result.addition.detail"
                    }
                }
            },
            {
                "$project": {
                    "result": {
                        "$function": {
                            # NOTE(review): js_code is already a code.Code —
                            # wrapping it again appears redundant; confirm.
                            "body": code.Code(js_code),
                            "args": ["$additions"],
                            "lang": "js"
                        }
                    }
                }
            }
        ]

        # result = Task.objects.aggregate(pipeline)
        result = Task._get_collection().aggregate(pipeline)
        return list(result)

    def aggregate_error_node_detail(self, task_type_ids, job_id, query_start_date=None, query_end_date=None,
                                    task_type=None):
        """Aggregate per-(target, area) details of nodes in "warning" state.

        Matches either explicit task ids or a (job, task_type, time-window)
        filter, mirroring aggregate_http_ping_node_error_continuance.

        Args:
            task_type_ids: explicit task ObjectIds; when falsy the window
                filter below is used instead.
            job_id / task_type / query_start_date / query_end_date: the
                window filter used when task_type_ids is empty.

        Returns:
            list of {"_id": {"target", "area"}, "additions": [detail, ...]}.
        """
        if task_type_ids:
            match = {
                "$match": {
                    "_id": {"$in": task_type_ids}
                }
            }
        else:
            match = {
                "$match": {
                    "jobId": job_id,
                    "taskType": task_type,
                    "startTime": {
                        "$gte": query_start_date,
                        "$lte": query_end_date
                    }
                }
            }
        pipeline = [
            match,
            {
                # Keep node-detail fields; stamp each with the task's
                # startTime and target for grouping.
                "$project": {
                    "result.addition.detail.area": 1,
                    "result.addition.detail.securityStatus": 1,
                    "result.addition.detail.ip": 1,
                    "result.addition.detail.totalTime": 1,
                    "result.addition.detail.has_error": 1,
                    "result.addition.detail.monitor_time": "$startTime",
                    "result.addition.detail.target": "$target",
                }
            },
            {
                "$unwind": "$result.addition.detail"
            },
            {
                # Keep only node records flagged as warnings.
                "$match": {
                    "result.addition.detail.securityStatus": "warning"
                }
            },
            {
                "$group": {
                    "_id": {
                        "target": "$result.addition.detail.target",
                        "area": "$result.addition.detail.area"
                    },
                    "additions": {
                        "$push": "$result.addition.detail"
                    }
                }
            }
        ]
        result = Task._get_collection().aggregate(pipeline)
        # Materialize the cursor so callers can iterate it more than once and
        # to match aggregate_http_ping_node_error_continuance's return type
        # (which previously returned list while this returned a raw cursor).
        return list(result)

    # Invokes the matching context_* handler for every task type and merges
    # the processed results into the final rendering context.
    def make_job_contexts(self, download_type):
        """Build the per-job template context maps from the collected data.

        Each task type's data is processed by its context_<type> method and
        stored under the task-type key, e.g. data handled by
        context_keyword_inttrend_data lands as {"keyword_inttrend_data": ...}.

        Args:
            download_type: "1" selects one document per job (uses the
                single-job template); any other value keeps the combined
                template, except when only one job is exported.
        """
        # If the class exposes a context handler for a data key, run it and
        # merge its output into that job's context.
        context_method_maps = {
            "ssl": "context_ssl",
            "vul": "context_vul",
            "securityEvent": "context_securityEvent",
            "http": "context_http",
            "ping": "context_ping",
            "content": "context_content",
            "asset": "context_asset",
        }

        for job_id, base_context in self.job_base_context_maps.items():
            self.context = base_context
            # Per-job docx template when exporting one file per asset (or when
            # there is only a single job anyway).
            if download_type == '1' or len(self.job_list) == 1:
                tpl = DocxTemplate(self.single_template)
                self.tpl = tpl
                base_context['self_tpl'] = tpl
            for key, context_method in context_method_maps.items():
                if hasattr(self, context_method):
                    data = self.job_context_maps.get(job_id, {}).get(key, [])
                    # Process each task-type dataset with its context_<key> handler.
                    this_context = getattr(self, context_method)(data)
                    # Merge the result into the final rendering context.
                    self.context.update({key: this_context})
                    base_context.update({key: this_context})

    def make_job_contexts_summary(self):
        """Add an "overview" section to every job context.

        Sums taskCount/warningCount/alertCount across the per-type contexts,
        backfills monitorUrlsCount on the scanning sections, and resolves the
        job's scanned/monitored URL counts from its task sessions.
        """
        for job_id, context in self.job_base_context_maps.items():
            task_count = 0
            warning_count = 0
            alert_count = 0
            for item_name, detail in context.items():
                if isinstance(detail, dict):
                    task_count += detail.get("taskCount", 0)
                    warning_count += detail.get("warningCount", 0)
                    alert_count += detail.get("alertCount", 0)

                    # Scanning sections always expose monitorUrlsCount for the
                    # template, defaulting to 0.
                    if item_name in ("vul", "ssl", "securityEvent", "content", "asset"):
                        if 'monitorUrlsCount' not in detail:
                            detail['monitorUrlsCount'] = 0
            scan_count = 0
            monitor_urls_count = 0
            if task_session_ids := self.get_task_session_ids(job_id):
                scan_count = get_monitor_urls(task_session_id=task_session_ids, is_scan_count=True)
                monitor_urls_count = len(get_monitor_urls(task_session_id=task_session_ids))
            context["overview"] = {
                "taskCount": task_count,
                "warningCount": warning_count,
                "alertCount": alert_count,
                "monitorUrlsCount": monitor_urls_count,
                "scanCount": scan_count,
                "is_display_url": self.is_display_url(job_id)
            }

    def get_monitor_summary(self, job_base_context_maps):
        """Roll all per-job contexts up into one fleet-wide summary.

        Args:
            job_base_context_maps: mapping of job id -> rendered context dict,
                each carrying "overview"/"vul"/"ssl"/"securityEvent"/
                "content"/"http"/"ping"/"asset" sections.

        Returns:
            Tuple of (summary, vul_type_maps, vul_level_chart,
            security_event_type_maps) used by the aggregate report page.
        """
        summary = {
            "assetCount": 0,
            "taskCount": 0,
            "warningVulCount": 0,
            "warningProtocolCount": 0,
            "warningSslAllCount": 0,
            "warningSecurityCount": 0,
            "warningContentCount": 0,
            "httpTaskCount": 0,
            "pingTaskCount": 0,
            "errorHttpTaskCount": 0,
            "errorPingTaskCount": 0,
            "errorAssetCount": 0,
            "assetTaskCount": 0,
            "assetHasWarningCount": 0,
            "monitorUrlsCount": 0,
            "warningPercent": 0,
            "level": 0,
        }
        vul_type_maps = dict()
        vul_level_chart = {
            "严重": 0,
            "高危": 0,
            "中危": 0,
            "低危": 0,
        }
        security_event_type_maps = {
            "暗链": 0,
            "挖矿": 0,
            "挂马": 0,
            "坏链": 0,
            "风险外链": 0,
        }
        # (summary key, section name, field inside that section)
        field_sources = (
            ("taskCount", "overview", "taskCount"),
            ("warningVulCount", "vul", "warningVulCount"),
            ("warningProtocolCount", "ssl", "warningProtocolCount"),
            ("warningSslAllCount", "ssl", "warningSslAllCount"),
            ("warningSecurityCount", "securityEvent", "warningSecurityCount"),
            ("warningContentCount", "content", "warningContentCount"),
            ("httpTaskCount", "http", "taskCount"),
            ("pingTaskCount", "ping", "taskCount"),
            ("errorHttpTaskCount", "http", "errorTaskCount"),
            ("errorPingTaskCount", "ping", "errorTaskCount"),
            ("assetTaskCount", "asset", "taskCount"),
            ("assetHasWarningCount", "asset", "hasWarningCount"),
            ("monitorUrlsCount", "overview", "monitorUrlsCount"),
        )
        level_sources = (
            ("严重", "warningCriticalVulCount"),
            ("高危", "warningHighVulCount"),
            ("中危", "warningMediumVulCount"),
            ("低危", "warningLowVulCount"),
        )
        for context in job_base_context_maps.values():
            summary['assetCount'] += 1
            for dst_key, section, src_key in field_sources:
                summary[dst_key] += context[section].get(src_key, 0)

            # Any HTTP or ping task failure marks the whole asset as erroring.
            if context["http"].get('errorTaskCount', 0) or context["ping"].get('errorTaskCount', 0):
                summary['errorAssetCount'] += 1

            for label, src_key in level_sources:
                vul_level_chart[label] += context["vul"].get(src_key, 0)

            # Level 3 marks a high-risk category; if 3 appears multiple times
            # in level_list the count is added once per occurrence.
            for category, info in context["vul"].get("categoryDetails", {}).items():
                for level in info.get('level_list', []):
                    if level == 3:
                        vul_type_maps[category] = vul_type_maps.get(category, 0) + info.get('count', 0)

            for entry in context["securityEvent"].get("titleCountDetails", []):
                title = entry.get('title', '')
                if title and title in security_event_type_maps:
                    security_event_type_maps[title] += entry.get('count', 0)

        if summary['assetCount']:
            summary['warningPercent'] = round(summary['assetHasWarningCount'] / summary['assetCount'] * 100, 2)
        return summary, vul_type_maps, vul_level_chart, security_event_type_maps

    def make_charts(self, security_event_type_maps, vul_level_chart, vul_type_maps):
        """Build the InlineImage chart trio for the aggregate report.

        Each chart is only rendered when it has at least one non-zero data
        point; otherwise its slot stays an empty string so the template can
        skip it.

        Returns:
            Dict with keys "security_event_chart", "vul_level_chart",
            "vul_type_chart" mapping to InlineImage objects or "".
        """
        charts = {
            "security_event_chart": "",
            "vul_level_chart": "",
            "vul_type_chart": "",
        }

        event_titles = ("暗链", "挖矿", "挂马", "坏链", "风险外链")
        if any(security_event_type_maps.get(title, 0) for title in event_titles):
            palette = ['rgb(115, 160, 250)', 'rgb(255, 171, 103)', 'rgb(117, 133, 162)', 'rgb(247, 199, 57)',
                       'rgb(235, 126, 101)']
            labels = ["{}: {}个".format(title, count) for title, count in security_event_type_maps.items()]
            pie_image = self.get_piecahrts_image(labels=labels,
                                                 values=list(security_event_type_maps.values()),
                                                 title="安全事件结果统计", hole=0, color_list=palette)
            charts['security_event_chart'] = InlineImage(self.tpl, pie_image, width=Mm(132), height=Mm(93))

        if any(vul_level_chart.get(level, 0) for level in ("严重", "高危", "中危", "低危")):
            palette = ['rgb(250, 71, 10)', 'rgb(235, 126, 101)', 'rgb(247, 199, 57)', 'rgb(255, 171, 103)']
            labels = ["{}: {}个".format(level, count) for level, count in vul_level_chart.items()]
            level_image = self.get_piecahrts_image(labels=labels,
                                                   values=list(vul_level_chart.values()),
                                                   title="Web漏洞风险等级分布", hole=0, color_list=palette)
            charts['vul_level_chart'] = InlineImage(self.tpl, level_image, width=Mm(132), height=Mm(93))

        if vul_type_maps:
            bar_image = self.get_tiaoxin_barcharts_image(y=list(vul_type_maps.keys()),
                                                         x=list(vul_type_maps.values()),
                                                         title="高危Web漏洞类型分布")
            charts['vul_type_chart'] = InlineImage(self.tpl, bar_image, width=Mm(132), height=Mm(93))
        return charts

    def make_final_context(self, download_type):
        """Prepare the cross-asset summary context for summary downloads.

        The "all" entry is populated only when download_type is '0' (summary
        report) and more than one job is exported; otherwise an empty mapping
        is produced. The result is cached on self.final_context_maps and also
        returned.
        """
        context_maps = dict()
        if download_type == '0' and len(self.job_list) > 1:
            summary, type_maps, level_chart, event_maps = \
                self.get_monitor_summary(self.job_base_context_maps)
            context_maps["all"] = {
                "download_type": download_type,
                "summary": summary,
                "charts": self.make_charts(event_maps, level_chart, type_maps),
                "vul_type_maps": type_maps,
                "vul_level_chart": level_chart,
                "security_event_type_maps": event_maps,
            }

        self.final_context_maps = context_maps
        return context_maps

    def monitors_export(self, download_type, task_id):
        """Export the monitoring report(s) and return an in-memory file.

        Args:
            download_type: '1' -> one docx per asset, bundled into a zip;
                any other value -> a single docx (the per-asset report when
                exactly one job is exported, otherwise the aggregate summary
                built by make_final_context).
            task_id: CeleryTask primary key; its result filename is updated
                so the download endpoint can name the served file.

        Returns:
            BytesIO holding either a zip archive or a rendered docx.
        """
        now = datetime.now().strftime("%Y/%m/%d")
        if download_type == '1':
            file_dir = os.path.join(REPORT_FILE_DIR, task_id)
            # makedirs with exist_ok creates REPORT_FILE_DIR too and avoids
            # both the exists()+mkdir race and a crash when a retried task
            # reuses its directory.
            os.makedirs(file_dir, exist_ok=True)

            file_path_list = []
            exist_file_set = set()
            for job_id, context in self.job_base_context_maps.items():
                # `note` may be missing/None: fall back to '' instead of
                # raising AttributeError on .replace().
                note = (context.get('note') or '').replace(':', '').replace('/', '_')
                if note in exist_file_set:
                    # Disambiguate duplicate notes with the sanitized target;
                    # if that still collides, skip rather than overwrite.
                    target = context.get('target', '').split('://')[-1].strip('/').replace('/', '')
                    note = "{}{}".format(note, target)
                    if note in exist_file_set:
                        continue
                exist_file_set.add(note)

                filename = "[{}]ScanV安全监测报告.docx".format(note)
                file_path = os.path.join(file_dir, filename)
                self.write_dist(context['self_tpl'], file_path, context)
                file_path_list.append(file_path)

            if len(self.job_list) > 1:
                filename = '[{}等多个资产]ScanV安全监测报告.zip'.format(self.job_list[0]['note'])
            else:
                filename = '[{}]ScanV安全监测报告.zip'.format(self.job_list[0]['note'])
            CeleryTask.objects(pk=task_id).update(result__filename=filename)

            file_io = self.export_zip(file_path_list)
            # rmtree alone is the correct cleanup; the previous follow-up
            # `os.remove(file_dir)` on a directory would have raised
            # IsADirectoryError had it ever been reached.
            shutil.rmtree(file_dir)
            return file_io
        elif len(self.job_list) == 1:
            # Single asset: render that asset's own context directly.
            for job_id, context in self.job_base_context_maps.items():
                filename = '[{}]ScanV安全监测报告.docx'.format(self.job_list[0]['note'])
                CeleryTask.objects(pk=task_id).update(result__filename=filename)
                return self.export_word(context)
        else:
            # Multi-asset summary prepared by make_final_context.
            final_context_map = self.final_context_maps["all"]

            # Fall back to "now" when the report period was never set.
            default_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            if not self.start_time:
                self.start_time = default_time
            if not self.end_time:
                self.end_time = default_time

            data = {
                "total": final_context_map.get("summary"),
                "context": self.job_base_context_maps,
                "job_list": self.job_list,
                "reportDate": now,
                "charts": final_context_map.get("charts"),
                "startTime": self.start_time,
                "endTime": self.end_time,
            }

            if len(self.job_list) > 1:
                filename = '[{}等多个资产汇总]ScanV安全监测报告.docx'.format(self.job_list[0]['note'])
            else:
                filename = '[{}]ScanV安全监测报告.docx'.format(self.job_list[0]['note'])

            CeleryTask.objects(pk=task_id).update(result__filename=filename)

            return self.export_word(data)

    def write_dist(self, tpl, file_path, context):
        """Render *context* into *tpl* and persist the result to *file_path*.

        autoescape is on because request-supplied content may contain markup
        that would otherwise corrupt the generated word tags.
        """
        tpl.render(context=context, jinja_env=self.jinja_env, autoescape=True)
        tpl.save(file_path)

    @staticmethod
    def export_zip(file_list):
        stream = io.BytesIO()
        zfile = zipfile.ZipFile(stream, 'w', zipfile.ZIP_DEFLATED, allowZip64=False)
        for file in file_list:
            if os.path.isfile(file):
                zfile.write(file, os.path.basename(file))
        zfile.close()
        stream.seek(0)
        return stream

    def export_word(self, context):
        """Render *context* into self.tpl and return the docx as BytesIO.

        autoescape is enabled: word documents containing script-like text
        (e.g. 'textinputs[0] = decodeURIComponent(...);alert(...);</script>')
        would otherwise break the generated word tags.
        """
        self.tpl.render(context=context, jinja_env=self.jinja_env, autoescape=True)
        stream = BytesIO()
        self.tpl.save(stream)
        stream.seek(0)
        return stream
