import base64
import os
import datetime

import arrow
import jinja2
import json
import plotly.graph_objects as go

from io import BytesIO
from itertools import groupby
from docxtpl import DocxTemplate, InlineImage
from docx.shared import Mm
from docx import Document
from docx.oxml import OxmlElement
from docx.oxml.ns import qn
from bisect import bisect_left
from django.conf import settings

from utilities.redis import captcha_redis as redis
from models.strategy import SystemProfileTemplate, CustomProfileTemplate
from models.kb import KbVuln
from apps.vul.views import vul_unique

from wass.display import LANGUAGE_CODE, CHANGE_CHECK_TYPE_MAP
from apps.strategy.display import KEYWORD_TYPE_NAME, ScanProfile_NAME_MAP
from wass.display import (
    SecurityEventPlugins_NAME_MAP,
    SEVERITY2_NAME_MAP,
    availabilityMap_NAME_MAP,
    CrawlerScope_NAME_MAP,
    Modules_NAME_MAP,
    Picture,
)
from wass.settings import TZ_INFO
from utilities.enums import PictureName, AvailabilityNormalStatus
from utilities.utils import parse_malscan
from utilities.event_parser import AvailabilityParser
from utilities.target import check_availability_is_normal

# Project base directory (three levels above this module) — presumably
# the project root; template paths are resolved relative to it.
dir_name = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))

# Localised display-name tables, resolved once for the configured language.
SeverityMap = SEVERITY2_NAME_MAP.get(LANGUAGE_CODE, {})

securityEventMap = SecurityEventPlugins_NAME_MAP.get(LANGUAGE_CODE, {})

KeywordType = KEYWORD_TYPE_NAME.get(LANGUAGE_CODE, {})

availabilityMap = availabilityMap_NAME_MAP.get(LANGUAGE_CODE, {})

ScanProfile = ScanProfile_NAME_MAP.get(LANGUAGE_CODE, {})

CrawlerScope = CrawlerScope_NAME_MAP.get(LANGUAGE_CODE, {})

modules_map = Modules_NAME_MAP.get(LANGUAGE_CODE, {})

# HTTP methods shown as safe vs. potentially risky in the site-info section.
safe_http_methods = ["HEAD", "GET", "OPTIONS"]

risk_http_methods = ["TRACE", "POST", "PUT", "DELETE"]


def get_kb_value():
    """Return the {kb key: Chinese vuln name} mapping.

    The data set is large and rarely changes, so it goes through the
    cache: redis first, then the database. Both the cache read and the
    write-back are best-effort — a redis failure must never break the
    caller, the database remains the source of truth.
    """
    key = 'report:kbval'
    try:
        b_data = redis.get(key)
        if b_data:
            return json.loads(b_data)
    except Exception:
        # Cache is an optimisation only: on any redis / deserialisation
        # problem fall through to the database.
        pass
    kb_map = {
        kb.key: kb.name.get('zh_cn', '')
        for kb in KbVuln.objects.find({'_id': {'$in': vul_unique()}})
    }
    try:
        # Write-back with a one-day expiry; failures are ignored so a
        # broken cache cannot break report generation.
        redis.set(key, json.dumps(kb_map), ex=24 * 3600)
    except Exception:
        pass
    return kb_map


class Context:
    """Builds the template context for the scan-report docx export."""

    # Report sections to render: availability, security events,
    # vulnerabilities, risky external links, site info, summary,
    # change monitoring. ('appendix' is currently disabled.)
    Fields = [
        'available',
        'securityEvent',
        'vul',
        'risk_link',
        'info',
        # 'appendix',
        'summary',
        'change_check',
    ]

    # Base confidence score per risk-link risk_type
    # (1 = RT_ICP, 2 = RT_KEYWORD, 3 = RT_DOMAIN — see context_risk_link).
    RISK_TYPE_TO_SCORE = {
        1: 0,
        2: 10,
        3: 60,
    }

    def __init__(self, data, tpl="files/tpl.docx"):
        """Load the docx template and prepare the Jinja environment.

        Args:
            data: aggregated report data (task events, job config, ...)
            tpl: template path relative to the project base directory
        """
        template_path = os.path.join(dir_name, tpl)
        self.tpl = DocxTemplate(template_path)
        self.data = data
        self.jinja_env = jinja2.Environment()
        self.register_jinja_env()

    def register_jinja_env(self):
        """Install the custom Jinja filters used by the docx template."""

        def keyword_name(value):
            # Keyword type codes arrive as numbers; unknown codes fall
            # back to the generic "其他" (other) label.
            return KeywordType.get(str(int(value)), '其他')

        def scan_profile(values):
            # Join the localised names of the known profiles only.
            return ' '.join(
                ScanProfile[item] for item in values if ScanProfile.get(item)
            )

        filters = self.jinja_env.filters
        filters['severity_name'] = self.severity_name_filter
        filters['keyword_name'] = keyword_name
        filters['scan_profile'] = scan_profile
        filters['crawler_scope'] = lambda value: CrawlerScope.get(value)

    def severity_name_filter(self, name):
        """Map a severity code to its localised display name.

        Returns None when the code is not in the severity table.
        """
        return SeverityMap.get(name, None)

    def get_piecahrts_image(self, labels, values, title=None):
        """Render a donut-style pie chart to an in-memory JPEG.

        Args:
            labels: sequence of slice labels
            values: sequence of slice values
            title: optional chart title drawn at the top left

        Returns:
            BytesIO holding the rendered JPEG bytes.
        """
        pie = go.Pie(
            labels=labels,
            values=values,
            hole=0.5,
            textinfo='label+percent',
            textposition='inside',
        )
        figure = go.Figure(data=[pie])
        figure.update_traces(
            title=title,
            title_font_size=12,
            title_position="top left",
            selector=dict(type='pie'),
        )
        buffer = BytesIO()
        figure.write_image(buffer, format="jpeg", scale=2)
        return buffer

    def context_config(self):
        """Scan/monitoring configuration flags for the template.

        Returns {'config': {...}} where 'cloud' mirrors the use_cloud
        switch and every enabled availability plugin name maps to True.
        """
        availability = self.data['job_config_map']['availability']
        config = {'cloud': availability.get('use_cloud', False)}
        for plugin in availability['include_plugins']:
            config[plugin] = True
        return {'config': config}

    def context_info(self):
        """Site information section.

        Pulls business-system basics, HTTP methods, exposed services,
        subdomains, OS info (placeholder) and web fingerprints from the
        first ``target_info`` event.
        """
        target_events = (
            self.data['task_events'].get('target_info', {}).get('events', [])
        )
        detail = target_events[0].get('detail', {}) if target_events else {}

        ipdb = detail.get('ipdb', {})
        port_info = detail.get('port', {})

        alexa = detail.get('alexa', '暂无数据')
        if alexa != '暂无数据':
            # Normalise the ranking value to a string for the template.
            alexa = "{}".format(alexa)

        system = {
            'title': detail.get('basic_info', {}).get('title', ''),
            'city_name': ipdb.get('city_name', '') or '暂无数据',
            'ip': ipdb.get('ip') or port_info.get('ip'),
            'icp': detail.get('icp', {}).get('desc', '暂无数据'),
            'alexa': alexa,
        }
        hmethod = [m for m in detail.get('http_method', '').split(',') if m]
        service = []
        for entry in port_info.get('data', []):
            service.append(
                {
                    'port': int(entry.get('port', -1)),
                    'product': entry.get('product', ''),
                    'version': entry.get('version', ''),
                    'name': entry.get('name', ''),
                }
            )
        subdomain = [
            {'subdomain': sub['subdomain'], 'ip': sub['ip']}
            for sub in detail.get('subdomain', [])
        ]
        return {
            'system': system,
            'hmethod': hmethod,
            'safe_http_methods': safe_http_methods,
            'risk_http_methods': risk_http_methods,
            'service': service,
            'subdomain': subdomain,
            'opsystem': {},  # OS detection not populated here
            'web': detail.get('waf_list', []),
        }

    def context_appendix(self):
        """Appendix section: human-readable summary of each module's config.

        For every enabled module a display text is built (with plugin /
        vulnerability names in parentheses where applicable), plus the
        per-module start/end timestamps and crawler settings.
        """
        modules_val_map = {}
        for module, cn in modules_map.items():
            if module in self.data.get('job_config_map', {}):
                if module == 'securityEvent':
                    plugins = self.data['job_config_map']['securityEvent'].get(
                        'include_plugins', []
                    )
                    # Keep only plugins with a known localised name.
                    plugins_cn = [
                        securityEventMap.get(i)
                        for i in plugins
                        if securityEventMap.get(i)
                    ]
                    text = cn if not plugins_cn else f'{cn}({",".join(plugins_cn)})'
                elif module == 'availability':
                    plugins = self.data['job_config_map']['availability'].get(
                        'include_plugins', []
                    )
                    plugins_cn = [
                        availabilityMap.get(i)
                        for i in plugins
                        if availabilityMap.get(i)
                    ]
                    text = cn if not plugins_cn else f'{cn}({",".join(plugins_cn)})'
                elif module == 'vul':
                    kb_map = get_kb_value()
                    system_template = self.data['job_config_map']['vul'].get(
                        'scan_profiles', []
                    )
                    custom_template = self.data['job_config_map']['vul'].get(
                        'scan_custom_profiles', []
                    )
                    if not (system_template or custom_template):  # no template selected: show all vulnerabilities
                        text = f'{cn} ({",".join(kb_map.values())})'
                    else:
                        plugins_en = []
                        for item in SystemProfileTemplate.objects.filter(
                            value__in=system_template
                        ):  # both system and custom profiles allow multi-select
                            plugins_en.extend(item.plugin_list)
                        for item in CustomProfileTemplate.objects.filter(
                            custom_profile_template_id__in=custom_template
                        ):
                            plugins_en.extend(item.plugin_list)
                        # Different templates may share vulnerabilities —
                        # de-duplicate while preserving order.
                        plugins_cn = []
                        for item in plugins_en:
                            if kb_map.get(item) and kb_map.get(item) not in plugins_cn:
                                plugins_cn.append(kb_map[item])
                        text = f'{cn} ({",".join(plugins_cn)})'
                else:
                    text = cn
                modules_val_map[module] = text

        # Per-module timestamps and crawler settings for the appendix table.
        config = {
            'available': {
                'started_at': self.data['task_events']
                .get('availability', {})
                .get('created_at'),
                'updated_at': self.data['task_events']
                .get('availability', {})
                .get('updated_at'),
            },
            'securityEvent': {
                'started_at': self.data['task_events']
                .get('securityEvent', {})
                .get('created_at'),
                'updated_at': self.data['task_events']
                .get('securityEvent', {})
                .get('updated_at'),
                'crawler_max_page': self.data['job_config_map']
                .get('securityEvent', {})
                .get('crawler_max_page'),
                'crawler_depth': self.data['job_config_map']
                .get('securityEvent', {})
                .get('crawler_depth'),
                'crawler_scope': self.data['job_config_map']
                .get('securityEvent', {})
                .get('crawler_scope'),  # crawl scope
            },
            'vul': {
                'crawler_max_page': self.data['job_config_map']
                .get('vul', {})
                .get('crawler_max_page'),
                'crawler_depth': self.data['job_config_map']
                .get('vul', {})
                .get('crawler_depth'),
                'scan_profile': self.data['job_config_map']
                .get('vul', {})
                .get('scan_profiles', []),
                'started_at': self.data['task_events'].get('vul', {}).get('created_at'),
                'updated_at': self.data['task_events'].get('vul', {}).get('updated_at'),
            },
            'risk_link': {
                'started_at': self.data['task_events']
                .get('risk_link', {})
                .get('created_at'),
                'updated_at': self.data['task_events']
                .get('risk_link', {})
                .get('updated_at'),
                'crawler_max_page': self.data['job_config_map']
                .get('risk_link', {})
                .get('crawler_max_page'),
                'crawler_depth': self.data['job_config_map']
                .get('risk_link', {})
                .get('crawler_depth'),
            },
            'requests_per_sec': self.data['advance_config'].get('requests_per_sec'),
            'request_timeout': self.data['advance_config'].get('request_timeout'),
            'modules': modules_val_map,
        }
        return config

    def context_available(self):
        """Availability section.

        Local probes: the recorded local checks (up to 5).
        Cloud probes: only the most recent cloud event.
        """

        events = self.data['task_events'].get('availability', {}).get('events', [])
        plugins = self.data['job_config_map']['availability'].get('include_plugins', [])
        # HTTP-related columns are only meaningful when the http_get
        # plugin was enabled; otherwise they render as '-'.
        is_check = False
        if 'http_get' in plugins:
            is_check = True
        # NOTE(review): dates are naive timestamps shifted by a
        # hard-coded +8h (CST) — presumably the events store UTC; confirm.
        local_events = [
            {
                'date': (
                    i.get('updated_at', datetime.datetime.utcnow())
                    + datetime.timedelta(hours=8)
                ).strftime('%Y-%m-%d %H:%M:%S'),
                'ping': i['detail'].get('ping'),
                'dns': i['detail'].get('dns', {}).get('time', 0) if is_check else '-',
                'http_get_full_time': i['detail']
                .get('http_get_full_time', {})
                .get('time', 0)
                if is_check
                else '-',
                'http_get': i['detail'].get('http_get', {}).get('time', 0)
                if is_check
                else '-',
                'status': int(
                    i['detail'].get('http_get_full_time', {}).get('status', 0)
                )
                if is_check
                else '-',
            }
            for i in events
            if i.get('http_type') == 'local'
        ]
        cloud_events = [e for e in events if e.get('http_type') == 'cloud']

        # Local abnormal checks: run the matching AvailabilityParser
        # check over each local event and collect the timestamps of the
        # abnormal ones.
        e_local = []
        if is_check:
            func_list = ['http_get_full_time']
            parser = AvailabilityParser()
            for event in events:
                if event.get('http_type') != 'local':
                    continue
                detail = event.get('detail', {})
                normal_status = AvailabilityNormalStatus.default.value
                for k, v in detail.items():
                    if k not in func_list:
                        continue
                    # Dispatch to parser.check_<key> if it exists.
                    func = getattr(parser, f"check_{k}", None)
                    if func:
                        normal_status = func(detail)

                if normal_status == AvailabilityNormalStatus.abnormal.value:
                    date = (
                        event.get('updated_at', datetime.datetime.utcnow())
                        + datetime.timedelta(hours=8)
                    ).strftime('%Y-%m-%d %H:%M:%S')
                    e_local.append(date)

        # Assumes cloud events are ordered newest-first — TODO confirm
        # the ordering guaranteed by the upstream producer.
        cloud_latest_event = cloud_events[0] if cloud_events else {}
        # Ping response time per cloud node area.
        detail_pings = {
            item['area']: item['resp_time']
            for item in cloud_latest_event.get('detail', {})
            .get('cloud', {})
            .get('detail_ping', [])
        }
        cloud_event_details = {}
        e_error_nodes = []
        for item in (
            cloud_latest_event.get('detail', {}).get('cloud', {}).get('detail_http', [])
        ):
            area = item['area']
            cloud_event_details[area] = {'ping_time': detail_pings.get(area)}
            if is_check:
                status_code = int(item.get('status_code', 0))
                has_error = item.get('has_error', False)
                if has_error or not check_availability_is_normal(status_code):
                    e_error_nodes.append(item['area'])
                cloud_event_details[area].update(**item)
                cloud_event_details[area]['status_code'] = status_code
            else:
                # HTTP checks disabled: show placeholders for HTTP columns.
                cloud_event_details[area]['dns_time'] = '-'
                cloud_event_details[area]['status_code'] = '-'
                cloud_event_details[area]['resp_time'] = '-'
                cloud_event_details[area]['download_speed'] = '-'

        local = {
            'count': len(local_events),
            'errorcount': len(e_local),
            'details': local_events,
            'e_local': e_local,
        }
        cloud = {
            'errorcount': len(e_error_nodes),
            'e_cloud': e_error_nodes,
            'details': cloud_event_details,
            'date': (
                cloud_latest_event.get('updated_at', datetime.datetime.utcnow())
                + datetime.timedelta(hours=8)
            ).strftime('%Y-%m-%d %H:%M:%S'),
        }

        results = {'local': local, 'cloud': cloud, 'is_check': is_check}
        return results

    def context_securityEvent(self):
        """Security-event section, split by category.

        Categories: black links, cryptojacking, malware (malscan),
        broken links (stored in 'statistics' events rather than
        security events), privacy disclosure and keyword hits.
        SEO hijack is currently disabled.

        The four result-style categories share the same
        dedupe-and-group-by-URL shape, factored into
        ``_dedup_results_by_url``; the two link-set categories share
        ``_union_links_by_url``.
        """
        names_1 = (
            'black_links',
            'cryptojacking',
            'malscan',
            # 'seo_hijack',   # SEO hijack not shown for now
            'privacy_disclosure',
            'keyword',
        )
        _events = self.data['task_events'].get('securityEvent', {}).get('events', [])
        events = [
            event for event in _events if event['event_name'] in names_1
        ]  # black links / cryptojacking / malware / privacy / keywords
        events2 = [
            event for event in _events if event['event_name'] == 'statistics'
        ]  # broken links live in the statistics events

        events_map = {
            event_name: list(grouped)
            for event_name, grouped in groupby(
                sorted(events, key=lambda x: x['event_name']),
                key=lambda x: x['event_name'],
            )
        }

        # Black links: union of all detected links per page URL.
        black_links_details = self._union_links_by_url(
            (
                (item['detail']['url'], item['detail']['links'])
                for item in events_map.get('black_links', [])
            ),
            'links',
        )
        black_links = {
            'details': black_links_details,
            'count': len(black_links_details),
        }

        # Cryptojacking: dedupe by (page url, src, pattern).
        cryptojacking_details = self._dedup_results_by_url(
            events_map.get('cryptojacking', []),
            iter_results=lambda item: item['detail']['results'],
            make_key=lambda url, r: (
                f'{url}{r.get("src", "")}{r.get("pattern", "")}'
            ),
            make_entry=lambda r: {
                'src': r.get('src', ''),
                'pattern': r.get('pattern', ''),
            },
            list_key='results',
        )
        cryptojacking = {
            'details': cryptojacking_details,
            'count': len(cryptojacking_details),
        }

        # Malware: dedupe by (page url, url, type, desc) from parse_malscan.
        malscan_details = self._dedup_results_by_url(
            events_map.get('malscan', []),
            iter_results=lambda item: parse_malscan(item.get('detail', {})),
            make_key=lambda url, d: (
                f'{url}{d.get("url", "")}{d.get("type", "")}{d.get("desc", "")}'
            ),
            make_entry=lambda d: {
                'url': d.get('url', ''),
                'type': d.get('type', ''),
                'desc': d.get('desc', ''),
            },
            list_key='details',
        )
        malscan = {'details': malscan_details, 'count': len(malscan_details)}

        # Broken links: union of broken-link URLs per target URL.
        broken_links_details = self._union_links_by_url(
            (
                (
                    event['target_url'],
                    (i.get('url') for i in event['detail'].get('broken_links', [])),
                )
                for event in events2
                if event['detail'].get('broken_links')
            ),
            'details',
        )
        broken_links = {
            'details': broken_links_details,
            'count': len(broken_links_details),
        }

        # Privacy disclosure. NOTE: the dedupe key uses .get() without a
        # default, so missing fields key as the string 'None' — preserved
        # from the original behaviour.
        privacy_disclosure_details = self._dedup_results_by_url(
            events_map.get('privacy_disclosure', []),
            iter_results=lambda item: item['detail']['results'],
            make_key=lambda url, r: (
                f'{url}{r.get("type_name")}{r.get("content")}'
            ),
            make_entry=lambda r: {
                'type_name': r.get('type_name', ''),
                'content': r.get('content', ''),
            },
            list_key='results',
        )
        privacy_disclosure = {
            'details': privacy_disclosure_details,
            'count': len(privacy_disclosure_details),
        }

        # Keywords: dedupe by (page url, word, type).
        keyword_details = self._dedup_results_by_url(
            events_map.get('keyword', []),
            iter_results=lambda item: item['detail']['results'],
            make_key=lambda url, r: f'{url}{r.get("word")}{r.get("type")}',
            make_entry=lambda r: {
                'word': r.get('word', ''),
                'type': r.get('type', ''),
            },
            list_key='results',
        )
        keyword = {'details': keyword_details, 'count': len(keyword_details)}

        results = {
            'black_links': black_links,
            'cryptojacking': cryptojacking,
            'malscan': malscan,
            # 'seo_hijack': seo_hijack,
            'broken_links': broken_links,
            'privacy_disclosure': privacy_disclosure,
            'keyword': keyword,
        }
        total = sum(i['count'] for i in results.values())
        pie_info = [
            {
                'name': securityEventMap[name],
                'count': data['count'],
                'percent': format(data['count'] / total, '.2%'),
            }
            for name, data in results.items()
            if data.get('count')
        ]  # categories without findings are hidden
        results['count'] = total
        results['pie'] = pie_info
        results['piePic'] = InlineImage(
            self.tpl,
            self.get_piecahrts_image(
                [i['name'] for i in pie_info],
                [i['count'] for i in pie_info],
                title='安全事件类型',
            ),
            width=Mm(132),
            height=Mm(93),
        )
        return results

    def _dedup_results_by_url(
        self, items, iter_results, make_key, make_entry, list_key
    ):
        """Collect de-duplicated result entries grouped by page URL.

        Args:
            items: security events, each with detail['url'].
            iter_results: callable(event) -> iterable of raw results.
            make_key: callable(url, result) -> dedupe key; the seen-set
                is shared across ALL events, not per URL.
            make_entry: callable(result) -> dict for the template.
            list_key: key name for the per-URL entry list.

        Returns:
            [{'url': ..., list_key: [entry, ...]}, ...] in first-seen
            URL order.
        """
        per_url = {}
        seen = set()
        for item in items:
            _url = item['detail']['url']
            fresh = []
            for result in iter_results(item):
                check_key = make_key(_url, result)
                if check_key in seen:
                    continue
                seen.add(check_key)
                fresh.append(make_entry(result))
            per_url.setdefault(_url, []).extend(fresh)
        return [
            {'url': url, list_key: entries} for url, entries in per_url.items()
        ]

    def _union_links_by_url(self, pairs, list_key):
        """Union link collections per URL.

        Args:
            pairs: iterable of (url, iterable-of-links).
            list_key: key name for the merged link list.

        Returns:
            [{'url': ..., list_key: [...]}, ...]; link order within a
            URL is unordered (set semantics), as before.
        """
        merged = {}
        for url, links in pairs:
            merged.setdefault(url, set()).update(links)
        return [
            {'url': url, list_key: list(links)} for url, links in merged.items()
        ]

    def context_vul(self):
        """Vulnerability section.

        De-duplicates vulnerability events by (kb key + url), groups
        them by threat category and by vulnerability name, and builds
        pie-chart data plus severity tallies for the template.

        Side effect: mutates each event detail in place (localised
        found_at, flattened vuln_type, attached request/response dump).
        """
        events = []
        event_keys = set()
        for event in self.data['task_events'].get('vul', {}).get('events', []):
            if event['event_name'] == 'statistics':
                continue
            detail = event['detail']
            detail['found_at'] = arrow.get(detail['found_at']).to(TZ_INFO).format("YYYY-MM-DD HH:mm:ss")
            key = detail.get('key', '')
            url = detail.get('url', '')
            if (key + url) in event_keys:  # de-duplicate events
                continue
            event_keys.add(key + url)
            events.append(detail)

        # Most severe first.
        events.sort(key=lambda x: -x.get('severity', 0))
        vul_gcate = {
            name: list(v)
            for name, v in groupby(
                sorted(
                    events,
                    key=lambda x: (x.get('vuln_type') or [{}])[0].get('zh_cn', ''),
                ),
                key=lambda x: (x.get('vuln_type') or [{}])[0].get('zh_cn', ''),
            )
        }
        vul_gname = {
            name: list(v)
            for name, v in groupby(
                sorted(events, key=lambda x: (x.get('name') or {}).get('zh_cn', '')),
                key=lambda x: (x.get('name') or {}).get('zh_cn', ''),
            )
        }
        # Re-order the name groups to follow the severity-sorted events.
        vul_gname = {
            event['name']['zh_cn']: vul_gname[event['name']['zh_cn']]
            for event in events
            if (event.get('name') or {}).get('zh_cn')
            if vul_gname.get(event['name']['zh_cn'])
        }
        total = len(events)
        cate_pie = [
            {
                'cate': cate,
                'count': len(data),
                'severity': data[0]['severity'],
                'percent': format(len(data) / total, '.2%'),
            }
            for cate, data in vul_gcate.items()
        ]
        mostcate = max(cate_pie, key=lambda x: x['count'], default=dict())

        name_pie = []
        for name, data in vul_gname.items():
            name_pie.append(
                {
                    'name': name,
                    'count': len(data),
                    'severity': data[0]['severity'],
                    # Guarded like the grouping keys above: an event with
                    # a missing/empty vuln_type must not raise here.
                    'category': (data[0].get('vuln_type') or [{}])[0].get(
                        'zh_cn', ''
                    ),
                    'percent': format(len(data) / total, '.2%'),
                }
            )

            for item in data:
                # Flatten vuln_type to its first entry for the template.
                vuln_type = item.get('vuln_type', [])
                if vuln_type:
                    item['vuln_type'] = vuln_type[0]
                # Attach the latest request/response pair if captured.
                traffics = item.get('traffics', [])
                if traffics:
                    req_data, res_data = self.request_response_info(
                        request=traffics[-1].get("request", {}),
                        response=traffics[-1].get("response", {}),
                    )
                    item['request'] = req_data
                    item['response'] = res_data

        mostname = max(name_pie, key=lambda x: x['count'], default=dict())
        context = {
            'category': {
                'cate_pie': cate_pie,
                'mostcate': mostcate,
                'cate_piePic': InlineImage(
                    self.tpl,
                    self.get_piecahrts_image(
                        [i['cate'] for i in cate_pie],
                        [i['count'] for i in cate_pie],
                        title='威胁类型数量',
                    ),
                    width=Mm(132),
                    height=Mm(93),
                ),
            },
            'name': {
                'details': vul_gname,
                'mostname': mostname,
                'count': total,
                'name_pie': name_pie,
                'critical': len([i for i in events if i['severity'] == 5]),
                'high': len([i for i in events if i['severity'] == 4]),
                'middle': len([i for i in events if i['severity'] == 3]),
                'low': len([i for i in events if i['severity'] == 2]),
            },
            'severity_cate': {
                'critical': len([1 for cate in cate_pie if cate['severity'] == 5]),
                'high': len([1 for cate in cate_pie if cate['severity'] == 4]),
                'middle': len([1 for cate in cate_pie if cate['severity'] == 3]),
                'low': len([1 for cate in cate_pie if cate['severity'] == 2]),
            },
            'severity_name': {i['name']: i['severity'] for i in name_pie},
            'severity_level': {
                i['name']: self.severity_name_filter(i['severity']) for i in name_pie
            },
        }
        return context

    def context_ssl(self):
        """SSL/TLS section: protocol vulnerabilities and certificate info.

        NOTE(review): the grouped ``ssl_details`` mapping is computed
        but never used, and both returned dicts are empty placeholders —
        this section appears unfinished (TODO: populate protocol /
        certificate from ssl_details or drop the computation).
        """
        ssl_events = [
            event['detail']
            for event in self.data['task_events']
            .get('target_info', {})
            .get('events', [])
            if event['event_name'] == 'site_info'
        ]
        # Flatten every sslcheck result across all site_info events.
        ssl_details = [
            result
            for event in ssl_events
            for result in event.get('sslcheck', {}).get('results', [])
        ]
        # Group the flat result list by its 'category' field.
        ssl_details = {
            category: list(v)
            for category, v in groupby(
                sorted(ssl_details, key=lambda x: x['category']),
                key=lambda x: x['category'],
            )
        }

        protocol = {}
        certificate = {}
        return {'protocol': protocol, 'certificate': certificate}

    def context_risk_link(self):
        """Risky external link section.

        Groups each event's findings by risk type, merges events that
        share a URL, de-duplicates the detail texts, then recomputes a
        0-100 confidence and a severity level per merged URL.
        """
        events = []
        for event in self.data['task_events'].get('risk_link', {}).get('events', []):
            if not event['event_name'] == 'risk_link':  # risky external links only
                continue

            item = {
                'url': event['detail']['url'],
                'confidence': event['detail']['confidence'],
                'severity': event['detail']['confidence_level'],
                'result': {
                    k: [
                        cns.get('extra_details', {})
                        .get('zh_cn', '')
                        .replace(rf' "{event["detail"]["url"]}" ', '')
                        for cns in v
                    ]
                    # groupby only groups *consecutive* items, so the
                    # results must be sorted by the same key first —
                    # otherwise repeated risk_types produce duplicate
                    # keys and earlier groups are silently overwritten
                    # in this dict comprehension.
                    for k, v in groupby(
                        sorted(
                            event['detail']['results'],
                            key=lambda x: int(x['risk_type']),
                        ),
                        key=lambda x: int(x['risk_type']),
                    )
                },
            }
            events.append(item)

        # Merge events that point at the same URL.
        risk_events = {}
        for event in events:
            url = event["url"]
            if url not in risk_events:
                risk_events[url] = event
            else:
                results = event["result"]
                for key, value in results.items():
                    if key not in risk_events[url]["result"]:
                        risk_events[url]["result"][key] = value
                    else:
                        risk_events[url]["result"][key].extend(value)
        # De-duplicate the detail texts (order is not preserved).
        for event in risk_events.values():
            for key, value in event["result"].items():
                event["result"][key] = list(set(value))

        events = list(risk_events.values())
        # Recompute confidence and severity for each merged URL.
        for event in events:
            confidence = 0
            icp_score = 0
            keyword_score = 0
            score = 0
            for risk_type, risk_details in event['result'].items():
                if not risk_details:
                    continue
                score = self.RISK_TYPE_TO_SCORE.get(int(risk_type), 0)
                if int(risk_type) == 1:  # RT_ICP
                    icp_score = 10
                elif int(risk_type) == 2:  # RT_KEYWORD
                    keyword_score = 5 * len(risk_details)
                    score += keyword_score
                elif int(risk_type) == 3:  # RT_DOMAIN
                    pass
                else:
                    continue
                confidence += score
            # NOTE(review): `score` below holds the value from the last
            # loop iteration — presumably the ICP-only fallback; confirm.
            if not score and icp_score:
                confidence = icp_score
            if icp_score and keyword_score:
                # ICP record + keyword hits together are near-certain.
                confidence += 90
            event['confidence'] = min(confidence, 100)
            if event['confidence'] >= 90:
                event['severity'] = 4
            elif event['confidence'] >= 60:
                event['severity'] = 3
            elif event['confidence'] >= 0:
                event['severity'] = 2
        events.sort(key=lambda x: -x['severity'])

        return {
            'count': len(events),
            'high': len([1 for event in events if event['severity'] == 4]),
            'middle': len([1 for event in events if event['severity'] == 3]),
            'low': len([1 for event in events if event['severity'] == 2]),
            'details': events,
        }

    def caculate_score(self, scores):
        """Map a list of severity scores to a single 0-100 report score.

        Rule (highest present severity wins):
          contains critical (5): 75 + critical count
          contains high (4):     50 + high count, capped at 75
          contains medium (3):   25 + medium count, capped at 50
          contains low (2):      low count, capped at 25
          otherwise:             0
        """
        # Single pass over scores; unknown severities are ignored.
        tally = {5: 0, 4: 0, 3: 0, 2: 0}
        for severity in scores:
            if severity in tally:
                tally[severity] += 1

        if tally[5]:
            result = 75 + tally[5]
        elif tally[4]:
            result = min(75, 50 + tally[4])
        elif tally[3]:
            result = min(50, 25 + tally[3])
        elif tally[2]:
            result = min(25, tally[2])
        else:
            result = 0
        return min(result, 100)  # overall ceiling is 100

    def context_summary(self):
        """综述数据 合并"""
        level_l, level_names = (25, 50, 75), ('低危', '中危', '高危', '严重')
        date = datetime.datetime.now()
        date_str = date.strftime('%Y-%m-%d')

        score_names = ('risk_link', 'vul', 'securityEvent', 'change_check')
        scores = []
        for name in score_names:
            for event in self.data['task_events'].get(name, {}).get('events', []):
                severity = event['severity']
                event_name = event.get('event_name')
                if event['event_name'] == 'risk_link':
                    severity = event['detail']['confidence_level']
                broken_link = event.get('detail', {}).get('broken_links')
                if event_name == 'statistics':
                    if broken_link:
                        severity = 2
                    else:
                        continue
                scores.append(severity)
        # scores = [
        #     event['severity']
        #     if event['event_name'] != 'risk_link'
        #     else event['detail']['confidence_level']
        #     for name in score_names
        #     for event in self.data['task_events'].get(name, {}).get('events', [])
        #     if event['event_name'] != 'statistics'
        #     if event['severity'] > 1
        # ]

        score = self.caculate_score(scores)
        level = '安全' if score == 0 else level_names[bisect_left(level_l, score)]

        if score == 0:
            is_empty = True
            for name in score_names:
                if self.data['task_events'].get(name, {}).get('events', []):
                    is_empty = False
                    break
            if is_empty:
                score = '--'
                level = '未知'

        results = {
            'target_name': self.data.get('target_name')
            or self.data.get('target_title'),
            'target_url': self.data.get('target_url'),
            'reportDate': date_str,
            'score': score,
            'level': level,
            'urlCounts': self.data['url_count'],
        }
        if self.data.get("monitored_at"):
            results["monitored_at"] = self.data.get("monitored_at")
        if self.data.get("time_period"):
            monitored_times = self.data.get("monitored_times")
            results.update(monitored_times)
            start_at = self.data.get("start_at")
            end_at = self.data.get("end_at")
            if start_at and end_at:
                start_at = arrow.get(start_at).format("YYYY/MM/DD HH:mm:ss")
                end_at = arrow.get(end_at).format("YYYY/MM/DD HH:mm:ss")
                results["time_period"] = f"{start_at} - {end_at}"
            else:
                results["time_period"] = "--"
        return results

    def context_change_check(self):
        """Content-change (page tamper monitoring) section of the report.

        Aggregates 'change_check' task events into per-URL text / structure /
        resource diff details, per-type counts, and the maximum change ratio
        seen for each type.

        NOTE(review): mutates the event detail dicts in self.data in place
        (status_code casts, formatted change_ratio); the returned 'details'
        hold references to those same dicts.
        """
        events = []
        TEXT = 'text'
        STRUCTURE = 'structure'
        RESOURCE = 'resource'
        change_types = [TEXT, STRUCTURE, RESOURCE]
        text_count = 0       # URLs with a text change
        structure_count = 0  # URLs with a structure change
        resource_count = 0   # individual changed resources (not URLs)
        resource_urls = 0    # URLs with at least one resource change
        text_max_ratio = 0
        structure_max_ratio = 0
        resource_max_ratio = 0
        change_type_enums = []
        for event in self.data['task_events'].get('change_check', {}).get('events', []):
            if not event['event_name'] == 'change_check':  # skip other event types (e.g. risk external links)
                continue

            item = {
                'url': event['detail']['url'],
            }
            for change_type in change_types:
                change_result = event['detail'][f'{change_type}_result']
                # Report a diff only when the check ran AND something changed.
                if change_result.get('is_checked') and change_result.get(
                    'change_ratio'
                ):
                    # change_ratio is presumably a 0-1 fraction (scaled x100
                    # to a percentage here) — confirm with the producer.
                    change_ratio = round(change_result['change_ratio'] * 100, 2)
                    item[f'has_{change_type}_result'] = True
                    if change_type == TEXT:
                        text_count += 1
                        text_max_ratio = max(text_max_ratio, change_ratio)
                    elif change_type == STRUCTURE:
                        structure_count += 1
                        structure_max_ratio = max(structure_max_ratio, change_ratio)
                    elif change_type == RESOURCE:
                        changes = change_result["changes"]
                        resource_count += len(changes)
                        resource_urls += 1
                        resource_max_ratio = max(resource_max_ratio, change_ratio)
                        for change in changes:
                            # Classify each resource by its existence before/after.
                            baseline_is_existing = change['baseline']['is_existing']
                            current_is_existing = change['current']['is_existing']
                            if baseline_is_existing and current_is_existing:
                                status = "变化"
                            elif baseline_is_existing and not current_is_existing:
                                status = "删除"
                            else:
                                status = "新增"
                            change['status'] = status
                            change['baseline']['status_code'] = int(
                                change['baseline']['status_code']
                            )
                            change['current']['status_code'] = int(
                                change['current']['status_code']
                            )
                    if change_type != RESOURCE:
                        # text/structure results carry a single 'change' entry.
                        change = change_result['change']
                        change['baseline']['status_code'] = int(
                            change['baseline']['status_code']
                        )
                        change['current']['status_code'] = int(
                            change['current']['status_code']
                        )
                    # Overwrites the numeric ratio with its display string.
                    change_result['change_ratio'] = format(change_ratio, '.2f')
                else:
                    item[f'has_{change_type}_result'] = False
                item[f'{change_type}_result'] = change_result
            events.append(item)
        # Human-readable enumeration of the change types actually seen.
        if text_count:
            change_type_enums.append(CHANGE_CHECK_TYPE_MAP[TEXT])
        if structure_count:
            change_type_enums.append(CHANGE_CHECK_TYPE_MAP[STRUCTURE])
        if resource_count:
            change_type_enums.append(CHANGE_CHECK_TYPE_MAP[RESOURCE])
        change_type_enum = '、'.join(change_type_enums)

        return {
            'count': len(events),
            'text_count': text_count,
            'structure_count': structure_count,
            'resource_count': resource_count,
            'text_urls': text_count,
            'structure_urls': structure_count,
            'resource_urls': resource_urls,
            'text_max_ratio': format(text_max_ratio, '.2f'),
            'structure_max_ratio': format(structure_max_ratio, '.2f'),
            'resource_max_ratio': format(resource_max_ratio, '.2f'),
            'details': events,
            'change_type_enum': change_type_enum,
        }

    def parse_context(self):
        """Render every section context into the docx template.

        Builds the template context from the class's Fields, renders the
        document, optionally swaps placeholder pictures, flags Word to
        refresh its fields (e.g. TOC) on open, and returns the result as a
        seeked-to-start BytesIO.
        """
        context = {}
        for field in self.Fields:
            context[field] = getattr(self, f'context_{field}')()
        context.update(self.data.get('about', {}))
        context['IS_OEM'] = settings.IS_OEM
        context['chapters'] = self.data.get('chapters', [])

        self.tpl.render(context=context, autoescape=True, jinja_env=self.jinja_env)
        if not self.data.get('history_tpl'):
            self.replace_source()

        # Ask Word to recalculate document fields when the file is opened.
        field_refresh = OxmlElement('w:updateFields')
        field_refresh.set(qn('w:val'), 'true')
        self.tpl.settings.element.append(field_refresh)

        buffer = BytesIO()
        self.tpl.save(buffer)
        buffer.seek(0)
        return buffer

    def replace_source(self):
        """Swap every 'report_*' placeholder picture in the template for its file."""
        pic = Picture()
        report_pics = (p.value for p in PictureName if p.value.startswith('report_'))
        for name in report_pics:
            self.tpl.replace_pic(name, pic.get_file(name))

    @staticmethod
    def _decode_b64str(b64str):
        try:
            result = base64.b64decode(b64str.encode()).decode("utf-8")
            return result
        except Exception:
            return ""

    def request_response_info(self, request, response):
        """从请求和响应结果中解析请求体和响应体"""
        # 请求体
        body = self._decode_b64str(request.get("b64content", ""))
        method = request.get('method', "")
        url = request.get("url", "")
        http = response.get("version", "")
        req_headers_list = [
            f"{key}:{value}" for key, value in request.get("headers", {}).items()
        ]
        req_joins = [method, url, http, body]
        req_joins.extend(req_headers_list)
        req_data = "\n".join(req_joins) if request else ""

        # 响应体
        status_code = str(response.get("status_code", ""))
        res_headers_list = [
            f"{key}:{value}" for key, value in response.get("headers", {}).items()
        ]
        res_joins = [http, status_code]
        res_joins.extend(res_headers_list)
        res_data = "\n".join(res_joins) if response else ""
        return req_data, res_data


class HistorySecurityEventContext(Context):
    """History report context rendering the securityEvent, info and summary sections."""

    Fields = ['securityEvent', 'info', 'summary']


class HistoryVulContext(Context):
    """History report context rendering the vul, info and summary sections."""

    Fields = ['vul', 'info', 'summary']


class HistoryRiskLinkContext(Context):
    """History report context rendering the risk_link, info and summary sections."""

    Fields = ['risk_link', 'info', 'summary']


class HistoryChangeCheckContext(Context):
    """History report context rendering the info, summary and change_check sections."""

    Fields = ['info', 'summary', 'change_check']
