import os
import io
import shutil
import datetime
import logging
import arrow
import zipfile
from typing import Dict, Union, List, Tuple, Optional

from models.target import Targets, TargetGroups
from models.celery_task import CeleryTasks
from models.task import ScanTaskSessions, ScanPages, ScanEvents
from apps.report.ContextTpl import (
    Context,
    HistoryRiskLinkContext,
    HistoryVulContext,
    HistoryChangeCheckContext,
    HistorySecurityEventContext,
)
from tasks import celery_app
from utilities.utils import iso_format_to_local
from utilities.redis import captcha_redis as redis_conn
from wass.settings import EXPORT_DIR, IS_OEM
from utilities.enums import ReportTaskType, ReportExportStatus, TaskType
from utilities.constant import REPORT_TPL_CHAPTERS

logger = logging.getLogger()

# Maps a task type to the Context subclass used to render its history report.
# ``history_task`` falls back to the generic ``Context`` for unknown types.
HISTORY_EVENT_CONTEXT_MAP = {
    "risk_link": HistoryRiskLinkContext,
    "vul": HistoryVulContext,
    "change_check": HistoryChangeCheckContext,
    "securityEvent": HistorySecurityEventContext,
}

# Chinese display names per task type.
# NOTE(review): not referenced anywhere in this chunk — presumably consumed
# by report templates or another module; verify before removing.
TASK_TYPE_CN_NAME_MAP = {
    "vul": "漏洞",
    "securityEvent": "安全事件",
    "risk_link": "风险外链",
    "change_check": "篡改",
}


@celery_app.task(acks_late=True)
def report_export(export_task_id, uid, about):
    """Celery entry point: resolve the export-task record and run the export.

    Returns ``do_export``'s result (the export_task_id on success, "" on
    failure), or None when no record matches ``export_task_id``.
    """
    task_record = CeleryTasks.objects(export_task_id=export_task_id).first()
    if not task_record:
        logger.warning("export_task_id: {} not found".format(export_task_id))
        return
    logger.info("export_task_id: {} start".format(export_task_id))
    return do_export(uid, about, task_record)


def set_exhkey(key, val, ex=300):
    """Write mapping ``val`` into redis hash ``key`` and set a TTL of ``ex`` seconds.

    NOTE(review): ``hmset`` is deprecated in newer redis-py releases in favour
    of ``hset(key, mapping=...)`` — confirm the installed client version
    before migrating.
    """
    redis_conn.hmset(key, val)
    redis_conn.expire(key, ex)


def _get_availablity(target_id, local_limit=5, cloud_limit=1):
    """Collect the most recent availability-monitoring events for a target.

    Joins the latest availability sessions (local and cloud) against their
    scan_events and returns the events plus the earliest created_at /
    latest updated_at, formatted as local (UTC+8) timestamps.

    (Function name keeps the historical "availablity" spelling — callers
    depend on it.)
    """
    logger.info("开始可用性监测数据统计, target_id: {}".format(target_id))

    def build_pipeline(match, limit):
        # Shared aggregation: newest sessions first, joined to scan_events.
        return [
            {"$match": match},
            {"$sort": {"updated_at": -1}},
            {
                "$lookup": {
                    "from": "scan_events",
                    "localField": "task_session_id",
                    "foreignField": "task_session_id",
                    "as": "event",
                }
            },
            {"$unwind": "$event"},
            {"$limit": limit},
        ]

    base_match = {
        "target_id": target_id,
        "addition.task_type": "availability",
    }
    cloud_match = dict(
        base_match, **{"job_config_map.site_info.modules": "cloud"}
    )

    events = []
    for http_type, pipeline in (
        ("local", build_pipeline(base_match, local_limit)),
        ("cloud", build_pipeline(cloud_match, cloud_limit)),
    ):
        for row in ScanTaskSessions.objects.aggregate(pipeline):
            events.append({"http_type": http_type, **row.get("event", {})})

    if events:
        earliest = min(events, key=lambda e: e["created_at"])["created_at"]
        latest = max(events, key=lambda e: e["updated_at"])["updated_at"]
    else:
        # No availability data at all — fall back to "now" for both stamps.
        earliest = datetime.datetime.utcnow()
        latest = datetime.datetime.utcnow()

    fmt = "%Y-%m-%d %H:%M:%S"
    eight_hours = datetime.timedelta(hours=8)
    created_at = (earliest + eight_hours).strftime(fmt)
    updated_at = (latest + eight_hours).strftime(fmt)
    logger.info("可用性监测数据统计完成, target_id: {}".format(target_id))
    return {
        "events": events,
        "created_at": created_at,
        "updated_at": updated_at,
    }


def get_filepath(temp_dir, word_name):
    """Return a path under ``temp_dir`` for ``word_name`` that does not collide.

    While ``temp_dir/word_name`` (or a previous candidate) already exists,
    append "(1)", "(2)", ... before the file extension.

    Fix: the original sliced ``word_name[:-5]`` and appended ".docx",
    silently mangling any name whose extension is not exactly 5 characters;
    ``os.path.splitext`` handles every extension (behavior is unchanged for
    ".docx" names, the only ones produced by this module).

    :param temp_dir: directory the file will be written into
    :param word_name: desired file name, extension included
    :returns: a path inside ``temp_dir`` that no existing file occupies
    """
    stem, ext = os.path.splitext(word_name)
    filepath = os.path.join(temp_dir, word_name)
    i = 1
    while os.path.isfile(filepath):
        filepath = os.path.join(temp_dir, f"{stem}({i}){ext}")
        i += 1
    return filepath


def get_time_period_events(target_id, start_at, end_at):
    """Collect non-availability scan events for a target inside a time window.

    Returns a dict keyed by task type, each value ``{"events": [...]}``,
    plus a "task_session_ids" entry listing every session id in the window.
    """
    logger.info("开始统计时间范围内安全事件, target: {}, start: {}, end: {}".format(target_id, start_at, end_at))
    query = {
        "target_id": target_id,
        "updated_at": {"$gte": start_at, "$lte": end_at},
        "addition.task_type": {"$ne": "availability"},
    }
    sessions = ScanTaskSessions.objects.only(
        "task_session_id", "addition.task_type"
    ).find(query)
    type_by_session_id = {}
    for session in sessions:
        type_by_session_id[session.task_session_id] = session.addition.get(
            "task_type", ""
        )
    logger.info("统计出{}次监测任务".format(len(type_by_session_id)))
    session_ids = list(type_by_session_id.keys())
    grouped = {}
    for scan_event in ScanEvents.objects.find(
        {"task_session_id": {"$in": session_ids}}
    ):
        task_type = type_by_session_id[scan_event.task_session_id]
        bucket = grouped.setdefault(task_type, {"events": []})
        bucket["events"].append(scan_event.to_dict())
    grouped["task_session_ids"] = session_ids
    return grouped


def get_normal_events(target_id):
    """Collect events from the latest run of each non-availability task type.

    Groups sessions by task type, keeps the newest per type, joins its
    scan_events, and returns a dict keyed by task type with the events and
    local-time (UTC+8) created/updated stamps.
    """
    logger.info("统计最近一次监测任务的事件, target_id: {}".format(target_id))

    def to_local(ts):
        # UTC -> UTC+8, human-readable.
        return arrow.get(ts).shift(hours=+8).format("YYYY-MM-DD HH:mm:ss")

    pipeline = [
        {"$match": {"target_id": target_id}},
        {"$sort": {"updated_at": -1}},
        {
            "$group": {
                "_id": "$addition.task_type",
                "task_session_id": {"$first": "$task_session_id"},
                "created_at": {"$first": "$created_at"},
                "updated_at": {"$first": "$updated_at"},
            }
        },
        {"$match": {"_id": {"$ne": "availability"}}},
        {
            "$lookup": {
                "from": "scan_events",
                "localField": "task_session_id",
                "foreignField": "task_session_id",
                "as": "task_events",
            }
        },
    ]
    events = {}
    for row in ScanTaskSessions.objects.aggregate(pipeline):
        events[row["_id"]] = {
            "events": row.get("task_events", []),
            "task_session_id": row["task_session_id"],
            "created_at": to_local(row["created_at"]),
            "updated_at": to_local(row["updated_at"]),
        }
    logger.info("统计最近一次监测任务的事件完成, target_id: {}, 类别为{}, 每类分别{}条".format(target_id, list(events.keys()), [len(v["events"]) for v in events.values()]))
    return events


def get_event_by_session(task_session_id, task_type):
    """Fetch every event of one scan session, keyed under ``task_type``.

    "statistics" events are dropped when they carry no broken_links payload.
    """
    logger.info("开始统计单个安全事件, task_session_id: {}".format(task_session_id))
    collected = []
    for scan_event in ScanEvents.objects.find(
        {"task_session_id": task_session_id}
    ):
        event = scan_event.to_dict()
        # A statistics event without broken_links has nothing to report.
        if event["event_name"] == "statistics" and not event["detail"].get(
            "broken_links"
        ):
            continue
        collected.append(event)
    logger.info("统计单个安全事件 {} 完成, task_session_id: {}, 一共{}条".format(task_type, task_session_id, len(collected)))
    return {
        task_type: {
            "events": collected,
            "task_session_id": task_session_id,
        }
    }


def get_events(
    target_id,
    time_period=False,
    start_at=None,
    end_at=None,
    task_session_id=None,
    task_type=None,
) -> Dict[str, Union[Dict, List, str]]:
    """Dispatch to the appropriate event-collection strategy.

    Priority: a specific session (``task_session_id``) wins, then a time
    window (``time_period`` with start_at/end_at), otherwise the latest run
    per task type.
    """
    logger.info("开始统计安全事件, target_id: {}".format(target_id))
    if task_session_id:
        return get_event_by_session(task_session_id, task_type)
    if time_period:
        return get_time_period_events(target_id, start_at, end_at)
    return get_normal_events(target_id)


def scan_urls_count(task_session_ids):
    """Return the scanned-URL count as display text.

    Uses the maximum page count across the given sessions (changed from
    reading the vul count); yields "暂无数据" when every count is zero.
    """
    logger.info("开始统计扫描url数, task_session_ids: {}".format(task_session_ids))
    counts = [
        ScanPages.objects(task_session_id=session_id).count()
        for session_id in task_session_ids
    ]
    highest = max(counts, default=0)
    url_count = f"{highest}条" if highest else "暂无数据"
    logger.info("统计扫描url数完成, task_session_ids: {}, 最高扫描url{}条".format(task_session_ids, url_count))
    return url_count


def get_monitored_times(target_id, start_at, end_at):
    """Count monitoring runs per task type within [start_at, end_at].

    Returns {task_type: count, ..., "total_monitoring_times": sum}.
    """
    logger.info("开始统计时间范围内监测次数, target_id: {}，时间范围为{}至{}".format(target_id, start_at, end_at))
    pipeline = [
        {
            "$match": {
                "target_id": target_id,
                "updated_at": {
                    "$gte": start_at,
                    "$lte": end_at,
                },
            }
        },
        {"$group": {"_id": "$addition.task_type", "count": {"$sum": 1}}},
    ]
    counts = {}
    for row in ScanTaskSessions.objects.aggregate(pipeline):
        if row:
            counts[row["_id"]] = row["count"]
    counts["total_monitoring_times"] = sum(counts.values())
    logger.info("统计时间范围内监测次数完成, target_id: {}, 一共{}次".format(target_id, counts["total_monitoring_times"]))
    return counts


def _set_progress(export_task, target_id, progress):
    """Persist report-generation progress on the export task (no-op when absent)."""
    if export_task:
        export_task.result.progress = progress
        export_task.save()
        logger.info("{}: progress {}%".format(target_id, progress))


def _statistics(
    uid,
    target_id,
    about,
    time_period=False,
    start_at=None,
    end_at=None,
    export_task=None,
    chapters=None,
    tpl="files/tpl.docx",
):
    """Build the report context for one target and render it to a file object.

    :param uid: requesting user id (logging only)
    :param target_id: target to report on
    :param about: "about" metadata dict; falsy values are dropped
    :param time_period: when True, restrict statistics to [start_at, end_at]
    :param start_at: window start (time_period mode)
    :param end_at: window end (time_period mode)
    :param export_task: optional CeleryTasks record to mirror progress onto
    :param chapters: chapter names to include (default: empty list; was a
        mutable ``[]`` default, which is stored into ``data["chapters"]``
        and would be shared across calls)
    :param tpl: docx template path
    :returns: (file_io, display_name) tuple
    :raises Exception: when the target does not exist
    """
    chapters = [] if chapters is None else chapters
    if export_task:
        export_task.result.status = ReportExportStatus.pending.value
        export_task.save()
    target = Targets.objects.find_one({"target_id": target_id})
    if not target:
        raise Exception("目标不存在")
    events = get_events(target_id, time_period, start_at, end_at)
    _set_progress(export_task, target_id, 30)
    data = target.to_mongo().to_dict()
    data["chapters"] = chapters
    # Availability data is gathered separately — it has its own template section.
    events["availability"] = _get_availablity(target_id)
    _set_progress(export_task, target_id, 50)
    data["task_events"] = events
    if time_period:
        data["time_period"] = True
        data["start_at"] = start_at
        data["end_at"] = end_at
        task_session_ids = events["task_session_ids"]
    else:
        # The availability entry carries no task_session_id and is skipped here.
        task_session_ids = [
            item["task_session_id"]
            for item in events.values()
            if item.get("task_session_id")
        ]
    data["url_count"] = scan_urls_count(task_session_ids)
    if time_period:
        data["monitored_times"] = get_monitored_times(
            target_id, start_at, end_at
        )
    _set_progress(export_task, target_id, 70)
    data["about"] = {k: v for k, v in about.items() if v}
    logger.info("开始生成报告: uid: {}, target_id: {}, tpl: {}".format(uid, target_id, tpl))
    file_io = Context(data=data, tpl=tpl).parse_context()
    logger.info("报告生成完成: uid: {}, target_id: {}, tpl: {}".format(uid, target_id, tpl))
    _set_progress(export_task, target_id, 90)
    name = (
        data.get("target_name")
        or data.get("target_title")
        or data.get("target_url")
    )
    name = f"[{name}]"
    logger.info("target_id: {}, 报告名称: {}".format(target_id, name))
    return file_io, name


def last_task(uid, target_id, about, export_task, chapters=None, single=False):
    """Export the latest-run report for one target.

    Only single exports pass the task record through (so _statistics can
    report progress); batch exports track progress at the batch level.
    ``chapters`` previously defaulted to a shared mutable ``[]``.

    :returns: (file_io, display_name) from ``_statistics``
    """
    tpl = "files/tpl_oem.docx" if IS_OEM else "files/tpl.docx"
    return _statistics(
        uid,
        target_id,
        about,
        export_task=export_task if single else None,
        chapters=[] if chapters is None else chapters,
        tpl=tpl,
    )


def time_period_task(uid, target_id, about, export_task, chapters=None, single=False):
    """Export a time-window report; the window comes from ``export_task.params``.

    Only single exports pass the task record through for progress updates.
    ``chapters`` previously defaulted to a shared mutable ``[]``.

    :returns: (file_io, display_name) from ``_statistics``
    """
    tpl = "files/tpl_time_period_oem.docx" if IS_OEM else "files/tpl_time_period.docx"
    return _statistics(
        uid,
        target_id,
        about,
        time_period=True,
        start_at=export_task.params.start_at,
        end_at=export_task.params.end_at,
        export_task=export_task if single else None,
        chapters=[] if chapters is None else chapters,
        tpl=tpl,
    )


def _mark_export_failed(export_task):
    """Flag the export task as failed before raising an error."""
    export_task.result.status = ReportExportStatus.failed.value
    export_task.save()


def history_task(uid, target_id, about, export_task, chapters=None, single=True):
    """Export a history report for one past scan session.

    The real target is resolved from the session record, so the
    ``target_id`` argument is effectively ignored (kept for signature
    parity with the other EXPORT_FUNC_MAP entries); ``chapters`` is
    likewise unused by the history templates.

    :returns: (file_io, display_name)
    :raises Exception: when the session, target, or latest target_info
        session cannot be found (task is marked failed first)
    """
    task_session_id = export_task.params.targets[0].get("task_session_id", "")
    task_session = (
        ScanTaskSessions.objects(task_session_id=task_session_id)
        .only("target_id", "created_at", "updated_at", "addition.task_type")
        .first()
    )
    if not task_session:
        _mark_export_failed(export_task)
        raise Exception("task_session_id: {} 不存在".format(task_session_id))
    target_id = task_session.target_id
    task_type = task_session.addition.get("task_type", "")
    target = Targets.objects(target_id=target_id).first()
    if not target:
        _mark_export_failed(export_task)
        raise Exception("目标不存在: {}".format(target_id))
    last_target_info_session = (
        ScanTaskSessions.objects(
            target_id=target_id,
            addition__task_type="target_info",
        )
        .order_by("-updated_at")
        .first()
    )
    # Fix: the original chained ``.first().task_session_id`` and crashed with
    # AttributeError when no target_info session existed — fail explicitly.
    if not last_target_info_session:
        _mark_export_failed(export_task)
        raise Exception("target_info 任务不存在: {}".format(target_id))
    events = get_events(
        target_id, task_session_id=task_session_id, task_type=task_type
    )
    target_info_events = get_events(
        target_id,
        task_session_id=last_target_info_session.task_session_id,
        task_type="target_info",
    )

    if export_task:
        export_task.result.progress = 30
        export_task.save()
    data = target.to_mongo().to_dict()
    data["task_events"] = {**events, **target_info_events}
    data["monitored_at"] = (
        arrow.get(task_session.updated_at)
        .to("Asia/Shanghai")
        .format("YYYY-MM-DD HH:mm:ss")
    )
    data["url_count"] = scan_urls_count([task_session_id])
    if export_task:
        export_task.result.progress = 60
        export_task.save()
    data["about"] = {k: v for k, v in about.items() if v}
    name = (
        data.get("target_name")
        or data.get("target_title")
        or data.get("target_url")
    )
    name = f"[{name}]"
    # The "_safe" template is for sessions that produced no events at all.
    has_events = any(item.get("events", []) for item in events.values())
    tpl = (
        f"files/tpl_history_{task_type}.docx"
        if has_events
        else f"files/tpl_history_{task_type}_safe.docx"
    )
    data["history_tpl"] = True
    context_cls = HISTORY_EVENT_CONTEXT_MAP.get(task_type, Context)
    file_io = context_cls(data=data, tpl=tpl).parse_context()
    if export_task:
        export_task.result.progress = 90
        export_task.save()
    return file_io, name


# Dispatch table: CeleryTasks.task_type -> export implementation.
# All three callables share the signature
# (uid, target_id, about, export_task, chapters=..., single=...).
EXPORT_FUNC_MAP = {
    "last_task": last_task,
    "time_period_task": time_period_task,
    "history_task": history_task,
}


def single_export(uid, about, export_task, chapters=None):
    """Export a report for the single target in ``export_task.params``.

    :returns: (file_io, display_name)
    :raises ValueError: for an unknown ``export_task.task_type`` (the
        original used ``assert``, which is stripped under ``python -O``)
    """
    export_func = EXPORT_FUNC_MAP.get(export_task.task_type)
    if export_func is None:
        raise ValueError(
            "unknown export task_type: {}".format(export_task.task_type)
        )
    target_id = export_task.params.targets[0].get("target_id", "")
    return export_func(
        uid,
        target_id,
        about,
        export_task,
        chapters=[] if chapters is None else chapters,
        single=True,
    )


def batch_export(uid, about, export_task, chapters=None):
    """Export reports for every target in ``export_task.params`` as one zip.

    Per-target progress is mirrored both to redis (for the UI, keyed by
    ``export_batch:<uid>``) and onto the export task record. The temporary
    directory is removed regardless of outcome.

    :returns: (zip_file_io, display_name)
    :raises ValueError: for an unknown task_type (previously an ``assert``)
    :raises Exception: on word generation or packing failure (task marked failed)
    """
    chapters = [] if chapters is None else chapters
    file_dir_path = ""
    try:
        r_key = f"export_batch:{uid}"
        product_nickname_cn = about.get("product_nickname_cn", "")
        targets = export_task.params.targets
        first_target = targets[0]
        name = (
            (first_target.get("target_name", "") or first_target.get("target_url", ""))
            .replace("://", "__")
            .replace("/", "_")
        )
        name = f"[{name}等{len(targets)}个业务系统]"
        file_dir_path, temp_dir, dir_filename = create_tmp_dir(
            uid, product_nickname_cn, r_key, dir_name=name
        )
        logger.info(f"创建临时文件夹 {file_dir_path}")
        export_func = EXPORT_FUNC_MAP.get(export_task.task_type)
        if export_func is None:
            raise ValueError(
                "unknown export task_type: {}".format(export_task.task_type)
            )
        logger.info(f"开始导出报告 {export_task.task_type}")
        target_ids = [
            target.get("target_id")
            for target in targets
            if target.get("target_id")
        ]
        try:
            files_list = []
            steps = len(target_ids) + 1  # N word documents + 1 packing step
            for idx, target_id in enumerate(target_ids, 1):
                target_info = (
                    Targets.objects(target_id=target_id)
                    .only("target_group_id")
                    .first()
                )
                if not target_info:
                    continue
                target_group = TargetGroups.objects(
                    target_group_id=target_info.target_group_id
                ).first()
                if not target_group:
                    continue
                target_group_name = target_group.target_group_name
                file_io, _name = export_func(
                    uid, target_id, about, export_task, chapters=chapters
                )
                word_name = f"{_name}（{target_group_name}） {product_nickname_cn} 安全监测报告.docx"
                # "/" in a URL-derived name would break the path inside the zip.
                word_name = word_name.replace("://", "__").replace("/", "_")
                files_list.append((file_io, word_name))
                # Cap at 99 so 100% is only ever reported after packing succeeds.
                progress = min(int((idx / steps) * 100), 99)
                r_info = {
                    "value": progress,
                    "title": f"生成第{idx}个word中.",
                    "status": 3,
                    "message": f"生成第{idx}个word中.",
                    "exception": "",
                }
                logger.info(f"生成第{idx}个word, word_name: {word_name}, 进度: {progress}")
                export_task.result.progress = progress
                export_task.save()
                set_exhkey(r_key, r_info)
        except Exception as e:
            r_info = {
                "status": 0,
                "message": "word生成异常..",
                "exception": str(e),
            }
            set_exhkey(r_key, r_info)
            export_task.result.status = ReportExportStatus.failed.value
            export_task.save()
            logger.exception("word生成异常: {!s:.200}".format(e))
            raise Exception("word生成异常， 导出失败") from e
        fileio = pack_files(
            file_contents=files_list, r_key=r_key, file_dir_path=file_dir_path
        )
        if not fileio:
            export_task.result.status = ReportExportStatus.failed.value
            export_task.save()
            raise Exception("word打包异常， 导出失败")
        return fileio, name
    finally:
        if file_dir_path:
            logger.info(f"删除临时文件夹 {file_dir_path}")
            shutil.rmtree(file_dir_path, ignore_errors=True)


def create_tmp_dir(uid, product_nickname_cn, r_key, dir_name=None):
    """Create a clean per-user export directory tree.

    Layout: ``EXPORT_DIR/<uid>/<dir_filename>/`` — any previous tree for
    the same uid is removed first.

    :param uid: user id, used as the per-batch directory name
    :param product_nickname_cn: product name embedded in the folder name
    :param r_key: redis key for failure reporting
    :param dir_name: explicit folder prefix; defaults to a local timestamp
    :returns: (file_dir_path, temp_dir, dir_filename)
    :raises Exception: when directory creation fails (status mirrored to redis)
    """
    try:
        temp_dir = os.path.join(EXPORT_DIR, uid)  # per-batch directory
        shutil.rmtree(temp_dir, ignore_errors=True)
        # Fix: os.makedirs also creates a missing EXPORT_DIR, where the
        # original os.mkdir would raise FileNotFoundError.
        os.makedirs(temp_dir)
        if dir_name:
            dir_filename = f"{dir_name} {product_nickname_cn} 安全报告"
        else:
            datestr = iso_format_to_local(arrow.utcnow().isoformat(), 1)
            dir_filename = f"{datestr} {product_nickname_cn} 安全报告"
        file_dir_path = os.path.join(temp_dir, dir_filename)
        os.makedirs(file_dir_path)
    except Exception as e:
        r_info = {"status": 0, "message": "目录创建失败..", "exception": str(e)}
        set_exhkey(r_key, r_info)
        logger.exception("目录创建失败: {!s:.200}".format(e))
        raise Exception("目录异常， 导出失败") from e
    return file_dir_path, temp_dir, dir_filename


def pack_files(
    file_contents: List[Tuple[io.BytesIO, str]],
    r_key,
    file_dir_path,
    append_file_type="docx"
) -> Optional[io.BytesIO]:
    """
    Pack in-memory files into a single in-memory zip archive.

    :param file_contents: [(file_io1, 'filename1'), (file_io2, 'filename2')]
    :param r_key: redis key for progress reporting
    :param file_dir_path: export directory (used for logging only)
    :param append_file_type: expected file extension
    :return: BytesIO holding the zip, or None on failure
    """
    try:
        zip_data = io.BytesIO()
        # ``with`` finalizes the archive even if a writestr raises
        # (the original only closed it on the success path).
        with zipfile.ZipFile(zip_data, 'w', zipfile.ZIP_DEFLATED) as zip_file:
            for idx, (content, filename) in enumerate(file_contents):
                if not filename.endswith(f".{append_file_type}"):
                    # Number the fallback names: a single "(unknown)" name
                    # would collide when several filenames fail the check.
                    filename = f"(unknown_{idx}).{append_file_type}"
                zip_file.writestr(filename, content.getvalue())
        r_info = {
            "value": 100,
            "title": "打包完成",
            "status": 1,
            "message": "打包完成.",
            "exception": "",
        }
        set_exhkey(r_key, r_info)
        logger.info("批量报告打包完成: {}".format(file_dir_path))
        return zip_data
    except Exception as e:
        r_info = {"status": 0, "message": "word打包异常..", "exception": str(e)}
        set_exhkey(r_key, r_info)
        logger.exception("word打包异常: {!s:.200}".format(e))
        return None


def do_export(uid, about, export_task):
    """Run an export task end to end and persist the resulting file.

    Chooses batch (zip) or single (docx) export by target count, stores the
    file on the task record, and returns the export_task_id on success or
    "" on failure (the record is marked failed in that case).
    """
    logger.info("开始导出报告, uid: {}, 导出类型: {}, 任务id: {}".format(uid, export_task.task_type, export_task.export_task_id))
    export_task.result.status = ReportExportStatus.pending.value
    export_task.save()
    requested = export_task.params.chapters
    chapter_names = [
        cn_name
        for key, cn_name in REPORT_TPL_CHAPTERS.items()
        if key in requested
    ]
    try:
        is_batch = len(export_task.params.targets) > 1
        if is_batch:
            fileio, file_name = batch_export(uid, about, export_task, chapters=chapter_names)
        else:
            fileio, file_name = single_export(uid, about, export_task, chapters=chapter_names)
        file_type = "zip" if is_batch else "docx"
        export_task.result.file.put(fileio.getvalue())
        export_task.result.filename = file_name
        export_task.result.file_type = file_type
        export_task.result.status = ReportExportStatus.success.value
        export_task.result.created_at = arrow.utcnow().datetime
        export_task.result.progress = 100
        saved = export_task.save()
        logger.info("导出任务完成, uid: {}, 导出类型: {}, 任务id: {}".format(uid, export_task.task_type, export_task.export_task_id))
        return saved.export_task_id
    except Exception as e:
        logger.exception("导出任务异常: uid: {}, 任务id: {}, exception: {!s:.200}".format(uid, export_task.export_task_id, e))
        export_task.result.status = ReportExportStatus.failed.value
        export_task.save()
        return ""
