import datetime
import json
import os
import io
import arrow
import bson
import openpyxl
from bson import ObjectId
from flask import send_file, g, request, jsonify
from mongoengine import Q

from app.libs.score import Score
from app.libs.display import TASK_TARGET_STATUS_MAP
from app.libs.redprint import RedPrint
from app.libs.report import ExportMonitorDocxHandler, OneTaskExportMonitorDocxHandler
from app.reports.export import export_port
from app.reports.data import WEB_SEC_TASK_TYPE
from app.libs.public_internal_funcs import get_user_name_by_id, Query, QueryType, get_asset_by_job_id, get_job
from app.config.settings import BASE_DIR, DATETIME_FMT, CELERY_TASK_NEW_DATA_CREATED_AT
from app.validators.report import ReportDownloadForm, OneTaskReportDownloadForm, ReportDownloadTaskForm, ExportListForm, ReportDeleteForm
from app.db.models.celery_task import CeleryTask
from app.db.models.tasks import Task
from app.db.models.asset_tasks import AssetTasks
from app.db.models.ipv6_check_urls import IPv6CheckUrls
from app.celery.export_report import celery_app, report_download_task
from app.errors import ServerError, ParameterError


# Route group ("red print") under which every report endpoint is registered.
api = RedPrint('report')

# Maps the `download_type` code stored on a download task to its display
# label ("0" = merged export, "1" = batch export).
DOWNLOAD_TYPE_MAP = {
    "0": "合并导出",
    "1": "批量导出"
}

# Maps a CeleryTask result status to its display text
# (waiting / in progress / failed / succeeded).
STATUS_MAP = {
    "normal": "等待中",
    "pending": "进行中",
    "error": "失败",
    "success": "成功",
}


@api.route('/download', methods=['GET'])
# @cache.cached(make_cache_key=make_cache_key_uid, timeout=60*10, query_string=True)
def download():
    """Export the merged security-monitoring report (docx) for a job.

    Derives the report time range from the job's task data (or the form's
    explicit dates), aggregates warnings into a total score, renders the
    monitoring template and streams the resulting document.
    """
    # NOTE(review): the other endpoints call validate_for_api(); confirm this
    # form validates itself on construction.
    form = ReportDownloadForm()
    task_type_mapping = form.get_task_type_mapping()

    # Collect every task's start/end time plus its session id for the template.
    task_session_ids = []
    times = []
    task_start_times = []
    for _, tasks in task_type_mapping.items():
        for task in tasks:
            if start_time := task.get("startTime"):
                times.append(start_time)
            if end_time := task.get('endTime'):
                times.append(end_time)
            task_session_ids.append(task.get("taskSessionId"))
            task_start_times.append(task.get("startTime"))

    # Shift timestamps by +8h (presumably UTC -> UTC+8 for display — the
    # stored values look UTC given the utcnow() defaults).
    start = arrow.get(min(times, default=datetime.datetime.utcnow())).shift(hours=8).datetime
    end = arrow.get(max(times, default=datetime.datetime.utcnow())).shift(hours=8).datetime
    base_template = os.path.join(BASE_DIR, "files/ScanVMAX 安全监测报告模版.docx")
    # An explicit date range on the form overrides the derived one.
    if form.start_date.data:
        start = datetime.datetime.strptime(form.start_date.data, DATETIME_FMT)
        end = datetime.datetime.strptime(form.end_date.data, DATETIME_FMT)

    # Aggregate warnings across all scored task types for the total score.
    score_types = ['vul', 'ssl', 'securityEvent', 'content']
    all_warnings = []
    for score_type in score_types:
        # BUG FIX: a score type absent from the mapping returned None and
        # crashed the inner loop; default to an empty list instead.
        for data in task_type_mapping.get(score_type) or []:
            all_warnings.extend(data.get("result", {}).get("warnings", []))

    total_score = Score.score_info(event_map={}, warnings=all_warnings)
    # Context shared by every section of the docx template.
    base_context = {
        "host": form.job.target.host,
        "startTime": start,
        "endTime": end,
        "note": form.job.note,
        "target": form.job.targetUrl,
        "reportDate": datetime.datetime.now().strftime("%Y/%m/%d"),
        "score": total_score,
        "http_node_data": form.get_http_node_data(),
        "ping_node_data": form.get_ping_node_data(),
        "task_session_ids": task_session_ids,
        "task_start_times": task_start_times,
        "year": str(datetime.datetime.now().year)
    }

    # Deduplicated source IPs gathered across every task type.
    source_ip_list = []
    for task_list in task_type_mapping.values():
        for task in task_list:
            if _source_ip := task.get('sourceIp'):
                source_ip_list.append(_source_ip)

    base_context['sourceIps'] = list(set(source_ip_list))

    export_handler = ExportMonitorDocxHandler(template=base_template, base_context=base_context,
                                              **task_type_mapping)
    file_io = export_handler.export()
    return send_file(file_io, as_attachment=True, attachment_filename="test.docx")


@api.route('/one_task/download', methods=['GET'])
def once_task_report_download():
    """Export the report of a single task run as a docx file.

    Non web-security tasks (asset change / IPv6 compliance) are rendered
    from a task-type-specific template; web-security tasks are delegated
    to `export_port`.
    """
    template_map = {
        "asset": "ScanV单次报告_资产变动.docx",
        "ipv6": "ScanV单次报告_IPv6合规检测.docx"
    }
    form = OneTaskReportDownloadForm().validate_for_api()
    task = form.task
    if task.get("taskType") not in WEB_SEC_TASK_TYPE:
        task_template_file = template_map.get(task["taskType"])
        # BUG FIX: an unmapped task type used to reach os.path.join with
        # None and crash with TypeError; reject it explicitly instead.
        if not task_template_file:
            raise ParameterError(msg="任务参数错误")
        base_template = os.path.join(BASE_DIR, "files", task_template_file)
        all_warnings = task.get("result", {}).get("warnings", [])
        target_status = task.get("result", {}).get("targetStatus", {}).get("status", "good")
        total_score = Score.score_info(event_map={}, warnings=all_warnings)
        # No warnings but a non-good target status: the score is unknowable.
        if target_status != "good" and not all_warnings:
            total_score.update({"level": "未知"})
        base_context = {
            "host": form.job.target.host,
            # Shift by +8h for display (stored times default to utcnow()).
            "startTime": arrow.get(task.get("startTime", datetime.datetime.utcnow())).shift(hours=8).datetime,
            "endTime": arrow.get(task.get("endTime", datetime.datetime.utcnow())).shift(hours=8).datetime,
            "note": form.job.note,
            "target": form.job.targetUrl,
            "reportDate": datetime.datetime.now().strftime("%Y/%m/%d"),
            "score": total_score,
            "sourceIps": [task.get("sourceIp")] if task.get("sourceIp") else [],
            "targetStatusDisplay": TASK_TARGET_STATUS_MAP.get(target_status, ""),
            "task_session_ids": [task.get("taskSessionId"), ],
            "task_start_times": [task.get("startTime"), ],
            "year": str(datetime.datetime.now().year)
        }
        export_handler = OneTaskExportMonitorDocxHandler(template=base_template, base_context=base_context,
                                                         **form.task_type_mapping)
        file_io = export_handler.export()
    else:
        file_io = export_port(task_obj_id=task.get("_id"))
    return send_file(file_io, as_attachment=True, attachment_filename="test.docx")


@api.route('/download_task', methods=['POST'])
def download_task():
    """Create an asynchronous report-download task and enqueue it on Celery.

    Persists a CeleryTask record describing the export, dispatches the
    worker job, and returns the record id so the client can poll progress.
    """
    # 1. Validate the request payload.
    form = ReportDownloadTaskForm().validate_for_api()

    # Resolve the operator's display name when a uid is supplied.
    uid = form.uid
    operator = get_user_name_by_id(str(uid)) if uid and isinstance(uid, ObjectId) else None

    # Optional explicit report date range.
    if form.start_date.data:
        start, end = form.start_date.data, form.end_date.data
    else:
        start, end = None, None

    download_type = form.download_type.data
    report_type = form.report_type.data
    # Output file format of the generated report.
    file_type = form.file_type.data
    job_id_list = form.job_ids.object_data.split(",")

    # Index asset metadata by job id for the lookups below.
    asset_index = {
        str(asset.id): {
            "target_url": asset.targetUrl,
            "source_ip": asset.sourceIp,
            "name": asset.note,
        }
        for asset in get_asset_by_job_id(job_id_list)
    }

    job_info_list = [
        {
            "job_id": jid,
            "target_url": asset_index.get(jid, {}).get("target_url", ""),
            "source_ip": asset_index.get(jid, {}).get("source_ip", ""),
            "name": asset_index.get(jid, {}).get("name", ""),
        }
        for jid in job_id_list
    ]

    # 2. Assemble the task record to persist.
    celery_task_params = {
        "celery_task_id": "",
        "task_type": "report_download",
        "operator": operator or g.operator,
        "params": {
            "job_ids": job_info_list,
            "download_type": download_type,
            "report_type": report_type,
            "start_date": start,
            "end_date": end,
        },
        "result": {
            "status": "normal",
            "process": 0,
        },
    }

    # Test hook: run the export synchronously, bypassing Celery entirely.
    if request.args.get("is_file_test_and_no_celery_and"):
        file_path = export_port(
            download_type=download_type,
            report_type=report_type,
            start_time=start,
            end_time=end,
            job_ids=job_id_list,
        )
        return jsonify({'code': 200, 'msg': 'ok', 'result': file_path})

    # 3. Save the record, then hand the work to the Celery worker and
    # store the broker-side task id back on the record.
    saved = CeleryTask().from_dict(celery_task_params).save()
    download_task_id = str(saved.pk)

    async_result = report_download_task.delay(task_id=download_task_id, file_type=file_type)
    CeleryTask.objects(pk=download_task_id).update(celery_task_id=str(async_result.id))

    return jsonify({'code': 200, 'msg': 'ok', 'result': {"download_task_id": download_task_id}})


@api.route('/download_progress', methods=['GET'])
def download_progress():
    """Return the completion percentage of a report-download task.

    Raises ServerError when the task does not exist or has failed.
    """
    download_task_id = request.args.get('download_task_id', '')
    celery_task = CeleryTask.objects.filter(pk=download_task_id).first()
    # BUG FIX: an unknown task id used to raise AttributeError on None.
    if not celery_task:
        raise ServerError(msg="下载任务不存在")
    if celery_task.result.status == "error":
        message = "文件下载失败，请重新下载"
        raise ServerError(msg=message)
    return jsonify({'code': 200, 'msg': 'ok', 'result': {"progress": celery_task.result.process}})


@api.route('/download_completed', methods=['GET'])
def download_completed():
    """Stream the generated report file of a finished download task.

    Raises ServerError when the task is missing, still running, or its
    file was never produced.
    """
    download_task_id = request.args.get('download_task_id', '')

    # 1. Look up the task record holding the generated file.
    celery_task = CeleryTask.objects.filter(pk=download_task_id).first()
    # BUG FIX: an unknown task id used to raise AttributeError on None.
    if not celery_task:
        raise ServerError(msg="下载任务不存在")

    file = celery_task.result.file
    filename = celery_task.result.filename
    if not file or not filename:
        message = "下载的文件不存在，请重新下载"
        if celery_task.result.status in ('normal', 'pending'):
            message = "下载的文件还没有生成好，请稍后再试"
        raise ServerError(msg=message)

    # 2. Rewind the stored file and send it. Interior dots in the base name
    # are replaced with underscores, keeping only the real extension
    # (presumably to sanitise the attachment name — confirm intent).
    file.seek(0)
    name_parts = filename.split('.')
    attachment_filename = "{}.{}".format("_".join(name_parts[:-1]), name_parts[-1])
    return send_file(file, as_attachment=True, attachment_filename=attachment_filename)


@api.route('/download_revoke', methods=['POST'])
def download_revoke():
    """Cancel a pending/running report-download task via Celery revoke."""
    payload = json.loads(request.data)
    task_id = payload.get('download_task_id')

    # Look up the record that owns the Celery job; missing id is an error.
    record = CeleryTask.objects.filter(pk=task_id).first()
    if not record:
        raise ServerError(msg="下载任务不存在")

    # Only tasks that are still in flight can be revoked.
    terminal_states = ("success", "error", "revoke")
    if record.celery_task_id and record.result.status not in terminal_states:
        celery_app.control.revoke(record.celery_task_id, terminate=True)
        CeleryTask.objects(pk=task_id).update(result__status="revoke")

    return jsonify({'code': 200, 'msg': 'ok'})


@api.route('/export_list', methods=['GET'])
def export_list():
    """Paginated listing of report-download tasks with optional filters.

    Filters: asset (targetUrl regex), source_ip (sourceIp regex), status
    (task result status) and username (operator). Non-admin callers are
    restricted to their own tasks. The response shape differs for admin
    vs. non-admin callers (see the two jsonify branches at the end).
    """
    form = ExportListForm().validate_for_api()
    page = form.page.data or 1
    limit = form.limit.data or 10
    asset = form.asset.data
    status = form.status.data
    source_ip = form.source_ip.data
    username = form.username.data

    # Administrators see everyone's tasks; any other role only its own.
    uid = g.audit_uid
    if g.role.name == '管理员':
        uid = None

    # Only tasks created after this cutoff (new data format) are listed.
    new_data_created_at = arrow.get(CELERY_TASK_NEW_DATA_CREATED_AT).datetime

    # Two parallel filter representations are built below:
    #   `queries`   — raw-Mongo dict fed to the Query helper (paged results)
    #   `stats_*_query` — mongoengine Q objects recombined later to compute
    #                     the unpaged total count.
    queries = {
        "create_time": {
            "$gt": new_data_created_at
        }
    }
    stats_creat_time_query = Q(create_time__gt=new_data_created_at)

    stats_asset_or_source_ip_query = None
    stats_asset_source_ip_query = None
    if asset and source_ip:
        # Both filters present: find jobs matching BOTH regexes, then
        # restrict tasks to those job ids.
        _job_ids = []
        _query = {
            "targetUrl": {
                "$regex": asset
            },
            "sourceIp": {
                "$regex": source_ip
            }
        }
        if uid:
            _query["uid"] = uid
        jobs_res = get_job(_query, fields=["id", "targetUrl", "sourceIp", "note"])
        if jobs_res.results:
            for job in jobs_res.results:
                _job_ids.append(str(job.id))
        else:
            # No matching job: use an impossible id so the result set is
            # empty rather than unfiltered.
            _job_ids = [""]
        if _job_ids:
            queries['params.job_ids.job_id'] = {
                '$in': _job_ids
            }
            stats_asset_source_ip_query = Q(params__job_ids__job_id__in=_job_ids)
    else:
        # At most one of the two filters: collect job ids per filter and
        # union them.
        asset_job_ids = []
        if asset:
            asset_query = {
                "targetUrl": {
                    "$regex": asset
                }
            }
            if uid:
                asset_query["uid"] = uid
            jobs_res = get_job(asset_query, fields=["id", "targetUrl", "sourceIp", "note"])
            if jobs_res.results:
                for job in jobs_res.results:
                    asset_job_ids.append(str(job.id))
            else:
                # Same empty-sentinel trick as above.
                asset_job_ids = [""]

        source_ip_job_ids = []
        if source_ip:
            source_ip_query = {
                "sourceIp": {
                    "$regex": source_ip
                }
            }
            if uid:
                source_ip_query["uid"] = uid
            jobs_res = get_job(source_ip_query, fields=["id", "targetUrl", "sourceIp", "note"])
            if jobs_res.results:
                for job in jobs_res.results:
                    source_ip_job_ids.append(str(job.id))
            else:
                source_ip_job_ids = [""]

        if any([asset_job_ids, source_ip_job_ids]):
            queries['params.job_ids.job_id'] = {
                '$in': list(set(asset_job_ids + source_ip_job_ids))
            }
            stats_asset_or_source_ip_query = Q(
                params__job_ids__job_id__in=list(set(asset_job_ids + source_ip_job_ids))
            )

    stats_status_query = None
    if status:
        queries['result.status'] = status
        stats_status_query = Q(result__status=status)

    stats_operator_query = None
    if uid:
        # Non-admin: force the operator filter to the caller's own name,
        # overriding any username supplied in the form.
        username = get_user_name_by_id(uid)
        queries["operator"] = username
        stats_operator_query = Q(operator=username)

    if username:
        queries["operator"] = username
        stats_operator_query = Q(operator=username)

    # Paged query, newest tasks first.
    query = Query(
        query_type=QueryType.FIND,
        queries=queries,
        pagination=(page, limit),
        order_by="-create_time",
        col=CeleryTask,
    )
    res = query.execute()

    # AND together whichever stats Q objects were set, for the total count.
    stats_query = None
    for _q in [
        stats_creat_time_query,
        stats_asset_or_source_ip_query,
        stats_asset_source_ip_query,
        stats_status_query,
        stats_operator_query,
    ]:
        if not _q:
            continue
        if not stats_query:
            stats_query = _q
        else:
            stats_query = stats_query & _q

    total_count = CeleryTask.objects(stats_query).count()

    if not res.count:
        return jsonify({'code': 200, 'result': [], 'msg': 'ok', 'page': page, 'count': 0, 'total_count': total_count})

    results = []
    for r in res.results:
        # Build (name, "url（source_ip）") display pairs for each job on
        # the task; jobs with no target_url are skipped.
        _job_ids = r.params.job_ids
        _assets = []
        for _job_id in _job_ids:
            if not _job_id.get("target_url"):
                continue
            if _job_id["source_ip"]:
                _asset = (_job_id["name"], f"{_job_id['target_url']}（{_job_id['source_ip']}）")
            else:
                _asset = (_job_id["name"], f"{_job_id['target_url']}")
            _assets.append(_asset)

        if not _assets:
            _assets = [("", "")]

        # Summarise multiple assets as "<first>等N个" (= "<first> and N total").
        if len(_assets) > 1:
            asset_str = f"{_assets[0][1]}等{len(_assets)}个"
        else:
            asset_str = _assets[0][1]

        full_status = f"{STATUS_MAP[r.result.status]} {r.result.process}%"
        # The generated file is downloadable only while it exists and for
        # at most 7 days after it was produced.
        is_downloadable = True if r.result.process == 100 else False
        if r.result.create_at:
            if arrow.get(r.result.create_at).shift(days=7) < arrow.utcnow():
                is_downloadable = False
        else:
            is_downloadable = False
        filename = r.result.filename
        result = {
            'download_task_id': str(r.id),
            'asset': asset_str,
            'all_assets': [" ".join(_a) for _a in _assets],
            'download_type': DOWNLOAD_TYPE_MAP[r.params.download_type],
            'task_create_time': arrow.get(r.create_time).isoformat(),
            'report_created_at': arrow.get(r.result.create_at).isoformat() if r.result.create_at else "",
            'report_create_time': {
                'start_date': str(r.params.start_date or ""),
                'end_date': str(r.params.end_date or "")
            },
            'status_text': full_status,
            'status': r.result.status,
            'process': f"{r.result.process}%",
            'is_downloadable': is_downloadable,
            'username': r.operator,
            'file_type': "" if not filename else filename.split(".")[-1]
        }
        results.append(result)
    # Non-admin callers get a flat response; admins get a nested "results"
    # envelope. NOTE(review): the two shapes differ — confirm the frontend
    # expects both.
    if uid:
        response = jsonify(
            {
                'code': 200,
                'result': results,
                'msg': 'ok',
                'page': page,
                'count': len(results),
                'total_count': total_count
            }
        )
    else:
        response = jsonify(
            {
                "code": 200,
                "msg": 'ok',
                "results": {
                    "count": len(results),
                    "total_count": total_count,
                    "result": results,
                    "page": page,
                },
            }
        )
    return response


@api.route('/delete', methods=['POST'])
def delete():
    """Delete download tasks, revoking any live Celery jobs first."""
    form = ReportDeleteForm().validate_for_api()
    task_ids = form.download_task_ids.data

    # Fetch the matching records; an empty match is a client error.
    records = CeleryTask.objects.filter(pk__in=task_ids)
    if not records:
        raise ServerError(msg="下载任务不存在")

    for record in records:
        # Stop the worker job (if any) before removing its record.
        if record.celery_task_id:
            celery_app.control.revoke(record.celery_task_id, terminate=True)
        CeleryTask.objects(pk=record.id).delete()

    return jsonify({'code': 200, 'msg': 'ok'})


def _fill_link_sheet(sheet, headers, rows):
    """Write *headers* into row 1 of *sheet*, then *rows* from row 2 on."""
    for col, header in enumerate(headers, start=1):
        sheet.cell(row=1, column=col, value=header)
    for row_idx, row in enumerate(rows, start=2):
        for col, value in enumerate(row, start=1):
            sheet.cell(row=row_idx, column=col, value=value)


@api.route('/download_ipv6', methods=['GET'])
def download_ipv6():
    """Export the depth-2/3 crawled URL details of an IPv6 check as Excel.

    Builds a two-sheet workbook (second-level / third-level links) and
    streams it as an attachment named after the task.
    """
    task_session_id = request.args.get('task_session_id', '')
    query = {"taskSessionId": task_session_id}
    # Administrators may access any task; other roles only their own.
    uid = g.audit_uid
    if g.role.name == '管理员':
        uid = None
    if uid:
        query["uid"] = uid
    task = Task.objects.filter(**query).first()
    if not task:
        return jsonify({'code': 200, 'msg': '任务不存在'})
    task_name = task.name

    # Split the crawled URLs into depth-2 and depth-3 row data.
    urls = IPv6CheckUrls.objects.filter(task_session_id=task_session_id, depth__in=[2, 3])
    depth_2_urls = []
    depth_3_urls = []
    for url_doc in urls:
        data = [
            url_doc.url,
            # 2xx responses count as reachable.
            '可访问' if 200 <= url_doc.status_code < 300 else '不可访问',
            str(url_doc.status_code),
            '外链' if url_doc.is_foreign_link else '内链',
            url_doc.url_type,
            url_doc.referer
        ]
        if url_doc.depth == 2:
            depth_2_urls.append(data)
        elif url_doc.depth == 3:
            depth_3_urls.append(data)

    headers = ["URL地址", "检测结果", "状态码", "类型", "链接类型", "上级页面"]
    workbook = openpyxl.Workbook()
    workbook.active = 0
    workbook.active.title = "二级链接"
    _fill_link_sheet(workbook.active, headers, depth_2_urls)
    workbook.create_sheet("三级链接")
    workbook.active = 1
    _fill_link_sheet(workbook.active, headers, depth_3_urls)
    workbook.active = 0

    file_io = io.BytesIO()
    workbook.save(file_io)
    file_io.seek(0)
    # BUG FIX: openpyxl emits OOXML (.xlsx) content; labelling the download
    # ".xls" mislabels the format and triggers a warning when opened in Excel.
    return send_file(file_io, as_attachment=True, attachment_filename=f"【{task_name}】二三级链接明细.xlsx")


@api.route('/asset_task_report', methods=['GET'])
def download_asset_task_report():
    """Export the docx report of a single asset task.

    Validates the id, checks the caller may access the task (admins are
    exempt from the ownership filter), then streams the rendered report.
    """
    asset_task_id = request.args.get('asset_task_id', '')
    if not asset_task_id:
        raise ParameterError(msg="任务参数错误")
    try:
        asset_task_id = bson.ObjectId(asset_task_id)
    except bson.errors.BSONError:
        # InvalidId subclasses BSONError, so malformed ids land here.
        raise ParameterError(msg="任务参数错误")

    query = {"_id": asset_task_id}
    # Administrators may export any task; other roles only their own.
    uid = g.user.id
    if g.role.name == '管理员':
        uid = None
    if uid:
        query["uid"] = uid
    # NOTE(review): `.objects.find(query)` is pymongo-style; if AssetTasks is
    # a plain mongoengine Document this would raise AttributeError — confirm
    # its manager actually exposes `find`.
    if not (AssetTasks.objects.find(query).first()):
        raise ParameterError(msg="任务不存在")
    file_io = export_port(asset_task_id=str(asset_task_id))
    return send_file(file_io, as_attachment=True, attachment_filename="report.docx")
