import re
import math
from hashlib import md5

from app.db.models.alert_groups import AlertGroup
from app.db.models.jobs import Job
from app.db.models.tasks import TaskSpiderUrl, Task
from app.db.models.wass import ScanEvents
from app.libs.enums import WS_TASK_GROUP_TUPLE


def convert_to_camel(s):
    """Convert a snake_case string to camelCase.

    Each ``_x...`` run of lowercase letters is replaced by its
    title-cased remainder, e.g. ``task_session_id`` -> ``taskSessionId``.
    Segments that are not lowercase letters (digits, uppercase) are
    left untouched, matching the original regex ``_[a-z]+``.

    :param s: snake_case string (may be empty)
    :return: camelCase string
    """
    # Single deterministic pass with re.sub instead of findall + repeated
    # str.replace over a set (whose iteration order is not guaranteed).
    return re.sub(r'_[a-z]+', lambda m: m.group()[1:].title(), s)


def get_warning_id(task_type, job_id, url, risk_title):
    """Build the unique id of an asset vulnerability.

    The id is the hex MD5 digest of the four fields joined with ``|``,
    so the same (task_type, job_id, url, risk_title) tuple always maps
    to the same id.

    :return: 32-char lowercase hex string
    """
    key = "|".join((str(task_type), str(job_id), str(url), str(risk_title)))
    return md5(key.encode()).hexdigest()


def update_job_last_ipv6_status(task):
    """Persist the latest IPv6 support status of a task onto its Job.

    Reads ``is_support_ipv6`` and ``score`` from ``task.result.addition``
    and writes them to the Job identified by ``task.jobId``. A non-bool
    ``is_support_ipv6`` value maps to the status string ``"unknown"``.

    :param task: task document carrying ``result.addition`` and ``jobId``;
        a falsy value is a no-op
    """
    if not task:
        return

    addition = task.result.addition
    flag = addition.get('is_support_ipv6', "unknown")
    score = addition.get('score', 0)

    # Only a genuine bool is trusted; anything else (including the
    # "unknown" default above) is reported as "unknown".
    if isinstance(flag, bool):
        status = "support" if flag else "nonsupport"
    else:
        status = "unknown"

    Job.objects(pk=task.jobId).update_one(isSupportIpv6=status, ipv6Score=score)


def update_task_alert_settings(job, task):
    """Copy the job's alert settings onto the task, overriding selected
    fields from the first referenced alert group when it still exists.

    If the first alert group id no longer resolves, the stale id list is
    cleared on both job and task.

    :param job: Job document providing ``alertSettings``
    :param task: Task document to receive the settings
    :return: the (job, task) pair, mutated in place
    """
    # NOTE(review): this assigns the embedded document by reference; if
    # mongoengine does not copy on assignment, the field mutations below
    # would write through to job.alertSettings as well — confirm intended.
    task.alertSettings = job.alertSettings
    if job.alertSettings.alertGroupIds:
        # Only the FIRST group id is honored; extra ids are ignored here.
        alert_group = AlertGroup.objects.filter(pk=job.alertSettings.alertGroupIds[0]).first()
        if alert_group:
            task.alertSettings.enable = alert_group.alertSettings.enable
            task.alertSettings.notification = alert_group.alertSettings.notification
            task.alertSettings.smsAllowedTime = alert_group.alertSettings.smsAllowedTime
            task.alertSettings.emailAllowedTime = alert_group.alertSettings.emailAllowedTime
            task.alertSettings.smsDailyLimit = alert_group.alertSettings.smsDailyLimit
            task.alertSettings.emailDailyLimit = alert_group.alertSettings.emailDailyLimit
        else:
            # Group was deleted: drop the dangling reference everywhere.
            task.alertSettings.alertGroupIds = []
            job.alertSettings.alertGroupIds = []
    return job, task


def save_spider_urls(task_id, batch_size=3000):
    """Collect the deduplicated spider URLs of a task session and persist
    them as TaskSpiderUrl documents, split into fixed-size batches.

    Re-running for the same task session first deletes the previously
    saved batches (e.g. when a task is repeated), then rewrites them.

    :param task_id: primary key of the Task to process
    :param batch_size: max URLs per TaskSpiderUrl document (default 3000,
        the historical chunk size)
    :return: total number of distinct URLs saved, or ``None`` when the
        task is missing, not a web-scan task, has no job, or has no URLs
    """
    task = Task.objects.filter(id=task_id).first()
    # Only web-scan task types carry spider URLs.
    if (not task) or (task.taskType not in WS_TASK_GROUP_TUPLE):
        return
    job = Job.objects.filter(id=task.jobId).first()
    if not job:
        return
    # If this session was already written (e.g. repeat_task), delete the
    # old batches and rewrite from scratch.
    if exists_objs := TaskSpiderUrl.objects.filter(taskSessionId=task.taskSessionId):
        exists_objs.delete()
    db_data = ScanEvents._get_db()["scan_spider_urls"].find({"task_session_id": task.taskSessionId}, {"url": 1})
    # Deduplicate while dropping empty/missing urls.
    urls = list({p.get("url") for p in db_data if p.get("url")})
    if not urls:
        return
    count = len(urls)
    pages = math.ceil(count / batch_size)
    for i in range(pages):
        _urls = urls[i * batch_size: (i + 1) * batch_size]
        TaskSpiderUrl(
            uid=task.uid,
            taskObjId=task.id,
            taskId=task.taskId,
            taskSessionId=task.taskSessionId,
            taskType=task.taskType,
            jobId=task.jobId,
            targetUrl=job.targetUrl,
            spiderUrls=_urls,
            endTime=task.endTime
        ).save()
    return count
