import base64
import copy
import datetime
import json
import logging
import os
import time
from collections import defaultdict
from collections import OrderedDict, defaultdict
from typing import Dict, List, Union
from urllib import request

from celery import group
from flask import current_app
from openstack import exceptions

import json

from tools import db
from app.models.helpersModel import ApsChedulerJob
import traceback

from app import create_app
from tools.apscheduler_tools import scheduler
from app.common.libs.authentication import create_token
from app.common.range_manager import RangeManager
from app.models import (
    CtfQuestion,
    CtfRedBlue,
    CtfRedBlueFlag,
    User,
    ctfModel,
    rangeModel,
    switchModel,
    taasModel,
    userModel,
    Chapter,
    Course,
    VmType,
    Image,
    RangeLinkModule,
    RangeVm, PresetRoleChoices, CtfRedBlueGroup, GroupType, LinkCtfRedBlueScoreInfo, CtfRedBlueScore,
)
from configs.settings import flask_config
from tools import celery_app, db, redis_conn, ops
from tools.celery_ext import ContextTask
from utils.crypto import Crypt
from utils.utils import init_openstack, wait_for_container_status, time_out
from zunclient.v1.containers import Container
from zunclient.common.apiclient import exceptions as zun_api_exceptions
from app.common.zstack_manager import ZStackManager
from apscheduler.jobstores.base import JobLookupError


def get_server_request_info():
    """Return (host, port, admin token) needed to call this service itself.

    Looks up the ``admin`` user, mints a token for it and reads the run
    port from the ``RUN_PORT`` environment variable (default 5000).

    Raises:
        ValueError: if the admin user does not exist.
    """
    admin = User.query.filter(User.username == "admin").first()
    if admin is None:
        raise ValueError("管理员用户不存在")
    token = create_token(admin.id)
    port = int(os.environ.get("RUN_PORT", 5000))
    return "127.0.0.1", port, token


# my_crypt = Crypt()

# # celery backend client
# client = celery_app.channel_client
# # celery task inspector
# inspect = celery_app.inspect

# Module-level logger; the task classes below log through this.
logger = logging.getLogger(__name__)


def init_opserver_conn():
    """Build an OpenStack connection using the project of user id 1."""
    admin_row = db.session.query(userModel.User).filter(userModel.User.id == 1).first()
    return init_openstack(admin_row.project_id, current_app.config)


def wait_for_server_status(op_conn, op_vm_uuid, status="ACTIVE"):
    """Re-fetch the server by uuid and block until it reaches *status*.

    Returns the server object produced by ``wait_for_server``.
    """
    found = op_conn.compute.find_server(op_vm_uuid)
    return op_conn.compute.wait_for_server(found, status)


@celery_app.task(name="create_vm_port")
def create_vm_port(vm_name, range_uuid, network_id, op_vm_uuid, vm_uuid):
    """Wait for the server to become ACTIVE, then record, per address, the
    Neutron port on *network_id* (matched by MAC) as a VmPortName row.
    """
    conn = init_opserver_conn()
    server = wait_for_server_status(conn, op_vm_uuid, "ACTIVE")

    for _net_name, addr_infos in server.addresses.items():  # noqa
        for addr in addr_infos:
            port_iter = conn.network.ports(mac_address=addr["OS-EXT-IPS-MAC:mac_addr"])
            try:
                port = next(port_iter)
            except StopIteration:
                # No port found for this MAC; log and move on.
                current_app.logger.exception("%s not has port" % vm_name)
                continue
            if port["network_id"] != network_id:
                continue
            record = taasModel.VmPortName(
                vm_name=vm_name,
                port_id=port.id,
                range_uuid=range_uuid,
                vm_uuid=vm_uuid,
            )
            taasModel.db.session.add(record)
            taasModel.db.session.commit()


def get_vpn_client_username_pswd():
    """Generate a random (username, password) pair for a VPN client.

    Each value consists of 4-8 unique alphanumeric characters.
    """
    import random
    import string

    alphabet = string.ascii_letters + string.digits
    # Same RNG call order as before: randint then sample, twice.
    username = "".join(random.sample(alphabet, random.randint(4, 8)))
    password = "".join(random.sample(alphabet, random.randint(4, 8)))
    return username, password


def create_vpnserver(op_vm_name, vmInfo: dict, cidr_list, range_uuid, user_id, op_conn):
    """Create an OpenVPN server VM wired into the given switch networks.

    Args:
        op_vm_name: name for the new OpenStack server.
        vmInfo: dict describing the VPN node (image/flavor/is_external/id).
        cidr_list: switch CIDRs to attach - either a comma-separated string
            or a list of CIDR strings (callers pass both forms).
        range_uuid: owning range uuid.
        user_id: owning user id.
        op_conn: OpenStack connection.

    Returns:
        (server_info, client_username, client_password)
    """
    usrname, password = get_vpn_client_username_pswd()
    # cloud-init user-data: write the VPN credentials and restart openvpn.
    str1 = (
            '#!/bin/sh\necho "%s %s" > /etc/openvpn/psw-file\nservice openvpn@server restart\n'
            % (usrname, password)
    )
    user_data_b64 = base64.b64encode(str1.encode()).decode()
    networks = []

    if vmInfo["is_external"]:
        ex_port = op_conn.create_port(current_app.config["OPENSTACK"]["external_id"])
        networks.append(
            {
                "uuid": current_app.config["OPENSTACK"]["external_id"],
                "port": ex_port["id"],
            }
        )
        vmport = switchModel.ActivePorts(
            port_id=ex_port.id,
            range_uuid=range_uuid,
            ip=ex_port["fixed_ips"][0]["ip_address"],
            device_owner="ex-port",
            device_id=vmInfo["id"],
            user_id=user_id,
        )
        taasModel.db.session.add(vmport)
        taasModel.db.session.flush()

    # BUG FIX: power_on_vm_to_vpn passes the *list* returned by get_cidrs(),
    # while other call sites pass a comma-separated string. Calling
    # .split(",") on a list raised AttributeError - accept both forms.
    if isinstance(cidr_list, str):
        cidr_list = cidr_list.split(",")
    else:
        cidr_list = list(cidr_list or [])

    for cidr in cidr_list:
        switch = switchModel.Switchs.query.filter_by(
            range_uuid=range_uuid, cidr=cidr
        ).first()
        port = op_conn.create_port(
            switch.network_id, fixed_ips=[{"subnet_id": switch.subnet_id}]
        )
        networks.append({"uuid": switch.network_id, "port": port.id})
        vmport = switchModel.ActivePorts(
            port_id=port.id,
            range_uuid=range_uuid,
            ip=port["fixed_ips"][0]["ip_address"],
            device_owner="vpn-server",
            device_id=vmInfo["id"],
            subnet_id=switch.subnet_id,
            user_id=user_id,
        )
        taasModel.db.session.add(vmport)
        taasModel.db.session.flush()

    dict1 = {
        "name": op_vm_name,
        "imageRef": vmInfo["image"],
        "flavorRef": vmInfo["flavor"],
        "networks": networks,
        "OS-EXT-SRV-ATTR:user_data": user_data_b64,
        "config_drive": True,
    }
    info = op_conn.compute.create_server(**dict1)
    # ex_port only exists in the is_external branch above; the guard below
    # keeps the reference safe.
    if vmInfo["is_external"] and info:
        rangeModel.RangeVm.query.filter_by(id=vmInfo["id"], user_id=user_id).update(
            {
                "external_ip": ex_port.fixed_ips[0]["ip_address"],
                "op_vm_uuid": info.id,
            }
        )
    return info, usrname, password


def get_cidrs(json_str, vmInfo):
    """Return the CIDRs of all switches directly linked to *vmInfo*'s node.

    Args:
        json_str: topology JSON with ``nodeList`` (nodes with ``type``,
            ``id``, and either ``vm_uuid`` or ``cidr``) and ``lineList``
            (edges with ``from``/``to`` node ids).
        vmInfo: dict with ``type`` and ``id`` identifying the VM node.

    Returns:
        List of CIDR strings of the switches adjacent to the VM node.
        Empty when the node is not found or keys are missing.
    """
    topology = json.loads(json_str)
    # Robustness fix: tolerate a missing/None "lineList" (the original
    # iterated None and raised TypeError).
    lines = topology.get("lineList") or []
    nodes = topology.get("nodeList") or []

    vm_node_id = ""
    switch_cidrs = {}
    for node in nodes:
        if node.get("type") == vmInfo["type"]:
            if node.get("vm_uuid") == vmInfo["id"]:
                vm_node_id = node["id"]
        elif node.get("type") == "switch":
            switch_cidrs[node["id"]] = node["cidr"]

    # Collect the ids of nodes on the other end of each edge touching the VM.
    neighbour_ids = []
    for line in lines:
        endpoints = [line["from"], line["to"]]
        if vm_node_id in endpoints:
            endpoints.remove(vm_node_id)
            neighbour_ids.append(endpoints[0])

    return [switch_cidrs[nid] for nid in neighbour_ids if nid in switch_cidrs]


class PowerOnActiveVmTask(ContextTask):
    """Celery base task: power on one node (container / VPN / firewall / VM)
    of a student's *active* range, creating Neutron ports as required.

    Used via ``base=PowerOnActiveVmTask`` by the ``startVm`` task below.
    """

    name = "startVm"
    # Soft and hard Celery execution limits: 10 minutes each.
    soft_time_limit = 60 * 10
    time_limit = 60 * 10

    def mark_error_for_this(self, range_uuid):
        # Placeholder: marking the range as failed is not implemented here.
        ...

    @classmethod
    def save_vm_info(cls, range_uuid, user_id, op_vm_uuid, vmInfo):
        """Store the backend instance id on the ActiveRangeVm row and claim
        the matching ActivePorts row (matched by subnet + fixed IP) for this
        device.  Commits the session.
        """
        rangeModel.ActiveRangeVm.query.filter_by(id=vmInfo["id"]).update(
            {"op_vm_uuid": op_vm_uuid}
        )

        switchModel.ActivePorts.query.filter_by(
            range_uuid=range_uuid,
            user_id=user_id,
            subnet_id=vmInfo["subnet_id"],
            ip=vmInfo['v4_fixed_ip']
            # port_id=port.id
        ).update(
            {"device_owner": vmInfo["type"], "device_id": vmInfo["new_vm_uuid"]}
        )
        db.session.commit()

    @classmethod
    def power_on_vm_to_vpn(
            cls, json_str, op_vm_name, range_uuid, user_id, op_conn, vmInfo
    ):
        """Create a VPN server VM attached to every switch adjacent to this
        node in the topology JSON, and persist its client credentials.

        NOTE(review): get_cidrs() returns a *list* while create_vpnserver()
        calls ``.split(",")`` on a truthy cidr_list - confirm this pair
        works for non-empty topologies.
        """
        cidr_list = get_cidrs(json_str, vmInfo)
        info, username, password = create_vpnserver(
            op_vm_name, vmInfo, cidr_list, range_uuid, user_id, op_conn
        )

        obj = switchModel.ActiveVpnserver(
            range_uuid=range_uuid,
            vpnserver_id=info.id,
            client_username=username,
            client_passwd=password,
            user_id=user_id,
        )
        db.session.add(obj)
        db.session.commit()
        return info

    @classmethod
    def exec_net(cls, nics, op_conn, vmInfo, range_uuid, user_id):
        """Resolve (or create) the Neutron port for this VM's switch subnet
        and append the nic definition to *nics* (mutated in place).

        Also creates an external-network port when the VM is flagged
        ``is_external`` and has no external IP yet.
        """
        switch = switchModel.ActiveSwitchs.query.filter_by(
            subnet_id=vmInfo["subnet_id"], user_id=user_id
        ).first()
        if not switch:
            logger.error(f"subnet_id 为 {vmInfo['subnet_id']} 的 active switch 不存在")
            return
        # Check whether a port for this subnet/IP combination already exists.
        ports = switchModel.ActivePorts.query.filter_by(
            subnet_id=switch.subnet_id,
            user_id=user_id,
            range_uuid=range_uuid,
            network_id=switch.network_id,
            ip=vmInfo["v4_fixed_ip"],
        ).first()
        if vmInfo["is_external"] and not vmInfo["external_ip"]:
            ex_port = op_conn.create_port(
                current_app.config["OPENSTACK"]["external_id"]
            )
            ex_port_id = ex_port["id"]
            nics.append(
                {
                    "net-id": current_app.config["OPENSTACK"]["external_id"],
                    "port-id": ex_port_id,
                }
            )
        if not ports:
            if vmInfo["v4_fixed_ip"]:
                # A fixed IP was requested - create the port with it.
                port = op_conn.create_port(
                    switch.network_id,
                    fixed_ips=[
                        {
                            "ip_address": vmInfo["v4_fixed_ip"],
                            "subnet_id": switch.subnet_id,
                        }
                    ],
                )

            else:
                # FIXME: a None switch was observed here once; not reproduced yet.
                port = op_conn.create_port(switch.network_id)

            ports = switchModel.ActivePorts(
                port_id=port.id,
                range_uuid=range_uuid,
                ip=port.fixed_ips[0]["ip_address"],
                network_id=switch.network_id,
                subnet_id=switch.subnet_id,
                user_id=user_id,
            )

            db.session.add(ports)
            db.session.commit()

        # Write the effective IP back so later steps see the real address
        # (note: this mutates the caller's vmInfo dict).
        vmInfo['v4_fixed_ip'] = ports.ip
        nics.extend([{"net-id": switch.network_id, "port-id": ports.port_id}])

    @classmethod
    def exec_range_gateway_info(cls, range_uuid, nics, user_id, op_conn):
        """Handle the range's gateway ports for a firewall VM.

        For every switch of the range, ensure a port exists on the subnet's
        gateway IP (with port security disabled), append it to *nics*, and
        record it as a ``firewall_gw`` ActivePorts row.
        """

        for val in switchModel.ActiveSwitchs.query.filter_by(range_uuid=range_uuid, user_id=user_id):
            # Gateway IP of the switch's target subnet.
            subnet = op_conn.get_subnet(val.subnet_id)
            subnet_gateway = subnet.gateway_ip

            # List all ports on the subnet to see whether the gateway IP
            # already has one.
            ports = op_conn.network.ports(
                network_id=val.network_id,
                subnet_id=val.subnet_id
            )
            # If a port already sits on the gateway IP, reuse it - unless it
            # is attached to a device, which is an error.
            for port in ports:
                if port.fixed_ips[0]['ip_address'] == subnet_gateway:
                    if port.device_id:
                        raise ValueError(f"交换机{val.network_id} 网关端口被占用: {port}")
                    op_conn.update_port(port.id, port_security_enabled=False)
                    nics.append({"net-id": port.network_id, "port-id": port.id})
                    gateway_port = port
                    break
            # No existing gateway port - create one (for/else: runs only
            # when the loop finished without break).
            else:
                gateway_port = op_conn.create_port(
                    val.network_id,
                    fixed_ips=[
                        {"ip_address": subnet.gateway_ip, "subnet_id": val.subnet_id}
                    ],
                    port_security_enabled=False
                )
                nics.append({"net-id": gateway_port.network_id, "port-id": gateway_port.id})

            port_obj = switchModel.ActivePorts(
                port_id=gateway_port.id,
                range_uuid=range_uuid,
                ip=subnet.gateway_ip,
                network_id=val.network_id,
                subnet_id=val.subnet_id,
                device_id=val.bind_firewall_uuid,
                device_owner="firewall_gw",
                user_id=user_id,
            )
            db.session.add(port_obj)
            db.session.flush()

    def power_on_all(self, vmInfo, user_name, json_str, range_uuid, user_id, op_conn):
        """Create and boot the backing instance for *vmInfo* (first boot).

        Dispatches on ``vmInfo["type"]``: Zun container, VPN server VM,
        firewall VM (gateway ports), or plain Nova VM.  Marks the
        ActiveRangeVm row ACTIVE once the instance is up.
        """

        nics = []
        op_vm_name = f"{vmInfo['type']}-{time.time()}-{user_name}"

        if vmInfo["type"] == VmType.container:
            self.exec_net(nics, op_conn, vmInfo, range_uuid, user_id)
            image = Image.query.filter(Image.image_id == vmInfo["image"]).first()
            container_tags = image.tags
            logger.info(f"创建容器{op_vm_name}：{nics}")

            environment = container_tags.get("environment", None)
            labels = container_tags.get("labels", None)
            container = ops.zun_operate.create_container(
                image=image.image_full_name, name=op_vm_name,
                cpu=container_tags.get("cpu", 0.2), memory=container_tags.get("memory", 500),
                tty=True, interactive=True,
                environment=environment or {
                    "foo": "bar"
                },
                labels=labels or {
                    "app": "hello"
                },
                image_driver="glance",
                availability_zone="nova",
                restart_policy={
                    "Name": "always",
                    "MaximumRetryCount": 0
                },
                nets=[{"port": item["port-id"]} for item in nics],
            )

            self.save_vm_info(range_uuid, user_id, container.uuid, vmInfo)

            # Poll up to 5 times: wait out "Creating", start on "Created".
            for _ in range(5):
                server_info = ops.zun_operate.get_container_details(container.uuid)
                if server_info.status == "Creating":
                    wait_for_container_status(
                        ops.zun_operate.zun_client, container.uuid, status="Created", throw_exception=False
                    )
                elif server_info.status == "Created":
                    # This branch must eventually succeed for the boot to count.
                    try:
                        ops.zun_operate.start_container(server_info.uuid)
                    except Exception as _:  # noqa
                        continue
                    else:
                        break
                elif server_info.status == "Running":
                    break
                elif server_info.status == "Error":
                    raise ValueError(
                        f"{vmInfo['name']}开机失败, status: Error, status_reason: {server_info.status_reason}"
                    )
            else:
                # No break within the retries - most likely a server-side
                # problem; make one final start attempt before giving up.
                server_info = ops.zun_operate.get_container_details(container.uuid)
                if server_info.status != "Running":
                    try:
                        ops.zun_operate.start_container(server_info.uuid)
                    except Exception as e:
                        logger.error(
                            f"{vmInfo['name']}开机失败, status: {server_info.status}, "
                            f"task_state: {server_info.task_state}, status_reason: {server_info.status_reason}"
                        )
                        raise e

        elif vmInfo["type"] == "vpn":
            info = self.power_on_vm_to_vpn(
                json_str, op_vm_name, range_uuid, user_id, op_conn, vmInfo
            )
            self.save_vm_info(range_uuid, user_id, info.id, vmInfo)
            # Record the instance id, then wait for the server to go ACTIVE.
            server_info = wait_for_server_status(op_conn, info["id"], "ACTIVE")
        else:
            if vmInfo["type"] == VmType.firewall:
                self.exec_range_gateway_info(range_uuid, nics, user_id, op_conn)
            else:
                self.exec_net(nics, op_conn, vmInfo, range_uuid, user_id)

            try:
                logger.info(f"创建虚拟机{op_vm_name}: {nics}")
                info = op_conn.create_server(
                    op_vm_name,
                    auto_ip=False,
                    nics=nics,
                    image=vmInfo["image"],
                    flavor=vmInfo["flavor"],
                )
            except Exception as e:
                logger.error(
                    f"虚拟机{vmInfo['name']}创建失败, "
                    f"op_vm_name: {op_vm_name}"
                )
                raise e

            self.save_vm_info(range_uuid, user_id, info.id, vmInfo)
            # Record the instance id, then wait for the server to go ACTIVE.
            server_info = wait_for_server_status(op_conn, info["id"], "ACTIVE")

        if server_info:
            rangeModel.ActiveRangeVm.query.filter_by(id=vmInfo["id"]).update(
                {"status": "ACTIVE"}
            )

        db.session.commit()


@celery_app.task(bind=True, base=PowerOnActiveVmTask, retry=False)
def startVm(self, user_name, vmInfo: dict, user_id, range_uuid, json_str):
    """Power on a single active-range node.

    First boot (no ``op_vm_uuid`` yet) goes through
    ``PowerOnActiveVmTask.power_on_all``; otherwise the existing container
    or Nova server is started if it is not already running.
    """
    op_conn = init_opserver_conn()
    if not vmInfo["op_vm_uuid"]:
        self.power_on_all(vmInfo, user_name, json_str, range_uuid, user_id, op_conn)
    else:
        if vmInfo["type"] == VmType.container:
            server = ops.zun_operate.get_container_details(vmInfo["op_vm_uuid"])
            if server.status != "Running":
                # NOTE(review): statuses other than Error/Created (e.g.
                # Stopped) fall through without any action - confirm intended.
                if server.status == "Error":
                    rangeModel.ActiveRangeVm.query.filter_by(id=vmInfo["id"]).update(
                        {"status": "Error"}
                    )
                elif server.status == "Created":
                    ops.zun_operate.start_container(server.uuid)
                    wait_for_container_status(
                        ops.zun_operate.zun_client, server.uuid,
                        status="Running", timeout=120, throw_exception=True
                    )
                    rangeModel.ActiveRangeVm.query.filter_by(id=vmInfo["id"]).update(
                        {"status": "ACTIVE"}
                    )

                db.session.commit()
        else:
            server = op_conn.compute.find_server(vmInfo["op_vm_uuid"])
            if server.status != "ACTIVE":
                op_conn.compute.start_server(vmInfo["op_vm_uuid"])
                op_conn.compute.wait_for_server(server, "ACTIVE")
                rangeModel.ActiveRangeVm.query.filter_by(id=vmInfo["id"]).update(
                    {"status": "ACTIVE"}
                )
                db.session.commit()


class PowerOnAllVmTask(ContextTask):
    """Celery base task: power on every node of a (builder-side) range.

    Used via ``base=PowerOnAllVmTask`` by the ``startAllVm`` task below.
    Containers go through Zun; VPN / firewall / plain VMs through
    Nova/Neutron.
    """

    name = "startAllVm"
    # Soft and hard Celery execution limits: 10 minutes each.
    soft_time_limit = 60 * 10
    time_limit = 60 * 10

    @time_out(30)
    def wait_for_container_status(self, container_ident: Union[str, Container], status="Created"):
        """Poll Zun until the container reaches *status*.

        The ``while True`` loop is unbounded on purpose - the
        ``@time_out(30)`` decorator is what aborts the wait.
        """
        container_id = container_ident if isinstance(container_ident, str) else container_ident.uuid
        while True:
            container: Container = ops.zun_operate.get_container_details(container_id)
            if container.status == status:
                return container

    def wait_for_server_status(self, conn, op_vm_uuid, status="ACTIVE"):
        """Block until the Nova server reaches *status*; returns None."""
        server = conn.compute.find_server(op_vm_uuid)
        conn.compute.wait_for_server(server, status)
        return

    @staticmethod
    def get_vpn_client_username_pswd():
        """Generate a random (username, password) pair for a VPN client.

        Each value is 4-8 unique alphanumeric characters.
        """
        import random
        import string

        data = string.ascii_letters + string.digits
        # Same RNG call order as the original loop: randint, sample, twice.
        username = "".join(random.sample(data, random.randint(4, 8)))
        password = "".join(random.sample(data, random.randint(4, 8)))
        return username, password

    def create_vpnserver(self, op_vm_name, vmInfo, cidr_list, range_uuid, conn, user_id):
        """Create an OpenVPN server VM attached to the switches in *cidr_list*.

        Args:
            op_vm_name: name for the new server.
            vmInfo: dict with image/flavor/is_external/vm_uuid/id keys.
            cidr_list: comma-separated string of switch CIDRs (may be empty).
            range_uuid: owning range uuid.
            conn: OpenStack connection.
            user_id: owner id, used to update the RangeVm row.

        Returns:
            (server_info, client_username, client_password)
        """
        usrname, password = self.get_vpn_client_username_pswd()
        # cloud-init user-data: write the credentials and restart openvpn.
        str1 = (
                '#!/bin/sh\necho "%s %s" > /etc/openvpn/psw-file\nservice openvpn@server restart\n'
                % (usrname, password)
        )
        str2 = str1.encode()
        str3 = base64.b64encode(str2)
        str4 = str3.decode()
        networks = []

        if vmInfo["is_external"]:
            ex_port = conn.create_port(
                current_app.config["OPENSTACK"]["external_id"]
            )
            networks.append(
                {
                    "uuid": current_app.config["OPENSTACK"]["external_id"],
                    "port": ex_port["id"],
                }
            )
            vmport = switchModel.Ports(
                port_id=ex_port.id,
                range_uuid=range_uuid,
                ip=ex_port["fixed_ips"][0]["ip_address"],
                device_owner="ex-port",
                device_id=vmInfo["vm_uuid"],
            )
            taasModel.db.session.add(vmport)
            taasModel.db.session.flush()

        # One port per switch CIDR the VPN server should serve
        # (condensed if/else, consistent with the module-level helper).
        cidr_list = cidr_list.split(",") if cidr_list else []
        for cidr in cidr_list:
            switch = switchModel.Switchs.query.filter_by(
                range_uuid=range_uuid, cidr=cidr
            ).first()
            port = conn.create_port(
                switch.network_id, fixed_ips=[{"subnet_id": switch.subnet_id}]
            )
            networks.append({"uuid": switch.network_id, "port": port.id})
            vmport = switchModel.Ports(
                port_id=port.id,
                range_uuid=range_uuid,
                ip=port["fixed_ips"][0]["ip_address"],
                device_owner="vpn-server",
                device_id=vmInfo["vm_uuid"],
                subnet_id=switch.subnet_id,
            )
            taasModel.db.session.add(vmport)
            taasModel.db.session.flush()

        dict1 = {
            "name": op_vm_name,
            "imageRef": vmInfo["image"],
            "flavorRef": vmInfo["flavor"],
            "networks": networks,
            "OS-EXT-SRV-ATTR:user_data": str4,
            "config_drive": True,
        }
        info = conn.compute.create_server(**dict1)
        if vmInfo["is_external"] and info:
            rangeModel.RangeVm.query.filter_by(
                id=vmInfo["id"], user_id=user_id
            ).update(
                {
                    "external_ip": ex_port.fixed_ips[0]["ip_address"],
                    "op_vm_uuid": info.id,
                }
            )

        return info, usrname, password

    def power_on_all_container(self, args, vmInfo, conn, user_id):
        """Power on (or create and power on) a Zun container for *vmInfo*."""
        if vmInfo["op_vm_uuid"]:
            # Container already exists - just bring it to Running.
            try:
                container: Container = ops.zun_operate.get_container_details(vmInfo["op_vm_uuid"])
                if container.status == "Running":
                    logger.info("容器{}开机正常".format(vmInfo["op_vm_name"]))
                    return
                elif container.status == "Creating":
                    self.wait_for_container_status(vmInfo["op_vm_uuid"], status="Created")
                    ops.zun_operate.start_container(vmInfo["op_vm_uuid"])
                    logger.info("容器{}开机中请稍后".format(vmInfo["op_vm_name"]))
                    return
                elif container.status == "Restarting":
                    logger.info("容器{}开机中请稍后".format(vmInfo["op_vm_name"]))
                    return
                else:
                    ops.zun_operate.start_container(vmInfo["op_vm_uuid"])
                    self.wait_for_container_status(vmInfo["op_vm_uuid"], status="Running")
                    logger.info("容器{}开机正常".format(vmInfo["op_vm_name"]))
                    return
            except zun_api_exceptions.Conflict as e:
                logger.error("容器{}开机失败".format(vmInfo["op_vm_name"]))
                raise e

        vmInfo["op_vm_name"] = vmInfo.get('op_vm_name', '') or f'{vmInfo["type"]}-docker-{time.time()}'
        device_owner = VmType.container.value
        nics = []
        # Unclaimed ports on the VM's subnet (device_id still NULL).
        row_ports = switchModel.Ports.query.filter_by(subnet_id=vmInfo["subnet_id"], range_uuid=args["range_uuid"],
                                                      device_id=None).all()
        for i in row_ports:
            logger.info(f"docker-row_ports:{i.port_id}")
        if row_ports:
            logger.info(f"docker-row_ports:{row_ports[0].ip}")
            switchModel.Ports.query.filter_by(port_id=row_ports[0].port_id).update(
                {"device_id": vmInfo["vm_uuid"]}
            )
            db.session.commit()
            if row_ports[0].port_id and row_ports[0].port_id.strip():
                sub_port = switchModel.Ports.query.filter_by(
                    port_id=row_ports[0].port_id
                ).first()
                nics.append({"net-id": sub_port.network_id, "port-id": row_ports[0].port_id})
            elif vmInfo["v4_fixed_ip"]:
                # No pre-created port id but a fixed IP was requested:
                # create the Neutron port with that IP.
                switch = switchModel.Switchs.query.filter_by(
                    subnet_id=vmInfo["subnet_id"]
                ).first()
                port = conn.create_port(
                    switch.network_id,
                    fixed_ips=[
                        {
                            "ip_address": vmInfo["v4_fixed_ip"],
                            "subnet_id": switch.subnet_id,
                        }
                    ],
                )
                ports = switchModel.Ports(
                    port_id=port.id,
                    range_uuid=args["range_uuid"],
                    ip=port.fixed_ips[0]["ip_address"],
                    network_id=switch.network_id,
                    subnet_id=switch.subnet_id,
                )
                db.session.add(ports)
                db.session.flush()
                nics.append({"net-id": switch.network_id, "port-id": port.id})

        if not nics:
            logger.info("该非vpn虚拟机没有给定端口参数")
        logger.info(f"999990000111nics:{nics}")
        if vmInfo["is_external"]:
            ex_port = conn.create_port(
                current_app.config["OPENSTACK"]["external_id"]
            )
            ex_port_id = ex_port["id"]
            nics.append(
                {
                    "net-id": current_app.config["OPENSTACK"]["external_id"],
                    "port-id": ex_port_id,
                }
            )

        # Create the container.
        image = Image.query.filter(Image.image_id == vmInfo["image"]).first()
        container_tags = image.tags
        # FIXME: non-admin accounts seem unable to create / specify some of this.
        environment = container_tags.get("environment", None)
        labels = container_tags.get("labels", None)
        container = ops.zun_operate.create_container(
            image=image.image_full_name, name=vmInfo.get("op_vm_name", ""),
            cpu=container_tags.get("cpu", 0.2), memory=container_tags.get("memory", 500),
            tty=True, interactive=True,
            environment=environment or {
                "foo": "bar"
            },
            labels=labels or {
                "app": "hello"
            },
            image_driver="glance",
            availability_zone="nova",
            restart_policy={
                "Name": "always",
                "MaximumRetryCount": 0
            },
            nets=[{"port": item["port-id"]} for item in nics],
        )

        if container:
            rangeModel.RangeVm.query.filter_by(
                id=vmInfo["id"], user_id=user_id
            ).update({"op_vm_uuid": container.uuid, "op_vm_name": vmInfo["op_vm_name"]})

            for item in nics:
                switchModel.Ports.query.filter_by(port_id=item["port-id"]).update(
                    {"device_owner": device_owner}
                )
            if vmInfo["is_external"]:
                # BUG FIX: was ``user_id=self.user.id`` - this task defines no
                # ``user`` attribute (the update above uses the user_id
                # argument), so this branch raised AttributeError.
                rangeModel.RangeVm.query.filter_by(
                    id=vmInfo["id"], user_id=user_id
                ).update({"external_ip": ex_port.fixed_ips[0]["ip_address"]})
            db.session.commit()

            try:
                self.wait_for_container_status(container.uuid, status="Created")
                ops.zun_operate.start_container(container.uuid)
            except Exception:
                logger.info("开机中")
            else:
                logger.info("开机正常")
        else:
            logger.info("开机失败")

    def power_on_all_vm(self, args, vmInfo, conn, user_id):
        """Power on (or create and power on) a Nova server for *vmInfo*.

        Handles plain VMs, VPN servers and firewalls.
        """
        if vmInfo['op_vm_uuid']:
            # Server already exists - just make sure it is ACTIVE.
            server = conn.compute.find_server(vmInfo['op_vm_uuid'])
            if server.status != "ACTIVE":
                conn.compute.start_server(vmInfo["op_vm_uuid"])
                conn.compute.wait_for_server(server, "ACTIVE")
            logger.info(f"服务{vmInfo['op_vm_uuid']}开机正常")
            return

        # Server name for the new instance.
        vmInfo['op_vm_name'] = vmInfo.get('op_vm_name', '') or f"{vmInfo['type']}-{time.time()}"

        if vmInfo['type'] == "vpn":
            switch = switchModel.Switchs.query.filter_by(
                subnet_id=vmInfo["subnet_id"]
            ).first()
            info, username, password = self.create_vpnserver(
                vmInfo['op_vm_name'], vmInfo, switch.cidr, args["range_uuid"], conn, user_id
            )

            obj = switchModel.Vpnserver(
                range_uuid=args["range_uuid"],
                vpnserver_id=info.id,
                client_username=username,
                client_passwd=password,
            )
            db.session.add(obj)
            db.session.commit()

        else:
            nics = []

            device_owner = 'vm'
            row_ports = switchModel.Ports.query.filter_by(subnet_id=vmInfo["subnet_id"], range_uuid=args["range_uuid"],
                                                          device_id=None).all()

            if vmInfo["v4_fixed_ip"]:
                switch = switchModel.Switchs.query.filter_by(
                    subnet_id=vmInfo["subnet_id"]
                ).first()
                port = conn.create_port(
                    switch.network_id,
                    fixed_ips=[
                        {
                            "ip_address": vmInfo["v4_fixed_ip"],
                            "subnet_id": switch.subnet_id,
                        }
                    ],
                )
                ports = switchModel.Ports(
                    port_id=port.id,
                    range_uuid=args["range_uuid"],
                    ip=port.fixed_ips[0]["ip_address"],
                    network_id=switch.network_id,
                    subnet_id=switch.subnet_id,
                )
                switchModel.db.session.add(ports)
                switchModel.db.session.flush()
                nics.append({"net-id": switch.network_id, "port-id": port.id})

            elif row_ports:
                logger.info(f"vm-row_ports:{row_ports[0].ip}")
                switchModel.Ports.query.filter_by(port_id=row_ports[0].port_id).update(
                    {"device_id": vmInfo["vm_uuid"]}
                )
                db.session.commit()
                if row_ports[0].port_id and row_ports[0].port_id.strip():
                    sub_port = switchModel.Ports.query.filter_by(
                        port_id=row_ports[0].port_id
                    ).first()
                    nics.append({"net-id": sub_port.network_id, "port-id": row_ports[0].port_id})
            elif vmInfo["type"] == 'firewall':  # firewall power-on logic
                device_owner = 'firewall_gw'
                ports = switchModel.Ports.query.filter_by(
                    device_id=vmInfo["vm_uuid"]
                ).all()
                # A firewall with fewer than 2 interfaces should not boot.
                # NOTE(review): this only logs and then continues anyway -
                # confirm whether it should return here instead.
                if len(ports) <= 1:
                    logger.info("防火墙绑定网卡数量为1时无法开机")
                for port in ports:
                    nics.append({"net-id": port.network_id, "port-id": port.port_id})

            elif vmInfo["is_external"]:
                ex_port = conn.create_port(
                    current_app.config["OPENSTACK"]["external_id"]
                )
                ex_port_id = ex_port["id"]
                nics.append(
                    {
                        "net-id": current_app.config["OPENSTACK"]["external_id"],
                        "port-id": ex_port_id,
                    }
                )
            if not nics:
                logger.info("该非vpn虚拟机没有给定端口参数")
            logger.info(f"999990000nics:{nics}")
            server = conn.create_server(
                vmInfo.get("op_vm_name", ""),
                auto_ip=False,
                nics=copy.deepcopy(nics),  # deep copy so later mutation can't corrupt the request
                image=vmInfo.get("image", ""),
                flavor=vmInfo.get("flavor", ""),
            )
            if server:
                rangeModel.RangeVm.query.filter_by(
                    id=vmInfo["id"], user_id=user_id
                ).update({"op_vm_uuid": server.id, "op_vm_name": vmInfo['op_vm_name']})

                for item in nics:
                    switchModel.Ports.query.filter_by(port_id=item["port-id"]).update(
                        {"device_owner": device_owner}
                    )
                # NOTE(review): when is_external is set together with
                # v4_fixed_ip, ``ex_port`` was never created above (the elif
                # chain skips that branch) and this raises NameError -
                # confirm the intended behavior before restructuring.
                if vmInfo["is_external"]:
                    rangeModel.RangeVm.query.filter_by(
                        id=vmInfo['id'], user_id=user_id
                    ).update({"external_ip": ex_port.fixed_ips[0]["ip_address"]})
            db.session.commit()


@celery_app.task(bind=True, base=PowerOnAllVmTask, retry=False)
def startAllVm(self, args, vmInfo, user_id):
    """Dispatch a power-on request to the container or VM handler.

    Containers get a short grace delay before power-on; everything else
    goes straight to the VM path on the shared OpenStack connection.
    """
    conn = ops.conn
    if vmInfo['type'] != VmType.container:
        self.power_on_all_vm(args, vmInfo, conn, user_id)
        return
    # Brief delay before powering on containers (matches original timing).
    time.sleep(5)
    self.power_on_all_container(args, vmInfo, conn, user_id)


@celery_app.task(
    bind=True,
    max_retries=5,
    default_retry_delay=30,
    name="mark_failed_active_range",
)
def mark_failed_active_range(self, range_uuid, student_id):
    """Mark a student-launched range as failed (status = error).

    Retries up to 5 times, 30s apart, when the DB write itself fails.

    :param range_uuid: uuid of the active range to mark
    :param student_id: owning student's user id
    """
    try:
        rangeModel.ActiveRange.query.filter_by(
            range_uuid=range_uuid, user_id=student_id
        ).update({"status": rangeModel.ActiveRangeStatus.error})
        db.session.commit()
    except Exception as e:
        # Bug fix: roll back the failed transaction so the session is in a
        # clean state when the task retries; without this the retry can fail
        # on the same poisoned session.
        db.session.rollback()
        logger.error(f"学生靶场: {range_uuid} 启动失败，且无法标记失败状态", exc_info=e)
        raise self.retry()  # noqa


def check_stu_active_vm_status(range_uuid, user_id, times):
    """Check the start-up status of every VM in a student's active range.

    Marks the range "ACTIVE" once all VMs are up, marks it errored if any
    VM reports "Error", and otherwise re-queues ``change_active_vm`` to
    poll again in 5 seconds with one fewer remaining attempt.

    :param range_uuid: uuid of the active range being started
    :param user_id: owning student's user id
    :param times: remaining poll attempts (decremented on each re-queue)
    """
    print(times)  # NOTE(review): leftover debug output
    op_conn = ops.conn

    range_ob = rangeModel.ActiveRange.query.filter_by(
        range_uuid=range_uuid, user_id=user_id
    ).first()
    if not range_ob:
        return

    change_range_status = True
    for vm in rangeModel.ActiveRangeVm.query.filter_by(
            range_uuid=range_uuid, user_id=user_id
    ):
        # Containers are inspected through zun, regular VMs through nova.
        if vm.type == VmType.container:
            info = ops.zun_operate.get_container_details(vm.op_vm_uuid)
        else:
            info = op_conn.compute.get_server(vm.op_vm_uuid)

        # Error check: either the backend or the DB record reports "Error".
        # NOTE(review): nova commonly reports upper-case "ERROR" — confirm
        # the exact casing the backend returns here.
        if info.status == "Error" or vm.status == "Error":
            logger.error(f"{vm.name} 启动失败，op_vm_uuid: {vm.op_vm_uuid}")
            rangeModel.ActiveRangeVm.query.filter_by(id=vm.id).update(
                {"status": "Error"}
            )
            rangeModel.ActiveRange.query.filter_by(
                range_uuid=range_uuid, user_id=user_id
            ).update({"status": rangeModel.ActiveRangeStatus.error})
            db.session.commit()
            break

        if vm.is_external and not vm.external_ip and vm.op_vm_uuid:
            ext_net = flask_config.OPENSTACK["external_name"]
            addresses = info.addresses[ext_net][0]["addr"]
            # Record the external (public) IP the first time it appears.

            rangeModel.ActiveRangeVm.query.filter_by(id=vm.id).update(
                {"external_ip": addresses}
            )
            db.session.commit()
        # vm.status is the DB value, which may lag behind the backend status.
        if vm.status not in ("ACTIVE", "Running"):
            change_range_status = False
    else:
        # for/else: this branch runs only when the loop finished without a
        # break, i.e. no VM was in an error state.
        if range_ob.status not in ("ACTIVE", "Running"):
            if change_range_status:
                rangeModel.ActiveRange.query.filter_by(
                    range_uuid=range_uuid, user_id=user_id
                ).update({"status": "ACTIVE"})
                db.session.commit()
            else:
                # Not all VMs are up yet: poll again in 5s with one fewer try.
                times -= 1
                change_active_vm.apply_async(
                    args=(range_uuid, user_id, times), countdown=5
                )
        else:
            return


@celery_app.task(name="change_active_vm")
def change_active_vm(range_uuid, user_id, times=300):
    """Poll a student's active-range start-up status until done or timed out.

    Delegates to ``check_stu_active_vm_status``; on an unexpected error it
    re-queues itself 5s later with one fewer attempt. When attempts run out
    (``times == 0``) the range is marked ERROR.

    :param range_uuid: uuid of the active range being started
    :param user_id: owning student's user id
    :param times: remaining poll attempts (default 300)
    """
    if times > 0:
        try:
            return check_stu_active_vm_status(range_uuid, user_id, times)
        except Exception as e:
            logger.error(f"{301 - times} 次，检查学生{user_id}活动靶场开启状态任务异常, 5秒后重试", exc_info=e)
            change_active_vm.apply_async(
                args=(range_uuid, user_id, times - 1), countdown=5
            )
    elif times == 0:
        rangeModel.ActiveRange.query.filter_by(
            range_uuid=range_uuid, user_id=user_id
        ).update({"status": "ERROR"})
        # Bug fix: the timeout branch never committed, so the ERROR status
        # was rolled back at session teardown and the range stayed stuck.
        db.session.commit()


# Open a CTF competition.
def start_ctf(ctf_id):
    """Mark CTF competition *ctf_id* as started (status=1).

    Runs inside a fresh app context because it is invoked by the scheduler
    outside any request.
    """
    with create_app().app_context():
        db.session.query(ctfModel.Ctf).filter(ctfModel.Ctf.id == ctf_id).update(
            {ctfModel.Ctf.status: 1}
        )
        # Bug fix: without an explicit commit the update is rolled back when
        # the app context tears the session down, so the status never changed.
        db.session.commit()


# Close a CTF competition.
def close_ctf(ctf_id):
    """Close CTF competition *ctf_id*: mark it expired and tear down its range.

    Deletes any still-active backend instances (containers via zun, VMs via
    the OpenStack connection) of the linked range, then removes the active
    range records.
    """
    with create_app().app_context():
        ctf_info = (
            db.session.query(ctfModel.Ctf)
            .filter(ctfModel.Ctf.id == ctf_id)
            .first()
        )
        if not ctf_info:
            return

        # Mark the competition as expired.
        db.session.query(ctfModel.Ctf).filter(ctfModel.Ctf.id == ctf_id).update(
            {ctfModel.Ctf.status: 2}
        )

        # Bug fix: only touch active-range rows when a range is actually
        # linked; filter_by(range_uuid=None) would match (and delete) every
        # row whose range_uuid IS NULL.
        if ctf_info.range_uuid:
            active_vm_list = rangeModel.ActiveRangeVm.query.filter_by(
                range_uuid=ctf_info.range_uuid
            ).all()
            op_conn = init_opserver_conn()
            for active_vm in active_vm_list:
                vm_id = active_vm.op_vm_uuid
                if not vm_id:
                    continue
                if active_vm.type == VmType.container:
                    ops.zun_operate.del_container(vm_id, stop=True)
                else:
                    op_conn.delete_server(vm_id)

            # Remove the active-range record itself.
            rangeModel.ActiveRange.query.filter_by(
                range_uuid=ctf_info.range_uuid
            ).delete()

        # Bug fix: the original never committed, so the status change and the
        # deletes were rolled back at app-context teardown.
        db.session.commit()


def group_score(group: CtfRedBlueGroup, initial_score=0):
    """Compute a team's total score.

    Total = initial_score + sum of "plus"-type score records minus the sum
    of "less"-type records; records with other change types are ignored.
    """
    plus_name = CtfRedBlueScore.ChangeTypeEnum.plus.name
    less_name = CtfRedBlueScore.ChangeTypeEnum.less.name

    plus_total = 0
    less_total = 0
    for link in group.link_score:
        record = link.score_info
        if not record:
            continue
        if record.change_type == plus_name:
            plus_total += record.score
        elif record.change_type == less_name:
            less_total += record.score

    return initial_score + plus_total - less_total

# @celery_app.task(name="get_one_ctf_score")
def get_one_ctf_score(ctf_red_blue_id, params, sort_id):
    """Snapshot the current red/blue team scores of one competition.

    Persists a CtfScore row whose score_info is a JSON object of the form
    {"blue": <score>, "red": <score>}.

    :param ctf_red_blue_id: primary key of the CtfRedBlue competition
    :param params: scheduling info; "end_date" is an epoch timestamp
    :param sort_id: ordinal of this snapshot
    """
    logger.info(f"sort_id1111222:{sort_id}")
    match_query = CtfRedBlue.query.filter(CtfRedBlue.id == ctf_red_blue_id)
    if match_query.count() != 1:
        return

    ctf = match_query.first()
    team_list: List[CtfRedBlueGroup] = ctf.groups

    score_map = {}
    for team in team_list:
        # Each side starts from its own configured initial score.
        if team.group_type == GroupType.red.value:
            base_score = ctf.red_initial_score
        else:
            base_score = ctf.blue_initial_score
        score_map[f"{team.group_type}"] = {
            "id": team.id,
            "sorce": group_score(team, initial_score=base_score),
        }

    ranked = OrderedDict(
        sorted(score_map.items(), key=lambda kv: kv[1]["sorce"], reverse=True)
    )
    payload = {'blue': ranked['blue']['sorce'], 'red': ranked['red']['sorce']}

    snapshot = ctfModel.CtfScore(
        ctf_red_blue_id=ctf_red_blue_id,
        sort_id=sort_id,
        score_info=json.dumps(payload),
    )
    db.session.add(snapshot)
    db.session.commit()
    if params["end_date"] - int(time.time()) > 0:
        sort_id += 1
        # get_one_ctf_score.apply_async(args=(ctf_red_blue_id, params, sort_id), countdown=3600)


# @celery_app.task(bind=True, name="start_ctf_red_blue")
def start_ctf_red_blue(ctf_id, **params):
    """Start a red/blue confrontation match.

    - Flips the match status to in_progress.
    - Imports the flags of the questions bound to the linked range and
      derives the blue team's initial score from those questions' scores.

    :param ctf_id: primary key of the CtfRedBlue match to start
    :param params: forwarded to get_one_ctf_score (expects "end_date")
    """
    with create_app().app_context():
        ctf_red_blue: CtfRedBlue = CtfRedBlue.query.get(ctf_id)
        if not ctf_red_blue:
            logger.error(f"开启红蓝对抗{ctf_id}失败，比赛不存在")
            return

        range_manager = RangeManager(
            range_uuid=ctf_red_blue.range_uuid, ignore_private=True
        )

        issue_list = []
        flag_objs = []
        # Allow the match to start even when no range/scene is linked.
        if range_manager.range_model:
            # Collect every question bound to the range's VMs, grouped by
            # the VM image it lives on.
            issue_map: Dict[str, List[CtfQuestion]] = defaultdict(list)
            for item in range_manager.range_model.vmList:
                for val in item.bind_question:
                    issue_map[item.image].append(val)
                    issue_list.append(val)

            # Build one red-side flag row per bound question.
            range_id = range_manager.range_model.range_id
            flag_objs = [
                CtfRedBlueFlag(
                    **{
                        "ctf_red_blue_id": ctf_id,
                        "range_id": range_id,
                        "flag": val.answer,
                        "describe": val.describe,
                        "score": val.score,
                        "ctf_question_type": val.type,
                        "ctf_question_id": val.id,
                        "type": "red",
                        "is_master": 0,
                        "image_id": image_id,
                        # Bit flag derived from the question's position in
                        # issue_list. NOTE(review): list.index() returns the
                        # first occurrence, so duplicate question objects
                        # would share a mark — confirm questions are unique.
                        "mark": f"{1 << issue_list.index(val)}",
                    }
                )
                for image_id, issues in issue_map.items()
                for val in issues
            ]

        try:
            ctf_red_blue.status = CtfRedBlue.StatusChoices.in_progress
            # Blue starts with the sum of all bound question scores.
            ctf_red_blue.blue_initial_score = (
                sum(item.score for item in issue_list) if issue_list else 0
            )

            db.session.add(ctf_red_blue)
            if flag_objs:
                db.session.add_all(flag_objs)
            db.session.commit()
            # Record the first score snapshot (sort_id 0) immediately.
            get_one_ctf_score(ctf_id, params, 0)
        except Exception as e:
            logger.error("开启ctf红蓝对抗赛失败", exc_info=e)
            db.session.rollback()
            return


# @celery_app.task(
#     name="close_ctf_red_blue",
#     autoretry_for=(Exception,),
#     retry_backoff=True,
#     retry_jitter=True,
#     retry_backoff_max=10,
#     max_retries=3,
# )
def close_ctf_red_blue(ctf_id, **kwargs):
    """Close a red/blue confrontation match.

    Sets the match status to closed, decides the winner ("king") from the
    final team scores if one is not already recorded, and closes any
    pending manual judgments for the match.

    :param ctf_id: primary key of the CtfRedBlue match
    :param kwargs: may contain "msg", the reason recorded on closed
        manual judgments (defaults to "比赛结束")
    """
    # Imported locally to avoid a circular import with the API module.
    from app.api.v1.organizer.mathManage import CTFManualJudgment, CTFRedBlueScore
    with create_app().app_context():
        ctf_query: ctfModel.CtfRedBlue = db.session.query(ctfModel.CtfRedBlue).filter(
            ctfModel.CtfRedBlue.id == ctf_id
        )

        if ctf_query.count() == 1:

            # Update the match and its linked range information.
            ctf_info = ctf_query.first()
            # Already closed: nothing left to do.
            if ctf_info.status == ctfModel.CtfRedBlue.StatusChoices.closed.value:
                return
            # range_uuid = ctfModel.CtfRedBlue.range_uuid
            update_info = {
                ctfModel.CtfRedBlue.status: ctfModel.CtfRedBlue.StatusChoices.closed.value,
                # ctfModel.CtfRedBlue.range_uuid: None
            }
            if not ctf_info.king:
                # Winner not yet decided: compare the final red vs blue
                # totals (base score plus/minus scoring records).
                red_group = ctfModel.CtfRedBlueGroup.query.filter(
                    ctfModel.CtfRedBlueGroup.ctf_red_blue_id == ctf_id,
                    ctfModel.CtfRedBlueGroup.group_type == ctfModel.GroupType.red.value,
                ).first()
                red_score = CTFRedBlueScore.group_score(
                    red_group, ctf_info.red_initial_score
                )
                blue_group = ctfModel.CtfRedBlueGroup.query.filter(
                    ctfModel.CtfRedBlueGroup.ctf_red_blue_id == ctf_id,
                    ctfModel.CtfRedBlueGroup.group_type == ctfModel.GroupType.blue.value,
                ).first()
                blue_score = CTFRedBlueScore.group_score(
                    blue_group, ctf_info.blue_initial_score
                )

                # NOTE(review): strict '>' means a tie awards blue — confirm
                # that is the intended tie-break.
                update_info[ctfModel.CtfRedBlue.king] = (
                    ctfModel.GroupType.red.value
                    if red_score > blue_score
                    else ctfModel.GroupType.blue.value
                )

            ctf_query.update({**update_info})

            # Close any unhandled manual judgments for this match.
            msg = kwargs.get("msg", "比赛结束")
            CTFManualJudgment.close_other_manual_judgment(ctf_id, msg)

            # Range teardown intentionally disabled for now (kept for later).
            # if range_uuid:
            #     range_info = rangeModel.RangeLinkModule.filter(
            #         rangeModel.RangeLinkModule.range_uuid == range_uuid
            #     ).first()
            #     rangeModel.Range.query.filter(rangeModel.Range.id == range_info.range_id).delete()
            #     db.session.delete(range_info)

            db.session.commit()


# Delete a VM.
@celery_app.task(name="delete_vm")
def delete_vm(op_vm_uuid):
    """Placeholder VM-deletion task.

    NOTE(review): this only opens a connection and never deletes anything —
    the actual delete call appears to be missing or implemented elsewhere;
    confirm before relying on this task.
    """
    op_conn = init_opserver_conn()  # noqa


# Delete a network.
@celery_app.task(name="delete_net")
def delete_net(msg):
    """Placeholder network-deletion task; currently only echoes *msg*."""
    print(msg)


def get_tow_hours_time():
    """Return a naive UTC datetime two hours from now.

    The result deliberately carries no tzinfo, for backward compatibility
    with callers that compare it against other naive UTC datetimes.
    """
    # Equivalent to utcfromtimestamp(time.time()) without the redundant
    # round-trip through the epoch; also avoids the utcfromtimestamp/utcnow
    # deprecation (Python 3.12+).
    now_utc = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None)
    return now_utc + datetime.timedelta(hours=2)


@celery_app.task(name="participant_timing_close_vm")
def participant_timing_close_vm(range_uuid, new_vm_uuid):
    """Timed teardown of one participant VM or container.

    Deletes the backend instance (zun for containers, nova for VMs) and
    clears its op_vm_uuid in the DB. Best-effort: failures are logged,
    never raised.

    :param range_uuid: uuid of the active range the VM belongs to
    :param new_vm_uuid: per-participant VM identifier
    """
    try:
        vm_info = rangeModel.ActiveRangeVm.query.filter_by(
            range_uuid=range_uuid, new_vm_uuid=new_vm_uuid
        ).first()
        if not vm_info:
            return

        if vm_info.type == VmType.container:
            ops.zun_operate.del_container(vm_info.op_vm_uuid, stop=True)
        else:
            ops.conn.compute.delete_server(vm_info.op_vm_uuid)

        rangeModel.ActiveRangeVm.query.filter_by(
            range_uuid=range_uuid, new_vm_uuid=new_vm_uuid
        ).update({"op_vm_uuid": None})
        # Bug fix: the update was never committed, leaving the row pointing
        # at an already-deleted backend instance after session teardown.
        db.session.commit()
        logger.info("delete_participant_vm_success")
    except Exception as e:
        logger.error(f"delete_participant_vm_error: {e}")


@celery_app.task(bind=True, retry=False)
def check_one_range_plc_status(self, range_uuid, external_ip):
    """Fetch and cache the PLC status of one range.

    eg: request.urlopen("http://172.16.3.85/ajax.php")

    :param range_uuid: uuid of the range whose PLC is probed
    :param external_ip: external IP of the MIS VM exposing ajax.php
    """
    # The lock plus the save_time field in the cached value keep only the
    # newest monitoring snapshot, even with concurrent workers.
    with redis_conn.lock(f"CHECK_ON_RANGE_PLC_STATUS::{range_uuid}", timeout=5):
        try:
            # Keep the HTTP timeout well below the 5s lock timeout.
            response = request.urlopen(f"http://{external_ip}/ajax.php", timeout=2)
        except Exception as e:
            logger.error(
                f"通过 {external_ip} 获取内部PLC信息失败，请确认是否需要忽略, {e.__class__.__name__}: {e}"
            )
        else:
            try:
                # Compare task-execution time with the cached save_time.
                cache_key = (
                    f"{current_app.config['CHECK_RANGE_PLC_STATUS_KEY']}::{range_uuid}"
                )
                last_cache_info = redis_conn.get(cache_key)
                # NOTE(review): save_time is written as time.time() below, so
                # "save_time > now" can only be true if a concurrent worker
                # saved in between; if a freshness TTL was intended this
                # should read save_time + ttl > now — confirm.
                if (
                        last_cache_info and
                        json.loads(last_cache_info.decode())["save_time"] > time.time()
                ):
                    return True
                info: dict = json.loads(response.read().decode())
                # Normalize PLC readings: "0.0" -> 0 (down), otherwise 1 (up).
                cache_info = {
                    key: 0 if val == "0.0" else 1
                    for key, val in info.items()
                    if "plc" in key.lower()
                }
                redis_conn.set(
                    cache_key,
                    json.dumps({"save_time": time.time(), "data": cache_info}),
                )
            except Exception as e:
                logger.error(
                    f"解析或保存通过 {external_ip} 获取的内部PLC信息失败, {e.__class__.__name__}: {e}"
                )
                return False


@celery_app.task(bind=True, retry=False)
def check_all_range_plc_status(self):
    """Fan out a PLC status probe for every external MIS range VM."""
    external_mis_rows = (
        db.session.query(rangeModel.RangeVm.range_uuid, rangeModel.RangeVm.external_ip)
        .filter(
            rangeModel.RangeVm.is_external == 1,
            rangeModel.RangeVm.type == "mis",
            rangeModel.RangeVm.external_ip.isnot(None),
        )
        .all()
    )

    signatures = [
        check_one_range_plc_status.s(range_uuid, external_ip)
        for range_uuid, external_ip in external_mis_rows
    ]
    if signatures:
        group(signatures).apply_async()


@celery_app.task(bind=True, queue="delay_port_handler")
def delete_port(self, port_id, op_vm_uuid, count=0, vm_type=VmType.vm):
    """Detach *port_id* from a VM/container and release it in the DB.

    For VMs the task re-queues itself every 5s until the interface is
    confirmed gone, giving up after 120 attempts. For containers the
    detach is done in one shot via zun.

    :param port_id: port to detach and release
    :param op_vm_uuid: backend VM/container uuid; falsy means the port is
        unattached and is released directly
    :param count: retry counter, capped at 120
    :param vm_type: VmType.container selects the zun path, otherwise nova
    """
    if count >= 120:
        return

    def del_port(_port_id):
        # Release the port record by clearing its device binding.
        switchModel.Ports.query.filter_by(port_id=_port_id).update(
            {"device_id": None, "device_owner": None}
        )
        db.session.commit()

    def delete_port_vm(_port_id, _op_vm_uuid):
        # Detach the port from a nova server, polling until it disappears.
        try:
            op_vm = ops.conn.compute.get_server(_op_vm_uuid)
        except Exception as e:
            logger.error(f"虚拟机{_op_vm_uuid}不存在", exc_info=e)
            return
        try:
            port_info = ops.conn.compute.get_server_interface(_port_id, op_vm)
        except exceptions.ResourceNotFound as e:
            # Interface already gone — just release the DB record.
            logger.exception(f"{_op_vm_uuid} 已分离 {_port_id}, {e.__class__.__name__}: {e}")
            del_port(_port_id)
        else:
            if port_info:
                try:
                    ops.conn.compute.delete_server_interface(_port_id, op_vm)
                except Exception as e:
                    logger.exception(
                        f"{_op_vm_uuid} 可能已分离 {_port_id}, {e.__class__.__name__}: {e}"
                    )
                else:
                    # Deletion accepted; re-check in 5s that it completed.
                    delete_port.apply_async(
                        args=(_port_id, _op_vm_uuid), kwargs={"count": count + 1}, countdown=5
                    )
            else:
                del_port(_port_id)

    def delete_port_container(_port_id, _op_vm_uuid):
        """Detach the container from the given port.
        """
        try:
            ops.zun_operate.get_container_details(_op_vm_uuid)
        except Exception as e:
            logger.error(f"无法获取容器: {_op_vm_uuid} 信息", exc_info=e)
        else:
            try:
                # Detach is attempted even when the port no longer appears in
                # the container's network list (only a log is emitted above).
                network_id_list = [val for val in ops.zun_operate.network_list(_op_vm_uuid) if val.port_id == _port_id]
                if not network_id_list:
                    logger.error(f"端口: {_port_id}已从容器：{_op_vm_uuid}中分离")
                ops.zun_operate.network_detach(_op_vm_uuid, port=_port_id)
            except Exception as e:
                logger.error(f"容器: {_op_vm_uuid} 分离端口 {_port_id} 出现问题", exc_info=e)
            else:
                del_port(_port_id)

    if op_vm_uuid:
        if vm_type != VmType.container:
            delete_port_vm(port_id, op_vm_uuid)
        else:
            delete_port_container(port_id, op_vm_uuid)
    else:
        del_port(port_id)


def update_courseware_info(args):
    """Persist a courseware environment's status and start/end times.

    Security fix: the original interpolated *args* values straight into the
    SQL string (injection risk); this version uses bound parameters.

    :param args: dict with keys env_status, env_start_time, env_end_time,
        courseware_uuid, chapter_uuid. Callers (see close_courseware) pass
        the literal string "null" to mean SQL NULL.
    """
    def _sql_value(value):
        # Map the sentinel "null" to None so the bound parameter becomes
        # SQL NULL, matching the behavior of the old f-string query where
        # "null" was interpolated as the NULL keyword.
        return None if value == "null" else value

    db.session.execute(
        "update api_chapter_courseware set env_status=:env_status, "
        "env_start_time=:env_start_time, env_end_time=:env_end_time "
        "where courseware_id=:courseware_uuid and chapter_id=:chapter_uuid",
        {
            "env_status": args["env_status"],
            "env_start_time": _sql_value(args["env_start_time"]),
            "env_end_time": _sql_value(args["env_end_time"]),
            "courseware_uuid": args["courseware_uuid"],
            "chapter_uuid": args["chapter_uuid"],
        },
    )


def start_courseware(args):
    """Mark a courseware environment as started by persisting *args*'s env fields."""
    update_courseware_info(args)


def delete_redis_info(chapter_uuid):
    """Drop the cached "<course>_<user>" redis entries for a chapter's course.

    Best-effort: any lookup/delete failure is logged, not raised.
    """
    try:
        chapter = Chapter.query.get(chapter_uuid)
        course_uuid = chapter.course_id
        course = Course.query.filter_by(id=course_uuid).first()
        for user in course.course_users():
            redis_conn.delete(f"{course_uuid}_{user['id']}")
    except Exception as e:
        current_app.logger.exception(e)


@celery_app.task(name="close_courseware")
def close_courseware(args):
    """Tear down a courseware environment.

    Resets the env fields ("null" is the SQL-NULL sentinel consumed by
    update_courseware_info) and drops the per-user redis cache entries.
    """
    reset_fields = {
        "env_status": "0",
        "env_start_time": "null",
        "env_end_time": "null",
    }
    args.update(reset_fields)
    update_courseware_info(args)
    delete_redis_info(args["chapter_uuid"])

def clean_range_vm(range_uuid):
    """Recycle every VM of a range and detach them in the DB.

    Moves each backend VM to the ZStack recycle bin, then clears the
    op_vm_uuid/op_vm_name bindings. Best-effort: on any failure the
    transaction is rolled back and the error is logged, not raised.

    :param range_uuid: uuid of the range whose VMs are cleaned up
    """
    with create_app().app_context():
        try:
            zstack_manager = ZStackManager()
            range_vm_list = rangeModel.RangeVm.query.filter_by(
                range_uuid=range_uuid
            ).all()
            for range_vm in range_vm_list:
                # Move the hypervisor VM to the recycle bin.
                zstack_manager.del_vm_destroy(range_vm.op_vm_uuid)
                rangeModel.RangeVm.query.filter_by(
                    vm_uuid=range_vm.vm_uuid
                ).update({
                    "op_vm_uuid": None,
                    "op_vm_name": None
                })
            db.session.commit()
        except Exception as e:
            db.session.rollback()
            # Bug fix: the failure was silently swallowed (exception bound
            # but never used); log it so cleanup problems are visible while
            # keeping the best-effort (no re-raise) contract.
            logger.error(f"清理靶场 {range_uuid} 虚机失败", exc_info=e)
            return
        
                     

def add_job_to_scheduler(task_id, func, trigger, args, args_dict=None, **kwargs):
    """
    Dynamically add a timed job to APScheduler and persist it.

    :param task_id: job id, must be unique
    :param func: job callable, or its name as a string (resolved via eval)
    :param trigger: trigger type controlling when the job fires
    :param args: positional args for the job function
    :param args_dict: keyword args for the job function
    :param kwargs: time parameters; the FIRST keyword item is taken as the
        schedule time, and "func_name" must also be present for persistence
    :return: True on success, False on failure (transaction rolled back)
    """
    try:
        # The first keyword argument is interpreted as (time_type, run time).
        time_type, time_value = list(kwargs.items())[0]
        scheduler.add_job(
            id=task_id,
            # SECURITY: eval() of a caller-supplied string executes arbitrary
            # code — only ever pass trusted, internally generated func names.
            func=eval(func) if isinstance(func, str) else func,
            trigger=trigger,
            run_date=time_value,
            args=(args,) if not isinstance(args, list) else args,
            kwargs=args_dict,
        )
        current_app.logger.info(f"任务 {task_id} 添加成功！下次执行时间为：{time_value}")
        # Upsert the persisted job record (updated if task_id already exists).
        save_or_update_job({
            "task_id": task_id,
            "func_name": kwargs["func_name"],
            "args": args,
            "trigger": trigger,
            "kwargs": kwargs,
            "args_dict": args_dict,
            "time_value": time_value,
        })
    except Exception:
        db.session.rollback()
        # Bug fix: traceback.print_exc() returns None, so the original logged
        # the literal string "None"; logger.exception() already records the
        # active traceback by itself.
        current_app.logger.exception(f"任务 {task_id} 添加失败")
        return False
    return True


def save_or_update_job(kwargs):
    """Insert or update the persisted APScheduler job record keyed by task_id.

    :param kwargs: dict with task_id, func_name, args, args_dict, kwargs,
        trigger and time_value entries
    """
    task_id = kwargs.get("task_id")
    record = ApsChedulerJob.query.filter_by(task_id=task_id).first()

    serialized_args_dict = json.dumps(kwargs["args_dict"])
    serialized_kwargs = json.dumps(kwargs["kwargs"])

    if record is None:
        # No row for this task_id yet — insert a new one.
        record = ApsChedulerJob(
            func=kwargs["func_name"],
            args=kwargs["args"],
            args_dict=serialized_args_dict,
            kwargs=serialized_kwargs,
            trigger=kwargs.get("trigger"),
            next_run_time=kwargs["time_value"],
            task_id=task_id,
        )
        db.session.add(record)
    else:
        # Existing row — refresh its fields in place.
        record.func = kwargs["func_name"]
        record.args = kwargs["args"]
        record.args_dict = serialized_args_dict
        record.kwargs = serialized_kwargs
        record.next_run_time = kwargs["time_value"]
    db.session.commit()


def stop_job_scheduler(task_id):
    """Stop a scheduled job.

    Returns True when the job was removed or did not exist; False on any
    other failure.
    """
    try:
        scheduler.remove_job(task_id)
    except JobLookupError:
        # Already gone — treat as success.
        pass
    except Exception:
        return False
    return True
    
def revoke_task(task_ids):
    """Revoke registered scheduler task(s).

    Accepts a single task id or a list of ids; only jobs that currently
    exist in the scheduler are removed.
    """
    id_list = task_ids if isinstance(task_ids, list) else [task_ids]
    for task_id in id_list:
        if scheduler.get_job(task_id):
            scheduler.remove_job(task_id)
    return
    
