import datetime
import json
import logging
import math
import threading
import time
import uuid

from kubernetes import watch
from myapp.apis.pay import (
    model_compress_billing,
    model_training_billing,
    need_model_compress_billing,
    need_model_training_billing,
)
from myapp.apis.workflow import (
    create_model_if_finished,
    deep_get,
    start_service_if_finished,
    stop_service_if_failed,
    sync_job_status,
)
from myapp.app import app
from myapp.const.pool import EnumPoolType
from myapp.const.service import (
    ServiceStatus,
)
from myapp.const.task_category import EnumTaskCategory
from myapp.models.model_job import Pipeline, Task, Workflow
from myapp.models.model_serving import InferenceService
from myapp.project import push_message
from myapp.third.k8s.py_k8s import check_status_time
from myapp.third.k8s.py_karmada import get_k8s_client_by_region
from myapp.third.k8s.tools import ignore_404
from myapp.utils.error_code import get_error_msg
from myapp.utils.exception import ignore_exception, log_exception
from myapp.utils.log import set_task_id
from myapp.utils.py.py_prometheus import Prometheus
from myapp.utils.region import get_region_keys
from myapp.utils.sess import session_scope
from myapp.utils.time import get_local_time
from myapp.watch.watch_service import listen_service, update_service_status
import requests
import yaml


# Module-level logger for this watcher.
log = logging.getLogger(__name__)

# Flask app config; PROMETHEUS holds the Prometheus server address used by
# save_monitoring to query per-pod resource metrics.
conf = app.config
prometheus = Prometheus(conf.get('PROMETHEUS', ''))


# 推送微信消息
# @pysnooper.snoop()
def deliver_message(workflow, dbsession):
    """Push a WeChat notification for a workflow status change.

    Sends at most once per status: nothing is pushed when the current status
    is not listed in info_json['alert_status'] or was already recorded in
    info_json['has_push'].

    :param workflow: Workflow ORM row; None is a no-op.
    :param dbsession: active SQLAlchemy session, used to resolve the pipeline
        for extra alert receivers.
    """
    if not workflow:
        return

    receivers = workflow.username.split(',')
    receivers = [receiver.strip() for receiver in receivers]

    pipeline_id = json.loads(workflow.labels).get('pipeline-id', '')
    if pipeline_id and int(pipeline_id) > 0:
        pipeline = dbsession.query(Pipeline).filter_by(id=int(pipeline_id)).first()
        # Guard against a stale pipeline-id label pointing at a deleted
        # pipeline (previously pipeline.alert_user raised AttributeError).
        alert_user = pipeline.alert_user.split(',') if pipeline and pipeline.alert_user else []
        alert_user = [user.strip() for user in alert_user if user.strip()]
        receivers += alert_user

    if not receivers:
        logging.info('no receivers')
        return

    info_json = json.loads(workflow.info_json)
    if (
        workflow.status in info_json['alert_status']
        and workflow.status not in info_json['has_push']
    ):
        receivers = list(set(receivers))
        status_more = json.loads(workflow.status_more)
        # NOTE(review): timedelta(hours=0) is a no-op — other call sites shift
        # UTC by +8h; confirm whether these timestamps are already local.
        start_time = (
            status_more.get('startedAt', '').replace('T', ' ').replace('Z', '')
            if status_more.get('startedAt', '')
            else ''
        )
        if start_time:
            start_time = (
                datetime.datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S')
                + datetime.timedelta(hours=0)
            ).strftime('%Y-%m-%d %H:%M:%S')

        finish_time = (
            status_more.get('finishedAt', '').replace('T', ' ').replace('Z', '')
            if status_more.get('finishedAt', '')
            else ''
        )
        if finish_time:
            finish_time = (
                datetime.datetime.strptime(finish_time, '%Y-%m-%d %H:%M:%S')
                + datetime.timedelta(hours=0)
            ).strftime('%Y-%m-%d %H:%M:%S')
        help_url = 'http://%s/pipeline_modelview/web/pod/%s' % (conf.get('HOST'), pipeline_id)
        message = (
            'workflow: %s \npipeline: %s(%s) \nnamespace: %s\nstatus: % s \nstart_time:'
            ' %s\nfinish_time: %s\n'
            % (
                workflow.name,
                info_json.get('pipeline_name', ''),
                info_json.get('describe', ''),
                workflow.namespace,
                workflow.status,
                start_time,
                finish_time,
            )
        )
        message += '\n'
        link = {'pod详情': help_url}
        if message:
            push_message(receivers, message, link)


def is_http_accessible(address, port=8080, route='health'):
    """Probe an HTTP health endpoint.

    :param address: host name or IP of the target service.
    :param port: TCP port to probe (default 8080).
    :param route: URL path of the health check endpoint.
    :return: True when the endpoint answers with a 2xx/3xx status,
        False on any request failure or error status.
    """
    target = f'http://{address}:{port}/{route}'
    try:
        resp = requests.get(target, timeout=5)
    except requests.exceptions.RequestException as ex:
        logging.warning(f'[is_http_accessible] {ex}')
        return False
    # Response.ok is True for any status below 400.
    return resp.ok


# @pysnooper.snoop()
def check_has_push(crd, dbsession):
    """Sync one normalized workflow event into its MySQL Workflow row.

    Refreshes status / labels / annotations / spec / status_more, updates the
    per-task snapshot, attaches the latest pod events into info_json, and
    commits.

    :param crd: normalized dict built by deal_event (back_object).
    :param dbsession: active SQLAlchemy session; committed here.
    :return: (has_push, workflow) — has_push is False when this status still
        needs a notification; workflow is None when the pipeline or the
        workflow row cannot be found.
    """
    pipeline_id = crd['labels'].get('pipeline-id', '')
    namespace = crd['namespace']
    # Events without a pipeline-id label cannot be attached to anything
    # (previously int('') raised ValueError here).
    if not pipeline_id:
        return False, None
    pipeline = dbsession.query(Pipeline).filter_by(id=int(pipeline_id)).first()
    if not pipeline:
        return False, None

    run_id = crd['labels'].get('pipeline/runid', '')
    alert_status = ''
    if pipeline.alert_status:
        alert_status = pipeline.alert_status
    logging.info(
        'Event: % s %s %s %s %s %s'
        % (crd['name'], pipeline.describe, pipeline.name, crd['username'], crd['status'], run_id)
    )

    workflow = (
        dbsession.query(Workflow)
        .filter(Workflow.name == crd['name'])
        .filter(Workflow.namespace == namespace)
        .first()
    )
    if workflow is None:
        logging.error('no workflow')
        return False, None

    info_json = {}
    if workflow.info_json:
        exist_info_json = json.loads(workflow.info_json)
        info_json = exist_info_json
        info_json['run_id'] = run_id
        info_json['alert_status'] = alert_status

    # Refresh per-task status inside the stored pipeline snapshot.
    snapshot = ''
    if workflow.snapshot:
        snapshot = json.loads(workflow.snapshot)
        snapshot['pipeline_expand'] = pipeline.expand
        nodes = crd['status_more'].get('nodes', {})
        pods = {}
        for node_name in nodes:
            if nodes[node_name]['type'] == 'Pod':
                pods[node_name] = nodes[node_name]
        for pod_name in pods:
            task_name = pods[pod_name]['displayName']
            # Retried pods display as 'name(n)'; strip to get the DB task name.
            db_task_name = task_name[: task_name.index('(')] if '(' in task_name else task_name
            if db_task_name not in snapshot['dag_json']:
                # Task missing from the stored snapshot: backfill it from the
                # pipeline's dag_json and the Task table.
                dag_json = json.loads(pipeline.dag_json)
                task = (
                    dbsession.query(Task)
                    .filter(Task.pipeline_id == int(pipeline_id))
                    .filter(Task.name == task_name)
                    .first()
                )
                snapshot['dag_json'][db_task_name] = {
                    'upstream': dag_json[task_name].get('upstream', []),
                    'task': task.as_dict(),
                    'status': '',
                }
                continue

            snapshot['dag_json'][db_task_name]['status'] = pods[pod_name]['phase']

    has_model = json.loads(workflow.status_more).get('has_model', 0)
    workflow.status = crd['status']
    workflow.display_status = crd['status']
    workflow.change_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    workflow.annotations = json.dumps(crd['annotations'], indent=4, ensure_ascii=False)
    workflow.labels = json.dumps(crd['labels'], indent=4, ensure_ascii=False)
    # BUGFIX: a trailing comma here used to assign a 1-tuple instead of the
    # JSON string to the spec column.
    workflow.spec = json.dumps(crd['spec'], indent=4, ensure_ascii=False)
    workflow.status_more = json.dumps(crd['status_more'], indent=4, ensure_ascii=False)
    workflow.username = crd['username']
    pod_name = ''

    with log_exception:
        k8s_client = get_k8s_client_by_region(region=workflow.region, action='search')
        pods = k8s_client.get_pods(
            namespace=namespace,
            labels={'workflows.argoproj.io/workflow': workflow.name, 'show-log': 'true'},
        )
        log.info(f'[check_has_push] len: {len(pods)}; pods: {pods}')
        events = []
        for pod in pods:
            pod_name = pod['name']
            pod_events = k8s_client.get_event_namespace(namespace, pod['name'])
            events += pod_events
        status_more = json.loads(workflow.status_more)
        # NOTE(review): assumes the root node always has children; run_ids
        # being None/empty raises here (swallowed by log_exception) — confirm.
        run_ids = deep_get(status_more, f'nodes.{workflow.name}.children')
        run_id = run_ids[0]
        status_more['nodes'][run_id]['podName'] = pod_name
        status_more['has_model'] = has_model

        # Record the failure reason derived from the exit code.
        if crd['status'] == 'Failed':
            exit_code = status_more['nodes'][run_id].get('outputs', {}).get('exitCode', '')
            status_more['error_code'] = exit_code
            status_more['error_msg'] = get_error_msg(exit_code)
        workflow.status_more = json.dumps(status_more, indent=4, ensure_ascii=False)
        logging.info('status_more:%s', workflow.status_more)
        events.sort(key=lambda x: x['last_timestamp'])
        for item in events:
            item['last_timestamp'] = get_local_time(item['last_timestamp'])
        db_info_json = json.loads(workflow.info_json)
        # Keep the previously stored events when this poll returned none.
        if len(events) == 0:
            info_json['events'] = db_info_json.get('events', [])
        else:
            info_json['events'] = events

    workflow.info_json = json.dumps(info_json, indent=4, ensure_ascii=False)
    workflow.snapshot = json.dumps(snapshot, indent=4, ensure_ascii=False)
    dbsession.commit()

    # .get guards the case where the row had no info_json yet (previously a
    # KeyError on 'alert_status').
    if crd['status'] in info_json.get('alert_status', '') and crd['status'] not in info_json.get(
        'has_push', ''
    ):
        return False, workflow
    else:
        return True, workflow


# 推送修改通知
# @pysnooper.snoop()
def push_resource_rec(workflow, dbsession):
    """Tune each task's requested CPU/memory from observed usage.

    For every task of the workflow's pipeline that is not marked
    NO_RESOURCE_CHECK and has at least 10 usage samples, set the request to
    ceil(max_usage * 1.4) + 2 — capped at 150 cores / 350G — and push a
    summary of the changes to the pipeline's alert users and creator.

    :param workflow: finished Workflow row whose labels carry pipeline-id.
    :param dbsession: active SQLAlchemy session (committed per task).
    """
    pipeline_id = json.loads(workflow.labels).get('pipeline-id', '')
    # A workflow without a pipeline-id label has nothing to tune
    # (previously int('') raised ValueError here).
    if not pipeline_id:
        return
    pipeline = dbsession.query(Pipeline).filter_by(id=int(pipeline_id)).first()
    if pipeline:
        init_message = (
            'pipeline(%s)根据近10次的任务训练资源使用情况，系统做如下调整:\n' % pipeline.describe
        )
        message = init_message
        tasks = dbsession.query(Task).filter(Task.pipeline_id == int(pipeline_id)).all()
        for task in tasks:
            if 'NO_RESOURCE_CHECK' not in task.job_template.env.replace('-', '_').upper():
                task_monitorings = json.loads(task.monitoring).get('task', [])
                if len(task_monitorings) > 9:
                    # Peak usage over the recorded samples.
                    max_cpu = 0
                    max_memory = 0
                    for task_monitoring in task_monitorings:
                        if float(task_monitoring.get('cpu', 0)) > max_cpu:
                            max_cpu = float(task_monitoring.get('cpu', 0))
                        if float(task_monitoring.get('memory', 0)) > max_memory:
                            max_memory = float(task_monitoring.get('memory', 0))
                    if max_cpu:
                        # 40% headroom + 2 cores, capped at 150.
                        rec_cpu = math.ceil(max_cpu * 1.4) + 2
                        if rec_cpu > 150:
                            rec_cpu = 150
                        if rec_cpu != int(task.resource_cpu):
                            message += (
                                'task(%s)，原申请cpu:%s，近10次最大使用cpu:%s，新申请值:%s\n'
                                % (task.label, task.resource_cpu, max_cpu, rec_cpu)
                            )
                            task.resource_cpu = str(rec_cpu)
                    if max_memory:
                        # 40% headroom + 2G, capped at 350G.
                        rec_memory = math.ceil(max_memory * 1.4) + 2
                        if rec_memory > 350:
                            rec_memory = 350
                        if rec_memory != int(
                            task.resource_memory.replace('G', '').replace('M', '')
                        ):
                            message += (
                                'task(%s)，原申请mem:%s，近10次最大使用mem:%s(G)，新申请值:%s\n'
                                % (
                                    task.label,
                                    task.resource_memory,
                                    max_memory,
                                    str(rec_memory) + 'G',
                                )
                            )
                            task.resource_memory = str(rec_memory) + 'G'
                    dbsession.commit()
        # Only notify when at least one task was actually adjusted.
        if message != init_message:
            alert_user = pipeline.alert_user.split(',') if pipeline.alert_user else []
            alert_user = [user.strip() for user in alert_user if user.strip()]
            receivers = alert_user + [pipeline.created_by.username]
            receivers = list(set(receivers))

            push_message(receivers, message)


# 推送训练耗时通知
# @pysnooper.snoop()
def push_task_time(workflow, dbsession):
    """Push a per-task run-time summary for a workflow.

    Collects every succeeded Pod node from status_more, computes each task's
    run time in hours, and pushes one report (at most once per workflow,
    tracked via info_json['push_task_time']) to the pipeline's alert users and
    the workflow owner; admins are additionally notified when any single task
    ran longer than 4 hours.

    :param workflow: Workflow ORM row; None is a no-op.
    :param dbsession: active SQLAlchemy session (committed here).
    """
    if not workflow:
        return

    # Only Pod-type nodes that finished successfully are timed.
    nodes = json.loads(workflow.status_more).get('nodes', {})
    pods = {}
    for node_name in nodes:
        if nodes[node_name]['type'] == 'Pod' and nodes[node_name]['phase'] == 'Succeeded':
            pods[node_name] = nodes[node_name]
    pipeline_id = json.loads(workflow.labels).get('pipeline-id', '')
    if pipeline_id and pods:
        pipeline = dbsession.query(Pipeline).filter_by(id=pipeline_id).first()
        if pipeline:
            message = '\n%s %s，各task耗时，酌情优化:\n' % (
                pipeline.describe,
                pipeline.created_by.username,
            )
            # Group pod run times by start time so the report lists tasks in
            # execution order.
            task_pod_time = {}
            for pod_name in pods:
                task_name = pods[pod_name]['displayName']
                # NOTE(review): assumes startedAt/finishedAt are
                # 'YYYY-MM-DDTHH:MM:SSZ' strings — confirm against argo output.
                finishedAt = datetime.datetime.strptime(
                    pods[pod_name]['finishedAt'].replace('T', ' ').replace('Z', ''),
                    '%Y-%m-%d %H:%M:%S',
                )
                startAt = datetime.datetime.strptime(
                    pods[pod_name]['startedAt'].replace('T', ' ').replace('Z', ''),
                    '%Y-%m-%d %H:%M:%S',
                )
                # Run time in hours, rounded to 2 decimals.
                run_time = round(
                    (finishedAt - startAt).days * 24 + (finishedAt - startAt).seconds / 60 / 60, 2
                )
                # Retried pods display as 'name(n)'; strip to get the DB task name.
                db_task_name = task_name[: task_name.index('(')] if '(' in task_name else task_name
                task = (
                    dbsession.query(Task)
                    .filter(Task.pipeline_id == int(pipeline_id))
                    .filter(Task.name == db_task_name)
                    .first()
                )
                # NOTE(review): task may be None when the task row was deleted;
                # task.label would then raise AttributeError — confirm.
                if startAt in task_pod_time and task_pod_time[startAt]:
                    task_pod_time[startAt].append({'task': task.label, 'run_time': str(run_time)})
                else:
                    task_pod_time[startAt] = [{'task': task.label, 'run_time': str(run_time)}]

            task_pod_time_sorted = sorted(task_pod_time.items(), key=lambda item: item[0])
            max_task_run_time = 0
            for task_pods in task_pod_time_sorted:
                for task_pod in task_pods[1]:
                    message += task_pod['task'] + ':' + task_pod['run_time'] + '(h)\n'
                    try:
                        if float(task_pod['run_time']) > max_task_run_time:
                            max_task_run_time = float(task_pod['run_time'])
                    except Exception as e:
                        logging.error(e)

            # Record that this workflow's report was already pushed, so
            # repeated events do not re-send it.
            info_json = json.loads(workflow.info_json)
            if info_json.get('push_task_time', ''):
                pass
            else:
                info_json['push_task_time'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                workflow.info_json = json.dumps(info_json, indent=4, ensure_ascii=False)
                dbsession.commit()
                message += '\n'
                link = {
                    '点击查看资源的使用': 'http://%s/pipeline_modelview/web/monitoring/%s'
                    % (conf.get('HOST'), pipeline_id)
                }
                # Only alert admins when a single task ran for more than 4 hours.
                if max_task_run_time > 4:
                    push_message(conf.get('ADMIN_USER').split(','), message, link)

                alert_user = pipeline.alert_user.split(',') if pipeline.alert_user else []
                alert_user = [user.strip() for user in alert_user if user.strip()]
                receivers = alert_user + [workflow.username]
                receivers = list(set(receivers))

                push_message(receivers, message, link)


# @pysnooper.snoop()
def save_monitoring(workflow, dbsession):
    """Record per-task resource usage for a succeeded workflow.

    Pulls cpu/memory metrics from Prometheus for every succeeded Pod node,
    appends them to the task's monitoring history (pruned to recent, sane
    samples), then triggers the run-time and resource-recommendation
    notifications. Best-effort: any error is only logged.

    :param workflow: Workflow ORM row.
    :param dbsession: active SQLAlchemy session (committed per task).
    """
    try:
        if workflow.status == 'Succeeded':
            # Collect all succeeded Pod nodes of the workflow.
            nodes = json.loads(workflow.status_more).get('nodes', {})
            pods = {}
            for node_name in nodes:
                if nodes[node_name]['type'] == 'Pod' and nodes[node_name]['phase'] == 'Succeeded':
                    pods[node_name] = nodes[node_name]
            pipeline_id = json.loads(workflow.labels).get('pipeline-id', '')
            if pipeline_id and pods:
                for pod_name in pods:
                    logging.info(pods[pod_name])
                    # Retried pods display as 'name(n)'; strip to get the DB
                    # task name.
                    task_name = pods[pod_name]['displayName']
                    task_name = (
                        task_name[: task_name.index('(')] if '(' in task_name else task_name
                    )

                    task = (
                        dbsession.query(Task)
                        .filter(Task.pipeline_id == int(pipeline_id))
                        .filter(Task.name == task_name)
                        .first()
                    )
                    # NOTE(review): pod_name here is the status_more node key —
                    # presumably identical to the k8s pod name; confirm against
                    # Prometheus labels.
                    metrics = prometheus.get_resource_metric(pod_name, namespace='pipeline')
                    monitoring = json.loads(task.monitoring) if task and task.monitoring else {}
                    task_monitoring = monitoring.get('task', [])
                    if metrics:
                        task_monitoring.append(
                            {
                                'cpu': metrics.get('cpu', ''),
                                'memory': metrics.get('memory', ''),
                                'pod_name': pod_name,
                                'update_time': datetime.datetime.now().strftime(
                                    '%Y-%m-%d %H:%M:%S'
                                ),
                            }
                        )

                    # Prune the history: drop implausible samples (cpu/memory
                    # <= 0.1) and samples older than 30 days.
                    task_monitoring_new = []
                    for metric in task_monitoring:
                        if (
                            float(metric.get('cpu', 0)) > 0.1
                            and float(metric.get('memory', 0)) > 0.1
                            and metric['update_time']
                            > (datetime.datetime.now() - datetime.timedelta(days=30)).strftime(
                                '%Y-%m-%d %H:%M:%S'
                            )
                        ):
                            task_monitoring_new.append(metric)

                    # Drop the oldest sample when more than 10 remain.
                    if len(task_monitoring_new) > 10:
                        del task_monitoring_new[0]

                    monitoring_new = {}
                    monitoring_new['task'] = task_monitoring_new
                    monitoring_new['tfjob'] = monitoring.get('tfjob', [])

                    logging.info(monitoring_new)
                    if task:
                        task.monitoring = json.dumps(monitoring_new, ensure_ascii=False, indent=4)
                        dbsession.commit()

            push_task_time(workflow, dbsession)

            push_resource_rec(workflow, dbsession)

    except Exception as e:
        # Deliberate best-effort: monitoring bookkeeping must never break the
        # event-handling pipeline.
        logging.error(e)


# @pysnooper.snoop()
def save_history(workflow, dbsession):
    """Mark the workflow's current status as pushed.

    Appends the status to the comma-separated info_json['has_push'] list
    (no duplicates) and commits the row.

    :param workflow: Workflow ORM row.
    :param dbsession: active SQLAlchemy session.
    """
    info = json.loads(workflow.info_json)
    pushed = info['has_push']
    if not pushed:
        info['has_push'] = workflow.status
    elif workflow.status not in pushed:
        info['has_push'] = pushed + ',' + workflow.status
    workflow.info_json = json.dumps(info, indent=4, ensure_ascii=False)
    dbsession.commit()


# @pysnooper.snoop()
def deal_event(crd_object, is_vcjob=False):
    """Main handler for one watched CRD event.

    Derives an effective status for the workflow, syncs it into MySQL via
    check_has_push/sync_job_status, and — on terminal states — triggers
    billing, model creation, service start, or service stop depending on the
    workflow's task category.

    :param crd_object: raw CRD dict from the k8s watch stream.
    :param is_vcjob: True when the CRD is a volcano job whose status lives
        under status.state.phase instead of status.phase.
    """
    log.info(
        f"[{threading.current_thread().name}][deal_event] {crd_object['metadata']['name']} {crd_object['metadata']['namespace']}"
    )
    with log_exception:
        # k8s creationTimestamp is UTC ('...T...Z'); shift +8h to local time.
        creat_time = crd_object['metadata']['creationTimestamp'].replace('T', ' ').replace('Z', '')
        creat_time = (
            datetime.datetime.strptime(creat_time, '%Y-%m-%d %H:%M:%S')
            + datetime.timedelta(hours=8)
        ).strftime('%Y-%m-%d %H:%M:%S')

        org_workflow_status = None
        with session_scope() as dbsession:
            workflow = (
                dbsession.query(Workflow)
                .filter(Workflow.name == crd_object['metadata']['name'])
                .filter(Workflow.namespace == crd_object['metadata']['namespace'])
                .first()
            )
            if workflow is None:
                # Everything below requires the DB row (previously this crashed
                # on workflow.region inside the log call when no row existed).
                logging.error(
                    f"[{threading.current_thread().name}][deal_event] no workflow row for {crd_object['metadata']['name']} in {crd_object['metadata']['namespace']}"
                )
                return
            logging.info(
                f"[{threading.current_thread().name}][deal_event][{crd_object['metadata']['name']}] {workflow}; region: {workflow.region}; name: {workflow.name}; status: {workflow.status}"
            )
            org_workflow_status = workflow.status
            # The CRD's phase cannot be used directly: volcano-scheduled pods
            # report Running while still queued, so derive the status below.
            status = ''
            if 'status' in crd_object and 'nodes' in crd_object['status']:
                keys = list(crd_object['status']['nodes'].keys())
                last_node = crd_object['status']['nodes'][keys[-1]]

                # When the volcano scheduler is used, keep the status Pending.
                with log_exception:
                    if org_workflow_status and org_workflow_status == 'Pending':
                        for item in crd_object['spec']['templates']:
                            if item['name'] == last_node['displayName']:
                                manifest = yaml.load(item['resource']['manifest'], yaml.FullLoader)
                                if manifest['spec']['schedulerName'] == 'volcano':
                                    if last_node['phase'] == 'Running':
                                        logging.info(
                                            'using volcano scheduler,status set to Pending'
                                        )
                                        status = 'Pending'

                logging.info(
                    f"[{threading.current_thread().name}][deal_event][{crd_object['metadata']['name']}] status: {status}; workflow.foreign_key: {workflow.foreign_key}"
                )
                if status == '':
                    status = last_node['phase']
                if status != 'Pending':
                    status = crd_object['status']['phase']
                if status == 'Running' and workflow.foreign_key == '':
                    # First time this workflow is seen Running: record the real
                    # container start time in foreign_key.
                    logging.info(f"[deal_event][{crd_object['metadata']['name']}] enter Running")
                    with log_exception:
                        ns = crd_object['metadata']['namespace']
                        k8s_client = get_k8s_client_by_region(
                            region=workflow.region, action='search'
                        )
                        pods = k8s_client.v1.list_namespaced_pod(
                            namespace=ns,
                            label_selector=f'pipeline-id={workflow.pipeline_id},task-name={last_node["displayName"]}',
                        )
                        # BUGFIX: V1PodList has no __len__; len(pods) raised
                        # TypeError here and silently skipped the foreign_key
                        # update below.
                        logging.info(
                            f"[{threading.current_thread().name}][deal_event][{crd_object['metadata']['name']}] len: {len(pods.items)}; {pods}"
                        )
                        if len(pods.items) > 0:
                            pod = pods.items[0]
                            for container_status in pod.status.container_statuses:
                                if container_status.name != 'main':
                                    continue
                                # started_at is UTC; shift +8h to local time.
                                started_at = container_status.state.running.started_at
                                workflow.foreign_key = (
                                    started_at + datetime.timedelta(hours=8)
                                ).strftime('%Y-%m-%d %H:%M:%S')
                                dbsession.commit()
                                break

            if status == '' and 'status' in crd_object:
                if is_vcjob is True:
                    status = crd_object['status']['state']['phase']
                else:
                    status = crd_object['status']['phase']
            if status == '':
                status = 'Pending'

            back_object = {
                'name': crd_object['metadata']['name'],
                'namespace': crd_object['metadata']['namespace']
                if 'namespace' in crd_object['metadata']
                else '',
                'annotations': crd_object['metadata'].get('annotations', {}),
                'labels': crd_object['metadata'].get('labels', {}),
                'spec': crd_object['spec'],
                'create_time': creat_time,
                'status': status,
                'status_more': (
                    check_status_time(crd_object['status']) if 'status' in crd_object else {}
                ),
            }

            if 'run-rtx' in back_object['labels']:
                back_object['username'] = back_object['labels']['run-rtx']
            elif 'upload-rtx' in back_object['labels']:
                # BUGFIX: this previously tested `in back_object`, which could
                # never match — the key lives in the labels dict.
                back_object['username'] = back_object['labels']['upload-rtx']

            # Sync the event into the MySQL Workflow row.
            has_push, workflow = check_has_push(back_object, dbsession)
            sync_job_status(workflow.id, dbsession=dbsession)

            if workflow.status == 'Succeeded':
                algorithm = json.loads(workflow.algorithm)
                resource = algorithm.get('resource', {})
                task_category = resource.get('task_category', EnumTaskCategory.auto_learning.value)
                pool_type = resource.get('pool_type', EnumPoolType.public.value)
                # Only public-pool jobs are billed; merge jobs are never billed.
                if pool_type == EnumPoolType.public.value:
                    if task_category == EnumTaskCategory.merge.value:
                        pass
                    elif task_category == EnumTaskCategory.compress.value:
                        if need_model_compress_billing(workflow.username):
                            model_compress_billing(workflow, dbsession)
                    else:
                        if need_model_training_billing(workflow.username):
                            model_training_billing(workflow, dbsession)

                if task_category == EnumTaskCategory.merge.value:
                    # lora merge finished: start the inference service.
                    start_service_if_finished(workflow, dbsession=dbsession)
                else:
                    # training finished: register the model.
                    create_model_if_finished(workflow, dbsession=dbsession)

            elif workflow.status == 'Failed':
                algorithm = json.loads(workflow.algorithm)
                resource = algorithm.get('resource', {})
                task_category = resource.get('task_category', EnumTaskCategory.auto_learning.value)
                if task_category == EnumTaskCategory.merge.value:
                    stop_service_if_failed(workflow, dbsession=dbsession)

            # if not has_push:
            #     try:
            #         deliver_message(workflow, dbsession)
            #     except Exception as e1:
            #         logging.error('push fail:', e1)
            #         push_message(conf.get('ADMIN_USER').split(','), 'push fail' + str(e1))
            # save_history(workflow, dbsession)
            # save_monitoring(workflow, dbsession)


# @pysnooper.snoop()
def listen_workflow(region='default'):
    """Watch argo Workflow CRDs cluster-wide and feed each event into deal_event.

    Runs forever; on any stream error the watch is re-created after a 60s
    back-off.

    :param region: cluster region key used to pick the k8s client.
    """
    logging.warning(f'[listen_workflow] start. region: {region} ')
    workflow_info = conf.get('CRD_INFO')['workflow']
    namespace = conf.get('PIPELINE_NAMESPACE')
    logging.info('begin listen workflow')
    group = workflow_info['group']
    version = workflow_info['version']
    plural = workflow_info['plural']
    k8s_cli = get_k8s_client_by_region(region=region, action='search')

    w = watch.Watch()
    while True:
        try:
            for event in w.stream(
                k8s_cli.custom_objects_api.list_cluster_custom_object,
                group=group,
                version=version,
                plural=plural,
                timeout_seconds=60,
            ):
                # One request id per event so log lines can be correlated.
                request_id = str(uuid.uuid4())
                set_task_id(request_id)
                ns = event['object']['metadata']['namespace']
                job_name = event['object']['metadata']['name']

                log.info(f"[listen_workflow][enter] {event['type']} {ns} {job_name}")
                if not ns.startswith(namespace):
                    log.info(f'[listen_workflow] {ns} not startswith {namespace}, continue..')
                    continue

                if event['type'] in ('ADDED', 'MODIFIED', 'DELETED'):
                    # Re-read the CRD so we process its latest state rather
                    # than the (possibly stale) watch payload.
                    crd = None
                    with ignore_404:
                        crd = k8s_cli.custom_objects_api.get_namespaced_custom_object(
                            group, version, ns, plural, job_name
                        )
                    if not crd:
                        logging.warning(f'[listen_workflow] {job_name} not exist in k8s')
                        continue
                    logging.warning(f'[listen_workflow] {job_name} exist in k8s')
                    deal_event(crd)
        except Exception:
            # The watch stream regularly breaks; rebuild it and retry.
            log.error('listen workflow error', exc_info=True)
            w = watch.Watch()
            time.sleep(60)


def listen_volcano_jobs(region='default'):
    """Watch volcano job CRD events in one region and sync status to the DB.

    Two kinds of jobs are handled, distinguished by namespace prefix:

    * training jobs (``PIPELINE_NAMESPACE``): when both the argo node and the
      volcano job report ``Running``, record the actual training start time
      and the status on the matching ``Workflow`` row;
    * inference jobs (``SERVICE_NAMESPACE``, multi-node/multi-GPU serving):
      probe the service over HTTP and promote the matching
      ``InferenceService`` to the running status once it is reachable.

    Runs forever; on any error the watch is recreated after a 60s sleep.

    :param region: region key used to select the k8s client.
    """
    logging.warning(f'[listen_volcano_jobs] start. region: {region} ')
    vcjob_info = conf.get('CRD_INFO')['vcjob']
    namespace = conf.get('PIPELINE_NAMESPACE')
    srv_namespace = conf.get('SERVICE_NAMESPACE')
    logging.info('begin listen volcano jobs')
    group = vcjob_info['group']
    version = vcjob_info['version']
    plural = vcjob_info['plural']
    k8s_cli = get_k8s_client_by_region(region=region, action='search')

    w = watch.Watch()
    while True:
        try:
            for event in w.stream(
                k8s_cli.custom_objects_api.list_cluster_custom_object,
                group=group,
                version=version,
                plural=plural,
                timeout_seconds=60,
            ):
                # volcano jobs are only used to update status and the
                # actual training start time
                request_id = str(uuid.uuid4())
                set_task_id(request_id)
                ns = event['object']['metadata']['namespace']
                job_name = event['object']['metadata']['name']

                log.info(f"[listen_volcano_jobs][enter] {event['type']} {ns} {job_name}")
                if ns.startswith(namespace):
                    # handle training volcano jobs
                    # BUGFIX: pre-initialize so a 404 suppressed by
                    # ignore_404 does not leave `obj` unbound below
                    # (mirrors how listen_workflow pre-initializes `crd`)
                    obj = None
                    with ignore_404:
                        # re-read the latest CRD state
                        obj = k8s_cli.custom_objects_api.get_namespaced_custom_object(
                            group, version, ns, plural, job_name
                        )
                    if not obj:
                        logging.info(f'[listen_volcano_jobs][train] {job_name} not exist in k8s')
                        continue

                    workflow_name = job_name
                    task_name = job_name
                    status = obj['status']['state']['phase']
                    logging.info(
                        f'[listen_volcano_jobs] task_name:{task_name}, workflow_name:{workflow_name},status:{status}'
                    )
                    with session_scope() as session:
                        workflow = session.query(Workflow).filter_by(name=workflow_name).first()
                        if workflow is None:
                            # not found by job name: fall back to the argo
                            # workflow label carried on the first task
                            # BUGFIX: pre-initialize so a KeyError swallowed
                            # by ignore_exception does not leave `tasks`
                            # unbound
                            tasks = None
                            with ignore_exception:
                                tasks = obj['spec']['tasks']

                            if tasks is None:
                                logging.info('tasks not found')
                                continue
                            t = tasks[0]
                            labels = t['template']['metadata']['labels']
                            task_name = labels['task-name']
                            old_workflow_name = workflow_name
                            workflow_name = labels['workflows.argoproj.io/workflow']
                            logging.info(
                                f'[listen_volcano_jobs] {old_workflow_name} not found, use: {workflow_name} to found'
                            )
                            workflow = (
                                session.query(Workflow).filter_by(name=workflow_name).first()
                            )
                            if workflow is None:
                                logging.info(
                                    f'[listen_volcano_jobs] {old_workflow_name} and {workflow_name} not found !!!'
                                )
                                continue

                        status_more = json.loads(workflow.status_more)
                        # `nodes` may be absent/None while argo is still
                        # initializing; treat that as "no matching node"
                        nodes = status_more.get('nodes') or {}
                        node_phase = None
                        for k in nodes:
                            if nodes[k]['displayName'] == task_name:
                                node_phase = nodes[k]['phase']
                                break

                        if node_phase in ['Running'] and status in ['Running']:
                            last_transition_time = obj['status']['state']['lastTransitionTime']
                            last_transition_time = last_transition_time.replace('T', ' ').replace(
                                'Z', ''
                            )
                            # record the actual training start time, shifted
                            # +8h (presumably UTC -> local CST — TODO confirm)
                            workflow.foreign_key = (
                                datetime.datetime.strptime(
                                    last_transition_time, '%Y-%m-%d %H:%M:%S'
                                )
                                + datetime.timedelta(hours=8)
                            ).strftime('%Y-%m-%d %H:%M:%S')
                            workflow.status = status
                            workflow.display_status = status
                            session.commit()

                if ns.startswith(srv_namespace):
                    # handle inference volcano jobs (multi-node / multi-GPU)
                    # BUGFIX: pre-initialize for the same ignore_404 reason
                    obj = None
                    with ignore_404:
                        # re-read the latest CRD state
                        obj = k8s_cli.custom_objects_api.get_namespaced_custom_object(
                            group, version, ns, plural, job_name
                        )
                    if not obj:
                        logging.info(f'[listen_volcano_jobs][service] {job_name} not exist in k8s')
                        continue

                    address = '.'.join([job_name, ns])
                    logging.info(f'[listen_volcano_jobs] address: {address}')
                    if is_http_accessible(address=address) is True:
                        logging.info(f'[listen_volcano_jobs][success] ping {address}')
                        with session_scope() as session:
                            ins = (
                                session.query(InferenceService)
                                .filter(
                                    InferenceService.name == job_name,
                                    InferenceService.status.in_(
                                        [
                                            ServiceStatus.deploying.value,
                                            ServiceStatus.upgrading.value,
                                            ServiceStatus.running.value,
                                        ]
                                    ),
                                )
                                .first()
                            )
                            if ins is None:
                                continue
                            else:
                                logging.info(
                                    f'[listen_volcano_jobs]  {ins.name}, {ins.status} -> {ServiceStatus.running.value}'
                                )

                            update_service_status(
                                ins.id,
                                ins.service_id,
                                ins.status,
                                ServiceStatus.running.value,
                                '',
                                session,
                            )
                    else:
                        logging.info(f'[listen_volcano_jobs][failed] ping {address}')

        except Exception:
            log.error('listen_volcano_jobs error', exc_info=True)
            # a failed stream cannot be resumed reliably: recreate the
            # watch object after a pause and start over
            w = watch.Watch()
            time.sleep(60)


def main():
    """Spawn the per-region watcher threads and monitor their liveness.

    For every configured region, three daemon threads are started: the
    argo workflow watcher, the volcano job watcher and the service
    watcher. The main thread then loops forever, logging an error for any
    watcher thread that has died.
    """
    logging.info('[watch] start ...')
    thread_list = []
    regions_key = get_region_keys()
    logging.info(f'[watch] regions_key: {regions_key}')

    # one watcher of each kind per region
    watcher_targets = (
        ('listen_workflow', listen_workflow),
        ('listen_volcano_jobs', listen_volcano_jobs),
        ('listen_service', listen_service),
    )
    for region in regions_key:
        for suffix, target in watcher_targets:
            worker = threading.Thread(
                target=target, args=(region,), name=f'{region}_{suffix}'
            )
            # daemonize so the process can exit without joining watchers
            worker.daemon = True
            worker.start()
            thread_list.append(worker)

    logging.info(f'[watch] len: {len(thread_list)}; thread_list: {thread_list}')

    # liveness loop: only reports dead threads, never restarts them
    while True:
        for thread in thread_list:
            if not thread.is_alive():
                log.error(f'线程 {thread.name} 异常退出！')

        time.sleep(60)


# NOTE: async io cannot be used here, because watch stream() blocks
if __name__ == '__main__':
    main()
