"""Utility functions used across Myapp"""

import base64
import copy
import datetime
import json
import logging
import os
import re
import shutil
import subprocess
import time
import traceback
import uuid

import croniter
import docker
import modelarts
from pypinyin import lazy_pinyin
import pysnooper
import requests
from sqlalchemy import func as sql_func, not_, or_
from sqlalchemy.orm import load_only
import yaml

from myapp.apis.notebook import NotebookApi
from myapp.apis.workflow import create_model_if_finished, process_job_err_msg, sync_job_status

# Myapp framework imports
from myapp.app import app
from myapp.biz.service import delete_old_service
from myapp.configs.model_manage import MODEL_EXPORT_TEMPLATE
from myapp.configs.tasks import MODELARTS_TASKS_CFGS
from myapp.const.base import SharedMntDir
from myapp.const.dataset import (
    EnumDatasetPublishStatus,
    EnumDatasetStatus,
    MergePathPrefix,
    OriginPathPrefix,
)
from myapp.const.service import (
    EnumDockerCommitStatus,
    EnumModelExportStatus,
    EnumModelStatus,
    EnumModelType,
    EnumNotebookStatus,
    ServiceStatus,
)
from myapp.models.model_aihub import Aihub
from myapp.models.model_auto_learning import AutoLearning
from myapp.models.model_dataset import Dataset, DatasetV2
from myapp.models.model_docker_commits import DockerCommits
from myapp.models.model_eval_dataset import EvalDataset
from myapp.models.model_job import (
    Images,
    Pipeline,
    Pytorchjob,
    Repository,
    RunHistory,
    Task,
    Tfjob,
    Workflow,
    Xgbjob,
)
from myapp.models.model_model_export import ModelExport
from myapp.models.model_model_version import Model_Version
from myapp.models.model_notebook import Notebook
from myapp.models.model_serving import InferenceService, Service
from myapp.models.model_user import MyUser
from myapp.models.user_attributes import UserAttribute
from myapp.project import push_admin, push_message
from myapp.tasks.celery_app import celery_app
from myapp.third.argo.workflow import get_workflow_by_name, run_workflow_by_yaml
from myapp.third.k8s import py_k8s
from myapp.third.k8s.py_k8s import K8s
from myapp.third.modelarts import ma_client
from myapp.third.modelarts.model import get_model_info
from myapp.third.modelarts.tools import parse_apig_exception
from myapp.third.redis.client import RedisClient
from myapp.utils import env
from myapp.utils.annotation import download_annotation_file, get_annotation_info
from myapp.utils.calc_unit import hum_convert
from myapp.utils.env import is_modelarts, is_private
from myapp.utils.exception import ignore_exception, log_exception
from myapp.utils.region import get_region_keys
from myapp.utils.region_storage import StorageMgrFactory
from myapp.utils.resource import is_multi_node
from myapp.utils.sess import session_scope
from myapp.utils.storage import (
    DEFAULT_STORAGE_SIZE,
    delete_local_dir,
    get_path_size,
    get_private_path,
    juicefs_quota_set,
    storage_mgr,
)
from myapp.utils.strings import id_generator
from myapp.views.view_pipeline import dag_to_pipeline, run_pipeline
from myapp.views.view_serving import Service_ModelView_Api


class Pusherror(Exception):
    """Raised when pushing a notification message fails."""


# Application config shared by every task in this module.
conf = app.config
# Make sure the periodic delete_workflow task logs at INFO level.
logging.getLogger('task.delete_workflow').setLevel(logging.INFO)

# Maps a CRD plural name to its ORM model so cleanup code can mark the
# corresponding DB rows as 'Deleted' after removing the k8s object.
model_map = {'tfjobs': Tfjob, 'workflows': Workflow, 'pytorchjobs': Pytorchjob, 'xgbjobs': Xgbjob}

logger = logging.getLogger(__name__)


# @pysnooper.snoop()
def delete_old_crd(object_info, region='default'):
    """Clean up CRD objects of one kind (workflow/vcjob/...) in every cluster.

    For each CRD instance found:
      * if its owning Workflow DB row is marked ``Deleted``, delete the CRD
        and mark the matching DB rows (via ``model_map``) as ``Deleted``;
      * if it is still ``Running`` past ``object_info['timeout']`` seconds,
        push a warning to the submitter and the admins;
      * if it finished more than 3 hours ago, delete it and mark the DB rows.

    :param object_info: one entry of conf['CRD_INFO'] with keys
        ``group``/``version``/``plural`` and optional ``timeout`` in seconds
        (default 3 days).
    :param region: currently unused; kept for interface compatibility.
    """
    timeout = int(object_info.get('timeout', 60 * 60 * 24 * 3))
    clusters = conf.get('CLUSTERS', {})
    for cluster_name in clusters:
        cluster = clusters[cluster_name]
        k8s_client = K8s(cluster.get('KUBECONFIG', ''))
        # k8s_client = get_k8s_client_by_region(region=region)

        crd_objects = []
        try:
            crd_objects = k8s_client.get_crd_all_namespaces(
                group=object_info['group'],
                version=object_info['version'],
                plural=object_info['plural'],
                pool=False,
            )
        except Exception as e:
            print(e)

        with session_scope() as dbsession:
            for crd_object in crd_objects:
                print(crd_object['status'], crd_object['create_time'], crd_object['finish_time'])

                # If the parent workflow has already been deleted, remove this
                # CRD (and its DB rows) as well.
                run_id = json.loads(crd_object['labels']).get('run-id', '').strip()
                if run_id:
                    try:
                        # When the workflow is deleted, everything belonging to
                        # its run-id is deleted along with it.
                        workflows = (
                            dbsession.query(Workflow)
                            .filter(Workflow.labels.contains(run_id))
                            .all()
                        )
                        print(workflows)
                        for workflow in workflows:
                            if workflow.status == 'Deleted':
                                crd_names = k8s_client.delete_crd(
                                    group=object_info['group'],
                                    version=object_info['version'],
                                    plural=object_info['plural'],
                                    namespace=crd_object['namespace'],
                                    name=crd_object['name'],
                                )
                                time.sleep(10)
                                # Mirror the deletion into the ORM table for
                                # this CRD kind, if we track one.
                                if object_info['plural'] in model_map:
                                    db_crds = (
                                        dbsession.query(model_map[object_info['plural']])
                                        .filter(
                                            model_map[object_info['plural']].name.in_(crd_names)
                                        )
                                        .all()
                                    )
                                    for db_crd in db_crds:
                                        db_crd.status = 'Deleted'
                                    dbsession.commit()
                    except Exception as e:
                        print(e)

                try:
                    # Still running: if it has been running past the timeout,
                    # push a notification instead of deleting.
                    if crd_object['status'] == 'Running':
                        if crd_object['create_time'] < (
                            datetime.datetime.now() - datetime.timedelta(seconds=timeout)
                        ).strftime('%Y-%m-%d %H:%M:%S'):
                            if object_info['plural'] == 'workflows':
                                username = ''

                                label = json.loads(crd_object['labels'])
                                pipeline_id = label.get('pipeline-id', '')
                                if 'run-rtx' in label:
                                    username = label['run-rtx']
                                elif 'upload-rtx' in label:
                                    username = label['upload-rtx']
                                if username:
                                    push_message(
                                        [username] + conf.get('ADMIN_USER', '').split(','),
                                        '%s %s %s %s 创建时间 %s， 已经运行时间过久，注意修正'
                                        % (
                                            username,
                                            object_info['plural'],
                                            crd_object['name'],
                                            pipeline_id,
                                            crd_object['create_time'],
                                        ),
                                    )
                    else:
                        # Finished a while ago: delete it outright.
                        # NOTE(review): the original comment said "1 day" but
                        # the code uses 3 hours — confirm the intended window.
                        if crd_object['finish_time'] and crd_object['finish_time'] < (
                            datetime.datetime.now() - datetime.timedelta(hours=3)
                        ).strftime('%Y-%m-%d %H:%M:%S'):
                            # NOTE(review): 'success' is printed before the
                            # delete call actually runs.
                            print(
                                'delete %s.%s namespace=%s, name=%s success'
                                % (
                                    object_info['group'],
                                    object_info['plural'],
                                    crd_object['namespace'],
                                    crd_object['name'],
                                )
                            )
                            crd_names = k8s_client.delete_crd(
                                group=object_info['group'],
                                version=object_info['version'],
                                plural=object_info['plural'],
                                namespace=crd_object['namespace'],
                                name=crd_object['name'],
                            )
                            if object_info['plural'] in model_map:
                                db_crds = (
                                    dbsession.query(model_map[object_info['plural']])
                                    .filter(model_map[object_info['plural']].name.in_(crd_names))
                                    .all()
                                )
                                for db_crd in db_crds:
                                    db_crd.status = 'Deleted'
                                dbsession.commit()
                except Exception as e:
                    print(e)


def _run_workflow_finished(dbsession, run_id):
    """Return True if any Workflow row whose labels contain *run_id* has
    already ended (Succeeded, Failed, or Deleted)."""
    workflows = (
        dbsession.query(Workflow).filter(Workflow.labels.contains(run_id)).all()
    )
    return any(w.status in ('Succeeded', 'Deleted', 'Failed') for w in workflows)


# Clean up expired / finished pipeline resources.
@celery_app.task(name='task.delete_workflow', bind=True)
def delete_workflow(task):
    """Periodic cleanup of pipeline k8s resources in every cluster.

    1. Delete expired ``workflow`` and ``vcjob`` CRDs via ``delete_old_crd``.
    2. Delete Deployments, DaemonSets and StatefulSets in the ``pipeline``
       namespace whose owning Workflow (matched via the ``run-id`` label) has
       already finished.

    Bug fix: the original code passed ``deploy.name`` / ``daemon_set.name`` /
    ``sts.name`` to the delete calls, but kubernetes client objects expose the
    name on ``.metadata`` (the same objects are read via ``.metadata.labels``
    two lines above), so every delete raised AttributeError, which the broad
    excepts silently swallowed — nothing was ever removed.

    :param task: bound celery task instance (unused).
    """
    print('begin delete task')

    workflow_info = conf.get('CRD_INFO', {}).get('workflow', {})
    print(workflow_info)
    if workflow_info:
        try:
            delete_old_crd(workflow_info)
        except Exception as e:
            print(e)

    time.sleep(1)

    vcjob_info = conf.get('CRD_INFO', {}).get('vcjob', {})
    print(vcjob_info)
    if vcjob_info:
        try:
            delete_old_crd(vcjob_info)
        except Exception as e:
            print(e)

    time.sleep(1)

    clusters = conf.get('CLUSTERS', {})

    # Deployments left behind by finished runs.
    for cluster_name in clusters:
        cluster = clusters[cluster_name]
        try:
            k8s_client = K8s(cluster.get('KUBECONFIG', ''))
            deployments = k8s_client.AppsV1Api.list_namespaced_deployment(
                namespace='pipeline'
            ).items
            for deploy in deployments:
                # labels may be None on objects created without any labels
                run_id = (deploy.metadata.labels or {}).get('run-id', '').strip()
                if not run_id:
                    continue
                with session_scope() as dbsession:
                    try:
                        if _run_workflow_finished(dbsession, run_id):
                            k8s_client.delete_deployment(
                                namespace='pipeline', name=deploy.metadata.name
                            )
                    except Exception as e:
                        print(e)
        except Exception as e:
            print(e)

    time.sleep(1)

    # DaemonSets left behind by finished runs.
    for cluster_name in clusters:
        cluster = clusters[cluster_name]
        try:
            k8s_client = K8s(cluster.get('KUBECONFIG', ''))
            daemon_sets = k8s_client.AppsV1Api.list_namespaced_daemon_set(
                namespace='pipeline'
            ).items
            for daemon_set in daemon_sets:
                run_id = (daemon_set.metadata.labels or {}).get('run-id', '').strip()
                if not run_id:
                    continue
                with session_scope() as dbsession:
                    try:
                        if _run_workflow_finished(dbsession, run_id):
                            k8s_client.AppsV1Api.delete_namespaced_daemon_set(
                                namespace='pipeline', name=daemon_set.metadata.name
                            )
                    except Exception as e:
                        print(e)
        except Exception as e:
            print(e)

    time.sleep(1)

    # StatefulSets left behind by finished runs.
    for cluster_name in clusters:
        cluster = clusters[cluster_name]
        try:
            k8s_client = K8s(cluster.get('KUBECONFIG', ''))
            stss = k8s_client.AppsV1Api.list_namespaced_stateful_set(namespace='pipeline').items
            for sts in stss:
                run_id = (sts.metadata.labels or {}).get('run-id', '').strip()
                if not run_id:
                    continue
                with session_scope() as dbsession:
                    try:
                        if _run_workflow_finished(dbsession, run_id):
                            k8s_client.AppsV1Api.delete_namespaced_stateful_set(
                                namespace='pipeline', name=sts.metadata.name
                            )
                    except Exception as e:
                        print(e)
        except Exception as e:
            print(e)


# Delete stale rank-table ConfigMaps created by the Ascend ring controller.
@celery_app.task(name='task.delete_ring_cfgmap', bind=True)
def delete_ring_cfgmap(task):
    """Remove ``rings-config-*`` ConfigMaps whose vcjob has been gone for more
    than 7 days, or whose vcjob finished more than 7 days ago.

    Bug fix: the finished-vcjob branch compared ``timedelta.day`` (an attribute
    that does not exist; ``timedelta`` only has ``days``), so it always raised
    AttributeError — swallowed by the except below — and those ConfigMaps were
    never deleted.

    :param task: bound celery task instance (unused).
    """
    clusters = conf.get('CLUSTERS', {})
    crd_info = conf.get('CRD_INFO', {}).get('vcjob', {})
    logging.info('开始清理rings config map')
    for cluster_name in clusters:
        cluster = clusters[cluster_name]
        k8s_client = K8s(cluster.get('KUBECONFIG', ''))
        all_configmaps = k8s_client.get_configmaps(
            namespace='pipeline', labels={'ring-controller.atlas': 'ascend-910'}
        )
        for one in all_configmaps:
            try:
                configmap_name = one.metadata.name
                vcjob_name = configmap_name.replace('rings-config-', '')
                vcjob_crd = k8s_client.get_one_crd(
                    group=crd_info['group'],
                    version=crd_info['version'],
                    plural=crd_info['plural'],
                    namespace='pipeline',
                    name=vcjob_name,
                )
                should_delete = False
                if not vcjob_crd:
                    # vcjob is gone: delete once the configmap itself is older
                    # than 7 days. NOTE(review): the -8h shift presumably
                    # converts UTC creation_timestamp to local time — confirm.
                    diff_day = (
                        datetime.datetime.now()
                        - one.metadata.creation_timestamp.replace(tzinfo=None)
                        - datetime.timedelta(hours=8)
                    )
                    if diff_day.days > 7:
                        should_delete = True
                elif vcjob_crd['status'].lower() in [
                    'completed',
                    'error',
                    'terminated',
                    'failed',
                    'aborted',
                ]:
                    # vcjob finished: delete 7 days after its last transition.
                    last_transition_time = json.loads(vcjob_crd['status_more'])['state'][
                        'lastTransitionTime'
                    ]
                    diff_day = (
                        datetime.datetime.now()
                        - datetime.datetime.strptime(last_transition_time, '%Y-%m-%dT%H:%M:%SZ')
                        - datetime.timedelta(hours=8)
                    )
                    # was: diff_day.day (AttributeError); .days is correct
                    if diff_day.days > 7:
                        should_delete = True
                if should_delete:
                    logging.info(f'删除rings config map: {configmap_name}')
                    k8s_client.delete_configmap(namespace='pipeline', name=configmap_name)
            except Exception as e:
                logging.error(e)


@celery_app.task(name='task.delete_notebook', bind=True)
def delete_notebook(task):
    """Reclaim GPU notebooks near or past their expiry time.

    Notebooks whose last change is older than the configured ``timeout`` get
    their pod deleted and the owner is notified; notebooks within one day of
    expiring only receive a renewal reminder.

    Changes vs. original: removed a dead ``pass`` statement before the
    expiry notification and hoisted the repeated ``datetime.now()`` expiry
    threshold out of the loop.

    :param task: bound celery task instance (unused).
    """
    print('begin delete notebook')
    object_info = conf.get('CRD_INFO', {}).get('notebook', {})
    print(object_info)
    timeout = int(object_info.get('timeout', 60 * 60 * 24 * 3))
    namespace = conf.get('NOTEBOOK_NAMESPACE')
    with session_scope() as dbsession:
        try:
            # Anything changed before alert_time is within one day of expiry
            # (or already expired).
            alert_time = (
                datetime.datetime.now()
                - datetime.timedelta(seconds=timeout)
                + datetime.timedelta(days=1)
            )

            # Only GPU notebooks are reclaimed automatically.
            notebooks = (
                dbsession.query(Notebook)
                .filter(Notebook.changed_on < alert_time)
                .filter(Notebook.resource_gpu != '0')
                .all()
            )
            expire_line = datetime.datetime.now() - datetime.timedelta(seconds=timeout)
            for notebook in notebooks:
                if notebook.changed_on < expire_line:
                    # Already expired: delete the pod and tell the owner.
                    k8s_client = K8s(notebook.project.cluster.get('KUBECONFIG', ''))
                    vscode_pods = k8s_client.get_pods(namespace=namespace, pod_name=notebook.name)
                    if vscode_pods:
                        vscode_pod = vscode_pods[0]
                        k8s_client.delete_pods(namespace=namespace, pod_name=vscode_pod['name'])
                        user = vscode_pod['labels'].get('user', '')
                        if user:
                            push_message(
                                [user],
                                '您的notebook %s已清理释放资源，如果需要可reset后重新使用。'
                                % vscode_pod['name'],
                            )
                else:
                    # Not expired yet: remind the owner to renew.
                    message = (
                        '您的notebook %s即将过期，如要继续使用，请尽快续期，每次有效期3天\n'
                        % notebook.name
                    )
                    push_message([notebook.created_by.username], message)

        except Exception as e:
            print(e)


@celery_app.task(name='task.delete_debug_docker', bind=True)
def delete_debug_docker(task):
    """Housekeeping for debug/temporary pods and services across clusters.

    1. Delete Succeeded notebook pods plus leftover ``debug-``/``run-``
       pipeline pods and their workflows.
    2. Tear down every inference service's ``debug-``/``test-`` deployments,
       configmaps, services and ingresses, flipping the matching
       ``model_status`` back to ``offline``.
    3. Delete jupyter pods, their services and virtualservice CRDs.
    4. Delete docker build/commit pods.

    Changes vs. original: the byte-identical debug-/test- teardown blocks are
    collapsed into one loop, and the name-prefix check uses ``startswith``
    with a tuple instead of slicing.

    :param task: bound celery task instance (unused).
    """
    clusters = conf.get('CLUSTERS', {})

    # 1. finished pods and leftover debug/run pipeline pods
    for cluster_name in clusters:
        cluster = clusters[cluster_name]
        notebook_namespace = conf.get('NOTEBOOK_NAMESPACE')
        pipeline_namespace = conf.get('PIPELINE_NAMESPACE')
        k8s_client = K8s(cluster.get('KUBECONFIG', ''))
        k8s_client.delete_pods(namespace=notebook_namespace, status='Succeeded')
        pipeline_pods = k8s_client.get_pods(pipeline_namespace)
        for pod in pipeline_pods:
            if pod['name'].startswith(('debug-', 'run-')):
                run_id = pod['labels'].get('run-id', '')
                if run_id:
                    k8s_client.delete_workflow(
                        all_crd_info=conf.get('CRD_INFO', {}),
                        namespace=pipeline_namespace,
                        run_id=run_id,
                    )
                    k8s_client.delete_pods(namespace=pipeline_namespace, labels={'run-id': run_id})

    # 2. debug and test instances of inference services
    for cluster_name in clusters:
        cluster = clusters[cluster_name]
        namespace = conf.get('SERVICE_NAMESPACE')
        k8s_client = K8s(cluster.get('KUBECONFIG', ''))
        with session_scope() as dbsession:
            try:
                inference_services = dbsession.query(InferenceService).all()
                for service in inference_services:
                    try:
                        # debug- and test- resources are torn down identically;
                        # the matching model_status is reset to offline.
                        for prefix in ('debug', 'test'):
                            name = prefix + '-' + service.name
                            k8s_client.delete_deployment(namespace=namespace, name=name)
                            k8s_client.delete_configmap(namespace=namespace, name=name)
                            k8s_client.delete_service(namespace=namespace, name=name)
                            k8s_client.delete_istio_ingress(namespace=namespace, name=name)
                            if service.model_status == prefix:
                                service.model_status = 'offline'
                                dbsession.commit()

                    except Exception as e1:
                        print(e1)

            except Exception as e:
                print(e)

    push_message(conf.get('ADMIN_USER', '').split(','), 'debug pod 清理完毕')

    # 3. jupyter notebook pods, their services and virtualservices
    print('begin delete idex')
    namespace = conf.get('NOTEBOOK_NAMESPACE')
    for cluster_name in clusters:
        cluster = clusters[cluster_name]
        k8s_client = K8s(cluster.get('KUBECONFIG', ''))
        pods = k8s_client.get_pods(namespace=namespace, labels={'pod-type': 'jupyter'})
        for pod in pods:
            try:
                k8s_client.v1.delete_namespaced_pod(pod['name'], namespace, grace_period_seconds=0)
            except Exception as e:
                print(e)
            try:
                k8s_client.v1.delete_namespaced_service(
                    pod['name'], namespace, grace_period_seconds=0
                )
            except Exception as e:
                print(e)
            try:
                object_info = conf.get('CRD_INFO', {}).get('virtualservice', {})
                k8s_client.delete_crd(
                    group=object_info['group'],
                    version=object_info['version'],
                    plural=object_info['plural'],
                    namespace=namespace,
                    name=pod['name'],
                )

            except Exception as e:
                print(e)

    push_message(conf.get('ADMIN_USER', '').split(','), 'idex jupter pod 清理完毕')

    # 4. docker image debug/commit pods
    namespace = conf.get('NOTEBOOK_NAMESPACE')
    for cluster_name in clusters:
        cluster = clusters[cluster_name]
        k8s_client = K8s(cluster.get('KUBECONFIG', ''))
        k8s_client.delete_pods(namespace=namespace, labels={'pod-type': 'docker'})

    push_message(conf.get('ADMIN_USER', '').split(','), 'docker 调试构建 pod 清理完毕')


# Push a notification (e.g. WeChat) about a pipeline run.
# @pysnooper.snoop()
def deliver_message(pipeline, message=''):
    """Notify the pipeline owner and any configured alert users that a
    scheduled run started, or that starting it failed.

    :param pipeline: Pipeline ORM object (reads name/describe/namespace/
        cron_time/created_by/alert_user).
    :param message: failure detail; empty string means the run started fine.
    """
    candidates = pipeline.created_by.username.split(',')
    if pipeline.alert_user:
        candidates += pipeline.alert_user.split(',')
    # de-duplicate and drop blank entries
    receivers = list({name.strip() for name in candidates if name.strip()})
    if not receivers:
        print('no receivers')
        return

    now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    header = (
        f'pipeline: {pipeline.name}({pipeline.describe}) '
        f'\nnamespace: {pipeline.namespace}'
        f'\ncrontab: {pipeline.cron_time}'
        f'\ntime: {now}\n'
    )
    body = header + (f'fail start run:\n{message}' if message else 'start run')

    push_message(receivers, body)
    # push_message(conf.get('ADMIN_USER').split(','),message)


# @pysnooper.snoop()
def save_history(dbsession, pipeline, message=''):
    """Persist one RunHistory row recording this pipeline schedule attempt.

    :param dbsession: active SQLAlchemy session (committed here).
    :param pipeline: Pipeline ORM object supplying the run metadata.
    :param message: optional free-form note stored on the record.
    """
    fields = dict(
        created_on=datetime.datetime.now(),
        pipeline_id=pipeline.id,
        pipeline_argo_id=pipeline.pipeline_id,
        pipeline_file=pipeline.pipeline_file,
        version_id=pipeline.version_id,
        run_id=pipeline.run_id,
        message=message,
    )
    dbsession.add(RunHistory(**fields))
    dbsession.commit()


# Compute the expected fire times, throttling points that are too close together.
# @pysnooper.snoop()
def next_schedules(cron_time, start_at, stop_at, resolution=0):
    """Yield the cron fire times of *cron_time* in ``[start_at, stop_at)``.

    Points closer than *resolution* seconds to the previously yielded point
    are skipped, so callers can cap the scheduling frequency.

    :param cron_time: crontab expression string.
    :param start_at: datetime lower bound (inclusive).
    :param stop_at: datetime upper bound (exclusive).
    :param resolution: minimum gap in seconds between yielded points.
    """
    min_gap = datetime.timedelta(seconds=resolution)
    last_emitted = start_at - datetime.timedelta(days=1)
    cron_iter = croniter.croniter(cron_time, start_at - datetime.timedelta(seconds=1))

    while True:
        eta = cron_iter.get_next(datetime.datetime)
        # Never cross the upper time boundary.
        if eta >= stop_at:
            return
        # Skip points before the window, and points too close to the last one.
        if eta < start_at or eta - last_emitted < min_gap:
            continue
        yield eta
        last_emitted = eta


# Each time a user (re)configures a schedule, the configuration time is
# recorded and serves as the start_time reference.
# start_time marks when the most recent cron configuration took effect; all
# processing and checking of history happens after start_time.
# After the worker process that generates runs recovers from a crash, the
# runs' time-variable templates are regenerated from that point.
# Only runs before the 'created' state are of interest here.
# Users may repeatedly toggle cron scheduling on/off or change its period.
# A manual run and a cron run of the same pipeline do not conflict.

# Generate the per-fire-time run configs for cron-scheduled pipelines.
@celery_app.task(name='task.make_timerun_config', bind=True)
def make_timerun_config(task):
    """For every pipeline with ``schedule_type == 'crontab'``: compute the cron
    fire times between the last recorded run (or the cron config time) and
    ``now + 5min``, create a ``RunHistory`` row in state ``comed`` for each
    missing fire time, remove accidental duplicate records, and finally call
    ``upload_timerun`` to submit whatever is due.

    :param task: bound celery task instance (unused).
    """
    print('============= begin make timerun config')
    # First create all run records that should exist; some may be due right away.
    with session_scope() as dbsession:
        try:
            resolution = (
                conf.get('PIPELINE_TASK_CRON_RESOLUTION', 0) * 60
            )  # minimum interval between fire times; config value is in minutes (e.g. 15)

            pipelines = (
                dbsession.query(Pipeline).filter(Pipeline.schedule_type == 'crontab').all()
            )  # all cron-scheduled pipelines
            for pipeline in pipelines:  # process each schedule in turn
                if pipeline.cronjob_start_time:
                    start_at = datetime.datetime.strptime(
                        pipeline.cronjob_start_time, '%Y-%m-%d %H:%M:%S'
                    )
                else:
                    start_at = datetime.datetime.now()

                # Narrow the window: everything before the latest RunHistory
                # record is assumed to have been generated already.
                last_run = (
                    dbsession.query(RunHistory)
                    .filter(RunHistory.pipeline_id == pipeline.id)
                    .order_by(RunHistory.id.desc())
                    .first()
                )
                if last_run:
                    last_execution_date = datetime.datetime.strptime(
                        last_run.execution_date, '%Y-%m-%d %H:%M:%S'
                    )
                    if last_execution_date > start_at:
                        start_at = last_execution_date

                stop_at = datetime.datetime.now() + datetime.timedelta(
                    seconds=300
                )  # next scheduling point: this task runs every 5 minutes, so
                # everything due before then is created now or submitted late

                # Compute each fire time between start_at and stop_at while
                # keeping the minimum gap no smaller than `resolution`.
                try:
                    for eta in next_schedules(
                        pipeline.cron_time, start_at, stop_at, resolution=resolution
                    ):  #
                        execution_date = eta.strftime('%Y-%m-%d %H:%M:%S')
                        # NOTE(review): cronjob_start_time may be None/'' here
                        # (start_at fell back to now()); the `>` comparison
                        # would then raise and be swallowed below — confirm.
                        if execution_date > pipeline.cronjob_start_time:
                            # Guard against inserting a duplicate record.
                            exist_timeruns = (
                                dbsession.query(RunHistory)
                                .filter(RunHistory.pipeline_id == pipeline.id)
                                .filter(RunHistory.execution_date == execution_date)
                                .all()
                            )
                            if not exist_timeruns:
                                pipeline_file = dag_to_pipeline(
                                    pipeline=pipeline,
                                    dbsession=dbsession,
                                    execution_date=execution_date,
                                )  # render the workflow definition
                                if pipeline_file:
                                    schedule_history = RunHistory(
                                        created_on=datetime.datetime.now(),
                                        pipeline_id=pipeline.id,
                                        pipeline_argo_id='',
                                        pipeline_file=pipeline_file,
                                        version_id='',
                                        run_id='',
                                        message='',
                                        status='comed',
                                        execution_date=execution_date,
                                    )
                                    dbsession.add(schedule_history)
                                    dbsession.commit()
                                else:
                                    push_message(
                                        conf.get('ADMIN_USER').split(','),
                                        'pipeline %s make config fail' % pipeline.name,
                                    )
                            if len(exist_timeruns) > 1:
                                # Keep the first record, delete the duplicates,
                                # and alert the admins.
                                for i in range(1, len(exist_timeruns)):
                                    exist_timerun = exist_timeruns[i]
                                    dbsession.delete(exist_timerun)
                                    dbsession.commit()
                                push_message(
                                    conf.get('ADMIN_USER').split(','),
                                    '发现%s 任务流在 %s 时刻存在多个定时记录'
                                    % (pipeline.name, execution_date),
                                )

                    # Regardless of how generation went, uploading always runs;
                    # it may submit previously-unsubmitted runs. Anything due
                    # within the next 5 minutes is submitted with a delay.
                    # upload_timerun(pipeline,stop_at)
                except Exception as e:
                    print(e)

                upload_timerun(
                    pipeline_id=pipeline.id, stop_time=stop_at.strftime('%Y-%m-%d %H:%M:%S')
                )

        except Exception as e:
            print(e)


# Work out which timeruns are ready to be uploaded/submitted.
# @pysnooper.snoop()
def upload_timerun(pipeline_id, stop_time):
    """Submit the pending ('comed') scheduled runs of a pipeline.

    Three submission strategies, chosen from the pipeline's configuration:
      * depends_on_past: only the earliest pending run is considered, and only
        after the previous scheduled run has finished successfully;
      * expired_limit: keep only the newest N scheduled instances — delete the
        workflows of expired ones, then submit any kept instance still pending;
      * otherwise: submit pending runs concurrently up to max_active_runs.

    Args:
        pipeline_id: primary key of the Pipeline to process.
        stop_time: 'YYYY-MM-DD HH:MM:SS' string; only runs scheduled at or
            before this moment are candidates.

    Best effort: all errors are printed, never raised.
    """
    # print('============= begin upload timerun')

    with session_scope() as dbsession:
        try:
            pipeline = dbsession.query(Pipeline).filter(Pipeline.id == int(pipeline_id)).first()
            start_time = pipeline.cronjob_start_time
            # Only this pipeline's not-yet-processed runs matter here.
            timeruns = []
            if start_time:
                timeruns = (
                    dbsession.query(RunHistory)
                    .filter(RunHistory.pipeline_id == pipeline.id)
                    .filter(RunHistory.execution_date > start_time)
                    .filter(RunHistory.execution_date <= stop_time)
                    .filter(RunHistory.status == 'comed')
                    .order_by(RunHistory.execution_date.desc())
                    .all()
                )

            if timeruns:
                # When runs depend on past history, only check whether the
                # earliest pending timerun may start.
                if pipeline.depends_on_past:
                    timerun = timeruns[-1]  # ordered desc, so [-1] is the earliest pending run

                    kwargs = {'timerun_id': timerun.id, 'pipeline_id': pipeline_id}
                    # The scheduled timerun immediately before this one.
                    pass_run = (
                        dbsession.query(RunHistory)
                        .filter(RunHistory.pipeline_id == pipeline.id)
                        .filter(RunHistory.execution_date > start_time)
                        .filter(RunHistory.execution_date < timerun.execution_date)
                        .order_by(RunHistory.execution_date.desc())
                        .first()
                    )
                    if not pass_run:
                        upload_workflow.apply_async(kwargs=kwargs, expires=120, retry=False)
                    elif pass_run.status == 'created':
                        # Handle a broken watch component / argo controller, and
                        # the case where the workflow record was deleted by hand
                        # in the workflow UI.
                        workflow = (
                            dbsession.query(Workflow)
                            .filter(Workflow.labels.contains(pass_run.run_id))
                            .first()
                        )
                        if workflow:
                            if workflow.status == 'Deleted' or workflow.status == 'Succeeded':
                                print('pass workflow success finish')
                                upload_workflow.apply_async(
                                    kwargs=kwargs, expires=120, retry=False
                                )

                        else:
                            # No local record: query the cluster directly to see
                            # whether the workflow actually exists there.
                            crds = pipeline.get_workflow()
                            for crd in crds:
                                if pass_run.run_id in crd['labels']:
                                    # Re-create the missing Workflow record locally.
                                    workflow = Workflow(
                                        name=crd['name'],
                                        namespace=crd['namespace'],
                                        create_time=crd['create_time'],
                                        status=crd['status'],
                                        annotations=crd['annotations'],
                                        labels=crd['labels'],
                                        spec=crd['spec'],
                                        status_more=crd['status_more'],
                                        username=pipeline.created_by.username,
                                    )
                                    dbsession.add(workflow)
                                    dbsession.commit()

                                    label = json.loads(crd['labels'])
                                    if (
                                        crd['status'] == 'Succeeded'
                                        and label.get('pipeline/runid', '') == pass_run.run_id
                                    ):
                                        print('pass workflow success finish')
                                        upload_workflow.apply_async(
                                            kwargs=kwargs, expires=120, retry=False
                                        )
                # Keep only the newest n instances (time desc); delete older ones.
                elif pipeline.expired_limit:
                    # The newest n scheduled instances (any status, not just 'comed').
                    timeruns = (
                        dbsession.query(RunHistory)
                        .filter(RunHistory.pipeline_id == pipeline.id)
                        .filter(RunHistory.execution_date > start_time)
                        .filter(RunHistory.execution_date <= stop_time)
                        .order_by(RunHistory.execution_date.desc())
                        .limit(pipeline.expired_limit)
                    )

                    latest_run_ids = [timerun.run_id for timerun in timeruns]  # instances allowed to keep running

                    # Delete running workflows that belong to expired instances.
                    exist_workflows = pipeline.get_workflow()
                    for exist_workflow in exist_workflows:
                        argo_run_id = json.loads(exist_workflow['labels']).get(
                            'pipeline/runid', ''
                        )
                        run_id = json.loads(exist_workflow['labels']).get('run-id', '')
                        if argo_run_id and run_id:
                            pass_run = (
                                dbsession.query(RunHistory)
                                .filter(RunHistory.pipeline_id == pipeline.id)
                                .filter(RunHistory.execution_date > start_time)
                                .filter(RunHistory.run_id == argo_run_id)
                                .first()
                            )
                            # Only delete instances launched by the cron
                            # schedule that have expired.
                            if pass_run and argo_run_id not in latest_run_ids:
                                k8s_client = K8s(pipeline.project.cluster.get('KUBECONFIG', ''))
                                k8s_client.delete_workflow(
                                    all_crd_info=conf.get('CRD_INFO', {}),
                                    namespace='pipeline',
                                    run_id=run_id,
                                )
                                workflow = (
                                    dbsession.query(Workflow)
                                    .filter(Workflow.labels.contains(run_id))
                                    .first()
                                )
                                # BUGFIX: the local record may already be gone;
                                # guard before touching its status, otherwise an
                                # AttributeError aborts the whole cleanup loop.
                                if workflow:
                                    workflow.status = 'Deleted'
                                    dbsession.commit()

                    # Submit any kept instance that has not run yet.
                    for timerun in timeruns:
                        if timerun.status == 'comed':
                            kwargs = {'timerun_id': timerun.id, 'pipeline_id': pipeline_id}
                            upload_workflow.apply_async(kwargs=kwargs, expires=120, retry=False)

                # Run concurrently in chronological order.
                else:
                    # Compare currently-active workflows to the concurrency limit.
                    running_workflows = pipeline.get_workflow()
                    running_workflows = [
                        running_workflow
                        for running_workflow in running_workflows
                        if running_workflow['status'] == 'Running'
                        or running_workflow['status'] == 'Created'
                        or running_workflow['status'] == 'Pending'
                    ]
                    if len(running_workflows) < pipeline.max_active_runs:
                        more_run_num = pipeline.max_active_runs - len(running_workflows)
                        for i in range(more_run_num):
                            if len(timeruns) > i:
                                timerun = timeruns[-i - 1]

                                kwargs = {'timerun_id': timerun.id, 'pipeline_id': pipeline_id}

                                upload_workflow.apply_async(
                                    kwargs=kwargs, expires=120, retry=False
                                )

        except Exception as e:
            print(e)


# 真正去做上传动作。
@celery_app.task(name='task.upload_workflow', bind=True)
def upload_workflow(task, timerun_id, pipeline_id):
    """Celery task: upload a scheduled run's pipeline file and start it via kfp.

    On success the RunHistory row gets the argo pipeline/version/run ids and
    its status flips to 'created'; on failure the admins are notified.
    Duplicate submissions (status already 'created') are rejected up front.

    Args:
        task: bound celery task instance (unused in the body).
        timerun_id: RunHistory primary key of the scheduled run to submit.
        pipeline_id: Pipeline primary key the run belongs to.
    """
    with session_scope() as dbsession:
        try:
            pipeline = dbsession.query(Pipeline).filter(Pipeline.id == int(pipeline_id)).first()
            timerun = dbsession.query(RunHistory).filter(RunHistory.id == int(timerun_id)).first()
            # To backfill a run manually, set this status back to 'comed' by hand.
            if timerun.status == 'created':
                print('timerun %s has upload' % timerun_id)
                push_message(
                    conf.get('ADMIN_USER').split(','),
                    '阻止重复提交 timerun %s, pipeline %s, exec time %s'
                    % (timerun.id, pipeline.name, timerun.execution_date),
                )
                return

            print('begin upload workflow %s %s' % (pipeline.name, datetime.datetime.now()))
            # print('read pipeline file %s' % timerun.pipeline_file)
            # return
            print('begin upload and run pipeline %s' % pipeline.name)

            pipeline_argo_id, version_id, run_id = run_pipeline(
                pipeline_file=timerun.pipeline_file,
                pipeline_name=pipeline.name,
                kfp_host=pipeline.project.cluster.get('KFP_HOST'),
                pipeline_argo_id=timerun.pipeline_argo_id,
                pipeline_argo_version_id=timerun.version_id,
            )
            print(
                'success upload and run pipeline %s,pipeline_argo_id %s, version_id %s,run_id %s '
                % (pipeline.name, pipeline_argo_id, version_id, run_id)
            )
            if pipeline_argo_id and version_id and run_id:
                timerun.pipeline_argo_id = pipeline_argo_id
                timerun.version_id = version_id
                timerun.run_id = run_id  # run id generated by kfp
                timerun.status = 'created'

                dbsession.commit()  # persist the update
                deliver_message(pipeline)  # does no transactional work
            else:
                push_message(
                    conf.get('ADMIN_USER').split(','),
                    'crontab pipeline %s exec time %s upload fail'
                    % (pipeline.name, timerun.execution_date),
                )

        except Exception as e:
            print('kubeflow cronjob run pipeline error:', e)
            try:
                deliver_message(pipeline, 'kubeflow cronjob run pipeline error:' + str(e))
            except Exception as e2:
                print(e2)


def delDir(dir, iteration=False, days=10):
    """Delete files under *dir* last modified more than *days* days ago.

    Args:
        dir: directory to clean.
        iteration: when True, recurse into sub-directories and remove any
            sub-directory left empty after cleaning.
        days: age threshold in days. Defaults to 10, matching the previously
            hard-coded cutoff (the old comments claiming 30/7 days were wrong).
    """
    # Files whose mtime is earlier than this timestamp are expired.
    # Comparing numeric timestamps replaces the old (fragile) comparison of
    # '%Y-%m-%d %H:%M:%S' strings, with identical semantics.
    cutoff = time.time() - days * 24 * 60 * 60
    for file in os.listdir(dir):
        filePath = os.path.join(dir, file)
        if os.path.isfile(filePath):
            mtime = os.stat(filePath).st_mtime  # last-modified timestamp
            if mtime < cutoff:
                filetime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(mtime))
                print(filePath + ' was removed!', filetime)
                os.remove(filePath)
        elif os.path.isdir(filePath):
            if iteration:
                # Recurse first, then drop the sub-directory if it ended up empty.
                delDir(filePath, iteration, days)
                if not os.listdir(filePath):
                    os.rmdir(filePath)
                    print(filePath + ' was removed!')


# Purge expired junk data from configured directories
@celery_app.task(name='task.delete_old_data', bind=True)
def delete_old_data(task):
    """Walk every directory listed in DELETE_OLD_DATA and delete stale files."""
    for target in conf.get('DELETE_OLD_DATA', []):
        print('delete dir', target)
        if not os.path.exists(target):
            continue
        delDir(target, iteration=True)
        print('delete dir finish', target)
        # Brief pause between directories to avoid an I/O burst.
        time.sleep(10)


# Fetch dataset info from the 37-degree annotation platform
@celery_app.task(name='task.get_out_annotation_info', bind=True)
def get_out_annotation_info(task):
    """Celery task: pull recently finished annotation tasks and register them.

    Queries the external annotation platform for tasks finished within the
    configured interval, then for each previously unseen annotation task
    inserts one DatasetV2 row plus one Dataset version row ('v1', processing).
    Errors are logged, never raised.
    """
    task_id = uuid.uuid4()
    current_time = datetime.datetime.now()
    # Pull interval (minutes) comes from an environment variable, default 30.
    ten_minutes_ago = current_time - datetime.timedelta(
        minutes=int(os.getenv('GET_OUT_ANNOTATION_INTERVAL', 30))
    )
    ten_minutes_ago_str = ten_minutes_ago.strftime('%Y-%m-%d %H:%M:%S')
    current_time_str = current_time.strftime('%Y-%m-%d %H:%M:%S')
    logging.info(f'{task_id} get_out_annotation_info begin work {ten_minutes_ago_str}')
    with session_scope() as dbsession:
        try:
            user_datasets = get_annotation_info(
                task_id,
                [],
                ten_minutes_ago_str,
                current_time_str,
            )
            if user_datasets is None or len(user_datasets) == 0:
                logging.info(f'{task_id} get_out_annotation_info 无有效用户')
                return
            logging.info(f'{task_id} get_out_annotation_info 有效用户数量:{len(user_datasets)}')
            for user_dataset in user_datasets:
                dataset = user_dataset.get('dataset')
                user_id = user_dataset.get('user_id')
                user = dbsession.query(MyUser.username).filter(MyUser.id == int(user_id)).first()
                if user is None:
                    continue
                logging.info(f'{task_id} get_out_annotation_info 开始处理{user_id}用户数据')
                for ds in dataset:
                    annotation_id = ds.get('annotation_id')
                    annotation_task_id = ds.get('annotation_task_id')
                    # Skip annotation tasks already imported previously.
                    datasets = (
                        dbsession.query(
                            Dataset.dataset_id,
                            Dataset.annotation_id,
                            Dataset.annotation_task_id,
                        )
                        .filter(
                            Dataset.annotation_id == annotation_id,
                            Dataset.annotation_task_id == annotation_task_id,
                        )
                        .first()
                    )
                    if datasets is not None:
                        logging.info(
                            f'{task_id} get_out_annotation_info 处理{user_id}用户数据 annotation_task_id:{annotation_task_id} 数据已存在'
                        )
                        continue
                    logging.info(
                        f'{task_id} get_out_annotation_info 处理{user_id}用户数据 an_id:{annotation_id},an_task_id:{annotation_task_id}'
                    )
                    # Not seen before: insert into both dataset_v2 and dataset.
                    # NOTE: rebinding `dataset` here does not break the ongoing
                    # `for ds in dataset` loop — the iterator was captured at
                    # loop start — but it is confusing; flagged for review.
                    dataset = DatasetV2()
                    dataset.name = ds.get('name') + '_' + ds.get('file_name')
                    dataset.data_type = 'txt'
                    # Template 308 marks pretrain data; anything else is multi-turn QA.
                    dataset.label_type = (
                        'Text Generation Pretrain'
                        if ds.get('template_id') == 308
                        else 'Multiple Rounds of Text Question Answer'
                    )
                    dataset.source = 'base_annotation'
                    dataset.region = 'default'
                    dataset.created_on = ds.get('annotation_finish_time')
                    dataset.changed_on = ds.get('annotation_finish_time')
                    dataset.created_by_fk = user_dataset.get('user_id')
                    dataset.changed_by_fk = user_dataset.get('user_id')
                    dbsession.add(dataset)
                    dbsession.flush()
                    dbsession.commit()

                    # First version row for the new dataset, pending download.
                    dataset_version = Dataset()
                    dataset_version.dataset_id = dataset.id
                    dataset_version.region = 'default'
                    dataset_version.origin_path = ds.get('name')
                    dataset_version.data_type = 'txt'
                    dataset_version.label_type = (
                        'Text Generation Pretrain'
                        if ds.get('template_id') == 308
                        else 'Multiple Rounds of Text Question Answer'
                    )
                    dataset_version.source = 'base_annotation'
                    dataset_version.name = 'v1'
                    dataset_version.describe = ''
                    dataset_version.entries_num = ds.get('mark_total')
                    dataset_version.status = EnumDatasetStatus.processing.value
                    dataset_version.pub_status = EnumDatasetPublishStatus.unpublished.value
                    dataset_version.progress = 0
                    dataset_version.annotation_id = annotation_id
                    dataset_version.annotation_task_id = ds.get('annotation_task_id')
                    dataset_version.labeled = 1
                    dbsession.add(dataset_version)
                    dbsession.flush()
                    # Path needs the flushed id, so it is set after flush().
                    dataset_version.dataset_path = (
                        user.username + '/dataset/' + str(dataset_version.id) + '/'
                    )
                    dbsession.commit()
        except Exception as e:
            logging.error(
                f'{task_id} get_out_annotation_info cronjob run error:{repr(e)}', exc_info=True
            )
    end_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    logging.info(f'{task_id} get_out_annotation_info end work {end_time}')


# Download dataset files from the 37-degree annotation platform
@celery_app.task(name='task.download_out_annotation_file', bind=True)
def download_out_annotation_file(task):
    """Celery task: download annotation files for datasets still 'processing'.

    For every base_annotation Dataset row in processing state, try to download
    its annotation file; mark the row succeed (progress 100, labeled) or
    failed accordingly. Errors are logged, never raised.
    """
    task_id = uuid.uuid4()
    begin_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    logging.info(f'{task_id} download_out_annotation_file begin work {begin_time}')
    with session_scope() as dbsession:
        try:
            datasets = (
                dbsession.query(Dataset.id, Dataset.annotation_task_id, Dataset.dataset_path)
                .filter(
                    Dataset.status == EnumDatasetStatus.processing.value,
                    Dataset.source == 'base_annotation',
                )
                .all()
            )
            logging.info(f'{task_id} 本次需下载的记录数：' + str(len(datasets)))
            for ds in datasets:
                # Download the annotation file for this dataset version.
                res = download_annotation_file(task_id, ds.annotation_task_id, ds.dataset_path)
                logging.info(f'{task_id} 标注任务id:{ds.annotation_task_id} ,下载文件结果：{res}')
                if res == 1:
                    # Download succeeded: mark finished and fully labeled.
                    dbsession.query(Dataset).filter(Dataset.id == ds.id).update(
                        {'status': EnumDatasetStatus.succeed.value, 'progress': 100, 'labeled': 1}
                    )
                else:
                    # Download failed.
                    dbsession.query(Dataset).filter(Dataset.id == ds.id).update(
                        {'status': EnumDatasetStatus.failed.value}
                    )
                dbsession.commit()
            # BUGFIX: this was previously logged inside the loop, once per record.
            logging.info(f'{task_id} download_out_annotation_file work finished')
        except Exception as e:
            logging.error(
                f'{task_id} download_out_annotation_file cronjob run error:{repr(e)}',
                exc_info=True,
            )
    end_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    # BUGFIX: the end-of-work log wrongly referenced get_out_annotation_info.
    logging.info(f'{task_id} download_out_annotation_file end work {end_time}')


# Compute a workflow's training duration
# @pysnooper.snoop()
def get_run_time(workflow):
    """Return the wall-clock duration of *workflow* in hours, rounded to 2 dp.

    Reads 'startedAt'/'finishedAt' ('YYYY-MM-DDTHH:MM:SSZ') from the
    workflow's status_more JSON string. A missing or unparseable timestamp
    falls back to datetime.datetime.now() so the caller always gets a number
    (best effort; such durations are approximate).
    """
    # Parse the status JSON once instead of twice.
    status_more = json.loads(workflow.status_more)
    start_time = status_more.get('startedAt', '')
    finish_time = status_more.get('finishedAt', '')
    try:
        start_time = datetime.datetime.strptime(
            start_time.replace('T', ' ').replace('Z', ''), '%Y-%m-%d %H:%M:%S'
        )
    except Exception as e:
        print(e)
        start_time = datetime.datetime.now()

    try:
        finish_time = datetime.datetime.strptime(
            finish_time.replace('T', ' ').replace('Z', ''), '%Y-%m-%d %H:%M:%S'
        )
    except Exception as e:
        print(e)
        finish_time = datetime.datetime.now()

    return round(
        (finish_time - start_time).days * 24 + (finish_time - start_time).seconds / 60 / 60, 2
    )


# Check pipeline run durations
# @pysnooper.snoop()
def check_pipeline_time():
    """Summarise run durations of pipelines active today and notify admins.

    For each pipeline that has a Running/Succeeded workflow created today,
    collect the durations (hours) of its last 10 workflows and push one
    combined summary via push_admin. Best effort: errors are printed,
    never raised.
    """
    with session_scope() as dbsession:
        try:
            monitoring_workflow = {}
            today_workflows = (
                dbsession.query(Workflow)
                .filter(or_(Workflow.status == 'Running', Workflow.status == 'Succeeded'))
                .filter(Workflow.create_time > datetime.datetime.now().strftime('%Y-%m-%d'))
                .all()
            )  # today's Running/Succeeded workflow records
            for today_workflow in today_workflows:
                # One entry per pipeline; the first workflow seen wins.
                pipeline_id = json.loads(today_workflow.labels).get('pipeline-id', '')
                if pipeline_id and pipeline_id not in monitoring_workflow:
                    pipeline = (
                        dbsession.query(Pipeline).filter(Pipeline.id == int(pipeline_id)).first()
                    )  # owning pipeline record
                    monitoring_workflow[pipeline_id] = {
                        'time': [],
                        'status': today_workflow.status,
                        'user': today_workflow.username,
                        'pipeline': pipeline.describe if pipeline else '未知',
                    }
                    old_workflows = (
                        dbsession.query(Workflow)
                        .filter(Workflow.labels.contains('"pipeline-id": "%s"' % pipeline_id))
                        .order_by(Workflow.id.desc())
                        .limit(10)
                        .all()
                    )  # last 10 workflow records of this pipeline
                    for old_workflow in old_workflows:
                        run_time = get_run_time(old_workflow)
                        # print(old_workflow.name)
                        monitoring_workflow[pipeline_id]['time'].append(run_time)
            message = ''
            for pipeline_id in monitoring_workflow:
                work = monitoring_workflow[pipeline_id]
                message += (
                    '\npipeline:%s' % work['pipeline']
                    + '\nuser:%s' % work['user']
                    + '\nstatus:%s' % work['status']
                    + '\n每次训练耗时(h):%s' % work['time']
                    + '\n'
                )

            print(message)
            if message:
                push_admin(message)

        except Exception as e:
            print(e)

# Check resource usage of pipeline runs
# @pysnooper.snoop()
def check_pipeline_resource():
    """Report tfjob resource usage for today's succeeded pipeline workflows.

    For each pipeline with a succeeded workflow created today, read the
    monitoring samples recorded on its tasks (plain task and tfjob cpu/memory),
    compare them against the configured limits parsed from the task args, and
    push a per-pipeline summary to the admins and the owning user.
    Best effort: errors are printed, never raised.
    """
    with session_scope() as dbsession:
        try:
            monitoring_workflow = {}
            today_workflows = (
                dbsession.query(Workflow)
                .filter(Workflow.status == 'Succeeded')
                .filter(Workflow.create_time > datetime.datetime.now().strftime('%Y-%m-%d'))
                .all()
            )  # today's succeeded workflow records

            for today_workflow in today_workflows:
                # One entry per pipeline; the first workflow seen wins.
                pipeline_id = json.loads(today_workflow.labels).get('pipeline-id', '')
                if pipeline_id and pipeline_id not in monitoring_workflow:
                    pipeline = (
                        dbsession.query(Pipeline).filter(Pipeline.id == int(pipeline_id)).first()
                    )  # owning pipeline record
                    monitoring_workflow[pipeline_id] = {
                        'user': today_workflow.username,
                        'pipeline': pipeline.describe if pipeline else '未知',
                        'task': {},
                    }
                    tasks = (
                        dbsession.query(Task).filter(Task.pipeline_id == int(pipeline_id)).all()
                    )  # tasks of this pipeline
                    for task in tasks:
                        try:
                            task_resources = json.loads(task.monitoring).get('task', [])
                            tfjob_resources = json.loads(task.monitoring).get('tfjob', [])
                            monitoring_workflow[pipeline_id]['task'][task.label] = {}
                            if task_resources:
                                monitoring_workflow[pipeline_id]['task'][task.label].update(
                                    {
                                        'cpu': [
                                            task_resource['cpu']
                                            for task_resource in task_resources
                                        ],
                                        'memory': [
                                            task_resource['memory']
                                            for task_resource in task_resources
                                        ],
                                        'cpu限制': task.resource_cpu,
                                        'memory限制': task.resource_memory,
                                    }
                                )
                            if tfjob_resources:
                                # The limits are scraped out of the raw task args
                                # JSON text with regexes (no full parse).
                                monitoring_workflow[pipeline_id]['task'][task.label].update(
                                    {
                                        'tfjob_cpu': [
                                            tfjob_resource['cpu']
                                            for tfjob_resource in tfjob_resources
                                        ],
                                        'tfjob_memory': [
                                            tfjob_resource['memory']
                                            for tfjob_resource in tfjob_resources
                                        ],
                                        'tfjob_cpu限制': re.findall('"cpu":.*', task.args)[0]
                                        .replace('"cpu":', '')
                                        .replace('"', '')
                                        .replace(',', '')
                                        .replace(' ', ''),
                                        'tfjob_memory限制': re.findall('"memory":.*', task.args)[0]
                                        .replace('"memory":', '')
                                        .replace('"', '')
                                        .replace(',', '')
                                        .replace(' ', ''),
                                    }
                                )
                        except Exception as e:
                            print(e)

            for pipeline_id in monitoring_workflow:
                message = ''
                work = monitoring_workflow[pipeline_id]
                # FIX: use the module-level `copy` import instead of re-importing
                # it inside the loop. Deep-copy so we can prune empty task
                # entries while iterating.
                work1 = copy.deepcopy(work)
                for key in work1['task']:
                    if not work1['task'][key]:
                        del work['task'][key]

                if work['task']:
                    message += '\npipeline: %s' % work['pipeline'] + '\nuser:%s' % work['user']
                    for task_name in work['task']:
                        message += '\ntask: ' + task_name + '，tfjob资源使用率:'
                        message += '\n使用cpu: ' + str(work['task'][task_name]['tfjob_cpu'])
                        message += '\n使用mem: ' + str(work['task'][task_name]['tfjob_memory'])
                        message += '\n限制cpu: ' + str(work['task'][task_name]['tfjob_cpu限制'])
                        message += '\n限制mem: ' + str(work['task'][task_name]['tfjob_memory限制'])
                        message += '\n\n自行增加tfjob资源配置或worker数目'
                    print(message)
                    if message:
                        # push_message(conf.get('ADMIN_USER','').split(','),message)
                        push_message(conf.get('ADMIN_USER').split(','), message)
                        push_message([work['user']], message)

        except Exception as e:
            print(e)


@celery_app.task(name='task.check_pipeline_run', bind=True)
def check_pipeline_run(task):
    """Periodic check: report pipeline run durations, then resource usage."""
    for check in (check_pipeline_time, check_pipeline_resource):
        check()


# FIX: removed the active @pysnooper.snoop() debug-tracing decorator that was
# left enabled on this production helper.
def get_dir_size(dir):
    """Return {entry_name: size_in_GB} for the immediate entries of *dir*.

    Sizes are parsed from `ls -lh` output: the human-readable suffix
    (K/M/G/T) is normalised to KB, then converted to GB rounded to 2
    decimals. Suffix-less sizes (plain byte counts) are treated as KB,
    matching the original behaviour. Returns an empty dict when *dir* is
    not a directory or parsing fails (errors are printed, never raised).
    """
    dir_size = {}
    # Multipliers converting each ls -h suffix to KB.
    units = (('K', 1), ('M', 1024), ('G', 1024 * 1024), ('T', 1024 * 1024 * 1024))
    try:
        if os.path.isdir(dir):
            result = subprocess.getoutput('ls -lh %s' % dir)
            for row in result.split('\n'):
                row = [item for item in row.split(' ') if item]
                # A regular `ls -l` entry has 9 fields; this also skips the
                # leading 'total' line and blanks.
                if len(row) == 9:
                    size, file_name = row[4], row[8]
                    for suffix, mult in units:
                        if suffix in size:
                            size = float(size.replace(suffix, '')) * mult
                            break
                    # size is now in KB; convert to GB.
                    dir_size[file_name] = round(float(size) / 1024 / 1024, 2)
    except Exception as e:
        print(e)

    print(dir_size)
    return dir_size


@celery_app.task(name='task.push_workspace_size', bind=True)
def push_workspace_size(task):
    """Measure the workspace dirs in CHECK_WORKSPACE_SIZE and warn heavy users."""
    for path in conf.get('CHECK_WORKSPACE_SIZE', []):
        message = '\n目录%s,目录大小前10名:\n' % path[path.rindex('/') + 1 :]
        print('get size dir', path)
        # Entries sorted by size (GB), largest first.
        sorted_sizes = sorted(get_dir_size(path).items(), key=lambda item: item[1], reverse=True)
        for name, gb in sorted_sizes[:10]:
            message += str(name) + ':' + str(gb) + 'G\n'

        # push_admin(message)

        warn_text = (
            '%s 检测到您的工作目录当前占用磁盘大小为%sG。'
            '目前每个用户工作目录上限为2500G，超出后部分功能可能受限，'
            '请及时进入个人notebook清理旧数据'
        )
        for user, raw_size in sorted_sizes:
            size = float(raw_size)
            if size > 2500:  # warn once usage exceeds the 2500G quota
                try:
                    push_message([user], warn_text % (user, str(size)))
                    push_admin(warn_text % (user, str(size)))

                except Exception as e:
                    print(e)


@celery_app.task(name='task.watch_gpu', bind=True)
def watch_gpu(task):
    """Summarise GPU usage per cluster and notify the admin users."""
    for cluster_name, cluster in conf.get('CLUSTERS', {}).items():
        k8s_client = K8s(cluster.get('KUBECONFIG', ''))

        all_gpu_pods = k8s_client.get_uesd_gpu(
            namespaces=['pipeline', 'automl', 'jupyter', 'service']
        )
        print(all_gpu_pods)

        # One line per GPU pod: namespace,user,name,gpu_count
        lines = []
        used_gpu = 0
        for pod in all_gpu_pods:
            used_gpu += pod['gpu']
            lines.append(
                pod['namespace'] + ',' + pod['user'] + ',' + pod['name'] + ',' + str(pod['gpu']) + '\n'
            )
        message = ''.join(lines)
        print(message)
        message += '%s集群共已使用%s张卡' % (cluster_name, int(used_gpu))
        push_message(conf.get('ADMIN_USER', '').split(','), message)
        # push_admin("%s集群共已使用%s张卡"%(cluster_name,int(used_gpu)))


# @celery_app.task(name="task.share_public", bind=True)
# @pysnooper.snoop()
# def share_public(task):
#     pass


# Scheme for balancing resources between project groups: a single machine may
# not be fully occupied by one project group, so it may run tasks from several.
@celery_app.task(name='task.adjust_node_resource', bind=True)
def adjust_node_resource(task):
    """Rebalance shareable train nodes between project groups (orgs).

    For every configured cluster:
      1. Collect shareable train nodes (labels share=true, train=true, and
         cpu=true or gpu=true) and sum the cpu/gpu/memory requested on each.
      2. Aggregate requests per org and locate the orgs with the highest and
         lowest cpu/gpu request ratios (an org needs more than 2 machines of
         that kind to participate).
      3. If a pod has been Pending for over 5 minutes due to insufficient
         resources, immediately relabel one machine from the least-loaded
         org to the pod's org and notify admins.
      4. Otherwise keep the gap between the max and min request ratios
         below 20 points by relabelling one machine at a time.
    """
    clusters = conf.get('CLUSTERS', {})
    for cluster_name in clusters:
        cluster = clusters[cluster_name]
        k8s_client = K8s(cluster.get('KUBECONFIG', ''))
        all_node = k8s_client.get_node()
        all_node_json = {}
        pending_pods = {}
        # Resource capacity of every shareable machine, keyed by host ip.
        for node in all_node:  # list -> dict
            ip = node['hostip']
            if (
                node['labels'].get('share', 'true') == 'true'
                and node['labels'].get('train', 'false') == 'true'
            ):  # the machine must allow being shared with other project groups
                if (
                    node['labels'].get('cpu', 'false') == 'true'
                    or node['labels'].get('gpu', 'false') == 'true'
                ):
                    all_node_json[ip] = node
                    all_node_json[ip]['used_memory'] = []
                    all_node_json[ip]['used_cpu'] = []
                    all_node_json[ip]['used_gpu'] = []

        for namespace in ['jupyter', 'pipeline', 'automl', 'service']:
            all_pods = k8s_client.get_pods(namespace=namespace)
            for pod in all_pods:
                if pod['host_ip'] not in all_node_json:
                    continue
                if pod['status'] == 'Running':
                    all_node_json[pod['host_ip']]['used_memory'].append(pod['memory'])
                    all_node_json[pod['host_ip']]['used_cpu'].append(pod['cpu'])
                    all_node_json[pod['host_ip']]['used_gpu'].append(pod['gpu'])
                # A pod pending for more than 5 minutes triggers an immediate
                # resource move plus a notification, since pending is not
                # necessarily caused by a resource shortage.
                if (
                    pod['status'] == 'Pending'
                    and (datetime.datetime.now() - pod['start_time']).seconds > 300
                ):
                    # Distinguish resource starvation from other pending causes
                    # by scanning the pod condition messages.
                    containers = pod['status_more'].get('conditions', [])
                    messages = ','.join(
                        [
                            container['message'] if container['message'] else ''
                            for container in containers
                        ]
                    )

                    if 'insufficient' in messages.lower():
                        pending_pods[pod['name']] = {
                            'namespace': namespace,
                            'cluster': cluster_name,
                            'node_selector': pod['node_selector'],
                        }
                        push_message(
                            conf.get('ADMIN_USER', '').split(','),
                            'cluster %s, namespace %s pod %s 因资源问题 pending'
                            % (cluster_name, namespace, pod['name']),
                        )
                    else:
                        push_message(
                            conf.get('ADMIN_USER', '').split(','),
                            'cluster %s, namespace %s pod %s 因其他问题 pending'
                            % (cluster_name, namespace, pod['name']),
                        )

        for ip in all_node_json:
            all_node_json[ip]['used_memory'] = int(sum(all_node_json[ip]['used_memory']))
            all_node_json[ip]['used_cpu'] = int(sum(all_node_json[ip]['used_cpu']))
            all_node_json[ip]['used_gpu'] = int(sum(all_node_json[ip]['used_gpu']))

        # Per-org request totals; cpu machines and gpu machines are tracked separately.
        all_org_resource = {}
        for ip in all_node_json:
            org = all_node_json[ip]['labels'].get('org', 'public')
            if org not in all_org_resource:
                all_org_resource[org] = {
                    'cpu_node_num': 0,
                    'gpu_node_num': 0,
                    'cpu_req_total': 0,
                    'gpu_req_total': 0,
                    'cpu_allocatable_total': 0,
                    'gpu_allocatable_total': 0,
                }
            if all_node_json[ip]['labels'].get('cpu', 'false') == 'true':
                all_org_resource[org]['cpu_node_num'] += 1
                all_org_resource[org]['cpu_req_total'] += all_node_json[ip]['used_cpu']
                all_org_resource[org]['cpu_allocatable_total'] += all_node_json[ip]['cpu']

            if all_node_json[ip]['labels'].get('gpu', 'false') == 'true':
                all_org_resource[org]['gpu_node_num'] += 1
                all_org_resource[org]['gpu_req_total'] += all_node_json[ip]['used_gpu']
                all_org_resource[org]['gpu_allocatable_total'] += all_node_json[ip]['gpu']

        # Compute the orgs with the highest / lowest request ratios.
        max_cpu_org = max_gpu_org = min_cpu_org = min_gpu_org = 'public'
        max_cpu_per = max_gpu_per = 0
        min_cpu_per = min_gpu_per = 1
        for org in all_org_resource:
            org_resource = all_org_resource[org]
            # An org needs at least 3 machines to participate in rebalancing.
            if org_resource['cpu_node_num'] > 2 and org_resource['cpu_allocatable_total'] > 0:
                if (
                    org_resource['cpu_req_total'] / org_resource['cpu_allocatable_total']
                    > max_cpu_per
                ):
                    max_cpu_per = (
                        org_resource['cpu_req_total'] / org_resource['cpu_allocatable_total']
                    )
                    max_cpu_org = org
                if (
                    org_resource['cpu_req_total'] / org_resource['cpu_allocatable_total']
                    < min_cpu_per
                ):
                    min_cpu_per = (
                        org_resource['cpu_req_total'] / org_resource['cpu_allocatable_total']
                    )
                    min_cpu_org = org

            if org_resource['gpu_node_num'] > 2 and org_resource['gpu_allocatable_total'] > 0:
                if (
                    org_resource['gpu_req_total'] / org_resource['gpu_allocatable_total']
                    > max_gpu_per
                ):
                    max_gpu_per = (
                        org_resource['gpu_req_total'] / org_resource['gpu_allocatable_total']
                    )
                    max_gpu_org = org
                if (
                    org_resource['gpu_req_total'] / org_resource['gpu_allocatable_total']
                    < min_gpu_per
                ):
                    min_gpu_per = (
                        org_resource['gpu_req_total'] / org_resource['gpu_allocatable_total']
                    )
                    min_gpu_org = org

        # Per-machine cpu request ratio within one org, sorted ascending.
        def get_cpu_per_node(org, all_node_json_):
            org_node_cpu_per = {}
            for ip in all_node_json_:
                if (
                    all_node_json_[ip]['labels'].get('org', '') == org
                    and all_node_json_[ip]['labels'].get('cpu', 'false') == 'true'
                ):
                    org_node_cpu_per[ip] = (
                        all_node_json_[ip]['used_cpu'] / all_node_json_[ip]['cpu']
                    )

            org_node_cpu_per = sorted(
                org_node_cpu_per.items(), key=lambda x: x[1], reverse=False
            )  # ascending: least-loaded machine first
            return org_node_cpu_per

        # Per-machine gpu request ratio within one org, sorted ascending.
        def get_gpu_per_node(org, all_node_json_):
            org_node_gpu_per = {}
            for ip in all_node_json_:
                if (
                    all_node_json_[ip]['labels'].get('org', '') == org
                    and all_node_json_[ip]['labels'].get('gpu', 'false') == 'true'
                ):
                    org_node_gpu_per[ip] = (
                        all_node_json_[ip]['used_gpu'] / all_node_json_[ip]['gpu']
                    )
            org_node_gpu_per = sorted(
                org_node_gpu_per.items(), key=lambda x: x[1], reverse=False
            )  # ascending: least-loaded machine first
            return org_node_gpu_per

        # If any pod is pending on resources, adjust immediately.
        if pending_pods:
            for pod_name in pending_pods:
                des_org = pending_pods[pod_name]['node_selector'].get('org', 'public')
                # Short of cpu machines.
                if (
                    pending_pods[pod_name]['node_selector'].get('cpu', 'false') == 'true'
                    and des_org != min_cpu_org
                ):
                    # Move the least-loaded cpu machine of the least-loaded org.
                    org_node_cpu_per = get_cpu_per_node(min_cpu_org, all_node_json)
                    print(org_node_cpu_per)
                    adjust_node = [node[0] for node in org_node_cpu_per[:1]]  # one machine per run
                    push_message(
                        conf.get('ADMIN_USER').split(','),
                        '集群 %s 调整项目组 %s 下 cpu机器 %s 到项目组%s'
                        % (cluster_name, min_cpu_org, ','.join(adjust_node), des_org),
                    )
                    k8s_client.label_node(adjust_node, labels={'org': des_org})
                    return

                # Short of gpu machines.
                if (
                    pending_pods[pod_name]['node_selector'].get('gpu', 'false') == 'true'
                    and des_org != min_gpu_org
                ):
                    org_node_gpu_per = get_gpu_per_node(min_gpu_org, all_node_json)
                    print(org_node_gpu_per)
                    adjust_node = [node[0] for node in org_node_gpu_per[:1]]  # one machine per run
                    push_message(
                        conf.get('ADMIN_USER').split(','),
                        '集群 %s 调整项目组 %s 下 gpu机器 %s 到项目组%s'
                        % (cluster_name, min_gpu_org, ','.join(adjust_node), des_org),
                    )
                    k8s_client.label_node(adjust_node, labels={'org': des_org})
                    return

        # No resource-pending pods: keep the max/min request-ratio gap under 20%.
        print(all_org_resource)
        # If the cpu request-ratio gap between the two extreme orgs exceeds 20
        # points, move the least-loaded machine of the least-loaded org over.
        print(max_cpu_org, min_cpu_org, max_gpu_org, min_gpu_org)
        if max_cpu_org != min_cpu_org and max_cpu_per > min_cpu_per + 0.2:
            # BUG FIX: all_node_json was previously not passed here, which
            # raised TypeError (get_cpu_per_node requires two arguments).
            org_node_cpu_per = get_cpu_per_node(min_cpu_org, all_node_json)
            print(org_node_cpu_per)
            adjust_node = [node[0] for node in org_node_cpu_per[:1]]  # one machine per run
            push_message(
                conf.get('ADMIN_USER').split(','),
                '集群 %s 调整项目组 %s 下 cpu机器 %s 到项目组%s'
                % (cluster_name, min_cpu_org, ','.join(adjust_node), max_cpu_org),
            )
            k8s_client.label_node(adjust_node, labels={'org': max_cpu_org})
            return

        # Same rebalancing for the two extreme gpu orgs.
        if max_gpu_org != min_gpu_org and max_gpu_per > min_gpu_per + 0.2:
            # BUG FIX: all_node_json was previously not passed here either.
            org_node_gpu_per = get_gpu_per_node(min_gpu_org, all_node_json)
            print(org_node_gpu_per)
            adjust_node = [node[0] for node in org_node_gpu_per[:1]]  # one machine per run
            push_message(
                conf.get('ADMIN_USER').split(','),
                '集群 %s 调整项目组 %s 下 gpu机器 %s 到项目组%s'
                % (cluster_name, min_gpu_org, ','.join(adjust_node), max_gpu_org),
            )
            k8s_client.label_node(adjust_node, labels={'org': max_gpu_org})
            return


# get_dir_size('/data/k8s/kubeflow/pipeline/workspace')
@pysnooper.snoop()
def get_deployment_node_selector(name, namespace):
    """Return the effective node selector of a deployment as a dict.

    Merges two sources from the deployment's pod template:
      1. required node-affinity match expressions (only 'In'/'Equal'
         operators are considered), and
      2. the plain ``nodeSelector`` field, which takes precedence on
         key conflicts.

    :param name: deployment name
    :param namespace: deployment namespace
    :return: dict mapping node label key -> required value
    """
    from kubernetes import client

    exist_dp = client.AppsV1Api().read_namespaced_deployment(name=name, namespace=namespace)

    node_selector = {}
    try:
        node_affinity = exist_dp.spec.template.spec.affinity.node_affinity
        if node_affinity and node_affinity.required_during_scheduling_ignored_during_execution:
            terms = node_affinity.required_during_scheduling_ignored_during_execution.node_selector_terms  # noqa: E501
            # Only the first term's expressions are honoured.
            match_expressions = [term.match_expressions for term in terms][0]
            for match_expression in match_expressions:
                if match_expression.operator == 'In':
                    node_selector[match_expression.key] = match_expression.values[0]
                if match_expression.operator == 'Equal':
                    # NOTE(review): 'Equal' is not a valid node-affinity operator
                    # (it belongs to tolerations); kept for compatibility, and it
                    # stores the whole values list — confirm this is intended.
                    node_selector[match_expression.key] = match_expression.values

    except Exception as e:
        # Affinity may be absent entirely (attribute chain raises) — that is fine.
        print(e)

    if exist_dp.spec.template.spec.node_selector:
        node_selector.update(exist_dp.spec.template.spec.node_selector)

    print(node_selector)

    # BUG FIX: the function previously ended with `pass` and implicitly
    # returned None; callers (adjust_service_resource) invoke .get() on the
    # result and would crash with AttributeError.
    return node_selector


# Shift compute capacity between services of different priorities.
@celery_app.task(name='task.adjust_service_resource', bind=True)
@pysnooper.snoop(watch_explode=())
def adjust_service_resource(task):
    # Free one GPU for a high-priority (priority == 1) GPU inference service
    # whose HPA wants more replicas than it currently has: pick a priority-0
    # GPU service in the same project that runs above its minimum replica
    # count and scale it down by one, honouring any gpu-type node selector
    # of the high-priority service. Stops after the first successful move.
    # NOTE(review): cluster name is hard-coded to 'tke' — confirm this task
    # is only meant to run against that cluster.
    from kubernetes import client

    cluster_name = 'tke'
    namespace = conf.get('SERVICE_NAMESPACE')
    cluster = conf.get('CLUSTERS', {})[cluster_name]
    with session_scope() as dbsession:
        try:
            k8s_client = K8s(cluster.get('KUBECONFIG', ''))
            hpas = (
                client.AutoscalingV2beta1Api()
                .list_namespaced_horizontal_pod_autoscaler(namespace=namespace)
                .items
            )
            for hpa in hpas:
                inferenceserving = (
                    dbsession.query(InferenceService)
                    .filter_by(name=hpa.metadata.name)
                    .filter_by(model_status='online')
                    .first()
                )
                if not inferenceserving:
                    # Orphan HPA: its service is offline or gone — ask admins to remove it.
                    message = (
                        cluster_name + '：请删除hpa，因' + hpa.metadata.name + '服务下线或者不存在'
                    )
                    push_message(conf.get('ADMIN_USER').split(','), message=message)
                    continue
                else:
                    # Only GPU-backed, priority-1 services are entitled to steal capacity.
                    if (
                        inferenceserving.resource_gpu
                        and inferenceserving.resource_gpu != '0'
                        and inferenceserving.priority == 1
                    ):
                        current_replicas = hpa.status.current_replicas
                        desired_replicas = hpa.status.desired_replicas
                        if desired_replicas > current_replicas:  # HPA wants to scale up
                            pass
                            # If no scale event has happened, or the last one is too
                            # long ago (>400s), shrink a lower-priority service.
                            if (
                                not hpa.status.last_scale_time
                                or datetime.datetime.now().timestamp()
                                - hpa.status.last_scale_time.astimezone(
                                    datetime.timezone(datetime.timedelta(hours=8))
                                ).timestamp()
                                > 400
                            ):
                                push_message(
                                    conf.get('ADMIN_USER').split(','),
                                    '寻找扩服务%s一卡' % (inferenceserving.name,),
                                )
                                # NOTE(review): this must return a dict; verify
                                # get_deployment_node_selector actually returns one.
                                target_node_selector = get_deployment_node_selector(
                                    name=inferenceserving.name, namespace=namespace
                                )

                                # Fetch priority-0 inference services in the same project group.
                                low_inferenceservings = (
                                    dbsession.query(InferenceService)
                                    .filter_by(priority=0)
                                    .filter_by(project_id=inferenceserving.project_id)
                                    .all()
                                )
                                low_inferenceservings.sort(
                                    key=lambda item: item.max_replicas - item.min_replicas
                                )  # NOTE(review): original comment said "descending", but
                                #    sort() without reverse=True is ascending — confirm intent
                                for service in low_inferenceservings:
                                    # Only GPU-backed victims are useful.
                                    if service.resource_gpu and service.resource_gpu != '0':  #
                                        current_replicas = (
                                            client.AppsV1Api()
                                            .read_namespaced_deployment(
                                                name=service.name, namespace=namespace
                                            )
                                            .spec.replicas
                                        )
                                        # Only shrink if above the minimum replica count.
                                        if current_replicas > service.min_replicas:
                                            # No gpu-type constraint: scale down any pod.
                                            if not target_node_selector.get('gpu-type', ''):
                                                client.AppsV1Api().patch_namespaced_deployment_scale(
                                                    service.name,
                                                    namespace,
                                                    [
                                                        {
                                                            'op': 'replace',
                                                            'path': '/spec/replicas',
                                                            'value': current_replicas - 1,
                                                        }
                                                    ],
                                                )
                                                push_message(
                                                    [
                                                        service.created_by.username,
                                                        inferenceserving.created_by.username,
                                                    ]
                                                    + conf.get('ADMIN_USER').split(','),
                                                    '缩服务%s一卡，扩服务%s一卡'
                                                    % (service.name, inferenceserving.name),
                                                )
                                                return
                                            # With a gpu-type constraint: shrink a specific pod.
                                            else:
                                                node_selector = get_deployment_node_selector(
                                                    name=service.name, namespace=namespace
                                                )
                                                target_gpu_type = target_node_selector['gpu-type']
                                                exist_gpu_type = node_selector.get('gpu-type', '')
                                                if (
                                                    exist_gpu_type
                                                    and exist_gpu_type != target_gpu_type
                                                ):
                                                    print('服务gpu卡型不匹配')
                                                    break
                                                # The low-priority service has no gpu-type
                                                # restriction: look for one of its pods on a
                                                # node of the required gpu type and shrink that.
                                                pods = k8s_client.get_pods(
                                                    namespace=namespace,
                                                    labels={
                                                        'app': service.name,
                                                        'pod-type': 'inference',
                                                    },
                                                )
                                                nodeips = [pod['host_ip'] for pod in pods]
                                                for nodeip in nodeips:
                                                    node = k8s_client.get_node(nodeip)
                                                    if (
                                                        node['labels'].get('gpu-type', '')
                                                        == target_gpu_type
                                                    ):
                                                        # Delete this specific pod, then
                                                        # decrement the deployment replicas.
                                                        can_scale_pods = [
                                                            pod
                                                            for pod in pods
                                                            if pod['host_ip'] == nodeip
                                                        ]
                                                        if can_scale_pods:
                                                            k8s_client.v1.delete_namespaced_pod(
                                                                can_scale_pods[0]['name'],
                                                                namespace,
                                                                grace_period_seconds=0,
                                                            )
                                                            client.AppsV1Api().patch_namespaced_deployment_scale(
                                                                service.name,
                                                                namespace,
                                                                [
                                                                    {
                                                                        'op': 'replace',
                                                                        'path': '/spec/replicas',
                                                                        'value': current_replicas
                                                                        - 1,
                                                                    }
                                                                ],
                                                            )
                                                            push_message(
                                                                [
                                                                    service.created_by.username,
                                                                    inferenceserving.created_by.username,
                                                                ]
                                                                + conf.get('ADMIN_USER').split(
                                                                    ','
                                                                ),
                                                                '缩服务%s一卡，扩服务%s一卡'
                                                                % (
                                                                    service.name,
                                                                    inferenceserving.name,
                                                                ),
                                                            )

                                                            return

        except Exception as e:
            print(e)


@celery_app.task(name='task.update_aihub', bind=True)
@pysnooper.snoop()
def update_aihub(task):
    """Refresh the Aihub model-market table from the cube-studio repo.

    Clones the upstream cube-studio repository, loads aihub info.json and
    fully replaces the Aihub table with its content. Records missing any of
    name/label/describe/uuid are skipped.
    """
    # time.sleep(random.randint(10,600))
    from myapp.utils.core import run_shell

    # Refresh the git checkout.
    info_path = 'info.json'
    status = run_shell(
        'rm -rf /cube-studio && cd / && git clone https://github.com/tencentmusic/cube-studio.git'
    )
    if status:
        print('clone fail')
        return
    # NOTE(review): keeps the original behavior of switching to the cloned
    # info.json only when a local './info.json' exists — confirm this
    # condition is not meant to be inverted.
    if os.path.exists(info_path):
        info_path = '/cube-studio/aihub/info.json'

    # Use a context manager so the file handle is closed promptly
    # (the original json.load(open(...)) leaked it).
    with open(info_path) as f:
        aihubs = json.load(f)
    with session_scope() as dbsession:
        try:
            if len(aihubs) > 0:
                # Full refresh: wipe the table, then re-insert everything.
                dbsession.query(Aihub).delete()
                dbsession.commit()
                for data in aihubs:
                    print(data)
                    name = data.get('name', '')
                    label = data.get('label', '')
                    describe = data.get('describe', '')
                    # Renamed from `uuid` to avoid shadowing the imported uuid module.
                    item_uuid = data.get('uuid', '')
                    if name and label and describe and item_uuid:
                        aihub = dbsession.query(Aihub).filter_by(uuid=item_uuid).first()
                        if not aihub:
                            aihub = Aihub()
                        aihub.doc = data.get('doc', '')
                        aihub.name = name
                        aihub.label = label
                        aihub.describe = describe
                        aihub.field = data.get('field', '')
                        aihub.scenes = data.get('scenes', '')
                        aihub.type = data.get('type', '')
                        aihub.pic = data.get('pic', '')
                        aihub.status = data.get('status', '')
                        aihub.uuid = item_uuid
                        aihub.version = data.get('version', '')
                        aihub.dataset = json.dumps(data.get('dataset', {}))
                        aihub.notebook = json.dumps(data.get('notebook', {}))
                        aihub.job_template = json.dumps(data.get('job_template', {}))
                        aihub.pre_train_model = json.dumps(data.get('pre_train_model', {}))
                        aihub.inference = json.dumps(data.get('inference', {}))
                        aihub.service = json.dumps(data.get('service', {}))
                        aihub.hot = int(data.get('hot', '0'))
                        aihub.price = int(data.get('price', '0'))
                        aihub.source = data.get('source', '')
                        if not aihub.id:
                            dbsession.add(aihub)
                        dbsession.commit()
        except Exception as e:
            print(e)


@celery_app.task(name='task.sync_job_status', bind=True)
def sync_modelarts_job(task):
    """Sync ModelArts status for active workflows without a registered model.

    Queries workflows in an in-flight or freshly-finished state whose
    status_more does not yet contain '"has_model": 1', refreshes each one's
    status, creates the model on completion, and records the error message
    on failure.
    """
    logger.info(task)
    logger.info('modelarts')
    active_states = ['Creating', 'Pending', 'Running', 'Terminating', 'Completed', 'Succeeded']
    with session_scope() as dbsession:
        workflows = (
            dbsession.query(Workflow)
            .options(load_only(Workflow.id, Workflow.username))
            .filter(
                Workflow.status.in_(active_states),
                not_(Workflow.status_more.contains('"has_model": 1')),
            )
            .all()
        )
        for workflow in workflows:
            data = sync_job_status(workflow.id, dbsession=dbsession)
            if not data:
                continue
            logger.info(f'{workflow.id} status: {data.status}')
            if data.status in ('Completed', 'Succeeded'):
                create_model_if_finished(data, dbsession=dbsession)
            elif data.status == 'Failed':
                process_job_err_msg(data, dbsession=dbsession)


@celery_app.task(name='task.docker_commit', bind=True)
def docker_commit(task):
    # Process pending docker-commit tasks: commit the notebook container to an
    # image via the docker-daemon agent pod on the container's node, push it to
    # the configured registry, register an Images record on success, and put the
    # originating notebook back into the running state. Tasks stuck in the
    # running state for over 6 hours are failed first.
    logger.info(task)

    # Fail commit tasks that have been running for more than 6 hours.
    last_time = datetime.datetime.now() - datetime.timedelta(hours=6)
    with session_scope() as session:
        session.query(DockerCommits).filter(
            DockerCommits.status.in_([EnumDockerCommitStatus.running.value]),
            DockerCommits.changed_on < last_time,
        ).update(
            {'status': EnumDockerCommitStatus.failed.value, 'err_msg': '保存镜像超时'},
            synchronize_session=False,
        )

    k8s_client = K8s(conf.get('CLUSTERS').get(conf.get('ENVIRONMENT')).get('KUBECONFIG', ''))

    last_id = 0
    limit = 1  # one commit task per page, so each gets its own session scope
    # Paginate through the pending tasks in the database.
    while True:
        with session_scope() as session:
            results = (
                session.query(DockerCommits)
                .filter(DockerCommits.status.in_(['pending']), DockerCommits.id > last_id)
                .order_by('id')
                .limit(limit)
                .all()
            )

            # Map each node's host ip to the docker-daemon agent pod ip on it.
            pods = k8s_client.v1.list_namespaced_pod(
                namespace='infra', label_selector='app=docker-daemon'
            )
            host_ip2pod_ip = {}
            for pod in pods.items:
                host_ip2pod_ip[pod.status.host_ip] = pod.status.pod_ip

            for item in results:
                logger.info(f'[docker_commit] 正在处理任务(id:{item.id})')
                agent_pod_ip = host_ip2pod_ip.get(item.container_host_ip)
                if agent_pod_ip is None or len(agent_pod_ip) == 0:
                    logger.info(f'{item.id} 找不到对应的docker agent pod')
                    continue

                try:
                    repo = session.query(Repository).filter(Repository.id == item.repo_id).first()
                    if repo is None:
                        logger.info(f'{item.id} 仓库不存在')
                        item.status = EnumDockerCommitStatus.failed.value
                        item.err_msg = '镜像仓库不存在'
                        continue

                    # Mark as running (committed immediately so other workers skip it).
                    item.status = EnumDockerCommitStatus.running.value
                    session.commit()

                    repository = f'{item.image_name}:{item.image_version}'
                    # Talk to the docker daemon on the container's node via the agent pod.
                    client = docker.DockerClient(
                        base_url=f'tcp://{agent_pod_ip}:2375', timeout=30 * 60
                    )
                    container = client.containers.get(item.container_id)

                    container.commit(repository)
                    logger.info(f'[docker_commit] commit 镜像成功 任务id:{item.id}')
                    resp = client.images.push(
                        repository, auth_config={'username': repo.user, 'password': repo.password}
                    )
                    # push() returns newline-separated JSON progress lines;
                    # scan them for an error entry.
                    for msg in resp.split('\r\n'):
                        msg = msg.strip()
                        if msg == '':
                            continue
                        msg = json.loads(msg)
                        err = msg.get('error')
                        if err is not None:
                            item.status = EnumDockerCommitStatus.failed.value
                            item.err_msg = err
                            break

                    if item.status == EnumDockerCommitStatus.failed.value:
                        continue

                    item.status = EnumDockerCommitStatus.succeeded.value
                    item.err_msg = ''

                    # Register the committed image in the Images table.
                    img = Images()
                    img.repository_id = item.repo_id
                    img.name = item.image_name
                    img.image_version = item.image_version
                    img.describe = item.image_desc
                    img.type = item.source
                    img.created_by_fk = item.created_by_fk
                    img.changed_by_fk = item.changed_by_fk

                    session.add(img)
                except Exception as e:
                    item.status = EnumDockerCommitStatus.failed.value
                    item.err_msg = str(e)
                    logger.error(e)
                finally:
                    # Whatever the outcome, move the source notebook out of
                    # the 'saving' state so the UI unblocks.
                    if item.status in [
                        EnumDockerCommitStatus.failed.value,
                        EnumDockerCommitStatus.succeeded.value,
                    ]:
                        res = (
                            session.query(Notebook)
                            .filter(
                                Notebook.id == item.source_id,
                                Notebook.status == EnumNotebookStatus.saving.value,
                            )
                            .first()
                        )
                        if res:
                            if item.status == EnumDockerCommitStatus.failed.value:
                                res.status = EnumNotebookStatus.running.value
                                res.err_msg = '保存镜像失败，原因：' + item.err_msg
                            elif item.status == EnumDockerCommitStatus.succeeded.value:
                                res.status = EnumNotebookStatus.running.value
                                res.err_msg = ''

            session.commit()
            if len(results) < limit:
                break
            last_id = results[-1].id


# Auto-stop services whose configured time window has elapsed.
@celery_app.task(name='task.service_auto_stop', bind=True)
def service_auto_stop(task):
    """Stop running/upgrading services whose auto-stop window has elapsed.

    Scans services in batches; for each one with auto_stop enabled, compares
    the elapsed time since deploy completion (falling back to creation time)
    against the configured number of seconds and stops the service once
    exceeded.
    """
    logger.info('task service_auto_stop start')
    # todo: backfill the database
    # update service
    # left join model m on service.model_id = m.id
    # set advanced = '{"auto_stop": {"enable": true, "seconds": 3600}}'
    # where m.source = 1;
    active_filter = Service.status.in_([ServiceStatus.running.value, ServiceStatus.upgrading.value])
    for batch_ids in scan_table(Service, filters=(active_filter)):
        with session_scope() as dbsession:
            batch = dbsession.query(Service).filter(Service.id.in_(batch_ids)).all()

            for item in batch:
                cfg = item.auto_stop
                if not cfg.get('enable', False):
                    logger.info(f'服务 id:{item.id} 未开启自动停止')
                    continue

                limit_seconds = cfg.get('seconds', 3600)
                started_at = item.deploy_completed_time
                if started_at is None:
                    logger.info(f'服务 id:{item.id} 没有设置部署完成时间，使用创建时间')
                    started_at = item.created_on

                elapsed = (datetime.datetime.now() - started_at).total_seconds()
                if elapsed <= limit_seconds:
                    logger.info(f'服务 id:{item.id} 未到达自动停止时间，跳过')
                    continue

                try:
                    logger.info(f'服务 id:{item.id} 到达自动停止时间，开始停止服务')
                    Service_ModelView_Api.stop_by_id(item.id)
                except Exception as e:
                    logger.error(f'自动学习服务超时停止失败，id:{item.id},error:{repr(e)}')
                    logger.error(traceback.format_exc())


@celery_app.task(name='task.notebook_auto_stop', bind=True)
def notebook_auto_stop(task):
    """Page through running notebooks and stop any whose auto-stop timeout elapsed."""
    cursor_id = 0
    page_size = 100
    # Walk the table by ascending id, one page per session.
    while True:
        with session_scope() as session:
            page = (
                session.query(Notebook)
                .filter(
                    Notebook.status.in_([EnumNotebookStatus.running.value]),
                    Notebook.id > cursor_id,
                )
                .order_by('id')
                .limit(page_size)
                .all()
            )
            for nb in page:
                cfg = nb.get_auto_stop()
                if not cfg.get('enable', False):
                    logger.info(f'notebook id:{nb.id} 未开启自动停止')
                    continue

                timeout = cfg.get('seconds', 3600)
                # Fall back to creation time when no running-start time is recorded.
                started = nb.running_start_time
                if started is None:
                    logger.info(f'notebook id:{nb.id} 没有设置运行开始时间，使用创建时间')
                    started = nb.created_on

                if (datetime.datetime.now() - started).total_seconds() <= timeout:
                    logger.info(f'notebook id:{nb.id} 未到达自动停止时间，跳过')
                    continue

                try:
                    logger.info(f'notebook id:{nb.id} 到达自动停止时间，开始停止服务')
                    NotebookApi.stop_by_id(nb.id)
                except Exception as e:
                    logger.error(f'notebook 超时停止失败，id:{nb.id},error:{repr(e)}')
                    logger.error(traceback.format_exc())

            logger.info(f'处理成功{len(page)}条记录')
            session.commit()

            if len(page) < page_size:
                break
            cursor_id = page[-1].id


@celery_app.task(name='task.sync_model_export', bind=True)
def sync_model_export(task):
    """Drive waiting model-export tasks through an argo workflow.

    For each waiting ModelExport record: reconcile the record with any workflow
    already running for it, validate the referenced model version and service
    image, and (subject to the concurrency cap) assemble and launch the export
    workflow that tars the image, config and model files together.
    """
    logger.info(task)

    last_id = 0
    limit = 1
    # Exports are heavy (docker image + model tar); only allow one at a time.
    max_exporting_limit = 1
    # Page through waiting export tasks by ascending id.
    while True:
        with session_scope() as session:
            export_tasks = (
                session.query(ModelExport)
                .filter(
                    ModelExport.status.in_([EnumModelExportStatus.waiting.value]),
                    ModelExport.id > last_id,
                )
                .order_by('id')
                .limit(limit)
                .all()
            )

            logger.info('任务数量：' + str(len(export_tasks)))
            if export_tasks is None or len(export_tasks) == 0:
                break

            for item in export_tasks:
                try:
                    task_name = f'me-{item.id}'
                    info = get_workflow_by_name(task_name)

                    # A workflow already exists: reconcile its status first.
                    if info:
                        try:
                            status = info['status']
                        except Exception as e:
                            logging.exception(f'info={info}', exc_info=e)
                            status = 'exception'

                        if status == 'exception':
                            continue

                        if status == 'Running':
                            continue

                        if status == 'Succeeded':
                            item.status = EnumModelExportStatus.success.value
                            item.err_msg = ''
                            session.commit()
                            continue

                        if status == 'Failed':
                            item.status = EnumModelExportStatus.failed.value
                            item.err_msg = '模型导出失败'
                            session.commit()
                            continue

                    query = session.query(Model_Version).filter(
                        Model_Version.id == item.model_version_id
                    )
                    mv = query.first()
                    if mv is None:
                        item.status = EnumModelExportStatus.failed.value
                        item.err_msg = f'模型版本id {item.model_version_id}不存在'
                        session.commit()
                        continue

                    # Fixed: mv.image may be NULL; `mv.image.strip()` raised an
                    # AttributeError that the blanket handler below swallowed,
                    # so the record never reached the intended failed state.
                    model_service_image = (mv.image or '').strip()
                    if len(model_service_image) == 0:
                        item.status = EnumModelExportStatus.failed.value
                        item.err_msg = '模型服务镜像为空'
                        session.commit()
                        continue

                    # Enforce the export concurrency cap.
                    exporting_cnt = (
                        session.query(ModelExport)
                        .filter(ModelExport.status.in_([EnumModelExportStatus.exporting.value]))
                        .count()
                    )
                    if exporting_cnt >= max_exporting_limit:
                        logger.info(
                            f'当前导出的任务数量({exporting_cnt})超过限制值 {max_exporting_limit}'
                        )
                        continue

                    workdir = '/mnt/shared/'
                    # Image names only allow a restricted character set; pinyin-ify
                    # the (possibly Chinese) names and strip anything else.
                    pattern = r'[^a-zA-Z0-9_.-]+'
                    model_name = ''.join(lazy_pinyin(mv.model.name))
                    model_version_name = ''.join(lazy_pinyin(mv.name))
                    model_name = re.sub(pattern, '', model_name[:20])
                    model_version_name = re.sub(pattern, '', model_version_name[:10])
                    image_name = f'taichu-{model_name}-{model_version_name}'
                    export_work_dir = f'/mnt/publish-data/{env.get_env()}/sys/model-export/'
                    if is_modelarts():
                        export_work_dir = f'obs://publish-data/{env.get_env()}/sys/model-export/'

                    # Skip work if the export artifact already exists.
                    ret = storage_mgr.list_objects(
                        f'{env.get_env()}/sys/model-export/{item.export_name}.tar'
                    )
                    if len(ret) == 1:
                        logger.info(f'{item.export_name}.tar 已经存在')
                        item.status = EnumModelExportStatus.success.value
                        item.err_msg = ''
                        session.commit()
                        continue

                    cfg = {
                        'LicenseServer': {
                            'Name': 'taichu-license-server',
                        },
                        'ModelService': {
                            'Name': f'{image_name}',
                            'ImageFile': 'model-service.tar',
                            'Volumes': [],
                        },
                    }

                    def get_basename(path):
                        """Basename of *path*, tolerating a trailing slash; None for empty input."""
                        if path is None:
                            return None
                        if len(path) == 0:
                            return None

                        basename = os.path.basename(path)
                        if len(basename) > 0:
                            return basename

                        # Path ended with '/': use the last directory component.
                        return os.path.basename(os.path.dirname(path))

                    model_path = mv.get_model_path()
                    model_path_basename = get_basename(model_path)
                    if model_path:
                        container_path = model_path.strip()
                        container_path = container_path.replace('obs://', '/mnt/')
                        cfg['ModelService']['Volumes'].append(
                            {'HostPath': model_path_basename, 'ContainerPath': container_path}
                        )
                    template_yaml = copy.deepcopy(MODEL_EXPORT_TEMPLATE.to_dict())

                    template_yaml['metadata']['name'] = task_name

                    # Write config.yaml inside the workflow container via base64
                    # to avoid shell-quoting issues.
                    yaml_cfg = yaml.dump(cfg)
                    b64_cfg = base64.b64encode(yaml_cfg.encode('utf-8'))

                    command = [
                        'sh',
                        '-c',
                        (
                            f'mkdir -p {workdir} && cd {workdir} && echo {b64_cfg.decode()} |'
                            f' base64 -d  >> config.yaml'
                        ),
                    ]
                    template_yaml['spec']['templates'][1]['container']['command'] = command

                    tar_args = [
                        f'--workdir={workdir}',
                        f'--add_file={cfg["ModelService"]["ImageFile"]}',
                        '--add_file=config.yaml',
                        f'--add_file={export_work_dir}license-server-arm64.tar',
                        f'--add_file={export_work_dir}license-server-amd64.tar',
                        f'--add_file={export_work_dir}run.py',
                        f'--add_file={export_work_dir}install.sh',
                        f'--output={export_work_dir}{item.export_name}.tar',
                        f'--add_image={model_service_image}',
                    ]

                    if model_path:
                        tar_args.append(f'--add_dir={model_path}')

                    template_yaml['spec']['templates'][2]['container']['args'] = tar_args
                    run_workflow_by_yaml(template_yaml)
                    item.status = EnumModelExportStatus.exporting.value
                    item.err_msg = ''

                except Exception as e:
                    logger.exception('exception', exc_info=e)

            logger.info(f'处理成功{len(export_tasks)}条记录')
            session.commit()

            if len(export_tasks) < limit:
                break
            last_id = export_tasks[-1].id


@celery_app.task(name='task.sync_notebook_status', bind=True)
def sync_notebook_status(task):
    """Poll k8s pod state for deploying notebooks and promote them to running/failed."""
    logger.info(task)

    cursor_id = 0
    page_size = 100
    # Page through deploying notebooks by ascending id.
    while True:
        with session_scope() as session:
            batch = (
                session.query(Notebook)
                .filter(
                    Notebook.status.in_([EnumNotebookStatus.deploying.value]),
                    Notebook.id > cursor_id,
                )
                .order_by('id')
                .limit(page_size)
                .all()
            )

            logger.info('任务数量：' + str(len(batch)))
            if not batch:
                break

            for nb in batch:
                with log_exception:
                    k8s_client = py_k8s.K8s()
                    namespace = conf.get('NOTEBOOK_NAMESPACE')
                    pods = k8s_client.get_pods(namespace=namespace, pod_name=repr(nb))
                    pod_status = pods[0]['status']
                    logger.info(f'pod状态：{pod_status}')
                    ready = False
                    # The readiness flag may be absent; treat that as not ready.
                    with ignore_exception:
                        ready = pods[0]['status_more']['container_statuses'][0]['ready']

                    if pod_status == 'Running' and ready:
                        nb.status = EnumNotebookStatus.running.value
                        nb.running_start_time = datetime.datetime.now()
                    elif pod_status == 'Failed':
                        nb.status = EnumNotebookStatus.failed.value

            logger.info(f'处理成功{len(batch)}条记录')
            session.commit()

            if len(batch) < page_size:
                break
            cursor_id = batch[-1].id


# Compensation job: mirror modelarts' currently-running services into the local DB.
@celery_app.task(name='task.compensate_modelarts_service', bind=True)
def compensate_modelarts_service(task):
    """Mark local Service rows as running when modelarts reports them running."""
    logger.info(task)

    for page in ma_client.get_service_list(status=ServiceStatus.running.value):
        if not page:
            logger.info('未发现运行中的服务')
            return
        logger.info('发现运行中的服务数量：' + str(len(page)))
        for entry in page:
            ma_id = entry.get('service_id')
            logger.info(f'正在处理服务,id:{ma_id}')
            if not ma_id:
                logger.error(f'服务id为空，id:{ma_id}')
                continue
            try:
                with session_scope() as session:
                    session.query(Service).filter(Service.ma_id == ma_id).update(
                        {'status': ServiceStatus.running.value, 'error_msg': ''}
                    )
            except Exception as e:
                logger.error(f'compensate_modelarts_service 异常，id:{ma_id}')
                logger.error(f'{repr(e)}\n{traceback.format_exc()}')


# Sync modelarts deployment status into the model/model_version tables.
@celery_app.task(name='task.sync_modelarts_model', bind=True)
def sync_modelarts_model(task):
    """Refresh status and size of importing model versions from modelarts."""
    logger.info(task)
    cursor_id = 0
    page_size = 100

    # Page through importing versions by ascending id.
    while True:
        with session_scope() as dbsession:
            versions = (
                dbsession.query(Model_Version)
                .filter(
                    Model_Version.status == EnumModelStatus.importing.value,
                    Model_Version.id > cursor_id,
                )
                .order_by('id')
                .limit(page_size)
                .all()
            )

            if not versions:
                break

            if is_modelarts():
                for mv in versions:
                    if not mv.ma_id:
                        continue
                    try:
                        info = get_model_info(mv.ma_id)
                        mv.status = EnumModelStatus[info.get('model_status')].value
                        mv.size = hum_convert(info.get('model_size', 0))
                    except Exception as e:
                        logger.error(f'modelarts服务异常，模型版本id:{mv.ma_id}')
                        logger.error(e)

            logger.info(f'处理成功{len(versions)}条记录')
            dbsession.commit()
        if len(versions) < page_size:
            break
        cursor_id = versions[-1].id


# Reclaim modelarts online-service resources backing long-stopped services.
@celery_app.task(name='task.recycle_modelarts_service', bind=True)
def recycle_modelarts_service(task):
    """Delete modelarts services for Service rows stopped more than the TTL ago."""
    logger.info(task)

    service_ttl = MODELARTS_TASKS_CFGS.get('service_ttl', 5)
    logger.info(f'清理{service_ttl}天前的服务')

    # Anything last changed before this cutoff is eligible for cleanup.
    cutoff = datetime.datetime.now() - datetime.timedelta(days=service_ttl)
    for service_ids in scan_table(
        Service,
        filters=(
            Service.status.in_([ServiceStatus.stopped.value]),
            sql_func.length(Service.ma_id) > 0,
            Service.changed_on < cutoff,
        ),
    ):
        logger.info(f'找到{len(service_ids)}条需要清理的服务')

        with session_scope() as dbsession:
            batch = dbsession.query(Service).filter(Service.id.in_(service_ids)).all()
            for srv in batch:
                if not srv.ma_id:
                    continue

                try:
                    ma_client.delete_service(srv.ma_id)
                    logger.info(f'删除服务成功，id:{srv.ma_id}')
                    srv.ma_id = ''
                except modelarts.exception.apig_exception.APIGException as e:
                    err_code, _ = parse_apig_exception(e)
                    if err_code == 'ModelArts.3502':  # service already gone upstream; ignore
                        logger.info(f'服务已被删除，id:{srv.ma_id}')
                        srv.ma_id = ''
                    else:
                        logger.error(e)
                except Exception as e:
                    logger.error(e)

            dbsession.commit()

    logger.info('回收modelarts服务资源完成')


# Full-table scan, paged by primary key.
def scan_table(model, filters=None, page_size=100):
    """Yield lists of ids for *model* rows, paging by ascending id.

    *filters* may be a single SQLAlchemy expression or a tuple of expressions.
    """
    cursor_id = 0
    while True:
        with session_scope() as dbsession:
            query = dbsession.query(model.id).filter(model.id > cursor_id)

            if filters is not None:
                if isinstance(filters, tuple):
                    query = query.filter(*filters)
                else:
                    query = query.filter(filters)

            rows = query.order_by('id').limit(page_size).all()
            if not rows:
                break

            yield [row.id for row in rows]

        if len(rows) < page_size:
            break
        cursor_id = rows[-1].id


# Data flow-back: turn inference traffic into labeled datasets.
@celery_app.task(name='task.dataset_flow_back', bind=True)
def dataset_flow_back(task):
    """Run the flow-back pipeline for every supported model type."""
    model_types = [EnumModelType.multiple_rounds_of_text_question_answer.value]
    with session_scope() as dbsession:
        for model_type in model_types:
            # Each model type resolves to a module-level handler of the same name.
            handler = globals().get(EnumModelType.get_name(model_type))
            if not handler:
                logging.error(
                    f'未找到对应的回流处理函数，模型类型：{EnumModelType.get_name(model_type)}'
                )
                continue
            dataset_flow_back_runner(dbsession, model_type)


def dataset_flow_back_runner(dbsession, model_type):
    """Collect recent inference logs for running services of *model_type* and
    materialize them as a new labeled Dataset.

    A dataset is generated once at least 100 rows have accumulated, or when a
    week has passed since the last checkout. Per-service progress is tracked in
    Redis under `service_<id>_last_checkout_day`.
    """
    # Running services whose model matches the requested type.
    services = (
        dbsession.query(Service)
        .filter(Service.status == 'running')
        .filter(Service.model.has(type=model_type))
        .all()
    )
    for srv in services:
        auto_learning_task = (
            dbsession.query(AutoLearning).filter(AutoLearning.infer_service_id == srv.id).first()
        )
        if auto_learning_task:
            try:
                # Renamed from `conf` to avoid shadowing the module-level config object.
                task_cfg = json.loads(auto_learning_task.config)
                if not task_cfg.get('flow_back', {}).get('enabled', True):
                    continue
            except Exception:
                pass  # best-effort: an unparsable config does not block flow-back
        else:
            continue
        logging.info(f'开始回流数据，服务id:{srv.id}')
        creator = srv.created_by.username
        now = datetime.datetime.now()
        checkout_day = now - datetime.timedelta(days=0)
        checkout_day_str = checkout_day.strftime('%Y-%m-%d')
        last_checkout_day_str = RedisClient.get(f'service_{srv.id}_last_checkout_day')
        base_query = (
            "select {} from unified_log_taichu_infer_proxy WHERE app_name = 'taichu-infer-proxy'"
            " AND visitParamExtractString(action_params,'service_id') ='{}' AND day <= '{}'"
        )
        count_query = base_query.format('count(1)', srv.id, checkout_day_str)
        result_query = base_query.format(
            'id, action_params.request_content as req, action_params.response_content as resp',
            srv.id,
            checkout_day_str,
        )
        # Restrict the data range to (last_checkout_day, today].
        if last_checkout_day_str:
            # Fixed: was a bare debug print().
            logging.info(last_checkout_day_str)
            where_clause = f" AND day > '{last_checkout_day_str}'"
            count_query += where_clause
            result_query += where_clause
        total = get_result_from_clickhouse(count_query)
        if not total:
            continue
        total = int(total[0]['count()'])
        if total < 100:
            # Not enough rows: only proceed if a week has passed since the last run.
            if last_checkout_day_str:
                last_checkout_day = datetime.datetime.strptime(last_checkout_day_str, '%Y-%m-%d')
            else:
                last_checkout_day = auto_learning_task.created_on
            diff_days = now - last_checkout_day
            if diff_days.days < 8:
                logging.info(f'发现数据{total}条，距离上次生成过去{diff_days.days}天，跳过')
                continue
        logging.info(f'发现数据{total}条，达到100条或者时间满一周开始生成数据集')
        results = get_result_from_clickhouse(result_query)
        # Handler name is derived from the model type (e.g. multiple_rounds_of_text_question_answer).
        fun_to_run = globals().get(EnumModelType.get_name(model_type))
        new_path = fun_to_run(results, creator)
        if not new_path:
            logging.info('无有效数据')
            continue
        # Create the dataset record pointing at the freshly uploaded files.
        now = datetime.datetime.now()
        checkout_day = now - datetime.timedelta(days=1)
        config_map = {
            EnumModelType.multiple_rounds_of_text_question_answer.value: {
                'data_type': 'txt',
                'label_type': 'Multiple Rounds of Text Question Answer',
            },
            EnumModelType.multiple_rounds_of_visual_question_answer.value: {
                'data_type': 'multiple',
                'label_type': 'Multiple Rounds of Visual Question Answer',
            },
        }
        new_dataset = Dataset(
            name=f'{auto_learning_task.task_name}-回流数据-{checkout_day.strftime("%Y%m%d")}-{id_generator()}',
            describe=auto_learning_task.task_name,
            dataset_path=new_path,
            labeled=1,
            label_type=config_map.get(model_type).get('label_type'),
            data_type=config_map.get(model_type).get('data_type'),
            storage_size='',
            source='flowback',
            status='succeed',
            upload_type='files',
            owner=creator,
            created_on=now,
            changed_on=now,
        )
        new_dataset.created_by_fk = auto_learning_task.created_by_fk
        new_dataset.changed_by_fk = auto_learning_task.changed_by_fk
        dbsession.add(new_dataset)
        dbsession.commit()
        RedisClient.set(f'service_{srv.id}_last_checkout_day', checkout_day_str, expire=None)
        logging.info(f'回流数据集：id-{new_dataset.id}，name-{new_dataset.name}')


def multiple_rounds_of_text_question_answer(results, username):
    """Convert clickhouse flow-back rows into conversation records and upload
    them as `result.json`.

    Returns the new storage prefix, or None when there are no usable rows.
    """
    converted = []
    if not results:
        return None
    for row in results:
        try:
            req = json.loads(row.get('req'))
            resp = parse_answer_from_output(row.get('resp'))
            resp = json.loads(resp)
            # Fixed: these key accesses previously sat outside the try block, so
            # a row missing 'input_text'/'full_context' raised an uncaught
            # KeyError and aborted the whole flow-back task.
            question = req['input_text']
            # The answer follows the '###答案：' marker inside full_context.
            answer = resp['full_context'].split('###答案：')[-1] if resp else ''
        except Exception:
            continue  # skip unparsable or incomplete rows
        converted.append(
            {
                'id': row.get('id'),
                'conversations': [
                    {'from': 'question', 'value': question},
                    {'from': 'answer', 'value': answer},
                ],
            }
        )
    if len(converted) == 0:
        return None
    new_path = f'{username}/unpack/{int(time.time())}/'
    try:
        result_str = '\n'.join(
            [json.dumps(item, ensure_ascii=False, indent=None) for item in converted]
        )
        storage_mgr.put_object(f'{new_path}result.json', result_str)
    except Exception:
        # Best-effort upload, but record the failure instead of silently passing.
        logging.exception(f'上传回流数据失败：{new_path}')
    logging.info(f'生成文件：{new_path}')
    return new_path


def parse_answer_from_output(resp):
    """Return the first line of *resp* containing 'full_context', or None."""
    return next((line for line in resp.split('\n') if 'full_context' in line), None)


def get_result_from_clickhouse(query):
    """Run *query* against the analytics clickhouse over HTTP.

    Returns the parsed JSON 'data' rows, or None on any failure (non-200
    response, network error, or unparsable body).
    """
    CH_URL = 'http://service-ch.logging:28123'
    CH_USER = 'default'
    # SECURITY: hardcoded credential — should be moved to config/secret storage.
    CH_PASSWORD = '1qaz2wsx#'
    headers = {}
    headers['X-ClickHouse-User'] = CH_USER
    headers['X-ClickHouse-Key'] = CH_PASSWORD
    params = {
        'database': 'analytics',
        'query': query + ' FORMAT JSON',
        'session_id': 'my-session-id-string',
    }
    try:
        # Fixed: a timeout so a hung clickhouse cannot stall the celery worker forever.
        response = requests.post(url=CH_URL, params=params, headers=headers, timeout=30)
        if response.status_code != 200:
            logging.error(f'请求clickhouse失败: {response.text}')
            return None
        content = json.loads(response.content.decode('utf-8'))
        return content.get('data')
    except Exception as e:
        logging.error(e)
        return None


@celery_app.task(name='task.count_user_space', bind=True)
def count_user_space(task):
    """Measure each user's top-level storage directory and cache its size in Redis."""
    logger.info('task count_user_space start')
    entries = StorageMgrFactory.region('default').list_objects('', delimiter='/')
    for entry in entries:
        if not entry.get('is_dir'):
            continue
        username = entry['name'].rstrip('/')
        # Shared system directories are not per-user space.
        if username in ['train_code', 'train_data']:
            continue
        size = get_path_size(entry['name'], 'default')
        RedisClient.set(f'user_space_size_{username}', size, expire=86400)


# @celery_app.task(name='task.init_user_storage_size', bind=True)
# def init_user_storage_size(task):
#     logger.info('task init_user_storage_size start')
#     if not is_private():
#         return
#     offset = 0
#     page_size = 100
#     with session_scope() as session:
#         q = session.query(MyUser).order_by(MyUser.id.asc())
#         while True:
#             users = q.offset(offset).limit(page_size).all()
#             for user in users:
#                 user_attribute = session.query(UserAttribute).filter_by(user_id=user.id).first()
#                 if user_attribute:
#                     continue
#                 user_attribute = UserAttribute()
#                 user_attribute.user_id = user.id
#                 user_attribute.storage_size = DEFAULT_STORAGE_SIZE
#                 session.add(user_attribute)
#                 session.commit()
#                 if os.path.exists(get_private_path(user.username)):
#                     juicefs_quota_set(user.username, DEFAULT_STORAGE_SIZE)
#
#             if len(users) < page_size:
#                 break
#             offset += page_size
@celery_app.task(name='task.init_user_storage_size', bind=True)
def init_user_storage_size(task):
    """Initialize per-user storage-size rows, one per region (private deployments only).

    For every user missing a UserAttribute row in a region, create one with the
    default storage size and, when the user's private directory exists, apply a
    matching juicefs quota.
    """
    logger.info('[init_user_storage_size] task start ...')
    if not is_private():
        return
    offset = 0
    page_size = 100
    with session_scope() as session:
        q = session.query(MyUser).order_by(MyUser.id.asc())
        while True:
            users = q.offset(offset).limit(page_size).all()
            # Consistency fix: the task previously mixed `logging.*` and
            # `logger.*`; use the module logger throughout.
            logger.info(f'[init_user_storage_size] get users len: {len(users)}')
            for user in users:
                logger.info(f'[init_user_storage_size] region list: {get_region_keys()}')
                for region_key in get_region_keys():
                    user_attribute = (
                        session.query(UserAttribute)
                        .filter(UserAttribute.region == region_key)
                        .filter_by(user_id=user.id)
                        .first()
                    )
                    logger.info(f'[init_user_storage_size] user_attribute: {user_attribute}')
                    if user_attribute:
                        continue
                    user_attribute = UserAttribute()
                    user_attribute.user_id = user.id
                    user_attribute.region = region_key
                    user_attribute.storage_size = DEFAULT_STORAGE_SIZE
                    session.add(user_attribute)
                    session.commit()
                    # Apply the quota only when the private directory already exists.
                    if os.path.exists(get_private_path(user.username)):
                        juicefs_quota_set(user.username, DEFAULT_STORAGE_SIZE)

            if len(users) < page_size:
                break
            offset += page_size
    logger.info('[init_user_storage_size] task finish ...')


# Clean up failed tasks: delete the backing k8s resources.
@celery_app.task(name='task.clear_failed_service', bind=True)
def clear_failed_service(task):
    """Delete k8s resources for inference services that failed/stopped today."""
    task_id = uuid.uuid4()
    begin_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    logging.info(f'{task_id} clear_failed_service begin work {begin_time}')
    with session_scope() as dbsession:
        try:
            # Failed/stopped records whose last change is today.
            instances = (
                dbsession.query(InferenceService)
                .filter(
                    InferenceService.status.in_(
                        [ServiceStatus.failed.value, ServiceStatus.stopped.value]
                    ),
                    InferenceService.changed_on >= datetime.datetime.now().strftime('%Y-%m-%d'),
                )
                .all()
            )
            logging.info(f'{task_id} clear_failed_service 本次失败记录数：' + str(len(instances)))
            for ins in instances:
                # Multi-node deployments need a different teardown path; pass the
                # flag directly instead of duplicating the call in an if/else.
                expand = json.loads(ins.expand)
                multi_node = is_multi_node(resource_cfg=expand.get('specifications', {}))
                delete_old_service(
                    ins.name, namespace=ins.namespace, region=ins.region, multi_node=multi_node
                )
            # Fixed: was logged once per instance inside the loop, falsely
            # claiming completion after each item.
            logging.info(f'{task_id} clear_failed_service work finished')
        except Exception as e:
            logging.error(
                f'{task_id} clear_failed_service cronjob run error:{repr(e)}',
                exc_info=True,
            )
    end_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    logging.info(f'{task_id} clear_failed_service end work {end_time}')


def _clear_orphan_dataset_dirs(owner_dir, owner_name, sub_name, model_cls):
    """Delete <owner_dir>/<sub_name>/<id> directories with no matching DB row.

    A directory survives when its name matches a row id of *model_cls*, or when
    its relative path matches some row's dataset_path.
    """
    dataset_path = os.path.join(owner_dir, sub_name)
    if not os.path.isdir(dataset_path):
        return
    for sub_dir in os.listdir(dataset_path):
        with session_scope() as session:
            with log_exception:
                dataset_id = int(sub_dir)
                dataset = session.query(model_cls.id).filter_by(id=dataset_id).first()
                if dataset is None:
                    # dataset_path values in the DB carry a trailing slash.
                    db_dataset_path = os.path.join(owner_name, sub_name, sub_dir)
                    if not db_dataset_path.endswith('/'):
                        db_dataset_path += '/'
                    d = (
                        session.query(model_cls.id)
                        .filter_by(dataset_path=db_dataset_path)
                        .first()
                    )
                    if d is None:
                        path_to_delete = os.path.join(dataset_path, sub_dir)
                        logger.info(
                            f'[clear_dataset_tmp_files] 删除不存在的目录: {path_to_delete}'
                        )
                        shutil.rmtree(path_to_delete)


@celery_app.task(name='task.clear_dataset_tmp_files', bind=True)
def clear_dataset_tmp_files(task):
    """Purge week-old temp files, stale merge dirs and orphaned dataset dirs."""
    start_time = time.time()
    # 1) Origin temp files untouched for more than 7 days.
    for root, _, files in os.walk(os.path.join(SharedMntDir, OriginPathPrefix)):
        for filename in files:
            file_path = os.path.join(root, filename)
            timestamp = os.path.getmtime(file_path)
            if datetime.datetime.now().timestamp() - int(timestamp) < 60 * 60 * 24 * 7:
                continue

            # Fixed: the message previously logged a literal '(unknown)'
            # instead of the path being removed.
            logger.info(f'[clear_dataset_tmp_files] 删除过期的文件:{file_path}')
            os.remove(file_path)

    # 2) Merge directories untouched for more than 7 days.
    for root, dirs, _ in os.walk(os.path.join(SharedMntDir, MergePathPrefix)):
        for dir_name in dirs:
            dir_path = os.path.join(root, dir_name)
            modification_time = os.stat(dir_path).st_mtime
            if time.time() - modification_time > 86400 * 7:
                try:
                    shutil.rmtree(dir_path)
                    logging.info(f'[clear_dataset_tmp_files] 已删除目录: {dir_path}')
                except Exception as e:
                    logging.info(f'[clear_dataset_tmp_files] 无法删除目录 {dir_path}，错误: {e}')

    # 3) Per-user dataset/eval_dataset directories with no DB record.
    for name in os.listdir(SharedMntDir):
        cur = os.path.join(SharedMntDir, name)
        if os.path.isdir(cur) is False:
            continue
        # Skip built-in/system directories.
        if name in ['sys', '__dataset', 'train_code', 'train_data']:
            continue
        if name.startswith('__'):
            continue

        _clear_orphan_dataset_dirs(cur, name, 'dataset', Dataset)
        _clear_orphan_dataset_dirs(cur, name, 'eval_dataset', EvalDataset)

    end_time = time.time()
    logger.info(f'[clear_dataset_tmp_files] time used: {end_time - start_time} s')


@celery_app.task(name='task.clear_train_model_files', bind=True)
def clear_train_model_files(task):
    """Periodically reclaim disk space under per-user ``jobs`` directories.

    Scans every user directory in ``SharedMntDir`` (skipping built-in and
    double-underscore-prefixed entries), and for each sub-directory of
    ``<user>/jobs`` checks whether any ``Model_Version`` row still references
    it via ``path`` or ``merge_path``.  Orphaned sub-directories (no DB
    record) are deleted with ``delete_local_dir``; deletion failures are
    logged as warnings and do not abort the scan.

    :param task: bound Celery task instance (unused directly).
    """
    started_at = time.time()
    # Built-in directories that must never be scanned or deleted.
    builtin_names = {'sys', '__dataset', 'train_code', 'train_data'}

    for entry in os.listdir(SharedMntDir):
        user_dir = os.path.join(SharedMntDir, entry)
        if not os.path.isdir(user_dir):
            continue
        # Skip reserved / internal directories.
        if entry in builtin_names or entry.startswith('__'):
            continue

        jobs_dir = os.path.join(user_dir, 'jobs')
        logger.info(f'[clear_train_model_files] 当前扫描目录: {jobs_dir}')
        if not os.path.isdir(jobs_dir):
            continue

        for job_name in os.listdir(jobs_dir):
            # One short-lived session per sub-directory; log_exception keeps a
            # single failure from stopping the overall sweep.
            with session_scope() as session:
                with log_exception:
                    record = (
                        session.query(Model_Version)
                        .filter(
                            or_(
                                Model_Version.path.like(f'%{job_name}%'),
                                Model_Version.merge_path.like(f'%{job_name}%'),
                            )
                        )
                        .first()
                    )
                    if record is not None:
                        # Still referenced by a model version — keep it.
                        logger.info(
                            f'[clear_train_model_files] {job_name} 找到记录: {record.id}'
                        )
                        continue
                    target = os.path.join(jobs_dir, job_name)
                    try:
                        logger.info(f'[clear_train_model_files] 删除目录: {target}')
                        delete_local_dir(local_path=target)
                    except Exception as e:
                        # Best-effort: log and move on to the next directory.
                        logger.warning(
                            f'[clear_train_model_files] 无法删除目录 {target}，错误: {e}'
                        )

    finished_at = time.time()
    logger.info(f'[clear_train_model_files] time used: {finished_at - started_at} s')


if __name__ == '__main__':
    # Ad-hoc local entry point for manually exercising a single cleanup
    # routine; in production these run as scheduled Celery tasks.
    dataset_flow_back()
    # clear_train_model_files()
