#Copyright (c) 2025, Alibaba Cloud and its affiliates;
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at

#   http://www.apache.org/licenses/LICENSE-2.0

#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.



import os
import time
import yaml
import shutil
from datetime import datetime

from kubernetes import client, utils
from kubernetes import config as kubeconfig

import fce_util.libs.localrun as localrun
from fce_util.libs import log as loggers

logger = loggers.g_logger.logger

# Directory under which generated per-cluster YAML manifests are written
# (one sub-directory per cluster id, see getYamlPath).
yamlFiles_dir = "/usr/local/yamlFiles"

# Default ConfigMap parameters.  Currently empty — values are supplied
# per-deployment by deploy_configMap.
defaultCM = {

}

# Default PersistentVolumeClaim parameters used by deploy_PVC / deploy_PVC_disk.
defaultPVC = {
    'VolumeSize': '50Gi',
    'VolumeMode': 'Filesystem',
    'AccessMode': 'ReadWriteOnce',
    'StorageClassName': 'alicloud-disk-topology-alltype'
}

# Environment variables for the logtail (ilogtail) log-collection agent,
# relocating its checkpoint/buffer files under /etc/ilogtail/checkpoint.
# NOTE(review): not referenced anywhere in this file — presumably consumed
# by another module; confirm before removing.
logtailEnvArray = [
    {
        'name': 'user_config_file_path',
        'value': '/etc/ilogtail/checkpoint/user_log_config.json'
    },
    {
        'name': 'docker_file_cache_path',
        'value': '/etc/ilogtail/checkpoint/docker_path_config.json'
    },
    {
        'name': 'check_point_filename',
        'value': '/etc/ilogtail/checkpoint/logtail_check_point'
    },
    {
        'name': 'check_point_dump_interval',
        'value': '60'
    },
    {
        'name': 'buffer_file_path',
        'value': '/etc/ilogtail/checkpoint'
    },
]

class genYaml:
    """Render Kubernetes resource definitions (plain dicts) from the YAML
    templates under /usr/local/fce/bin/yaml_tmps, customised with the
    caller-supplied keyword arguments.

    Note: the status flag returned by the underlying _get*YamlObject helper
    is discarded, so invalid arguments yield an empty dict rather than an
    error — callers are expected to have validated their inputs.
    """

    def _loadTemplate(self, kind):
        # Shared template loader — factors out the load-and-parse
        # boilerplate that was previously copy-pasted into every method.
        yamlTempPath = getYamlTempPath(kind)
        with open(yamlTempPath, 'r') as file:
            return yaml.safe_load(file)

    def cm(self, **kwargs):
        """ConfigMap definition (requires Name, ClusterId, Namespace)."""
        status, result = _getCMYamlObject(self._loadTemplate('cm'), **kwargs)
        return result

    def sc(self, **kwargs):
        """StorageClass definition (requires Name, RegionId, ZoneId, VpcId, VswitchId)."""
        status, result = _getSCYamlObject(self._loadTemplate('sc'), **kwargs)
        return result

    def pvc(self, **kwargs):
        """PersistentVolumeClaim definition (requires Name, VolumeSize, AccessMode, StorageClassName)."""
        status, result = _getPVCYamlObject(self._loadTemplate('pvc'), **kwargs)
        return result

    def svc(self, **kwargs):
        """Service definition (requires Name and ClusterId)."""
        status, result = _getSVCYamlObject(self._loadTemplate('svc'), **kwargs)
        return result

    def ray_cluster(self, **kwargs):
        """RayCluster custom-resource definition (template short-name 'rc')."""
        status, result = _getDeploymentYamlObjectForRC(self._loadTemplate('rc'), 'raycluster', **kwargs)
        return result

class ResourceExist:
    """Boolean existence probes for cluster resources.

    Each method returns True when the named resource can be read from the
    cluster and False otherwise (including on API errors).  Method names
    match the short resource identifiers used by checkServiceExist.
    """

    def cm(self, name, namespace = 'default'):
        """ConfigMap exists?"""
        return _getConfigMapStatus(name, namespace)[0]

    def sc(self, name, namespace = 'default'):
        """StorageClass exists?"""
        return _getSCStatus(name, namespace)[0]

    def pvc(self, name, namespace = 'default'):
        """PersistentVolumeClaim exists?"""
        return _getPVCStatus(name, namespace)[0]

    def svc(self, name, namespace = 'default'):
        """Service exists?"""
        return _getSVC(name, namespace)[0]

    def rc(self, name, namespace = 'default'):
        """RayCluster custom object exists?"""
        return _checkRCExist(name, namespace)[0]
    
def _getDeploymentStatus(deployment_name, namespace = 'default'):
    """Return (ready, detail) for a Deployment.

    Ready means every declared replica is available and at least one
    replica is running.  On API errors returns (False, exception).
    """
    kubeconfig.load_kube_config()
    apps_v1 = client.AppsV1Api()
    try:
        deployment = apps_v1.read_namespaced_deployment(name=deployment_name, namespace=namespace)
        status = deployment.status
        logger.info(f'describe deployment {deployment_name}: {status}')
        all_replicas_available = (
            status
            and status.available_replicas is not None
            and status.replicas is not None
            and status.replicas == status.available_replicas
            and status.available_replicas >= 1
        )
        if all_replicas_available:
            return True, 'object status ready'
    except Exception as exception:
        return False, exception
    return False, 'object status not ready'
    
def _getCustomObjectStatus(custom_obj_name, kube_config, namespace = 'default'):
    """Check readiness of a RayCluster custom object by shelling out to kubectl.

    The kubectl YAML output is parsed and the object is considered ready when
    status.availableWorkerReplicas equals status.desiredWorkerReplicas and at
    least one worker is available.  Returns (True, msg) when ready, otherwise
    (False, reason-or-exception).
    """
    try: 
        '''Could not get status with the client api
        kubeconfig.load_kube_config()
        custom_api = client.CustomObjectsApi()
        group = "ray.io"
        version = "v1alpha1"
        #version = "v1"
        plural = "rayclusters"
        custom_obj = custom_api.get_namespaced_custom_object(group=group, version=version, plural=plural, name=custom_obj_name, namespace=namespace)
        logger.debug('_getCustomObject  custom_obj: {}'.format(custom_obj))
        custom_obj_status = custom_api.get_namespaced_custom_object_status(group=group, version=version, plural=plural, name=custom_obj_name, namespace=namespace)
        logger.debug('_getCustomObject2 status  custom_obj_status: {}'.format(custom_obj_status))
        '''
        # Query via kubectl with the caller-supplied kubeconfig; the Python
        # client path above was kept for reference (it could not fetch status).
        cmd = "kubectl --kubeconfig={} get raycluster {} -o yaml -n {}".format(kube_config, custom_obj_name, namespace)
        ret, output = localrun.run(cmd)
        if ret:
            logger.error('_getCustomObject  cmd: {} failed, output: {}'.format(cmd, output))
            return False, output
        custom_obj_dict = yaml.safe_load(output)
        #logger.debug('_getCustomObject  custom_obj_dict: {}'.format(custom_obj_dict))
        # A freshly-created object may not have a status block yet.
        if 'status' not in custom_obj_dict:
            logger.error('custom_obj_dict status not exist')
            return False, 'Status is None'
        obj_status = custom_obj_dict['status']
        logger.debug('_getCustomObject  custom_obj_name: {}, obj_status: {}'.format(custom_obj_name, obj_status))
        if obj_status and 'availableWorkerReplicas' in obj_status and 'desiredWorkerReplicas' in obj_status and obj_status['availableWorkerReplicas'] == obj_status['desiredWorkerReplicas'] and obj_status['availableWorkerReplicas'] >= 1:
            return True, 'object status ready'
    except Exception as exception:
        logger.debug('_getCustomObjectStatus failed, exception: {}'.format(exception))
        return False, exception
        
    return False, 'object status not ready' 
    
class ResourceStatus:
    """Status lookups for cluster resources.

    Every method returns a (status, detail) tuple from the corresponding
    module-level helper.  Method names match the short resource identifiers
    dispatched by checkServiceStatus.
    """

    def cm(self, name, namespace = 'default'):
        """ConfigMap: (exists, ConfigMap-or-exception)."""
        return _getConfigMapStatus(name, namespace)

    def sc(self, name, namespace = 'default'):
        """StorageClass: (exists, StorageClass-or-exception)."""
        return _getSCStatus(name, namespace)

    def pvc(self, name, namespace = 'default'):
        """PVC: (exists, phase-or-exception)."""
        return _getPVCStatus(name, namespace)

    def svc(self, name, namespace = 'default'):
        """Service: (exists, cluster-ip-or-exception)."""
        return _getSVC(name, namespace)

    def op(self, name, namespace = 'default'):
        """Operator Deployment: (ready, detail)."""
        return _getDeploymentStatus(name, namespace)

    def rc(self, name, kube_config, namespace = 'default'):
        """RayCluster: (ready, detail) — needs a kubeconfig path for kubectl."""
        return _getCustomObjectStatus(name, kube_config, namespace)


class ResourceClean:
    """Deletion dispatch for cluster resources.

    Each method delegates to the matching module-level clean* function;
    method names match the short identifiers used by clean().
    """

    def cm(self, serviceName, namespace = 'default'):
        """Delete a ConfigMap."""
        cleanConfigMap(serviceName, namespace)

    def sc(self, serviceName, namespace = 'default'):
        """Delete a StorageClass."""
        cleanStorageClass(serviceName, namespace)

    def pvc(self, serviceName, namespace = 'default'):
        """Delete a PersistentVolumeClaim."""
        cleanPVC(serviceName, namespace)

    def svc(self, serviceName, namespace = 'default'):
        """Delete a Service."""
        cleanSVC(serviceName, namespace)

    def rc(self, serviceName, namespace = 'default'):
        """Delete a RayCluster custom object."""
        cleanRayCluster(serviceName, namespace)

# Module-level singletons; the dispatch helpers (clean, checkServiceStatus,
# checkServiceExist) look methods up on these via getattr.
genYamlObj = genYaml()
ResourceExistObj = ResourceExist()
ResourceStatusObj = ResourceStatus()
ResourceCleanObj = ResourceClean()

def _getName(clusterId, name, options = ''):
    if options == '':
        return f'{clusterId}-{name}'
    else:
        return f'{clusterId}-{name}-{options}'

def getRegionId():
    """Query the Alibaba Cloud instance metadata service for the region id.

    Returns the raw command output; the exit status is discarded, so on
    failure the result may be an error string rather than a region id.
    """
    cmd = 'curl -s http://100.100.100.200/latest/meta-data/region-id'
    _, region_id = localrun.run(cmd)
    return region_id

def getYamlTempPath(object):
    """Return the on-disk path of the YAML template for resource short-name *object*."""
    return '/usr/local/fce/bin/yaml_tmps/{}.yaml'.format(object)

def getYamlPath(clusterId, object):
    """Return the output path for *object*'s generated YAML under the
    per-cluster directory, creating that directory if needed.

    Uses os.makedirs(exist_ok=True) instead of the previous shell
    `mkdir -p` round-trip through localrun — same effect, no subprocess.
    """
    target_dir = f'{yamlFiles_dir}/{clusterId}'
    logger.info('getYamlPath ensuring directory {}'.format(target_dir))
    os.makedirs(target_dir, exist_ok=True)
    return f'{target_dir}/{object}.yaml'

def validateObject(object = 'cm'):
    """Return True when *object* is one of the supported resource short-names."""
    return object in {'cm', 'pv', 'pvc', 'sc', 'svc', 'op', 'rc'}

def checkProperties(obj, keys = ()):
    """Return True when every key in *keys* exists in mapping *obj* with a
    value that is not the empty string.

    Fixes: mutable default argument (was `keys=[]`), non-standard 2-space
    indentation, and replaces the manual loop with `all()`.
    """
    return all(key in obj and obj[key] != "" for key in keys)

def _getMountDirectory(path):
    return f'/{path}'

def cleanConfigMap(configmap_name, namespace = 'default'):
    """Delete ConfigMap *configmap_name*; failures are logged, not raised."""
    kubeconfig.load_kube_config()
    core_api = client.CoreV1Api()
    try:
        core_api.delete_namespaced_config_map(configmap_name, namespace)
    except Exception as exception:
        logger.error(f'clean configMap: {configmap_name} error: {exception}')
    else:
        logger.info(f'clean configMap: {configmap_name} successful...')

def is_sc_bound_to_pv(sc_name):
    """Return True when any PersistentVolume references StorageClass *sc_name*."""
    kubeconfig.load_kube_config()
    v1 = client.CoreV1Api()
    pv_list = v1.list_persistent_volume()

    for pv in pv_list.items:
        if pv.spec.storage_class_name == sc_name:
            # Bug fix: the original call lacked the f-prefix, so the
            # placeholder text was logged literally.
            logger.info(f'sc: {sc_name} bound to {pv.metadata.name}')
            return True
    return False

def cleanStorageClass(sc_name, namespace = 'default'):
    """Delete StorageClass *sc_name* (cluster-scoped; *namespace* is unused,
    kept for dispatch-signature uniformity).

    Errors are now logged rather than raised, matching cleanConfigMap, so a
    failed delete (e.g. already gone) no longer aborts a cleanup sequence.
    """
    kubeconfig.load_kube_config()
    v1 = client.StorageV1Api()
    try:
        v1.delete_storage_class(sc_name)
    except Exception as exception:
        logger.error(f'clean sc: {sc_name} error: {exception}')
    else:
        logger.info(f'clean sc: {sc_name} successful...')

def cleanPVC(pvc_name, namespace = 'default'):
    """Delete PersistentVolumeClaim *pvc_name* in *namespace*.

    Errors are now logged rather than raised, matching cleanConfigMap, so a
    failed delete no longer aborts a cleanup sequence.
    """
    kubeconfig.load_kube_config()
    v1 = client.CoreV1Api()
    try:
        v1.delete_namespaced_persistent_volume_claim(pvc_name, namespace)
    except Exception as exception:
        logger.error(f'clean pvc: {pvc_name} error: {exception}')
    else:
        logger.info(f'clean pvc: {pvc_name} successful...')

def cleanSVC(svc_name, namespace = 'default'):
    """Delete Service *svc_name* in *namespace*.

    Errors are now logged rather than raised, matching cleanConfigMap, so a
    failed delete no longer aborts a cleanup sequence.
    """
    kubeconfig.load_kube_config()
    v1 = client.CoreV1Api()
    try:
        v1.delete_namespaced_service(svc_name, namespace)
    except Exception as exception:
        logger.error(f'clean svc: {svc_name} error: {exception}')
    else:
        logger.info(f'clean svc: {svc_name} successful...')

def cleanRayCluster(cluster_name, namespace = 'default'):
    """Delete the RayCluster custom object *cluster_name* (ray.io/v1).

    Errors are now logged rather than raised, matching cleanConfigMap, so a
    failed delete no longer aborts a cleanup sequence.
    """
    kubeconfig.load_kube_config()
    custom_api = client.CustomObjectsApi()
    try:
        custom_api.delete_namespaced_custom_object(group="ray.io", version="v1", plural="rayclusters", name=cluster_name, namespace=namespace)
    except Exception as exception:
        logger.error(f'clean raycluster: {cluster_name} error: {exception}')
    else:
        logger.info(f'clean raycluster: {cluster_name} successful...')

def clean(object, serviceName, namespace = 'default'):
    """Dispatch deletion of *serviceName* to the ResourceClean method for *object*."""
    logger.info(f'start clean {object}: {serviceName}...')
    getattr(ResourceCleanObj, object)(serviceName, namespace)

def _getConfigMapStatus(config_map_name, namespace = 'default'):
    """Return (True, ConfigMap) when readable, else (False, exception)."""
    kubeconfig.load_kube_config()
    core_api = client.CoreV1Api()
    try:
        config_map = core_api.read_namespaced_config_map(name=config_map_name, namespace=namespace)
        return True, config_map
    except Exception as exception:
        return False, exception

def _getSCStatus(sc_name, namespace = 'default'):
    """Return (True, StorageClass) when readable, else (False, exception).

    *namespace* is accepted for dispatch-signature uniformity but unused —
    storage classes are cluster-scoped.
    """
    kubeconfig.load_kube_config()
    storage_api = client.StorageV1Api()
    try:
        sc = storage_api.read_storage_class(name=sc_name)
        return True, sc
    except Exception as exception:
        return False, exception
        
def _getPVCStatus(pvc_name, namespace = 'default'):
    """Return (True, phase-string) for the PVC, else (False, exception)."""
    kubeconfig.load_kube_config()
    core_api = client.CoreV1Api()
    try:
        pvc = core_api.read_namespaced_persistent_volume_claim(name=pvc_name, namespace=namespace)
        return True, pvc.status.phase
    except Exception as exception:
        return False, exception

def _getSVC(svc_name, namespace = 'default'):
    """Return (True, cluster-IP) for the Service, else (False, exception)."""
    kubeconfig.load_kube_config()
    core_api = client.CoreV1Api()
    try:
        service = core_api.read_namespaced_service(name=svc_name, namespace=namespace)
        return True, service.spec.cluster_ip
    except Exception as exception:
        return False, exception
    
def _checkRCExist(rc_name, namespace = 'default'):
    """Return (True, obj) when the RayCluster custom object (ray.io/v1)
    exists, otherwise (False, reason-or-exception)."""
    kubeconfig.load_kube_config()
    custom_api = client.CustomObjectsApi()
    try:
        custom_obj = custom_api.get_namespaced_custom_object(
            group="ray.io",
            version="v1",
            plural="rayclusters",
            name=rc_name,
            namespace=namespace,
        )
        if custom_obj:
            logger.debug('_getCustomObject  custom_obj: {}'.format(custom_obj))
            return True, custom_obj
        logger.debug('custom_obj not exist')
        return False, 'custom_obj not exist'
    except Exception as exception:
        logger.error(f'checkRCExist failed: {exception}')
        return False, exception
    
def getEnvArray(cmName, params = ()):
    """Build a container `env` list for a pod spec.

    Each name in *params* becomes a configMapKeyRef entry reading that key
    from ConfigMap *cmName*; NODE_NAME and POD_NAME fieldRef entries are
    always appended.  On error a partial list (without the fieldRef entries)
    is returned, preserving the original best-effort behaviour.

    Fixes the mutable default argument (was `params=[]`).
    """
    res = []
    try:
        for param in params:
            res.append({
                'name': param,
                'valueFrom': {
                    'configMapKeyRef': {
                        'name': cmName,
                        'key': param
                        },
                    },
                })
    except Exception as e:
        logger.error(f'get env failed: {e}')
        return res

    # Downward-API entries exposing the scheduling node and pod name.
    res.extend([{ 
        'name': 'NODE_NAME', 
        'valueFrom': {
            'fieldRef': {
                'fieldPath': 'spec.nodeName'
            }
        } 
    },
    { 
        'name': 'POD_NAME', 
        'valueFrom': {
            'fieldRef': {
                'fieldPath': 'metadata.name'
            }
        }
    }])
    return res

def checkServiceStatus(name, object = 'cm', namespace = 'default', times = 60, interval = 1, kube_config = '/root/.kube/config'):
    """Poll until resource *name* of type *object* reports ready.

    Dispatches to the matching ResourceStatus method up to *times* attempts,
    sleeping *interval* seconds between failed checks.  Type 'rc'
    (RayCluster) additionally needs *kube_config* because its status check
    shells out to kubectl.  Returns True when the resource became ready,
    False on unsupported type, timeout, or unexpected error.
    """
    if not validateObject(object):
        # Log: "service <...> type not supported".
        # NOTE(review): interpolates *name* where checkServiceExist logs
        # *object* — looks like a copy/paste slip; confirm intent.
        logger.error(f'服务{name}类型不支持')
        return False

    result = False

    try:
        while not result and times > 0:
            #logger.debug("start,result {} time is {}".format(result, times))
            times = times - 1
            func = getattr(ResourceStatusObj, object)
            if object == 'rc':
                # RayCluster status lookup requires the kubeconfig path.
                result, res = func(name, kube_config, namespace)
                #logger.debug(f'rc status check, result:{result}, response: {res}')
            else:
                result, res = func(name, namespace)
            if not result:
                # Log: "waiting for service <name> to start, current state ..."
                logger.info(f'轮巡等待服务{name}启动，当前状态: {res}, times:{times}, interval:{interval}')
                time.sleep(interval)
                #logger.debug("after sleep,result {}".format(result))
    except Exception as e:
        logger.error(f'check service status failed: {e}')
        return False
    return result

def apply_from_single_file(yaml_file):
    """Create every resource defined in *yaml_file* through the Kubernetes API."""
    kubeconfig.load_kube_config()
    api_client = client.ApiClient()
    utils.create_from_yaml(api_client, yaml_file, verbose=False)

def _getCMYamlObject(config, **kwargs):
    """Fill the ConfigMap template with Name, Namespace and ClusterId.

    Returns (True, config) on success, (False, {}) when a required key is
    missing or empty.
    """
    required = ["Name", "ClusterId", "Namespace"]
    if not checkProperties(kwargs, required):
       logger.error('invalid configMap args')
       return False, {}

    metadata = config['metadata']
    metadata['name'] = kwargs['Name']
    metadata['namespace'] = kwargs['Namespace']
    config['data']['ClusterId'] = kwargs['ClusterId']

    return True, config

def _getSCYamlObject(config, **kwargs):
    """Fill the StorageClass template with region/zone/VPC parameters.

    Returns (True, config) on success, (False, {}) when a required key is
    missing or empty.
    """
    # Renamed from the misleading 'pvcKeys' in the original.
    required = [ "Name", "RegionId", "ZoneId", "VpcId", "VswitchId" ]
    if not checkProperties(kwargs, required):
       logger.error('invalid sc args')
       return False, {}

    parameters = config['parameters']
    config['metadata']['name'] = kwargs['Name']
    parameters['regionId'] = kwargs['RegionId']
    parameters['zoneId'] = kwargs['ZoneId']
    parameters['vpcId'] = kwargs['VpcId']
    parameters['vSwitchId'] = kwargs['VswitchId']
    return True, config

def _getPVCYamlObject(config, **kwargs):
    """Fill the PVC template; VolumeMode is applied only when supplied.

    Returns (True, config) on success, (False, {}) when a required key is
    missing or empty.
    """
    required = [ "Name", "VolumeSize", "AccessMode", "StorageClassName" ]
    if not checkProperties(kwargs, required):
       logger.error('invalid pvc args')
       return False, {}

    spec = config['spec']
    config['metadata']['name'] = kwargs['Name']
    spec['storageClassName'] = kwargs['StorageClassName']
    spec['accessModes'][0] = kwargs['AccessMode']
    if 'VolumeMode' in kwargs:
        spec['volumeMode'] = kwargs['VolumeMode']
    spec['resources']['requests']['storage'] = kwargs['VolumeSize']
    return True, config

def _getSVCYamlObject(config, **kwargs):
    """Fill the Service template with the name and cluster-id selector.

    Fixes: 'ClusterId' is now validated up front — the original required
    only 'Name' yet read kwargs['ClusterId'], raising KeyError when it was
    missing.  The error message also said 'pvc' instead of 'svc'
    (copy-paste slip).

    Returns (True, config) on success, (False, {}) when a required key is
    missing or empty.
    """
    svcKeys = [ "Name", "ClusterId" ]

    if not checkProperties(kwargs, svcKeys):
       logger.error('invalid svc args')
       return False, {}

    config['metadata']['name'] = kwargs['Name']
    config['spec']['selector']['fce.clusterId'] = kwargs['ClusterId']
    return True, config

def _update_env_param_list(env_list, env, key):
    
    for i, d in enumerate(env_list):
        if d.get(key.lower()) == env.get(key):
            lowercase_env = {k.lower(): v for k, v in env.items()}
            env_list[i] = lowercase_env
            #del env_list[i]
            
            return
    
    env_list.append({
            'name': env['Name'],
            'value': env['Value'],
        })
    
def _getDeploymentYamlObjectForRC(config, object, **kwargs):
    """Fill the RayCluster template *config* (a kubectl List whose first item
    is the RayCluster) with head/worker settings from **kwargs.

    Required kwargs: cluster identity (Name, ClusterId, Namespace), plus
    image / CPU / memory / node-affinity tags / env vars for both the head
    node and the (single supported) worker group, and the worker replica
    count.  Returns (True, config) on success, (False, {}) when any required
    key is missing or empty.

    NOTE(review): the *object* parameter ('raycluster' at the call site) is
    never used in the body — confirm before removing it from the signature.
    """
    deploymentKeys = [ "Name", "ClusterId", "Namespace", "HeadImage", "HeadCPU", "HeadMem", "HeadNodeTags", "HeadNodeEnvs", 
                       "WorkerGroupName", "WorkerImage", "WorkerCPU", "WorkerMem", "WorkerNodeTags", "WorkerNodeEnvs", "WorkerReplicas" ]

    if not checkProperties(kwargs, deploymentKeys):
        logger.error('invalid deployment args')
        return False, {}
    # The template is a v1 List; the RayCluster itself is the first item.
    rc_config = config['items'][0]

    rc_config['metadata']['name'] = kwargs['Name']
    rc_config['metadata']['namespace'] = kwargs['Namespace']
    #rc_config['metadata']['annotations']['meta.helm.sh/release-namespace'] = kwargs['Namespace']
    rc_config['metadata']['labels']['app.clusterId'] = kwargs['ClusterId']
    #config['spec']['replicas'] = kwargs['Replicas']

    # --- Head group: affinity, container image/resources, env vars ---
    rc_config['spec']['headGroupSpec']['template']['metadata']['labels']['app.clusterId'] = kwargs['ClusterId']
    head_spec = rc_config['spec']['headGroupSpec']['template']['spec']
    nodeSelectTerms = head_spec['affinity']['nodeAffinity']['requiredDuringSchedulingIgnoredDuringExecution']['nodeSelectorTerms']
    logger.debug(f'nodeSelectTerms: {nodeSelectTerms}')
    # No tags -> drop the affinity block entirely so scheduling is unrestricted.
    if len(kwargs['HeadNodeTags']) == 0:
        del head_spec['affinity']
    else:
        nodeSelectTerms[0]['matchExpressions'] = []
        for tag in kwargs['HeadNodeTags']:
            nodeSelectTerms[0]['matchExpressions'].append({
                'key': tag['Key'],
                'operator': tag['Operator'],
                'values': tag['Values'],
            })
    logger.debug(f'nodeSelectTerms after: {nodeSelectTerms}')
    head_spec['containers'][0]['name'] = 'ray-head'
    head_spec['containers'][0]['image'] = kwargs['HeadImage']
    # requests == limits: guaranteed QoS for the head container.
    head_spec['containers'][0]['resources']['limits'] = {
        'cpu': str(kwargs['HeadCPU']),
        'memory': kwargs['HeadMem'],
    }
    head_spec['containers'][0]['resources']['requests'] = {
        'cpu': str(kwargs['HeadCPU']),
        'memory': kwargs['HeadMem'],
    }
    # Base env: configMapKeyRef entries plus the node/pod fieldRefs, then the
    # Role marker, then caller-supplied overrides merged in by name.
    cmParams = [
        'ClusterId', 'PIP_INDEX_URL',
    ]
    envParams = getEnvArray(_getName(kwargs['ClusterId'], 'cm'), cmParams)
    envParams.append({ 
        'name': 'Role', 
        'value': 'HeadNode' 
        })
    logger.debug('envParams: {};  HeadNodeEnvs: {}'.format(envParams, kwargs['HeadNodeEnvs']))
    for env in kwargs['HeadNodeEnvs']:
        _update_env_param_list(envParams, env, 'Name')
    logger.debug('after envParams: {};  HeadNodeEnvs: {}'.format(envParams, kwargs['HeadNodeEnvs']))
    head_spec['containers'][0]['env'] = envParams
    #logger.debug(f'head_spec: {head_spec}')
    '''
    head_spec['containers'][0]['volumeMounts'] = [{
        'mountPath': /data,
        'name': pvc-nas,
        'subPath': headnode,
    }]
    #defaultPath = 'opt'
    volumes = {
        'name': clusterid-nas,
        'persistentVolumeClaim': {
            'claimName': pvc-nas,
        },
    }
    head_spec['volumes'] = [ volumes ]
    '''
    #Support only one worker group currently, TBD
    # --- Worker group: mirrors the head-group handling ---
    rc_config['spec']['workerGroupSpecs'][0]['template']['metadata']['labels']['app.clusterId'] = kwargs['ClusterId']
    rc_config['spec']['workerGroupSpecs'][0]['groupName'] = kwargs['WorkerGroupName']
    rc_config['spec']['workerGroupSpecs'][0]['maxReplicas'] = kwargs['WorkerReplicas']
    rc_config['spec']['workerGroupSpecs'][0]['replicas'] = kwargs['WorkerReplicas']
    worker_spec = rc_config['spec']['workerGroupSpecs'][0]['template']['spec']
    nodeSelectTerms = worker_spec['affinity']['nodeAffinity']['requiredDuringSchedulingIgnoredDuringExecution']['nodeSelectorTerms']
    if len(kwargs['WorkerNodeTags']) == 0:
        del worker_spec['affinity']
    else:
        nodeSelectTerms[0]['matchExpressions'] = []
        for tag in kwargs['WorkerNodeTags']:
            nodeSelectTerms[0]['matchExpressions'].append({
                'key': tag['Key'],
                'operator': tag['Operator'],
                'values': tag['Values'],
            })
    worker_spec['containers'][0]['name'] = 'ray-worker'
    worker_spec['containers'][0]['image'] = kwargs['WorkerImage']
    worker_spec['containers'][0]['resources']['limits'] = {
        'cpu': str(kwargs['WorkerCPU']),
        'memory': kwargs['WorkerMem'],
    }
    worker_spec['containers'][0]['resources']['requests'] = {
        'cpu': str(kwargs['WorkerCPU']),
        'memory': kwargs['WorkerMem'],
    }
    
    cmParams = [
        'ClusterId', 'PIP_INDEX_URL',
    ]
    envParams = getEnvArray(_getName(kwargs['ClusterId'], 'cm'), cmParams)
    envParams.append({ 
        'name': 'Role', 
        'value': 'WorkerNode' 
        })
    for env in kwargs['WorkerNodeEnvs']:
        _update_env_param_list(envParams, env, 'Name')
    
    worker_spec['containers'][0]['env'] = envParams

    return True, config

def create_resource_from_file(config, yamlFile):
    """Serialize *config* to *yamlFile* and create its resources via the
    Kubernetes API; an existing file is first moved aside with a
    YYYYmmddHHMM timestamp suffix."""
    if os.path.exists(yamlFile) and os.path.isfile(yamlFile):
        stamp = datetime.now().strftime("%Y%m%d%H%M")
        tmp_dir = f'{yamlFile}.{stamp}'
        logger.warning(f'{yamlFile} already exist, move to {tmp_dir}')
        shutil.move(yamlFile, tmp_dir)
    with open(yamlFile, 'w') as file:
        yaml.dump(config, file)
    apply_from_single_file(yamlFile)

def apply_resource_from_file(config, yamlFile, kube_config):
    """Serialize *config* to *yamlFile* (backing up any existing file with a
    YYYYmmddHHMM timestamp suffix) and `kubectl apply` it with *kube_config*.

    Returns the (status, output) pair from the kubectl invocation.
    """
    if os.path.exists(yamlFile) and os.path.isfile(yamlFile):
        stamp = datetime.now().strftime("%Y%m%d%H%M")
        tmp_dir = f'{yamlFile}.{stamp}'
        logger.warning(f'{yamlFile} already exist, move to {tmp_dir}')
        shutil.move(yamlFile, tmp_dir)
    with open(yamlFile, 'w') as file:
        yaml.dump(config, file)

    deploy_cmd = f'kubectl --kubeconfig={kube_config} apply -f {yamlFile}'
    status, output = localrun.run(deploy_cmd)
    logger.debug("deploy_cmd is {}, output: {}".format(deploy_cmd, output))
    return status, output

def deploy_configMap(cluster_id, namespace):
    """Render and create the per-cluster ConfigMap ('<cluster_id>-cm')."""
    config = genYamlObj.cm(
        Name=_getName(cluster_id, 'cm'),
        ClusterId=cluster_id,
        Namespace=namespace,
    )
    yamlPath = getYamlPath(cluster_id, 'cm')
    create_resource_from_file(config, yamlPath)

def deploy_SC(cluster_id, region_id, zone_id, vpc_id, vswitch_id):
    """Render and create the per-cluster StorageClass ('<cluster_id>-sc')."""
    config = genYamlObj.sc(
        Name=_getName(cluster_id, 'sc'),
        RegionId=region_id,
        ZoneId=zone_id,
        VpcId=vpc_id,
        VswitchId=vswitch_id,
    )
    yamlPath = getYamlPath(cluster_id, 'sc')
    create_resource_from_file(config, yamlPath)

def deploy_PVC(cluster_id, sc_name):
    """Render and create the per-cluster PVC ('<cluster_id>-pvc') bound to
    StorageClass *sc_name*, using the module defaults for size/access mode.
    VolumeMode is deliberately not set for this PVC flavour."""
    config = genYamlObj.pvc(
        Name=_getName(cluster_id, 'pvc'),
        StorageClassName=sc_name,
        AccessMode=defaultPVC['AccessMode'],
        VolumeSize=defaultPVC['VolumeSize'],
    )
    yamlPath = getYamlPath(cluster_id, 'pvc')
    create_resource_from_file(config, yamlPath)

def deploy_PVC_disk(cluster_id, pvc_type = 'opt'):
    """Render and create a disk-backed PVC ('<cluster_id>-<pvc_type>-pvc')
    using the module defaultPVC settings.

    The 'vn' flavour is written to a separate 'vn-pvc' YAML file so it does
    not clobber the regular pvc manifest.
    """
    config = genYamlObj.pvc(
        Name=_getName(cluster_id, pvc_type, 'pvc'),
        StorageClassName=defaultPVC['StorageClassName'],
        AccessMode=defaultPVC['AccessMode'],
        VolumeMode=defaultPVC['VolumeMode'],
        VolumeSize=defaultPVC['VolumeSize'],
    )
    yaml_kind = 'vn-pvc' if pvc_type == 'vn' else 'pvc'
    yamlPath = getYamlPath(cluster_id, yaml_kind)
    create_resource_from_file(config, yamlPath)

def deploy_SVC(cluster_id):
    """Render and create the per-cluster Service ('<cluster_id>-svc')."""
    config = genYamlObj.svc(
        Name=_getName(cluster_id, 'svc'),
        ClusterId=cluster_id,
    )
    yamlPath = getYamlPath(cluster_id, 'svc')
    create_resource_from_file(config, yamlPath)

def deploy_ray_cluster(ray_cluster_cfg):
    """Render the RayCluster manifest from *ray_cluster_cfg* and apply it
    with kubectl; returns the (status, output) pair from the apply.

    Only the first entry of 'worker_node' is used (single worker group).
    """
    head = ray_cluster_cfg['head_node']
    worker = ray_cluster_cfg['worker_node'][0]
    config = genYamlObj.ray_cluster(
        Name=ray_cluster_cfg['cluster_name'],
        ClusterId=ray_cluster_cfg['cluster_id'],
        Namespace=ray_cluster_cfg['namespace'],
        HeadImage=head['Image'],
        HeadCPU=head['Cpu'],
        HeadMem=head['Memory'],
        HeadNodeTags=head['NodeAffinity'],
        HeadNodeEnvs=head['EnvironmentVars'],
        WorkerGroupName=worker['GroupName'],
        WorkerImage=worker['Image'],
        WorkerCPU=worker['Cpu'],
        WorkerMem=worker['Memory'],
        WorkerNodeTags=worker['NodeAffinity'],
        WorkerNodeEnvs=worker['EnvironmentVars'],
        WorkerReplicas=worker['Replicas'],
    )
    yamlPath = getYamlPath(ray_cluster_cfg['cluster_id'], 'rc')
    return apply_resource_from_file(config, yamlPath, ray_cluster_cfg['kube_config'])

def list_service_account_by_name(name):
    """Return True when a ServiceAccount named *name* exists in any namespace."""
    kubeconfig.load_kube_config()
    core_api = client.CoreV1Api()
    accounts = core_api.list_service_account_for_all_namespaces()
    return any(item.metadata.name == name for item in accounts.items)

def checkServiceExist(object, name, namespace = 'default'):
    """Return True when resource *name* of supported type *object* exists.

    Unsupported types are logged and reported as False.
    """
    if not validateObject(object):
        # Log: "service <object> type not supported".
        logger.error(f'服务{object}类型不支持')
        return False
    return getattr(ResourceExistObj, object)(name, namespace)
