import copy
import json
import logging
import os
import re
import time

from kubernetes import client
from myapp.config import CRD_INFO, HOSTALIASES, HUBSECRET, IMAGE_PULL_POLICY, SERVICE_NAMESPACE
from myapp.const.pool import EnumPoolType
from myapp.models.model_job import Repository
from myapp.models.model_serving import InferenceService
from myapp.models.user_attributes import UserAttribute
from myapp.third.k8s.pv_pvc import create_pv, create_pvc
from myapp.third.k8s.py_karmada import (
    get_k8s_client_by_region,
    get_service_export_yaml,
    get_service_import_yaml,
    propagation_job,
    propagation_job_without_namespace,
)
from myapp.third.k8s.scheduler import get_scheduler_name
from myapp.third.zhentong.api import NODE_LABEL_KEY
from myapp.utils.device_resource import DeviceResource
from myapp.utils.exception import log_exception
from myapp.utils.resource import get_deploy_resource, is_multi_node


log = logging.getLogger(__name__)


def init_namespace(user_id, db_session, region='default'):
    """Idempotently provision the per-user serving namespace for a region.

    Creates the namespace and the three default PV/PVC pairs, propagates
    them to the remote cluster for non-default regions, then marks the
    user's attribute row as initialized so later calls are no-ops.

    Args:
        user_id: owner user id; also used as the namespace/PV suffix.
        db_session: SQLAlchemy session for UserAttribute reads/writes.
        region: target cluster region; 'default' skips propagation.

    Raises:
        Exception: if no UserAttribute row exists for (user_id, region).
    """
    user_attr = (
        db_session.query(UserAttribute)
        .filter(UserAttribute.user_id == user_id, UserAttribute.region == region)
        .first()
    )
    if user_attr is None:
        raise Exception(f'用户attr不存在, user_id: {user_id}')
    if user_attr.service_is_initialized == 1:
        # Already provisioned for this region; nothing to do.
        return

    k8s_client = get_k8s_client_by_region(region=region)

    namespace = f'{SERVICE_NAMESPACE}-{user_id}'
    k8s_client.create_namespace(namespace)

    # (pvc name, pv base name, host path) for the three default mounts.
    default_mounts = [
        (
            'kubeflow-archives',
            'service-kubeflow-archives',
            '/data/k8s/kubeflow/pipeline/archives',
        ),
        (
            'kubeflow-publish-data-pvc',
            'service-kubeflow-publish-data',
            '/data/k8s/kubeflow/pipeline/publish-data',
        ),
        (
            'kubeflow-user-workspace',
            'service-kubeflow-user-workspace',
            '/data/k8s/kubeflow/pipeline/workspace',
        ),
    ]
    for _, pv_base, host_path in default_mounts:
        create_pv(pv_base, user_id, host_path, k8s_client)
    for pvc_name, pv_base, _ in default_mounts:
        create_pvc(pvc_name, namespace, pv_base, k8s_client)

    if region != 'default':
        # Remote region: push the cluster-scoped PVs first, then the PVCs.
        for _, pv_base, _ in default_mounts:
            propagation_job_without_namespace(
                region=region,
                k8s_client=k8s_client,
                name=f'{pv_base}-{user_id}',
                suffix=region,
                api_version='v1',
                kind='PersistentVolume',
            )
        for pvc_name, _, _ in default_mounts:
            propagation_job(
                region=region,
                k8s_client=k8s_client,
                name=pvc_name,
                suffix=region,
                namespace=namespace,
                api_version='v1',
                kind='PersistentVolumeClaim',
            )

    # Only rows still at 0 are flipped, so the update happens at most once.
    db_session.query(UserAttribute).filter(
        UserAttribute.user_id == user_id,
        UserAttribute.service_is_initialized == 0,
        UserAttribute.region == region,
    ).update({'service_is_initialized': 1})
    db_session.commit()


def delete_old_service(
    service_name, cluster=None, namespace=None, region='default', multi_node=False
):
    """Best-effort teardown of every k8s object behind a deployed service.

    Removes the service, its '-external' proxy service, istio ingress, HPA,
    configmap, and the workload (Deployment + inferenceservice CRD for
    single-node, vcjob CRD for multi-node) for the base name and its
    'debug-'/'test-' variants.

    Args:
        service_name: base name of the service to tear down.
        cluster: unused; kept for caller compatibility.
        namespace: target namespace; defaults to SERVICE_NAMESPACE.
        region: cluster region used to pick the k8s client.
        multi_node: True if the workload was a multi-node volcano job.

    NOTE(review): failures inside the log_exception context appear to be
    logged and suppressed — confirm its semantics; a failing delete may
    skip the remaining cleanup calls.
    """
    k8s_client = get_k8s_client_by_region(region=region)
    if namespace is None:
        namespace = SERVICE_NAMESPACE

    # Preserve original semantics: only an exact False selects the
    # single-node (Deployment + inferenceservice) cleanup path.
    single_node = multi_node is False
    crd = CRD_INFO['inferenceservice'] if single_node else CRD_INFO['vcjob']

    with log_exception:
        for prefix in ('', 'debug-', 'test-'):
            name = prefix + service_name
            service_external_name = (name + '-external').lower()[:60].strip('-')
            k8s_client.delete_service(namespace=namespace, name=name)
            k8s_client.delete_service(namespace=namespace, name=service_external_name)
            k8s_client.delete_istio_ingress(namespace=namespace, name=name)
            k8s_client.delete_hpa(namespace=namespace, name=name)
            k8s_client.delete_configmap(namespace=namespace, name=name)
            if single_node:
                k8s_client.delete_deployment(namespace=namespace, name=name)
            k8s_client.delete_crd(
                crd['group'],
                crd['version'],
                crd['plural'],
                namespace=namespace,
                name=name,
            )


def deploy_service(
    instance_id,
    user_id,  # g.user.id
    session,
    region='default',
):
    """Deploy an inference service instance to Kubernetes.

    Provisions the per-user namespace, builds the configmap / volume-mount /
    env configuration, then creates either a single-node Deployment plus
    Service or a multi-node volcano job. For non-default regions the created
    objects are additionally propagated via karmada, and the service is
    exposed across clusters with ServiceExport/ServiceImport CRDs.

    Args:
        instance_id: primary key of the InferenceService row to deploy.
        user_id: owner user id (g.user.id); must not be None.
        session: SQLAlchemy session used for all DB reads.
        region: target cluster region; 'default' means the local cluster.

    Raises:
        Exception: if user_id is None.
        KeyError: if no resource config matches instance.specifications.
    """
    instance = session.query(InferenceService).filter_by(id=instance_id).first()

    name = instance.name
    command = instance.command
    deployment_replicas = instance.min_replicas

    if user_id is None:
        raise Exception('user_id empty')

    # 初始化namespace: per-user namespace isolation.
    init_namespace(user_id, session, region=region)
    namespace = instance.namespace

    # Copy so user secrets are never appended to the shared module-level
    # HUBSECRET list (the original aliased and mutated it in place).
    image_secrets = list(HUBSECRET)
    user_hubsecrets = (
        session.query(Repository.hubsecret).filter(Repository.created_by_fk == user_id).all()
    )
    for hubsecret in user_hubsecrets:
        if hubsecret[0] not in image_secrets:
            image_secrets.append(hubsecret[0])

    k8s_client = get_k8s_client_by_region(region=region)

    # inference_config holds one or more files separated by '---' lines;
    # the first line of each chunk is the file name, the rest its content.
    config_datas = (
        instance.inference_config.strip().split('\n---') if instance.inference_config else []
    )
    config_datas = [x.strip() for x in config_datas if x.strip()]
    volume_mount = instance.volume_mount

    # 默认挂载上 publish-data
    volume_mount = append_default_volume_mount(volume_mount)

    config_data = {}
    for data in config_datas:
        file_name = re.sub('^-*', '', data.split('\n')[0]).strip()
        file_content = '\n'.join(data.split('\n')[1:])
        if file_name and file_content:
            config_data[file_name] = file_content
    if config_data:
        k8s_client.create_configmap(
            namespace=namespace, name=name, data=config_data, labels={'app': name}
        )
        volume_mount += ',%s(configmap):/config/' % name
    ports = [int(port) for port in instance.ports.split(',')]

    pod_env = get_envs(instance)

    # 因为所有的服务流量通过ingress实现，所以没有isito的envoy代理
    labels = {
        'app': name,
        'user': instance.created_by.username,
        'pod-type': 'inference',
        'service-id': str(instance.service_id),
    }
    pod_ports = copy.deepcopy(ports)
    try:
        if instance.metrics.strip():
            # metrics is '<port>:<path>'; expose the metrics port on the pod.
            metrics_port = int(instance.metrics[: instance.metrics.index(':')])
            pod_ports.append(metrics_port)
    except Exception as e:
        log.exception(e)

    pod_ports = list(set(pod_ports))
    annotations = {}
    # https://istio.io/latest/docs/reference/config/annotations/
    if instance.sidecar and 'istio' in instance.sidecar and instance.service_type == 'serving':
        labels['sidecar.istio.io/inject'] = 'true'

    health = instance.service.health
    if health is None or health == '':
        health = '8080'

    k8s_client.create_namespace(namespace)
    tolerations = None
    if instance.pool_type == EnumPoolType.exclusive.value:
        tolerations = [{'key': NODE_LABEL_KEY, 'operator': 'Exists', 'effect': 'NoSchedule'}]
    dr = DeviceResource.from_str(instance.resource_gpu, instance.gpu_type)

    resource_cfg = get_deploy_resource(instance.specifications)
    if resource_cfg is None:
        raise KeyError(f'get resource_cfg is None, specifications: {instance.specifications}')

    if is_multi_node(resource_cfg=resource_cfg):
        # 执行分布式部署，使用volcano
        k8s_client.deploy_multi_node_volcano(
            k8s_client=k8s_client,
            namespace=namespace,
            name=name,
            replicas=resource_cfg.get('node_num'),
            labels=labels,
            annotations=annotations,
            command=['sh', '-c', command] if command else None,
            args=None,
            volume_mount=volume_mount,
            working_dir=instance.working_dir,
            node_selector=instance.node_selector,
            resource_memory=instance.resource_memory,
            resource_cpu=instance.resource_cpu,
            resource_gpu=dr,
            image_pull_policy=IMAGE_PULL_POLICY,
            image_pull_secrets=image_secrets,
            image=instance.images,
            env=pod_env,
            privileged=False,
            username=instance.created_by.username,
            ports=pod_ports,
            health=health,
        )
    else:
        # 执行单机部署
        k8s_client.create_deployment(
            namespace=namespace,
            name=name,
            replicas=deployment_replicas,
            labels=labels,
            annotations=annotations,
            command=['sh', '-c', command] if command else None,
            args=None,
            volume_mount=volume_mount,
            working_dir=instance.working_dir,
            node_selector=instance.node_selector,
            resource_memory=instance.resource_memory,
            resource_cpu=instance.resource_cpu,
            resource_gpu=dr,
            image_pull_policy=IMAGE_PULL_POLICY,
            image_pull_secrets=image_secrets,
            image=instance.images,
            hostAliases=HOSTALIASES,
            env=pod_env,
            privileged=False,
            accounts=None,
            username=instance.created_by.username,
            ports=pod_ports,
            health=health,
            scheduler_name=get_scheduler_name(),
            tolerations=tolerations,
        )

        # 监控: annotate the Service so prometheus scrapes the metrics port.
        annotations = {}
        if instance.metrics:
            annotations = {
                'prometheus.io/scrape': 'true',
                'prometheus.io/port': instance.metrics.split(':')[0],
                'prometheus.io/path': instance.metrics.split(':')[1],
            }

        log.info('deploy service')
        k8s_client.create_service(
            namespace=namespace,
            name=name,
            username=instance.created_by.username,
            ports=ports,
            annotations=annotations,
            selector=labels,
        )
        log.info('deploy service success')

    # 如果是异地集群，任务要分发
    if region != 'default':
        # Poll briefly so the Deployment exists before karmada propagation.
        for i in range(10):
            obj = k8s_client.get_deployment(name=name, namespace=namespace)
            if obj:
                log.info(f'[deploy_service][{i}] find {name} before propagation')
                time.sleep(0.3)
                break
            else:
                log.info(f'[deploy_service][{i}] {name} not found before propagation')
                time.sleep(0.5)

        # 分发Deployment
        propagation_job(
            region=region,
            k8s_client=k8s_client,
            name=name,
            suffix=region,
            namespace=namespace,
            api_version='apps/v1',
            kind='Deployment',
        )

        # 分发Service
        propagation_job(
            region=region,
            k8s_client=k8s_client,
            name=name,
            suffix=region + '-service',
            namespace=namespace,
            api_version='v1',
            kind='Service',
        )

        # Export the service from the remote cluster; 409 means a previous
        # deploy already created it and is safe to ignore.
        try:
            se_yaml = get_service_export_yaml(name=name, namespace=namespace)
            log.info(f'[deploy_service] name {name}; se_yaml {se_yaml}')
            k8s_client.create_crd(
                group='multicluster.x-k8s.io',
                version='v1alpha1',
                plural='serviceexports',
                namespace=namespace,
                body=se_yaml,
            )
        except client.exceptions.ApiException as e:
            if e.status == 409:
                log.info(f'[deploy_service] name {name} already exists in [{region}][{namespace}]')
            else:
                raise e

        propagation_job(
            region=region,
            k8s_client=k8s_client,
            name=name,
            suffix=region + '-se',
            namespace=namespace,
            api_version='multicluster.x-k8s.io/v1alpha1',
            kind='ServiceExport',
        )

        # Import the service so the local cluster can reach it; 409 ignored
        # for the same reason as the export above.
        try:
            si_yaml = get_service_import_yaml(name=name, namespace=namespace)
            log.info(f'[deploy_service] name {name}; si_yaml {si_yaml}')
            k8s_client.create_crd(
                group='multicluster.x-k8s.io',
                version='v1alpha1',
                plural='serviceimports',
                namespace=namespace,
                body=si_yaml,
            )
        except client.exceptions.ApiException as e:
            if e.status == 409:
                log.info(f'[deploy_service] name {name} already exists in [{region}][{namespace}]')
            else:
                raise e

        propagation_job(
            region=region,
            k8s_client=k8s_client,
            name=name,
            suffix=region + '-si',
            namespace=namespace,
            api_version='multicluster.x-k8s.io/v1alpha1',
            kind='ServiceImport',
        )

    # NOTE: legacy istio-ingress, external-IP proxy service and HPA
    # provisioning code (referencing conf/core/flash, undefined here) was
    # removed; recover it from version control if it is ever needed again.


def append_default_volume_mount(volume_mount: str) -> str:
    """Ensure the default publish-data mounts are present in a mount spec.

    Appends the publish-data PVC mount plus its train_code / train_data
    variants to the comma-separated *volume_mount* string unless they are
    already listed.

    Bug fix: the original ran ``pd not in volume_mount`` before its
    None-aware helper, so a ``None`` mount spec raised TypeError even though
    None was clearly meant to be supported — it is now treated as empty.

    Args:
        volume_mount: existing comma-separated mount spec; may be None/empty.

    Returns:
        The mount spec with all three default mounts guaranteed present.
    """
    pd = 'kubeflow-publish-data-pvc(pvc):/mnt/publish-data'
    train_code = 'kubeflow-publish-data-pvc(pvc):/mnt/publish-data:train_code'
    train_data = 'kubeflow-publish-data-pvc(pvc):/mnt/publish-data:train_data'

    # Normalize None (and other falsy values) before substring checks.
    if not volume_mount:
        volume_mount = ''

    # 默认挂载上 publish-data
    for mount in (pd, train_code, train_data):
        if mount not in volume_mount:
            volume_mount = mount if not volume_mount else f'{volume_mount},{mount}'
    return volume_mount


def get_envs(instance):
    """Assemble the newline-separated env-var string injected into the pod.

    ``instance.env`` may be a JSON object (preferred) or a raw newline
    'K=V' string; on any parse/shape failure the raw value is used as-is.
    Model and inference-code paths are rooted under /mnt/publish-data, and
    the standard KUBEFLOW_* plus log-collection variables are appended.
    """
    publish_root = '/mnt/publish-data'

    # Prefer a JSON dict of env vars; fall back to the raw string verbatim.
    try:
        parsed = json.loads(instance.env)
        raw_env = ''.join(f'{key}={value}\n' for key, value in parsed.items()) if parsed else ''
    except Exception:
        raw_env = instance.env

    segments = [raw_env.strip()]
    if instance.model_path:
        segments.append('KUBEFLOW_MODEL_PATH=' + instance.model_path)

    model_path = instance.get_model_path()
    infer_code_path = instance.get_infer_code_path()

    if model_path:
        if not model_path.startswith(publish_root):
            model_path = os.path.join(publish_root, model_path)
        segments.append('MODEL_PATH=' + model_path)
        # Lower-case duplicate of MODEL_PATH under the vllm__model key.
        segments.append('vllm__model=' + model_path)
    if infer_code_path:
        if not infer_code_path.startswith(publish_root):
            infer_code_path = os.path.join(publish_root, infer_code_path)
        segments.append('INFER_CODE_PATH=' + infer_code_path)

    segments.append('KUBEFLOW_MODEL_IMAGES=' + instance.images)
    segments.append('KUBEFLOW_MODEL_NAME=' + instance.model_name)
    segments.append(
        'KUBEFLOW_AREA=' + json.loads(instance.project.expand).get('area', 'guangzhou')
    )
    # 注入日志采集环境变量 (log-collection marker variable)
    segments.append('ilogtail=true')

    return '\n'.join(segments).strip(',')


def upgrade_service(instance_id, user_id, session, region='default'):
    """Re-deploy (in-place upgrade) an existing single-node inference service.

    Rebuilds the configmap, volume mounts and env from the current DB row
    and applies an updated Deployment with the same name/namespace.

    Args:
        instance_id: primary key of the InferenceService row.
        user_id: owner user id, used to collect personal image-pull secrets.
        session: SQLAlchemy session used for all DB reads.
        region: target cluster region used to pick the k8s client.
    """
    instance = session.query(InferenceService).filter_by(id=instance_id).first()

    namespace = instance.namespace
    name = instance.name
    command = instance.command
    deployment_replicas = instance.min_replicas
    # Copy so user secrets are never appended to the shared module-level
    # HUBSECRET list (the original aliased and mutated it in place).
    image_secrets = list(HUBSECRET)
    user_hubsecrets = (
        session.query(Repository.hubsecret).filter(Repository.created_by_fk == user_id).all()
    )
    for hubsecret in user_hubsecrets:
        if hubsecret[0] not in image_secrets:
            image_secrets.append(hubsecret[0])

    k8s_client = get_k8s_client_by_region(region=region)

    # inference_config holds one or more files separated by '---' lines;
    # the first line of each chunk is the file name, the rest its content.
    config_datas = (
        instance.inference_config.strip().split('\n---') if instance.inference_config else []
    )
    config_datas = [x.strip() for x in config_datas if x.strip()]
    volume_mount = instance.volume_mount
    # 默认挂载上 publish-data
    volume_mount = append_default_volume_mount(volume_mount)

    config_data = {}
    for data in config_datas:
        file_name = re.sub('^-*', '', data.split('\n')[0]).strip()
        file_content = '\n'.join(data.split('\n')[1:])
        if file_name and file_content:
            config_data[file_name] = file_content
    if config_data:
        k8s_client.create_configmap(
            namespace=namespace, name=name, data=config_data, labels={'app': name}
        )
        volume_mount += ',%s(configmap):/config/' % name
    ports = [int(port) for port in instance.ports.split(',')]

    pod_env = get_envs(instance)
    log.info(f'[upgrade] pod_env:{pod_env}')

    # 因为所有的服务流量通过ingress实现，所以没有isito的envoy代理
    labels = {
        'app': name,
        'user': instance.created_by.username,
        'pod-type': 'inference',
        'service-id': str(instance.service_id),
    }
    pod_ports = copy.deepcopy(ports)
    with log_exception:
        if instance.metrics.strip():
            # metrics is '<port>:<path>'; expose the metrics port on the pod.
            metrics_port = int(instance.metrics[: instance.metrics.index(':')])
            pod_ports.append(metrics_port)

    pod_ports = list(set(pod_ports))
    log.info('create deployment')
    annotations = {}
    # https://istio.io/latest/docs/reference/config/annotations/
    if instance.sidecar and 'istio' in instance.sidecar and instance.service_type == 'serving':
        labels['sidecar.istio.io/inject'] = 'true'

    health = instance.service.health
    if health is None or health == '':
        health = '8080'
    tolerations = None
    if instance.pool_type == EnumPoolType.exclusive.value:
        tolerations = [{'key': NODE_LABEL_KEY, 'operator': 'Exists', 'effect': 'NoSchedule'}]
    k8s_client.create_deployment(
        namespace=namespace,
        name=name,
        replicas=deployment_replicas,
        labels=labels,
        annotations=annotations,
        command=['sh', '-c', command] if command else None,
        args=None,
        volume_mount=volume_mount,
        working_dir=instance.working_dir,
        node_selector=instance.get_node_selector(),
        resource_memory=instance.resource_memory,
        resource_cpu=instance.resource_cpu,
        resource_gpu=DeviceResource.from_str(instance.resource_gpu, instance.gpu_type),
        image_pull_policy=IMAGE_PULL_POLICY,
        image_pull_secrets=image_secrets,
        image=instance.images,
        hostAliases=HOSTALIASES,
        env=pod_env,
        privileged=False,
        accounts=None,
        username=instance.created_by.username,
        ports=pod_ports,
        health=health,
        tolerations=tolerations,
        scheduler_name=get_scheduler_name(),
    )
