--- START OF FILE view_serving.py ---

# Import standard library modules for various functionalities.
import copy
import datetime
import json
import logging
import math
import os
import random
import re
import time
from urllib import parse
import uuid

# Import components from Flask for handling requests and responses.
from flask import Response, g, request
# Import the SQLAlchemy data interface from Flask-AppBuilder.
from flask_appbuilder.models.sqla.interface import SQLAInterface
# Import the User model from Flask-AppBuilder's security module.
from flask_appbuilder.security.sqla.models import User
# Import the ModelArts SDK for interacting with the Huawei ModelArts platform.
import modelarts
# Import OpenTelemetry for distributed tracing.
from opentelemetry import trace
# Import requests for making HTTP requests, particularly for ModelArts.
import requests
# Import SQLAlchemy for database operations.
import sqlalchemy

# Import base API functionalities
# 导入基础API功能
from myapp.apis.base import expose
# Import service building utility
# 导入服务构建工具
from myapp.apis.service import build_service
# Import application instance, appbuilder, and database instance
# 导入应用实例、appbuilder和数据库实例
from myapp.app import app, appbuilder, db
# Import auto-learning configurations
# 导入自动学习配置
from myapp.configs.auto_learning import AUTO_LEARNING_CFGS
# Import common, inference, and ModelArts error codes
# 导入通用、推理和ModelArts错误码
from myapp.const.error import CommonErrorCode, InferErrorCode, ModelartsErrorCode
# Import business error and response wrapper
# 导入业务错误和响应包装器
from myapp.const.response import BizError, wrap_response
# Import service-related enums
# 导入与服务相关的枚举
from myapp.const.service import EnumFramework, EnumModelSource, EnumModelType, ServiceStatus
# Import ExModel model
# 导入ExModel模型
from myapp.models.model_ex_model import ExModel
# Import Labels model
# 导入Labels模型
from myapp.models.model_labels import Labels
# Import Model_Version model
# 导入Model_Version模型
from myapp.models.model_model_version import Model_Version
# Import InferenceService, Service, and get_model_code utility
# 导入InferenceService、Service和get_model_code工具
from myapp.models.model_serving import InferenceService, Service, get_model_code
# Import Training_Model model
# 导入Training_Model模型
from myapp.models.model_train_model import Training_Model
# Import Elasticsearch query utilities
# 导入Elasticsearch查询工具
from myapp.third.es.es import query, query_all
# Import Kubernetes client
# 导入Kubernetes客户端
from myapp.third.k8s.py_k8s import K8s
# Import ModelArts client
# 导入ModelArts客户端
from myapp.third.modelarts import ma_client
# Import ModelArts configurations
# 导入ModelArts配置
from myapp.third.modelarts.configs import (
    MODELARTS_PRIVATE_POOL_ID,
    TAICHU_ONLINE_INFER_SERVICE_DEFAULT_CONFIGS,
)
# Import ModelArts inference service utilities
# 导入ModelArts推理服务工具
from myapp.third.modelarts.infer_service import get_deploy_failed_reason
# Import ModelArts predictor client
# 导入ModelArts预测器客户端
from myapp.third.modelarts.ma_client import ma_predictor
# Import ModelArts API exception parser
# 导入ModelArts API异常解析器
from myapp.third.modelarts.tools import parse_apig_exception
# Import Prometheus metrics utility
# 导入Prometheus指标工具
from myapp.third.prometheus.prometheus import get_metrics
# Import TCIS API utilities
# 导入TCIS API工具
from myapp.third.tcis.api import TaskType, is_resources_config_runnable
# Import environment and string utilities
# 导入环境和字符串工具
from myapp.utils import env, strings
# Import collection utilities for dictionary difference
# 导入集合工具，用于字典差异
from myapp.utils.collections import dict_diff, dict_to_array
# Import environment check functions for ModelArts and private cloud
# 导入ModelArts和私有云的环境检查函数
from myapp.utils.env import is_modelarts, is_private
# Import resource utilities
# 导入资源工具
from myapp.utils.resource import (
    get_custom_resources,
    get_deploy_resources,
    is_multi_node,
)

# Import merge utility for service
# 导入服务合并工具
from ..apis.merge import merge_by_service
# Import payment-related API functions
# 导入支付相关的API功能
from ..apis.pay import (
    check_user_balance,
    get_model_deploying_estimate_amount,
    get_model_deploying_price,
    model_deploying_billing,
    need_model_deploying_billing,
)
# Import service business logic functions
# 导入服务业务逻辑功能
from ..biz.service import delete_old_service, deploy_service, upgrade_service
# Import pool type enum
# 导入池类型枚举
from ..const.pool import EnumPoolType
# Import BillingOrder model
# 导入BillingOrder模型
from ..models.billing_order import BillingOrder
# Import MyUser model
# 导入MyUser模型
from ..models.model_user import MyUser
# Import exception logging utility
# 导入异常日志记录工具
from ..utils.exception import log_exception
# Import specifications utility
# 导入规格工具
from ..utils.specifications import get_specifications_type
# Import base API class
# 导入基础API类
from .baseApi import MyappModelRestApi


# Get application configuration
# 获取应用配置
conf = app.config

# Initialize OpenTelemetry tracer
# 初始化OpenTelemetry跟踪器
tracer = trace.get_tracer(__name__)

# Lock prefix for inference service prediction
# 推理服务预测的锁前缀
LOCK_PREFIX = 'taichu-inferservice-predict'


# Service ModelView API class
# 服务模型视图API类
class Service_ModelView_Api(MyappModelRestApi):
    # Data model interface for Service
    # Service的数据模型接口
    datamodel = SQLAInterface(Service)
    # Route base for the API endpoint
    # API端点的路由基础
    route_base = '/service_modelview/api'

    # List services API endpoint
    # 列出服务API端点
    @expose('/list', methods=['POST'])
    @wrap_response
    def list(self):
        """List services visible to the current user in the current region.

        Reads pagination / filter options from the JSON request body and
        delegates the actual querying to ``list_service``.
        """
        body = request.json
        # Pull the paging and filtering options out of the payload,
        # falling back to sensible defaults when absent.
        return self.list_service(
            body.get('page', 0),
            body.get('page_size', 10),
            body.get('lora_adapter', False),
            body.get('filters', []),
            region=g.region.key,
        )

    # Static method to list services
    # 列出服务的静态方法
    @staticmethod
    def list_service(page=0, page_size=10, lora_adapter=False, filters=None, region=None):
        """Return a paginated, permission-scoped summary of services.

        Args:
            page: zero-based page index; negative values are clamped to 0.
            page_size: rows per page, clamped into [0, 999].
            lora_adapter: retained for interface compatibility; the
                lora-adapter listing branch was dead (commented-out) code
                and has been removed.
            filters: list of ``{'col', 'opr', 'value'}`` dicts. Supported
                operators: ``eq``, ``ct`` (contains) and ``in``.
            region: optional region key restricting the Service query.

        Returns:
            dict: ``{'count': <total matches>, 'data': <summary dicts>}``.

        Raises:
            BizError: on an unknown filter column or unsupported operator.
        """
        filters = filters or []

        # Clamp pagination parameters into a sane range.
        if page < 0:
            page = 0
        if page_size < 0:
            page_size = 0
        if page_size > 999:
            page_size = 999

        q = db.session.query(Service)

        # Visibility: admins see everything, tenant admins see their
        # tenant's services, everyone else only their own.
        if g.user.is_admin:
            pass
        elif g.user.is_tenant_admin:
            q = q.outerjoin(MyUser, MyUser.id == Service.created_by_fk).filter(
                MyUser.tenant_id == g.user.tenant_id
            )
        else:
            q = q.filter(Service.created_by_fk == g.user.id)

        if region is not None:
            q = q.filter(Service.region == region)

        # Apply caller-supplied filters. 'model_type' is virtual (resolved
        # through Training_Model); any other column must exist on Service.
        for f in filters:
            if f['col'] != "model_type" and not hasattr(Service, f['col']):
                raise BizError(1000, "未找到相关数据")
            if f['opr'] == 'eq':
                q = q.filter_by(**{f['col']: f['value']})
            elif f['opr'] == 'ct':
                q = q.filter(getattr(Service, f['col']).like(f'%{f["value"]}%'))
            elif f['opr'] == 'in':
                if f['col'] == 'labels':
                    if len(f['value']) > 0:
                        q = (
                            q.join(Labels, Labels.rel_id == Service.id)
                            .filter(Labels.rel_type == 'service')
                            .filter(Labels.name.in_(f['value']))
                        )
                elif f['col'] == 'model_type':
                    if len(f['value']) > 0:
                        # Map display names to enum values, skipping unknowns.
                        vals = []
                        for v in f['value']:
                            val = EnumModelType.get_val(v, default=None)
                            if val:
                                vals.append(val)
                        q = q.join(Training_Model, Training_Model.id == Service.model_id).filter(
                            Training_Model.type.in_(vals)
                        )
                else:
                    q = q.filter(getattr(Service, f['col']).in_(f['value']))
            else:
                raise BizError(1000, f"不支持的filter,opr:{f['opr']}")

        count = q.count()
        results = (
            q.order_by(Service.created_on.desc()).offset(page * page_size).limit(page_size).all()
        )

        # Batch-load related rows up front to avoid per-service queries.
        srv_ids = [item.id for item in results]
        created_by_ids = [item.created_by_fk for item in results]

        instances = (
            db.session.query(InferenceService)
            .filter(InferenceService.service_id.in_(srv_ids))
            .all()
        )
        id2instance = {}
        model_version_ids = []
        for item in instances:
            model_version_ids.append(item.model_version_id)
            id2instance.setdefault(str(item.service_id), []).append(item)

        # NOTE(review): model versions/models are filtered by g.region.key
        # rather than the `region` argument — presumably intentional; confirm.
        id2model_version = {}
        model_versions = (
            db.session.query(Model_Version)
            .filter(Model_Version.region == g.region.key)
            .filter(Model_Version.id.in_(list(set(model_version_ids))))
            .all()
        )
        model_ids = []
        for item in model_versions:
            model_ids.append(item.model_id)
            id2model_version[item.id] = item

        id2model = {}
        models = (
            db.session.query(Training_Model.name, Training_Model.id, Training_Model.type)
            .filter(Training_Model.region == g.region.key)
            .filter(Training_Model.id.in_(list(set(model_ids))))
            .all()
        )
        for item in models:
            id2model[item.id] = item

        id2labels = {}
        srv_ids = list(set(srv_ids))
        if len(srv_ids) > 0:
            labels = (
                db.session.query(Labels.name, Labels.rel_id)
                .filter(Labels.rel_type == 'service')
                .filter(Labels.rel_id.in_(srv_ids))
                .all()
            )
            for label_name, rel_id in labels:
                id2labels.setdefault(rel_id, []).append(label_name)

        id2username = {}
        created_by_names = (
            db.session.query(User.id, User.username)
            .filter(User.id.in_(list(set(created_by_ids))))
            .all()
        )
        for item in created_by_names:
            id2username[item.id] = item.username

        ret = []
        for item in results:
            configs = []
            for ins in id2instance.get(str(item.id), []):
                try:
                    envs = json.loads(ins.env)
                except Exception:
                    envs = None

                model_version = id2model_version.get(ins.model_version_id)
                configs.append(
                    {
                        'node_id': ins.id,
                        'model_version_id': ins.model_version_id,
                        'model_version_name': getattr(model_version, 'name', ''),
                        'weight': ins.weight,
                        'envs': envs,
                        'min_replicas': ins.min_replicas,
                        'specifications': ins.specifications,
                        'pool_type': ins.pool_type,
                    }
                )

            model = id2model.get(item.model_id)

            # Fix: previously `configs[0]` raised IndexError for a service
            # with no inference instances, and a missing model/model-version
            # row raised AttributeError. Fall back to empty defaults instead.
            source_id = None
            base_model_code = None
            if configs:
                first_mv = id2model_version.get(configs[0].get('model_version_id'))
                source_id = getattr(first_mv, 'source_id', None)
                base_model_code = getattr(first_mv, 'base_model_code', None)

            ret.append(
                {
                    'id': item.id,
                    'name': item.name,
                    'describe': item.describe,
                    'model': {
                        'name': getattr(model, 'name', ''),
                        'id': getattr(model, 'id', 0),
                        'type': EnumModelType.get_name(
                            getattr(model, 'type', EnumModelType.other.value)
                        ),
                    },
                    'model_id': item.model_id,
                    'labels': id2labels.get(item.id, []),
                    'invocation_times': item.total_invocation_times,
                    'failed_times': item.failed_times,
                    'acc_running_time': item.get_acc_running_time(),
                    'status': item.status,
                    'error_msg': item.error_msg,
                    'configs': configs,
                    'advanced': json.loads(item.advanced) if item.advanced else {},
                    'ratelimit': json.loads(item.ratelimit) if item.ratelimit else {},
                    'deploy_history': (
                        json.loads(item.deploy_history) if item.deploy_history else []
                    ),
                    'created_by': id2username.get(item.created_by_fk, ''),
                    'created_on': item.created_on.strftime('%Y-%m-%d %H:%M:%S'),
                    'changed_on': item.changed_on.strftime('%Y-%m-%d %H:%M:%S'),
                    'source_id': source_id,
                    'base_model_code': base_model_code,
                    'region': item.region,
                }
            )
        return {'count': count, 'data': ret}

    # Edit service API endpoint
    # 编辑服务API端点
    @expose('/edit/<service_id>', methods=['POST'])
    @wrap_response
    def edit(self, service_id):
        """Edit a service's basic info, configuration, rate limits and labels.

        Loads the service (scoped by the caller's role), diffs the requested
        configuration against the stored one and, when the deployable
        configuration changed, rebuilds the InferenceService rows, records a
        deploy-history snapshot, and triggers a redeploy (ModelArts update or
        private-cloud upgrade).

        Args:
            service_id: primary key of the Service to edit (from the URL).

        Raises:
            BizError: when the service is missing, its status forbids
                updates, or the new name already exists.
        """
        req = request.json

        # Scope the lookup by role: admins see all services, tenant admins
        # see their tenant's, other users only their own.
        q = db.session.query(Service).filter(Service.id == service_id)

        if g.user.is_admin:
            pass
        elif g.user.is_tenant_admin:
            q = q.outerjoin(MyUser, MyUser.id == Service.created_by_fk).filter(
                MyUser.tenant_id == g.user.tenant_id
            )
        else:
            q = q.filter(Service.created_by_fk == g.user.id)

        old_srv = q.first()
        if old_srv is None:
            raise BizError(CommonErrorCode.NOT_FOUND, '服务不存在')

        # Reject edits while the service is in a state that cannot be updated.
        if ServiceStatus.can_update(old_srv.status) is False:
            raise BizError(CommonErrorCode.NOT_ALLOWED, '当前服务状态下禁止更新操作')

        # Snapshot the pre-edit configuration for the deploy history; drop
        # null env entries so snapshots compare cleanly later.
        old_srv_configs = json.loads(old_srv.configs) if old_srv.configs else []
        for item in old_srv_configs:
            if 'envs' in item and item['envs'] is None:
                del item['envs']

        old_cfgs = {
            'name': old_srv.name,
            'describe': old_srv.describe,
            'configs': json.dumps(old_srv_configs),
            'ratelimit': old_srv.ratelimit,
            'advanced': old_srv.advanced,
            'model_id': old_srv.model_id,
            'model': {'name': getattr(old_srv.model, 'name', '')},
            'changed_on': old_srv.changed_on.strftime('%Y-%m-%d %H:%M:%S'),
        }
        # Deploy history must be a list; tolerate missing or corrupt JSON
        # (a stored dict is treated the same as no history).
        deploy_history = []
        try:
            deploy_history = json.loads(old_srv.deploy_history) if old_srv.deploy_history else []
            if deploy_history is None:
                deploy_history = []
            if isinstance(deploy_history, dict):
                deploy_history = []
        except Exception:
            pass

        # Build the replacement Service plus its InferenceService instances
        # from the request payload; ownership carries over from the old row.
        new_srv, instances, can_deploy = build_service(req)
        for item in instances:
            item.gen_random_name(service_id)
            item.created_by_fk = old_srv.created_by_fk
        new_srv.created_by_fk = old_srv.created_by_fk

        # Rename, enforcing global service-name uniqueness.
        if new_srv.name != old_srv.name:
            if db.session.query(Service.id).filter_by(name=new_srv.name).first() is not None:
                raise BizError(
                    CommonErrorCode.INVALID_INPUT_PARAM,
                    f'服务名称 {new_srv.name} 已存在，请换个名称。',
                )
            old_srv.name = new_srv.name

        # Update the description if it changed.
        if new_srv.describe != old_srv.describe:
            old_srv.describe = new_srv.describe

        # Persist advanced settings only when they actually differ; both
        # sides are normalized to a dict before comparison.
        new_advanced = json.loads(new_srv.advanced) if new_srv.advanced else {}
        new_advanced = {} if new_advanced is None else new_advanced
        old_advanced = json.loads(old_srv.advanced) if old_srv.advanced else {}
        old_advanced = {} if old_advanced is None else old_advanced
        if new_advanced != old_advanced:
            old_srv.advanced = json.dumps(new_advanced, ensure_ascii=False)

        # Commented out: non-admin users keep the pre-change auto-stop config.
        # if not g.user.is_admin():
        #     old_auto_stop = old_advanced.get('auto_stop', {})
        #     enable = old_auto_stop.get('enable', None)
        #     if enable is False:
        #         new_advanced['auto_stop'] = old_auto_stop
        #         new_srv.advanced = json.dumps(new_advanced)

        # Persist rate-limit settings only when they actually differ,
        # with the same dict normalization as above.
        new_ratelimit = json.loads(new_srv.ratelimit) if new_srv.ratelimit else {}
        new_ratelimit = {} if new_ratelimit is None else new_ratelimit
        old_ratelimit = json.loads(old_srv.ratelimit) if old_srv.ratelimit else {}
        old_ratelimit = {} if old_ratelimit is None else old_ratelimit
        if new_ratelimit != old_ratelimit:
            old_srv.ratelimit = json.dumps(new_ratelimit, ensure_ascii=False)

        # NOTE(review): unlike the snapshot above, this assumes
        # old_srv.configs is non-null valid JSON — confirm upstream invariant.
        old_configs = json.loads(old_srv.configs)

        def is_service_config_changed(old, new):
            """Return True if any deploy-relevant field differs between the
            old and new config lists (matched by model_version_id)."""
            if len(old) != len(new):
                logging.info(
                    f'number of service configs changed, old: {len(old)}, new: {len(new)}'
                )
                return True

            for old_item in old:
                found = False
                for new_item in new:
                    if old_item['model_version_id'] == new_item['model_version_id']:
                        found = True
                        if old_item['weight'] != new_item['weight']:
                            logging.info(
                                f'weight changed, old: {old_item["weight"]}, new:'
                                f' {new_item["weight"]}'
                            )
                            return True
                        if old_item['min_replicas'] != new_item['min_replicas']:
                            logging.info(
                                f'min_replicas changed, old: {old_item["min_replicas"]}, new:'
                                f' {new_item["min_replicas"]}'
                            )
                            return True
                        if old_item['specifications'] != new_item['specifications']:
                            logging.info(
                                f'specifications changed, old: {old_item["specifications"]}, new:'
                                f' {new_item["specifications"]}'
                            )
                            return True

                        # Absent pool_type falls back to the public pool on
                        # both sides before comparing.
                        old_pool_type = old_item.get('pool_type', EnumPoolType.public.value)
                        new_pool_type = new_item.get('pool_type', EnumPoolType.public.value)
                        if old_pool_type != new_pool_type:
                            return True

                        old_envs = old_item.get('envs', {})
                        new_envs = new_item.get('envs', {})
                        old_envs = {} if old_envs is None else old_envs
                        new_envs = {} if new_envs is None else new_envs
                        if old_envs != new_envs:
                            logging.info(f'envs changed, old: {old_envs}, new: {new_envs}')
                            return True

                if not found:
                    logging.info(f'config not found, old: {old_item["model_version_id"]}')
                    return True

            return False

        # A change in the deploy configs OR in the advanced 'health' section
        # forces a redeploy.
        is_changed = is_service_config_changed(old_configs, req.get('configs', []))
        if not is_changed:
            old_health = old_advanced.get('health', {})
            new_health = new_advanced.get('health', {})
            if old_health != new_health:
                logging.info(f'health changed, old: {old_health}, new: {new_health}')
                is_changed = True

        # Replace the InferenceService rows, reusing deployment names where a
        # model version survives the edit so running workloads are matched.
        if is_changed:
            old_srv.model_id = new_srv.model_id
            old_srv.configs = new_srv.configs
            logging.info('service config changed')
            old_instances = (
                db.session.query(InferenceService)
                .filter(InferenceService.service_id == service_id)
                .all()
            )

            # Map model_version_id -> index for both instance lists.
            dict_old_instances_name = {}
            dict_new_instances_name = {}

            for i, item in enumerate(old_instances):
                dict_old_instances_name[item.model_version_id] = i
            for i, item in enumerate(instances):
                dict_new_instances_name[item.model_version_id] = i
                item.service_id = old_srv.id

            l_diff, l_intersection, r_intersection, r_diff = dict_diff(
                dict_old_instances_name, dict_new_instances_name
            )

            # Same model version on both sides: keep the old deployment name.
            for k, v in l_intersection.items():
                new_instance_index = r_intersection[k]
                old_instance_index = v
                instances[new_instance_index].name = old_instances[old_instance_index].name

            l_diff = dict_to_array(l_diff)
            r_diff = dict_to_array(r_diff)

            # Pair leftover old names with new instances while both remain.
            while len(l_diff) > 0 and len(r_diff) > 0:
                instances[r_diff.pop()].name = old_instances[l_diff.pop()].name

            # Old instances with no new counterpart: tear down their k8s
            # deployments on private cloud.
            for item in l_diff:
                ins = old_instances[item]
                if is_private():
                    delete_old_service(ins.name, namespace=ins.namespace, region=g.region.key)

            for item in old_instances:
                db.session.delete(item)

            for item in instances:
                db.session.add(item)

        # Record the pre-edit snapshot in the deploy history (newest first,
        # capped at 5 entries).
        if is_changed:
            deploy_history.insert(0, old_cfgs)
            if len(deploy_history) > 5:
                logging.warning(f'服务 {service_id} 部署历史记录超过5条，将删除最早的一条。')
                deploy_history = deploy_history[:5]
            old_srv.deploy_history = json.dumps(deploy_history)

        # Replace the service's labels wholesale (dedup + strip whitespace).
        new_labels = req.get('labels', [])
        new_labels = list({label.strip() for label in new_labels})
        db.session.query(Labels).filter(
            Labels.rel_type == 'service', Labels.rel_id == service_id
        ).delete()
        for label_name in new_labels:
            item = Labels()
            item.rel_id = service_id
            item.rel_type = 'service'
            item.name = label_name
            db.session.add(item)

        # Persist all accumulated changes.
        db.session.commit()

        # ModelArts deployments: push the new configuration and mark the
        # service as deploying.
        if is_modelarts() and is_changed:
            ma_client.update_service(
                service_id=old_srv.ma_id,
                desc=f'模型ID:{old_srv.model_id}, 服务ID:{service_id},用户ID:{g.user.id}',
                configs=self.build_ma_service_configs(service_id),
            )

            old_srv.status = ServiceStatus.deploying.value
            db.session.commit()

        # Private cloud: redeploy in place when the status allows an upgrade
        # and the built service is deployable.
        if (
            is_private()
            and is_changed
            and ServiceStatus.can_upgrade(old_srv.status)
            and can_deploy
        ):
            logging.info('service config changed, need to redeploy')
            self.upgrade_by_id(service_id)

    # Static method to get total fee for a service
    # 获取服务总费用的静态方法
    @staticmethod
    def _get_total_fee(service_name):
        """Total billed amount for a service, rendered as a yuan string.

        Sums `BillingOrder.amount` over bill_type-10 orders whose order id
        contains the service name; the stored amount is divided by 10000
        (presumably stored in 1/10000 yuan units — verify against billing).
        """
        amount_sum = (
            db.session.query(sqlalchemy.func.sum(BillingOrder.amount))
            .filter(
                BillingOrder.bill_type == 10,
                BillingOrder.order_id.like(f'%{service_name}%'),
            )
            .scalar()
        )
        # SUM over zero rows yields NULL/None — treat that as zero.
        total = 0 if amount_sum is None else amount_sum
        return f'{total / 10000}元'

    # Static method to get scheme (http/https) based on host
    # 根据主机获取方案（http/https）的静态方法
    @staticmethod
    def get_scheme(host):
        ip_pattern = re.compile(r'^(\d{1,3}\.){3}\d{1,3}(:\d+)?$')
        if ip_pattern.match(host):
            return 'http'
        else:
            return 'https'

    # Get service detail API endpoint
    # 获取服务详细信息API端点
    @expose('/detail/<service_id>', methods=['GET'])
    @wrap_response
    def detail(self, service_id):
        # Query service by ID and user permissions
        # 按ID和用户权限查询服务
        query = db.session.query(Service).filter(Service.id == service_id)

        if g.user.is_admin:
            pass
        elif g.user.is_tenant_admin:
            query = query.outerjoin(MyUser, MyUser.id == Service.created_by_fk).filter(
                MyUser.tenant_id == g.user.tenant_id
            )
        else:
            query = query.filter(Service.created_by_fk == g.user.id)

        srv = query.first()
        if srv is None:
            raise BizError(1000, '服务不存在')

        # Query inference instances for the service
        # 查询服务的推理实例
        instances = (
            db.session.query(InferenceService)
            .filter(InferenceService.service_id == service_id)
            .all()
        )
        if instances is None or len(instances) == 0:
            raise BizError(1000, '服务实例不存在')

        # Query labels for the service
        # 查询服务的标签
        labels = (
            db.session.query(Labels.name)
            .filter(Labels.rel_type == 'service', Labels.rel_id == service_id)
            .all()
        )

        configs = []
        curl_example = ''
        # Get host scheme (http/https)
        # 获取主机方案（http/https）
        # host_scheme = request.scheme
        host_scheme = self.get_scheme(request.host)
        for ins in instances:
            envs = None
            with log_exception:
                envs = json.loads(ins.env)

            curl_cmd = ins.get_curl_cmd(f'{host_scheme}://{request.host}')
            curl_example = curl_cmd if len(curl_cmd) > 0 else curl_example
            resource_type = get_specifications_type(ins.specifications)

            configs.append(
                {
                    'node_id': ins.id,
                    'model_version_id': ins.model_version_id,
                    'model_version_name': ins.model_version_info.name,
                    'resource_type': resource_type if resource_type else '',
                    'weight': ins.weight,
                    'envs': envs,
                    'min_replicas': ins.min_replicas,
                    'specifications': ins.specifications,
                    'pool_type': ins.pool_type,
                }
            )
        # Get base model code from model version
        # 从模型版本获取基础模型代码
        model_version_id = configs[0].get('model_version_id')
        mv = db.session.query(Model_Version).filter(Model_Version.id == model_version_id).first()
        base_model_code = ''
        if mv:
            base_model_code = mv.base_model_code

        # Return service details
        # 返回服务详细信息
        return {
            'id': srv.id,
            'name': srv.name,
            'describe': srv.describe,
            'total_fee': self._get_total_fee(srv.name),
            'model': {
                'name': getattr(srv.model, 'name', ''),
                'id': getattr(srv.model, 'id', 0),
                'type': EnumModelType.get_name(
                    getattr(srv.model, 'type', EnumModelType.other.value)
                ),
                'source': EnumModelSource.get_name(getattr(srv.model, 'source', '')),
            },
            'model_id': srv.model_id,
            'labels': [item[0] for item in labels],
            'status': srv.status,
            'invocation_times': srv.invocation_times,
            'failed_times': srv.failed_times,
            # Request example / 请求示例
            # 'api_address': f'{request.host}/api/v1/infer/{srv.id}',
            'api_address': f'{request.host}/api/v1/infer/{srv.id}'
            if curl_example == ''
            else f'{request.host}/maas/v1/chat/completions',
            'req_example': 'https://gitee.com/wair-ac/taichu-docs/blob/master/%E7%94%A8%E6%88%B7%E6%93%8D%E4%BD%9C%E6%89%8B%E5%86%8C.md#551-%E6%8E%A8%E7%90%86%E6%9C%8D%E5%8A%A1api%E8%B0%83%E7%94%A8%E8%AF%B4%E6%98%8E',
            'curl_example': curl_example,
            'error_msg': srv.error_msg,
            'configs': configs,
            'advanced': json.loads(srv.advanced) if srv.advanced else {},
            'ratelimit': json.loads(srv.ratelimit) if srv.ratelimit else {},
            'acc_running_time': srv.get_acc_running_time(),
            'deploy_history': json.loads(srv.deploy_history) if srv.deploy_history else [],
            'created_by': str(srv.created_by),
            'created_on': srv.created_on.strftime('%Y-%m-%d %H:%M:%S'),
            'changed_on': srv.changed_on.strftime('%Y-%m-%d %H:%M:%S'),
            'base_model_code': base_model_code,
            'model_code': get_model_code(base_model_code, mv.source_id, service_id),
        }

    # Add service API endpoint
    # 添加服务API端点
    @expose('/add', methods=['POST'])
    @wrap_response
    def add_service(self):
        """Create a new inference service from the POSTed JSON payload.

        Thin endpoint wrapper: all validation, persistence and deployment
        logic lives in :meth:`_add_service`.
        """
        payload = request.json
        return self._add_service(payload)

    # Internal method to add a service
    # 添加服务的内部方法
    def _add_service(self, req):
        """Validate, persist and (when possible) deploy a new inference service.

        :param req: payload dict; must contain ``configs`` (list of per-instance
            configs) and may contain ``source``, ``labels``, ``name`` etc.
            Mutated in place (``tenant_id``/``region``/``envs`` are injected).
        :return: ``{'id': <new service id>}``
        :raises BizError: on missing/broken model version, duplicate service
            name, or insufficient user balance.
        """
        source = req.get('source')
        # NOTE(review): only the first config's pool_type drives the billing
        # check below — presumably all configs share one pool type; confirm.
        pool_type = req['configs'][0].get('pool_type', EnumPoolType.public.value)
        region_key = g.region.key

        # Special handling for fine-tuning source
        # 微调来源的特殊处理
        if source == EnumModelSource.finetune.name:
            configs = req.get('configs')
            for cfg in configs:
                mv_id = cfg.get('model_version_id', None)
                if mv_id is None:
                    raise BizError(CommonErrorCode.INVALID_INPUT_PARAM, '模型版本ID必填')
                mv = db.session.query(Model_Version).filter_by(id=mv_id).first()
                if not mv:
                    raise BizError(
                        CommonErrorCode.INVALID_INPUT_PARAM,
                        f'指定的模型版本不存在或损坏：{mv_id}，请检查',
                    )
                # Propagate the deploy-time env vars stored on the model
                # version onto this instance config.
                deploy_cfgs = json.loads(mv.deploy_cfgs)
                cfg['envs'] = deploy_cfgs.get('envs', {})

        req['tenant_id'] = g.user.tenant_id
        req['region'] = region_key

        # Build service and instances
        # 构建服务和实例
        srv, instances, can_deploy = build_service(req)

        # Check user balance if public pool type and billing is needed
        # 如果是公共池类型且需要计费，则检查用户余额
        if pool_type == EnumPoolType.public.value and need_model_deploying_billing(
            g.user.username
        ):
            estimate_amount = get_model_deploying_estimate_amount(srv)
            check_user_balance(int(g.user.taichu), estimate_amount=estimate_amount)

        # Check if service name already exists
        # 检查服务名称是否已存在
        if db.session.query(Service.id).filter_by(name=srv.name).first() is not None:
            raise BizError(
                CommonErrorCode.INVALID_INPUT_PARAM, f'服务名称 {srv.name} 已存在，请换个名称。'
            )

        # Add service and instances to database
        # 将服务和实例添加到数据库
        db.session.add(srv)
        # Flush so srv.id is assigned before linking instances to it.
        db.session.flush()
        for item in instances:
            item.service_id = srv.id
            item.gen_random_name()
            db.session.add(item)

        # Create labels (deduplicated after stripping surrounding whitespace)
        # 创建标签
        labels = req.get('labels', [])
        labels = list({label.strip() for label in labels})
        for name in labels:
            label = Labels()
            label.name = name
            label.rel_id = srv.id
            label.rel_type = 'service'
            db.session.add(label)

        db.session.commit()

        # Start service deployment if possible
        # 如果可能，则启动服务部署
        if can_deploy:
            self.deploy_by_id(srv.id)
        else:
            try:
                # For Lora deployment, weights need to be merged first
                # lora部署时，需要先merge权重
                merge_by_service(srv, 'add_service')
            except Exception as e:
                # Mark the freshly created service failed before re-raising
                # so it is not left stuck in its initial status.
                srv.status = ServiceStatus.failed.value
                db.session.commit()
                raise e

        return {'id': srv.id}

    # Import service API endpoint
    # 导入服务API端点
    @expose('/import', methods=['POST'])
    @wrap_response
    def import_service(self):
        """Create a service from an existing model version (``source`` + ``source_id``).

        Builds a single-instance config (defaults + AUTO_LEARNING_CFGS
        overrides), injects the model-path environment variable, then
        delegates persistence/deployment to :meth:`_add_service`.

        :raises BizError: on invalid service name, null source_id, unknown
            model version, or unsupported model type.
        """
        # Get request JSON data
        # 获取请求JSON数据
        req = request.json
        source_id = req.get('source_id', '')
        source = EnumModelSource.get_val(req.get('source'))
        name = req.get('name', 'serving-' + str(uuid.uuid4()))
        min_replicas = req.get('min_replicas', 1)
        auto_stop = req.get('auto_stop', None)
        # Default hardware spec depends on the deployment platform.
        specifications = req.get(
            'specifications', 'modelarts.npu.default' if is_modelarts() else 'taichu.gpu.xlarge'
        )

        # Validate service name
        # 验证服务名称
        if not strings.is_valid_name(name):
            raise BizError(
                CommonErrorCode.INVALID_INPUT_PARAM,
                f'服务名称 {name} 不符合规范，请换个名称。名称'
                '支持1-48位可见字符（含中文），可以包含字母、中文、数宇、中划线、下划线。',
            )
        # NOTE(review): source_id defaults to '' above, so this only fires
        # when the payload explicitly sends null; an empty string slips
        # through to the model lookup below — confirm that is intended.
        if source_id is None:
            raise BizError(CommonErrorCode.INVALID_INPUT_PARAM, 'source_id不能为空')

        # Query model version by source and source_id
        # 按来源和source_id查询模型版本
        model_version = (
            db.session.query(Model_Version).filter_by(source=source, source_id=source_id).first()
        )
        if model_version is None:
            raise BizError(CommonErrorCode.INTERNAL_ERROR, f'模型不存在,source_id:{source_id}')
        req['model_id'] = model_version.model_id

        # Best-effort: let the auto-learning config for this model type
        # override the default specification; any failure keeps the default.
        try:
            model_type = EnumModelType.get_name(model_version.model.type)
            cfgs = AUTO_LEARNING_CFGS.get(model_type)
            if cfgs and 'specifications' in cfgs:
                logging.info(f'found specifications in AUTO_LEARNING_CFGS, val:{cfgs}')
                spec = cfgs.get('specifications', specifications)
                if len(spec) > 0:
                    specifications = spec
        except Exception:
            pass

        configs = [
            {
                'min_replicas': min_replicas,  # Minimum number of replicas / 最小副本数量
                'max_replicas': min_replicas,  # Maximum number of replicas / 最大副本数量
                'resource_memory': '1G',
                'resource_cpu': 1,
                'resource_gpu': 0,
                'specifications': specifications,
                'weight': 100,
                'model_version_id': model_version.id,
            }
        ]
        auto_learning_cfgs = AUTO_LEARNING_CFGS.get(
            EnumModelType.get_name(model_version.model.type)
        )
        if auto_learning_cfgs is None:
            logging.error(f'模型类型不支持,source_id:{source_id}')
            raise BizError(CommonErrorCode.INTERNAL_ERROR)

        env_name = auto_learning_cfgs.get('env_name', 'MODEL_PATH')
        envs = auto_learning_cfgs.get('envs', {})
        if envs is None:
            envs = {}

        # Resolve the model path; it may be stored either as a plain string
        # or as a JSON object with a 'model_path' key.
        model_path = model_version.path
        if model_path and len(model_path) > 0:
            if model_path.startswith('{') and model_path.endswith('}'):
                model_path = json.loads(model_path)
                model_path = model_path.get('model_path', '')
            envs[env_name] = model_path
        configs[0]['envs'] = envs

        # Default service name - model name + model version
        # 服务默认名称 - 模型名称+模型版本号
        if not req.get('name'):
            req['name'] = f'{model_version.model.name}-{model_version.name}'

        req['configs'] = configs
        # Auto-stop after one hour by default; caller may override below.
        req['advanced'] = {'auto_stop': {'enable': True, 'seconds': 3600}}

        if auto_stop:
            req['advanced']['auto_stop'] = auto_stop

        return self._add_service(req)

    # Estimate deploying amount API endpoint
    # 估算部署金额API端点
    @expose('/estimate_deploying_amount', methods=['POST'])
    @wrap_response
    def estimate_deploying_amount(self):
        """Estimate the deployment cost for a given spec, duration and replica count.

        Reads ``min_replicas``, ``seconds`` and ``specifications`` from the
        request body; the estimate is unit price (per minute) * whole minutes
        * replica count, scaled down by 10000.
        """
        body = request.json
        replicas = body.get('min_replicas', None)
        duration_seconds = body.get('seconds', 0)
        spec = body.get('specifications', None)

        # Both replica count and specification are mandatory for an estimate.
        if not replicas:
            raise BizError(CommonErrorCode.INTERNAL_ERROR, '实例数为空')
        if not spec:
            raise BizError(CommonErrorCode.INTERNAL_ERROR, '规格配置为空')

        # Price is expressed per minute; round the duration up to whole minutes.
        per_minute_price = get_model_deploying_price(spec)
        minutes = math.ceil(duration_seconds / 60)
        total = per_minute_price * minutes * replicas
        return total / 10000

    # Static method to start ModelArts service
    # 启动ModelArts服务的静态方法
    @staticmethod
    def ma_start(service_id):
        """Resume the ModelArts service backing ``service_id``, deploying if needed.

        No-op outside ModelArts environments. If the service row has no
        ModelArts id yet, delegates to :meth:`ma_deploy` instead.
        """
        if not is_modelarts():
            return
        service = db.session.query(Service).filter_by(id=service_id).first()

        # Check if service exists
        # 检查服务是否存在
        if service.ma_id is None or len(service.ma_id) == 0:
            return Service_ModelView_Api.ma_deploy(service_id)

        try:
            predictor = ma_predictor(service.ma_id)
            predictor.update_service_config(status='running')
        except modelarts.exception.apig_exception.APIGException as e:
            err_code, err_msg = parse_apig_exception(e)

            if err_code == 'ModelArts.3502':  # Service does not exist / 服务不存在
                # Stale reference: clear it so a later deploy recreates it.
                # NOTE(review): cleared in memory only, no commit here —
                # presumably a caller commits; confirm.
                service.ma_id = None
            elif err_code == 'ModelArts.3962' and 'running to running' in err_msg:  # Service already running / 服务已经启动
                logging.info(f'service {service_id} already running')
            else:
                raise e

    # Static method to build ModelArts service configurations
    # 构建ModelArts服务配置的静态方法
    @staticmethod
    def build_ma_service_configs(service_id):
        """Assemble the per-instance config list for a ModelArts deployment.

        For each inference instance of the service, merges its stored env
        vars with platform-level ones (logging level, OBS credentials,
        filebeat download URL, model/code paths) and its resource quotas
        into the dict shape expected by the ModelArts deploy API.

        :raises BizError: when the service has no inference instances.
        """
        service = db.session.query(Service).filter_by(id=service_id).first()
        instances = db.session.query(InferenceService).filter_by(service_id=service_id).all()

        if instances is None or len(instances) == 0:
            raise BizError(CommonErrorCode.INTERNAL_ERROR, '服务实例不存在')

        default_cfgs = TAICHU_ONLINE_INFER_SERVICE_DEFAULT_CONFIGS
        # model_instance = Model(ma_session, model_id=model_id)
        configs = []

        # Env vars shared by every instance of this service.
        base_envs = {}
        # Set MindSpore log level to WARNING
        # mindspore日志打印级别设置为WARNING
        if service.model.framework == EnumFramework.mindspore.name:
            base_envs['GLOG_v'] = '2'

        for ins in instances:
            # Start from the instance's stored envs, then layer base envs on top.
            envs = {}
            if ins.env and len(ins.env) > 0:
                envs = json.loads(ins.env)
            if envs is None:
                envs = {}
            for k, v in base_envs.items():
                envs[k] = v

            envs['app_name'] = strings.filter_name(
                service.name, default='default_app_name'
            )  # Filter special characters / 过滤特殊字符
            envs['service_id'] = service.id
            envs['instance_id'] = ins.id
            # Inject filebeat component download address
            # 注入filebeat组件下载地址
            envs['filebeat_url'] = (
                f'https://wair.obs.{env.get_region_id()}.{env.get_external_domain_name()}/soft/filebeat-service-{env.get_env()}.tar.gz'
            )
            # Inject OBS configuration
            # 注入obs配置
            obs_config = os.getenv('OBS_CONFIG', '{}') or '{}'
            obs_config = json.loads(obs_config)

            envs['HW_OBS_AK'] = obs_config.get('access_key_id', '')
            envs['HW_OBS_SK'] = obs_config.get('secret_access_key', '')
            envs['HW_OBS_SERVER'] = obs_config.get('server', '')
            model_path = ins.get_model_path()
            if model_path and len(model_path) > 0:
                envs['MODEL_OBS_PATH'] = model_path

            # If auto-learning, inject code and model path environment variables
            # 如果是自动学习，注入代码和模型路径的环境变量
            if ins.model_version_info.source == EnumModelSource.auto_learning.value:
                code_obs_path = default_cfgs['CODE_OBS_PATH'].get(
                    EnumModelType.get_name(service.model.type)
                )
                if code_obs_path is not None:
                    envs['CODE_OBS_PATH'] = code_obs_path

            # Normalize resource quotas; memory is stored like '4G' and
            # converted to an integer MB-scale value below.
            resource_cpu = float(ins.resource_cpu)
            resource_memory = ins.resource_memory.strip()
            if resource_memory.endswith('G'):
                resource_memory = float(resource_memory[:-1])
            resource_gpu = float(ins.resource_gpu)

            cfg = {
                'model_id': ins.model_version_info.ma_id,
                'weight': str(ins.weight),
                'specification': 'custom',
                'custom_spec': {
                    'cpu': resource_cpu,
                    'memory': int(resource_memory * 1000),
                    'gpu_p4': 0,
                    'ascend_a310': int(resource_gpu),
                },
                'instance_count': ins.min_replicas,
                'envs': envs,
            }
            configs.append(cfg)

        logging.info(f'service {service_id} configs: {configs}')
        return configs

    # Static method to deploy ModelArts service
    # 部署ModelArts服务的静态方法
    @staticmethod
    def ma_deploy(service_id):
        """Deploy (or resume) the ModelArts online service backing ``service_id``.

        If the service already carries a ModelArts id, try to switch it back
        to ``running`` instead of deploying a new one; a stale id
        (ModelArts.3502 — service gone) is cleared and a fresh deployment is
        performed.

        :param service_id: primary key of the local Service row.
        :return: the ModelArts service id of the deployed/resumed service.
        :raises BizError: when the service has no inference instances.
        """
        service = db.session.query(Service).filter_by(id=service_id).first()
        instances = db.session.query(InferenceService).filter_by(service_id=service_id).all()

        if instances is None or len(instances) == 0:
            raise BizError(CommonErrorCode.INTERNAL_ERROR, '服务实例不存在')

        # Check if service already exists on ModelArts; if so just restart it
        # 检查服务是否已存在
        if service.ma_id is not None and len(service.ma_id) > 0:
            try:
                predictor = ma_predictor(service.ma_id)
                predictor.update_service_config(status='running')
                return predictor.service_id
            except modelarts.exception.apig_exception.APIGException as e:
                err_code, _ = parse_apig_exception(e)
                if err_code == 'ModelArts.3502':  # Service does not exist / 服务不存在
                    # Stale reference: fall through and deploy from scratch.
                    service.ma_id = None

        configs = Service_ModelView_Api.build_ma_service_configs(service_id)

        # Service names must be unique; tag non-production deployments with
        # the environment name for easier identification.
        srv_name = f'taichustudio-{str(uuid.uuid4())}'
        if not env.is_online():
            srv_name = f'taichustudio-{env.get_env()}-{str(uuid.uuid4())}'

        user_id = 0
        try:
            user_id = g.user.id
        except Exception:
            # Background callers may run without a request context / user.
            pass

        # BUGFIX: the description fragments were unterminated f-string
        # literals (missing closing quotes), a SyntaxError. Also renamed the
        # result variable so it no longer shadows the service_id parameter.
        ma_service_id = ma_client.deploy_service(
            name=srv_name,
            desc=(
                f'模型名称:{service.model.name}, 模型版本ID:{instances[0].model_version_id},'
                f'模型版本来源:{EnumModelSource.get_name(instances[0].model_version_info.source)},'
                f' 服务ID:{service.id},用户ID:{user_id}'
            ),
            cluster_id=MODELARTS_PRIVATE_POOL_ID,
            configs=configs,
        )

        service.ma_id = ma_service_id

        db.session.commit()
        return ma_service_id

    # Start service API endpoint
    # 启动服务API端点
    @expose('/<service_id>/start', methods=['GET'])
    @wrap_response
    def start(self, service_id):
        """Start a stopped service: flip status to deploying, then deploy.

        The status transition is performed as a conditional UPDATE (stopped
        -> deploying) so concurrent start requests cannot both proceed. On
        any deployment error, service and instances are rolled to ``failed``
        and the exception is re-raised.

        :raises BizError: service missing, wrong state, or insufficient balance.
        """
        # Query service by ID
        # 按ID查询服务
        service = db.session.query(Service).filter_by(id=service_id).first()
        if service is None:
            raise BizError(1000, f'服务不存在:{service_id}')

        # Check if service status allows starting
        # 检查服务状态是否允许启动
        if ServiceStatus.can_start(service.status) is False:
            raise BizError(1000, f'服务正在{service.status}，请耐心等待')

        # Check user balance if billing is needed
        # 如果需要计费，则检查用户余额
        if need_model_deploying_billing(g.user.username):
            estimate_amount = get_model_deploying_estimate_amount(service)
            check_user_balance(int(g.user.taichu), estimate_amount=estimate_amount)

        # Update service status to deploying
        # 更新服务状态为部署中
        now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
        # Conditional update acts as an optimistic lock: only one caller can
        # move the row from 'stopped' to 'deploying'.
        res = (
            db.session.query(Service)
            .filter_by(id=service_id, status=ServiceStatus.stopped.value)
            .update(
                {
                    'status': ServiceStatus.deploying.value,
                    'running_start_time': None,
                    'deploy_time': now,
                    'error_msg': '',
                },
                synchronize_session=False,
            )
        )
        if res != 1:
            raise BizError(CommonErrorCode.NOT_FOUND, '启动异常，原因：服务不存在，或者状态异常')

        # Update inference service status to deploying
        # 更新推理服务状态为部署中
        db.session.query(InferenceService).filter_by(service_id=service_id).update(
            {'status': ServiceStatus.deploying.value, 'deploy_time': now, 'error_msg': ''},
            synchronize_session=False,
        )
        db.session.commit()

        try:
            # If ModelArts, start ModelArts service
            # 如果是ModelArts，则启动ModelArts服务
            if is_modelarts():
                self.ma_start(service_id)
            else:
                # Otherwise, deploy service for each instance
                # 否则，为每个实例部署服务
                instances = (
                    db.session.query(InferenceService.id).filter_by(service_id=service_id).all()
                )
                for item in instances:
                    # init_namespace(service.created_by_fk, db.session, region=service.region)
                    deploy_service(
                        item.id,
                        user_id=service.created_by_fk,
                        session=db.session,
                        region=service.region,
                    )
        except Exception as e:
            logging.error('启动推理服务发生错误', exc_info=True)

            error_msg = repr(e)
            error_msg = f'系统内部错误，原因：{error_msg}'
            # Update service and inference service status to failed
            # 更新服务和推理服务状态为失败
            # Same optimistic-lock pattern: only transition deploying -> failed.
            res = (
                db.session.query(Service)
                .filter_by(id=service_id, status=ServiceStatus.deploying.value)
                .update(
                    {'status': ServiceStatus.failed.value, 'error_msg': error_msg},
                    synchronize_session=False,
                )
            )
            if res == 1:
                db.session.query(InferenceService).filter_by(service_id=service_id).update(
                    {'status': ServiceStatus.failed.value, 'error_msg': error_msg},
                    synchronize_session=False,
                )

            db.session.commit()
            raise e

    # Static method to upgrade service by ID
    # 按ID升级服务的静态方法
    @staticmethod
    def upgrade_by_id(service_id):
        """Roll out an upgrade of every inference instance of the service.

        No-op on ModelArts. On any per-instance upgrade error the service is
        marked ``failed`` and the exception re-raised; on success both the
        service and its instances are moved to ``upgrading``.

        :raises BizError: service missing or status does not allow upgrading.
        """
        if is_modelarts():
            return

        service = db.session.query(Service).filter_by(id=service_id).first()
        if service is None:
            raise BizError(1000, f'服务不存在:{service_id}')

        # Check if service status allows upgrading
        # 检查服务状态是否允许升级
        if ServiceStatus.can_upgrade(service.status) is False:
            raise BizError(1000, f'服务正在{service.status}，请耐心等待')

        instances = db.session.query(InferenceService).filter_by(service_id=service_id).all()
        try:
            for item in instances:
                upgrade_service(item.id, g.user.id, db.session)
        except Exception as e:
            service.status = ServiceStatus.failed.value
            db.session.commit()
            raise e

        # Update service and inference service status to upgrading
        # 更新服务和推理服务状态为升级中
        now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
        service.status = ServiceStatus.upgrading.value
        service.deploy_time = now
        service.error_msg = ''
        for item in instances:
            item.status = ServiceStatus.upgrading.value
            item.error_msg = ''
            item.deploy_time = now

        db.session.commit()

    # Static method to deploy service by ID
    # 按ID部署服务的静态方法
    @staticmethod
    def deploy_by_id(service_id, dbsession=db.session):
        # NOTE(review): the default is bound at import time; db.session is a
        # scoped-session proxy so this is presumably safe — confirm.
        """Deploy a service on the current platform (ModelArts and/or private cloud).

        On private cloud a failure of any per-instance deploy marks the
        service ``failed`` and re-raises; otherwise service and instances are
        moved to ``deploying``.

        :param service_id: primary key of the Service row.
        :param dbsession: SQLAlchemy session to operate on (defaults to the
            application session).
        :raises BizError: when the service does not exist.
        """
        service = dbsession.query(Service).filter_by(id=service_id).first()
        if service is None:
            raise BizError(1000, f'服务不存在:{service_id}')

        # Commented out: check if service is already deploying
        # 已注释：检查服务是否已在部署中
        # if service.status == ServiceStatus.deploying.value:
        #     raise BizError(1000, '服务正在部署中，请耐心等待')

        instances = dbsession.query(InferenceService).filter_by(service_id=service_id).all()
        # If ModelArts, deploy ModelArts service
        # 如果是ModelArts，则部署ModelArts服务
        if is_modelarts():
            Service_ModelView_Api.ma_deploy(service_id)

        # If private cloud, deploy service for each instance
        # 如果是私有云，则为每个实例部署服务
        if is_private():
            try:
                for item in instances:
                    deploy_service(
                        item.id,
                        user_id=service.created_by_fk,
                        session=dbsession,
                        region=service.region,
                    )
            except Exception as e:
                service.status = ServiceStatus.failed.value
                dbsession.commit()
                raise e

        # Update service and inference service status to deploying
        # 更新服务和推理服务状态为部署中
        now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
        service.status = ServiceStatus.deploying.value
        service.deploy_time = now
        service.error_msg = ''
        for item in instances:
            item.status = ServiceStatus.deploying.value
            item.error_msg = ''
            item.deploy_time = now

        dbsession.commit()

    # Stop service API endpoint
    # 停止服务API端点
    @expose('/<service_id>/stop', methods=['GET'])
    @wrap_response
    def stop(self, service_id):
        """Stop the service identified by ``service_id``.

        Endpoint wrapper that delegates all work to :meth:`stop_by_id`.
        """
        return self.stop_by_id(service_id)

    # Static method to stop service by ID
    # 按ID停止服务的静态方法
    @staticmethod
    def stop_by_id(service_id):
        """Stop a service: update statuses, settle billing, release resources.

        Steps, in order: conditional status update to ``stopped`` (optimistic
        lock on the current status), stop all inference instances, bill
        public-pool usage if the service was running (private cloud), delete
        the underlying k8s workloads (private cloud), and finally stop the
        ModelArts service if applicable.

        :raises BizError: service missing, status not stoppable, or the
            optimistic status update lost a race.
        """
        service = db.session.query(Service).filter_by(id=service_id).first()
        if service is None:
            raise BizError(1000, f'服务不存在:{service_id}')
        logging.info(f'[stop_by_id] 开始停止服务, {service_id}, {service.status}')

        # Check if service status allows stopping
        # 检查服务状态是否允许停止
        if ServiceStatus.can_stop(service.status) is False:
            raise BizError(CommonErrorCode.NOT_ALLOWED, f'当前服务状态不允许停止:{service.status}')
        # Remember the pre-stop status: billing below only applies when the
        # service was actually running.
        old_status = service.status

        # Update service status to stopped
        # 更新服务状态为已停止
        # Conditional update (id + current status) acts as an optimistic lock
        # against concurrent status changes.
        res = (
            db.session.query(Service)
            .filter(Service.id == service.id, Service.status == service.status)
            .update(
                {
                    'status': ServiceStatus.stopped.value,
                    'error_msg': '',
                    'deploy_time': '',
                    'acc_running_time': service.get_acc_running_time(),
                },
                synchronize_session=False,
            )
        )
        if res == 0:
            raise BizError(CommonErrorCode.BUSY, '系统繁忙,请稍后再试')

        # Update inference service instances status to stopped
        # 更新推理服务实例状态为已停止
        instances = (
            db.session.query(InferenceService)
            .filter(InferenceService.service_id == service_id)
            .all()
        )
        for item in instances:
            item.status = ServiceStatus.stopped.value
            item.error_msg = ''
        db.session.commit()

        # If private cloud and service was running, perform billing
        # 如果是私有云且服务正在运行，则执行计费
        if is_private():
            logging.info(f'service_id:{service_id}, old_status:{old_status}')
            if old_status == ServiceStatus.running.value:
                configs = json.loads(service.configs)
                # Only public-pool usage is billed.
                pool_type = configs[0].get('pool_type', EnumPoolType.public.value)
                if pool_type == EnumPoolType.public.value:
                    model_deploying_billing(service, db.session)

        # Clean up resources
        # 清理资源
        if is_private():
            for ins in instances:
                # Check if multi-node multi-GPU is running
                # 判断是否是多机多卡运行
                expand = json.loads(ins.expand)
                if is_multi_node(resource_cfg=expand.get('specifications', {})):
                    delete_old_service(
                        ins.name, namespace=ins.namespace, region=g.region.key, multi_node=True
                    )
                else:
                    delete_old_service(
                        ins.name, namespace=ins.namespace, region=g.region.key, multi_node=False
                    )
                # Commented out: alternative delete old service call
                # 已注释：备用删除旧服务调用
                # delete_old_service(
                #     service_name=ins.name, namespace='service', region=g.region.key
                # )

        # If ModelArts, stop ModelArts service
        # 如果是ModelArts，则停止ModelArts服务
        if is_modelarts():
            try:
                if service.ma_id is not None and len(service.ma_id) > 0:
                    predictor_instance = ma_predictor(service.ma_id)
                    predictor_instance.update_service_config(status='stopped')
            except modelarts.exception.apig_exception.APIGException as e:
                err_code, _ = parse_apig_exception(e)

                if err_code == 'ModelArts.3502':  # If service does not exist, ignore error / 如果服务不存在，则忽略报错
                    logging.info(e)
                else:
                    raise e

    # Choose an inference instance based on weight
    # 按权重选择一个服务实例
    def _choose_instance(self, service_id):
        """Pick one inference instance of the service, weighted by traffic share.

        With a single instance it is returned directly; with several, a
        weighted random draw over each instance's ``weight`` distributes
        traffic proportionally.
        """
        candidates = db.session.query(InferenceService).filter_by(service_id=service_id).all()
        if candidates is None:
            raise BizError(
                InferErrorCode.SERVICE_NOT_EXIST, f'服务实例不存在,service_id: {service_id}'
            )

        # Single candidate: no draw necessary.
        if len(candidates) <= 1:
            return candidates[0]

        # Weighted random selection splits traffic across instances.
        weights = [c.weight for c in candidates]
        picked = random.choices(range(len(candidates)), weights=weights)[0]
        return candidates[picked]

    # Deprecated predict API endpoint
    # 已废弃的预测API端点
    @expose(
        '/<service_id>/<regx("infer.*?"):subpath>',
        methods=['GET', 'POST', 'PUT', 'DELETE', 'HEAD', 'OPTIONS', 'PATCH'],
    )
    @wrap_response
    def predict(self, service_id, subpath):
        """Deprecated inference endpoint; traffic must go through taichu-infer-proxy."""
        raise BizError(CommonErrorCode.NOT_ALLOWED, '此接口已废弃，请使用taichu-infer-proxy')

    # Deprecated download API endpoint (for streaming traffic, mainly file download)
    # 已废弃的下载API端点（流式流量转发，主要用于文件下载）
    @expose(
        '/<service_id>/<regx("download.*?"):subpath>',
        methods=['GET', 'POST', 'PUT', 'DELETE', 'HEAD', 'OPTIONS', 'PATCH'],
    )
    def download(self, service_id, subpath):
        """Deprecated streaming download endpoint; use taichu-infer-proxy instead."""
        raise BizError(CommonErrorCode.NOT_ALLOWED, '此接口已废弃，请使用taichu-infer-proxy')

    # Static method for ModelArts prediction
    # ModelArts预测的静态方法
    @staticmethod
    def ma_predict(service_id, subpath, stream=False):
        """Proxy the current Flask request to the ModelArts service's endpoint.

        Resolves the service's access address, rewrites the request path onto
        it, and forwards method, body, query string and Content-Type. With
        ``stream=True`` the response is streamed instead of buffered.

        :raises BizError: when the service has no ModelArts id, or the
            upstream request times out.
        """
        service = db.session.query(Service.ma_id).filter_by(id=service_id).first()
        # Check if service exists
        # 检查服务是否存在
        if service is None or service.ma_id is None or len(service.ma_id) == 0:
            raise BizError(InferErrorCode.SERVICE_NOT_EXIST, '您访问到服务已被删除')

        # Forward the original query string, if any, as a parsed dict.
        query_str = request.headers.environ.get('QUERY_STRING')
        query_dic = None
        if len(query_str) > 0:
            query_dic = parse.parse_qs(query_str)

        p = ma_predictor(service.ma_id)
        access_address = p.predictor_instance.get_service_info()['access_address']
        path = subpath
        # Split the access address into host and base path, then join the
        # requested subpath onto that base path.
        # NOTE(review): assumes access_address always has the form
        # scheme://host/base-path — confirm against the ModelArts SDK.
        access_host = access_address.split('//')[1].split('/', 1)[0]
        access_request = parse.urljoin(
            '/' + access_address.split('//')[1].split('/', 1)[1] + '/', path.lstrip('/')
        )

        headers = {}
        predict_body = request.get_data()
        content_type = request.headers.get('Content-Type')
        if content_type:
            headers['Content-Type'] = content_type
        # Shallow-copy the SDK session so we can retarget its host without
        # mutating the shared predictor session.
        tmp_session = copy.copy(p.session)
        tmp_session.host = access_host

        try:
            if not stream:
                predict_result = ma_client.request(
                    tmp_session,
                    request.method,
                    access_request,
                    body=predict_body,
                    query=query_dic,
                    headers=headers,
                )

                return predict_result

            return ma_client.stream_request(
                tmp_session,
                request.method,
                access_request,
                body=predict_body,
                query=query_dic,
                headers=headers,
            )
        except requests.Timeout:
            raise BizError(ModelartsErrorCode.TIMEOUT, '推理服务繁忙，请稍后重试')

    # Static method to batch get service details
    # 批量获取服务详细信息的静态方法
    @staticmethod
    def batch_get_service_detail(ids):
        """Fetch the Service rows matching the given ids.

        :param ids: sequence of service ids; empty input short-circuits to
            an empty list (avoids issuing an ``IN ()`` query).
        :return: list of Service ORM objects, possibly empty.
        """
        if not ids:
            return []
        # Query.all() always returns a list (never None), so the previous
        # None/empty re-check was dead code.
        return db.session.query(Service).filter(Service.id.in_(ids)).all()

    # Delete service API endpoint
    # 删除服务API端点
    @expose('/<service_id>/delete', methods=['GET'])
    @wrap_response
    def delete(self, service_id):
        """Delete the service identified by ``service_id``.

        Endpoint wrapper that delegates all work to :meth:`delete_by_id`;
        like the original it returns no payload.
        """
        self.delete_by_id(service_id)

    # Core deletion routine shared by the API endpoint and internal callers.
    @staticmethod
    def delete_by_id(service_id):
        """Delete a service and every record that depends on it.

        Visibility is scoped by role: admins may delete any service, tenant
        admins only services created within their tenant, other users only
        their own. Silently returns when no visible service matches.

        Args:
            service_id: primary key of the Service to delete.

        Raises:
            BizError: when an experience-page model still references this
                service (CommonErrorCode.NOT_ALLOWED).
        """
        logging.info(f'开始删除service,{service_id}')
        q = db.session.query(Service.ma_id).filter(Service.id == service_id)

        # Narrow the lookup according to the caller's role.
        if g.user.is_admin:
            pass  # admins may delete any service — leave the query unscoped
        elif g.user.is_tenant_admin:
            q = q.outerjoin(MyUser, MyUser.id == Service.created_by_fk).filter(
                MyUser.tenant_id == g.user.tenant_id
            )
        else:
            q = q.filter(Service.created_by_fk == g.user.id)

        srv = q.first()
        if srv is None:
            return  # nothing visible to this caller — treat as success

        # Refuse deletion while an experience-page model still references
        # this service.
        ex_model = (
            db.session.query(ExModel.id, ExModel.name)
            .filter(ExModel.service_id == service_id)
            .first()
        )
        if ex_model is not None:
            # Bug fix: original message read「体验体验」(duplicated word);
            # it should say the service is used by an experience model.
            raise BizError(
                CommonErrorCode.NOT_ALLOWED,
                f'当前服务正在被体验模型使用，请先删除体验模型: {ex_model.name}',
            )

        # Settle billing for a running service on the public pool before
        # tearing it down (private deployments only).
        if is_private():
            service = db.session.query(Service).filter_by(id=service_id).first()
            # Guard: the row may have been deleted concurrently since the
            # first query; the original code would raise AttributeError here.
            if service is not None:
                old_status = service.status
                logging.info(f'service_id:{service_id}, old_status:{old_status}')
                if old_status == ServiceStatus.running.value:
                    configs = json.loads(service.configs)
                    pool_type = configs[0].get('pool_type', EnumPoolType.public.value)
                    if pool_type == EnumPoolType.public.value:
                        model_deploying_billing(service, db.session)

        # Synchronously delete the backing ModelArts service; a missing
        # remote service (ModelArts.3502) is not an error for deletion.
        if is_modelarts() and srv.ma_id:
            try:
                ma_client.delete_service(srv.ma_id)
            except modelarts.exception.apig_exception.APIGException as e:
                err_code, _ = parse_apig_exception(e)
                if err_code == 'ModelArts.3502':
                    logging.info(e)
                else:
                    raise  # bare raise preserves the original traceback

        # Tear down the per-instance inference deployments (private only).
        instances = db.session.query(InferenceService).filter_by(service_id=service_id).all()

        if is_private():
            for ins in instances:
                # Multi-node (multi-machine / multi-GPU) deployments take a
                # different teardown path; the two original branches differed
                # only in this flag, so they are collapsed into one call.
                expand = json.loads(ins.expand)
                multi = is_multi_node(resource_cfg=expand.get('specifications', {}))
                delete_old_service(
                    ins.name,
                    namespace=ins.namespace,
                    region=g.region.key,
                    multi_node=multi,
                )

        # Finally remove the InferenceService, Service and Labels rows and
        # commit in a single transaction.
        db.session.query(InferenceService).filter(
            InferenceService.service_id == service_id
        ).delete(synchronize_session=False)
        db.session.query(Service).filter(Service.id == service_id).delete(
            synchronize_session=False
        )
        db.session.query(Labels).filter(
            Labels.rel_type == 'service', Labels.rel_id == service_id
        ).delete(synchronize_session=False)
        db.session.commit()

    # Get deploy resource API endpoint
    # 获取部署资源API端点
    @expose('/deploy/resource', methods=['POST'])
    @wrap_response
    def get_resource(self):
        model_source = request.json.get('model_source', 'image')
        task_type = request.json.get('task_type')

        # Model source is from model training task
        # 模型来源是模型训练任务
        user_id = g.user.id
        region_key = g.region.key

        # Validate model source
        # 验证模型来源
        if model_source not in ['image', 'finetune']:
            raise BizError(1000, '模型来源必须来源为：image or finetune')
