--- START OF FILE view_model_version.py ---

# Import standard library modules.
import json
import logging
import re

# Import Flask and Flask-AppBuilder components.
from flask import g, request
from flask_appbuilder.models.sqla.interface import SQLAInterface
# Import the modelarts SDK.
import modelarts
from modelarts.model import Model

# Import custom components from the application.
from myapp.apis.base import expose
from myapp.app import app, appbuilder, db
# Import specific configurations.
from myapp.configs.auto_learning import AUTO_LEARNING_CFGS
# Import constants for error codes and response handling.
from myapp.const.error import CommonErrorCode
from myapp.const.response import BizError, wrap_response
# Import service-level constants and enums.
from myapp.const.service import (
    EnumFramework,
    EnumModelDimension,
    EnumModelSource,
    EnumModelStatus,
    EnumModelType,
    defaultInt,
)
# Import database models.
from myapp.models.model_job import Workflow
from myapp.models.model_model_version import Model_Version
from myapp.models.model_serving import InferenceService
from myapp.models.model_train_model import Training_Model
# Import third-party utility functions.
from myapp.third.docker.regexp import has_tag, is_valid_image_name
from myapp.third.modelarts import ma_client
from myapp.third.modelarts.ma_client import get_ma_session
from myapp.third.modelarts.tools import parse_apig_exception
# Import utility functions from the application.
from myapp.utils import env
from myapp.utils.env import is_modelarts, is_private

# Import configurations and models from the current package.
from ..configs.training import PRETRAIN_CONFIG
from ..models.model_auto_learning import AutoLearning
from ..models.model_user import MyUser
from ..third.lora_adapter.api import load_lora_adapter
from ..utils.storage import get_path_size, size_str, storage_mgr
# Import the custom base API class.
from .baseApi import MyappModelRestApi
# Import utility functions from a sibling view module.
from .view_train_model import get_apis_by_type, get_image_by_type


# Shortcut to the Flask application's configuration mapping (read-only use).
conf = app.config


# This base class defines the common configuration for Model_Version views (both UI and API).
class Model_Version_ModelView_Base:
    # Set the data model to Model_Version using the SQLAlchemy interface.
    datamodel = SQLAInterface(Model_Version)
    # Base permissions for this view. 'can_add' is intentionally omitted here:
    # versions are created through the custom /add endpoint instead.
    base_permissions = ['can_edit', 'can_delete', 'can_list', 'can_show']
    # Set the default sorting order for the list view (by id, descending).
    base_order = ('id', 'desc')
    # Specify the columns that can be used for sorting.
    order_columns = ['id']

    # Define the columns to be displayed in the list view.
    # NOTE: 'source_id' was previously listed twice; the duplicate is removed.
    list_columns = [
        'id',
        'name',
        'size',
        'path',
        'image',
        'image_id',
        'volume_mount',
        'source',
        'source_id',
        'describe',
        'serving_framework',
        'cpu_type',
        'device_type',
        'port',
        'apis',
        'changed_on',
        'failed_msg',
        'status',
        'model_id',
        'deploy_cfgs',
        'compress',
        'can_compress',
        'lora_adapter',
        'can_lora_adapter',
    ]

    # Define the columns that are searchable in the UI.
    search_columns = ['id', 'model_id', 'status']

    # Define the columns to be included in the add form.
    add_columns = [
        'project',
        'name',
        'size',
        'path',
        'image',
        'image_id',
        'volume_mount',
        'source',
        'source_id',
        'describe',
        'serving_framework',
        'cpu_type',
        'device_type',
        'port',
        'apis',
        'model_id',
        'ma_id',
        'working_dir',
        'command',
    ]
    # Use the same columns for the edit form as the add form.
    edit_columns = add_columns
    # Define the columns to be displayed in the show/detail view.
    # NOTE: 'source_id' was previously listed twice; the duplicate is removed.
    show_columns = [
        'id',
        'name',
        'size',
        'path',
        'image',
        'image_id',
        'volume_mount',
        'source',
        'source_id',
        'describe',
        'serving_framework',
        'cpu_type',
        'device_type',
        'port',
        'apis',
        'changed_on',
        'failed_msg',
        'status',
        'model_id',
        'model',
        'deploy_cfgs',
        'compress',
        'can_compress',
        'lora_adapter',
        'can_lora_adapter',
    ]

    # Hook to pre-process data before sending the list response.
    def pre_get_list(self, resp):
        """Convert enum integer fields to readable names for every list item."""
        for data in resp['data']:
            data['source'] = EnumModelSource.get_name(data.get('source'))
            data['status'] = EnumModelStatus(data.get('status', defaultInt)).name
        return resp

    # Hook to pre-process data before sending the single item (get) response.
    def pre_get(self, resp):
        """Convert enum integer fields to readable names for a single item."""
        data = resp['data']
        data['source'] = EnumModelSource.get_name(data.get('source'))
        data['status'] = EnumModelStatus(data.get('status', defaultInt)).name
        return resp

    # Hook to perform actions before deleting an item.
    def pre_delete(self, item):
        """Before local deletion, remove the matching ModelArts model (if any)."""
        # Use logging instead of print so output goes through the app's handlers.
        logging.info('pre_delete model version id: %s', item.id)
        # If the application is running in the ModelArts environment.
        if is_modelarts():
            # Find the parent training model to get its ModelArts ID.
            train_model = db.session.query(Training_Model).filter_by(id=item.model_id).first()
            if train_model is None:
                # Parent model row is gone — nothing to clean up in ModelArts.
                logging.warning(
                    'training model %s not found, skip ModelArts cleanup', item.model_id
                )
                return
            model_id = train_model.ma_id
            logging.info('deleting modelarts model id: %s', model_id)
            # Get a ModelArts session and delete the corresponding model instance in the cloud.
            ma_session = get_ma_session()
            model_instance = Model(ma_session, model_id=model_id)
            model_instance.delete_model()
            logging.info('modelarts model delete finished')


# This class provides the REST API endpoints for the Model_Version model.
class Model_Version_ModelView_Api(Model_Version_ModelView_Base, MyappModelRestApi):  # noqa
    # Set the data model to Model_Version.
    datamodel = SQLAInterface(Model_Version)
    # Define the base route for this API.
    route_base = '/model_version_modelview/api'
    # Build a NEW list instead of appending to the inherited attribute:
    # `MyappModelRestApi.base_permissions.append(...)` would mutate the parent
    # class's shared list and leak 'can_add_version' into every other API class
    # that inherits from MyappModelRestApi (shared-list aliasing bug).
    base_permissions = MyappModelRestApi.base_permissions + ['can_add_version']

    # Expose a custom endpoint for listing model versions using POST for complex filtering.
    @expose('/list', methods=['POST'])
    # Use a custom decorator to wrap the response in a standard format.
    @wrap_response
    def list(self):
        """List model versions with pagination, filtering and per-role authorization.

        Request JSON: {page, page_size, filters: [{col, opr, value}, ...]}.
        Returns {'count': total, 'data': [serialized version dicts]}.
        """
        req = request.json
        # Extract pagination and filtering parameters.
        page = req.get('page', 0)
        page_size = req.get('page_size', 10)
        filters = req.get('filters', [])

        # Get the region from the global request context.
        region_key = g.region.key

        # Sanitize pagination values (page_size is capped at 100).
        if page < 0:
            page = 0
        if page_size < 0:
            page_size = 0
        if page_size > 100:
            page_size = 100

        # Start building the SQLAlchemy query.
        q = db.session.query(Model_Version)

        # Authorization: admins see everything, tenant admins see their tenant,
        # regular users only see versions they created themselves.
        if g.user.is_admin:
            pass
        elif g.user.is_tenant_admin:
            q = q.outerjoin(MyUser, MyUser.id == Model_Version.created_by_fk).filter(
                MyUser.tenant_id == g.user.tenant_id
            )
        else:
            q = q.filter(Model_Version.created_by_fk == g.user.id)

        # Apply filters from the request payload.
        for f in filters:
            if f['opr'] == 'eq':
                q = q.filter(getattr(Model_Version, f['col']) == f['value'])
            elif f['opr'] == 'ct':
                q = q.filter(getattr(Model_Version, f['col']).like(f'%{f["value"]}%'))
            elif f['opr'] == 'in':
                q = q.filter(getattr(Model_Version, f['col']).in_(f['value']))
            else:
                # Raise an error for unsupported filter operators.
                raise BizError(1000, f"不支持的filter,opr:{f['opr']}")

        # Filter by region if a region is set.
        if region_key is not None:
            q = q.filter(Model_Version.region == region_key)

        # Get the total count for pagination.
        count = q.count()
        # Execute the query with ordering, offset, and limit.
        results = (
            q.order_by(Model_Version.created_on.desc())
            .offset(page * page_size)
            .limit(page_size)
            .all()
        )
        if results is None:
            return {'count': 0, 'data': []}

        # Format the query results into a list of dictionaries for the JSON response.
        ret = [
            {
                'id': item.id,
                'name': item.name,
                'size': item.size,
                'path': item.path,
                'image': item.image,
                'volume_mount': item.volume_mount,
                'source': EnumModelSource.get_name(item.source),
                'source_id': item.source_id,
                'describe': item.describe,
                'serving_framework': item.serving_framework,
                'cpu_type': item.cpu_type,
                'device_type': item.device_type,
                'port': item.port,
                'apis': item.apis,
                'created_on': item.created_on.strftime('%Y-%m-%d %H:%M:%S'),
                'changed_on': item.changed_on.strftime('%Y-%m-%d %H:%M:%S'),
                'failed_msg': item.failed_msg,
                'status': EnumModelStatus(item.status).name,
                'model_id': item.model_id,
                'deploy_cfgs': item.deploy_cfgs,
                'compress': item.compress,
                'can_compress': item.can_compress,
                'lora_adapter': item.lora_adapter,
                'can_lora_adapter': item.can_lora_adapter,
                'region': item.region,
            }
            for item in results
        ]

        return {'count': count, 'data': ret}

    # Expose an endpoint to delete a model version.
    @expose('/delete/<model_version_id>', methods=['GET'])
    @wrap_response
    def del_model(self, model_version_id):
        """Delete a model version, refusing if it backs an inference service.

        Also deletes the cloud-side ModelArts model when applicable.
        Raises BizError(NOT_FOUND) when the version is missing or not visible
        to the current user.
        """
        # Query for the model version, applying authorization filters.
        query = db.session.query(
            Model_Version.model_id, Model_Version.id, Model_Version.ma_id, Model_Version.source
        ).filter(Model_Version.id == model_version_id)
        if not g.user.is_admin:
            query = query.filter(Model_Version.created_by_fk == g.user.id)
        mv = query.first()
        if mv is None:
            raise BizError(CommonErrorCode.NOT_FOUND, '模型版本不存在')

        # Check whether the model version is currently used by any inference services.
        instances = (
            db.session.query(InferenceService)
            .filter(InferenceService.model_version_id == model_version_id)
            .all()
        )
        if instances:
            # In use: prevent deletion and tell the user which services block it.
            name = ''.join(f' {ins.service.name}' for ins in instances)
            raise BizError(
                1000, f'无法删除该模型版本，因为正在被推理服务使用,请先删除服务：{name}'
            )

        # If in the ModelArts environment and the model has a ModelArts ID.
        if is_modelarts() and mv.ma_id and len(mv.ma_id) > 0:
            # Do not delete models from the auto-learning source to avoid
            # disrupting the platform.
            if mv.source != EnumModelSource.auto_learning.value:
                try:
                    logging.info(f'delete modelarts model: {mv.ma_id}')
                    # Call the ModelArts client to delete the model from the cloud.
                    ma_client.delete_model(mv.ma_id)
                except modelarts.exception.apig_exception.APIGException as e:
                    # ModelArts.3001 means the model no longer exists, which is
                    # acceptable here; anything else is re-raised.
                    err_code, _ = parse_apig_exception(e)
                    if err_code != 'ModelArts.3001':  # model does not exist
                        # Bare raise keeps the original traceback intact.
                        raise

        # Query again to delete the actual ORM object.
        query = db.session.query(Model_Version).filter(Model_Version.id == model_version_id)
        if not g.user.is_admin:
            query = query.filter(Model_Version.created_by_fk == g.user.id)
        query.delete()

        # Commit the transaction to the database.
        db.session.commit()

    # Expose an endpoint to edit a model version's metadata.
    @expose('/edit/<model_version_id>', methods=['POST'])
    @wrap_response
    def edit(self, model_version_id):
        """Update the 'describe' and/or 'apis' fields of a model version."""
        # Query for the model version, applying authorization filters.
        query = db.session.query(Model_Version).filter(Model_Version.id == model_version_id)
        if not g.user.is_admin:
            query = query.filter(Model_Version.created_by_fk == g.user.id)
        mv = query.first()
        if mv is None:
            raise BizError(CommonErrorCode.NOT_FOUND, '模型版本不存在')

        # Get the JSON payload from the request.
        req = request.json

        # Update the 'describe' field if provided.
        describe = req.get('describe')
        if describe is not None:
            mv.describe = describe

        # Update the 'apis' field if provided.
        apis = req.get('apis')
        if apis is not None:
            mv.apis = apis

        # Commit the changes to the database.
        db.session.commit()

    # Expose an endpoint to get the detailed information of a model version.
    @expose('/detail/<model_version_id>', methods=['GET'])
    @wrap_response
    def detail(self, model_version_id):
        """Return detailed info for a model version, its parent model and source task."""
        # Query for the model version, applying authorization filters.
        query = db.session.query(Model_Version).filter(Model_Version.id == model_version_id)
        if not g.user.is_admin:
            query = query.filter(Model_Version.created_by_fk == g.user.id)
        mv = query.first()
        if mv is None:
            raise BizError(CommonErrorCode.NOT_FOUND, '模型版本不存在')

        # Find the related source task (e.g., training job) if it exists.
        rel_task = None
        if mv.source_id and mv.source_id > 0:
            if mv.source == EnumModelSource.auto_learning.value:
                # If the source is auto-learning, find the AutoLearning task.
                task = (
                    db.session.query(AutoLearning)
                    .filter(AutoLearning.training_job_id == mv.source_id)
                    .first()
                )
                if task:
                    rel_task = {
                        'id': task.id,
                        'name': task.task_name,
                    }
            else:
                # Otherwise, find the Workflow (job) task.
                wf = db.session.query(Workflow).filter(Workflow.id == mv.source_id).first()
                if wf:
                    rel_task = {
                        'id': wf.id,
                        'name': wf.display_name,
                    }

        # Generate a signed URL for the model's readme file if it exists.
        readme = mv.model.readme
        if readme:
            readme = storage_mgr.generate_signed_url(
                readme, content_type='application/octet-stream'
            )

        # Construct the detailed response payload.
        return {
            'id': mv.id,
            'name': mv.name,
            'describe': mv.describe,
            'apis': mv.apis,
            'model_id': mv.model_id,
            'model': {
                'id': mv.model.id,
                'name': mv.model.name,
                'describe': mv.model.describe,
                'dimension': EnumModelDimension.get_name(mv.model.dimension),
                'sub_dimension': mv.model.sub_dimension,
                'framework': EnumFramework.get_name(mv.model.framework),
                'type': EnumModelType.get_name(mv.model.type),
                'readme': readme,
            },
            'image': mv.image,
            'path': mv.path,
            'status': EnumModelStatus.get_name(mv.status),
            'source': EnumModelSource.get_name(mv.source),
            'source_id': mv.source_id,
            'rel_task': rel_task,
            'size': mv.size,
            'created_on': mv.created_on.strftime('%Y-%m-%d %H:%M:%S'),
            'changed_on': mv.changed_on.strftime('%Y-%m-%d %H:%M:%S'),
            'base_model_code': mv.base_model_code,
            'model_code': mv.model_code,
        }

    # Expose an endpoint to add a new model version.
    @expose('/add', methods=['POST'])
    @wrap_response
    def add_version(self):
        """Create a new model version, optionally registering it in ModelArts.

        Validates the version name (x.y.z with single digits), enforces
        per-model uniqueness, and for finetune-sourced versions derives image,
        apis, path and deploy configuration from the training job.
        Returns {'id': new_version_id}.
        """
        req = request.json
        model_id = req.get('model_id')
        version_name = req.get('name')
        # Validate required input.
        if version_name is None:
            raise BizError(1000, '版本名称必填')
        if model_id is None:
            raise BizError(1000, '模型id必填')

        # Validate the version name format (raw string keeps the regex readable).
        if not re.search(r'^\d\.\d\.\d$', version_name):
            raise BizError(1000, f'版本号不合法,{version_name},请按照 0.0.1 格式填写')
        # Check if the version name already exists for this model.
        if (
            db.session.query(Model_Version.id)
            .filter_by(model_id=model_id, name=version_name)
            .first()
        ):
            raise BizError(1000, f'版本号{version_name}已存在')
        source = EnumModelSource.get_val(req.get('source'))

        deploy_cfgs = None
        resource_type = None
        lora_adapter = 0
        # Special handling if the model version is from a finetune task.
        if source == EnumModelSource.finetune.value:
            source_id = req.get('source_id')
            if not source_id:
                raise BizError(CommonErrorCode.INVALID_INPUT_PARAM, '训练任务ID必填')
            # Idempotency check: ensure this training job hasn't already been versioned.
            if (
                db.session.query(Model_Version.id)
                .filter_by(model_id=model_id, source_id=source_id)
                .first()
            ):
                raise BizError(1000, '您选中的训练任务已存在对应的模型版本')
            # Retrieve information from the source training task.
            task = db.session.query(AutoLearning).filter_by(training_job_id=source_id).first()
            if not task:
                raise BizError(CommonErrorCode.INVALID_INPUT_PARAM, '任务不存在')

            job = db.session.query(Workflow).filter_by(id=source_id).first()
            if job is None:
                raise BizError(CommonErrorCode.NOT_FOUND, '训练任务ID不存在')

            # Extract configuration details from the training job's algorithm definition.
            algorithm = json.loads(job.algorithm)
            resource_type = algorithm.get('resource', {}).get('resource_type')
            dimension = algorithm.get('pretrain_model', {}).get('dimension')
            lora = algorithm.get('resource', {}).get('lora')

            # Get the auto-learning configurations for the task type.
            all_cfgs = AUTO_LEARNING_CFGS.get(task.task_type)
            if lora:
                envs = all_cfgs.get(resource_type, {}).get('lora', {}).get('envs')
                specifications = (
                    all_cfgs.get(resource_type, {})
                    .get('lora', {})
                    .get('resources', {})
                    .get(task.region, {})
                    .get('specifications')
                )
            else:
                envs = all_cfgs.get(resource_type, {}).get('envs')
                specifications = (
                    all_cfgs.get(resource_type, {})
                    .get('resources', {})
                    .get(task.region, {})
                    .get('specifications')
                )
            # Automatically set the image, APIs, and path based on the task type.
            req['image'] = get_image_by_type(task.task_type, resource_type, lora)
            req['apis'] = get_apis_by_type(task.task_type)
            req['path'] = job.get_output_path()
            # If in a private environment, calculate the model size from the storage path.
            if is_private():
                req['size'] = size_str(get_path_size(req['path'], g.region.key))

            task_type = json.loads(job.labels).get('task_type')
            if task_type is None:
                raise BizError(CommonErrorCode.NOT_FOUND, f'训练任务 {source_id} 没有task_type')
            pool_type = algorithm.get('resource', {}).get('pool_type')
            # Construct the deployment configuration JSON.
            deploy_cfgs = {
                'pool_type': pool_type,
                'task_type': task_type,
                'envs': envs,
                'specifications': specifications,
            }
            # Set the lora_adapter flag if applicable (only for LLM models).
            if algorithm.get('lora_adapter', False) and dimension == 'llm':
                lora_adapter = 1

        # Validate the image name if provided.
        image = req.get('image')
        if image is not None and len(image) > 0:
            image = image.strip()
            if not is_valid_image_name(image):
                raise BizError(CommonErrorCode.INVALID_INPUT_PARAM, f'无效的镜像地址,{image}')
            # Default to the 'latest' tag if none is provided.
            if not has_tag(image):
                image = image + ':latest'

        ma_id = ''
        status = EnumModelStatus.success.value
        # If in a ModelArts environment, create a corresponding model in the cloud.
        if is_modelarts():
            train_model = (
                db.session.query(Training_Model).filter_by(id=req.get('model_id')).first()
            )

            name = train_model.name

            if image is not None and len(image) > 0:
                # Construct the model name for ModelArts, including the environment prefix.
                ma_model_name = f'taichu-{name}'
                if not env.is_online():
                    ma_model_name = f'taichu-{env.get_env()}-{name}'
                # Call the ModelArts client to create the model.
                # (version_name equals the validated req['name'] from above.)
                ma_id = ma_client.create_model(
                    name=ma_model_name,  # model name
                    desc=f'由taichu平台创建,模型名称:{name},创建人:{g.user.id}',
                    model_version=version_name,  # model version
                    source_location=image,  # model file location
                    model_type='Image',  # only the 'Image' type is open on ModelArts
                    install_type=['real-time'],  # deploy type, online service by default
                )
                # Set the initial status to 'importing' as ModelArts processes it.
                status = EnumModelStatus.importing.value

        # Create a new Model_Version instance and populate it with data from the request.
        version = Model_Version()
        version.model_id = model_id
        version.name = version_name
        version.project_id = req.get('project')
        version.size = req.get('size')
        version.path = json.dumps({'model_path': req.get('path')})
        version.image = image
        version.image_id = req.get('image_id')
        version.volume_mount = req.get('volume_mount')
        # Reuse the already-resolved enum value instead of recomputing it.
        version.source = source
        version.source_id = req.get('source_id')
        version.describe = req.get('describe')
        version.serving_framework = req.get('serving_framework')
        version.cpu_type = req.get('cpu_type')
        version.port = req.get('port')
        version.apis = req.get('apis')
        version.working_dir = req.get('working_dir')
        version.command = req.get('command')
        version.ma_id = ma_id
        version.status = status
        if resource_type:
            version.device_type = resource_type
        if deploy_cfgs:
            version.deploy_cfgs = json.dumps(deploy_cfgs, ensure_ascii=False)
        version.lora_adapter = lora_adapter
        # Add the new version to the database session and commit.
        db.session.add(version)
        db.session.flush()
        db.session.commit()

        # Return the ID of the newly created version.
        return {'id': version.id}

    # Expose an endpoint to load a LoRA adapter for a specific model version.
    @expose('/load_lora_adapter/<model_version_id>', methods=['POST'])
    @wrap_response
    def load_lora_adapter(self, model_version_id):
        """Load the version's LoRA adapter into its preset inference service.

        Idempotent: returns the existing adapter name when already loaded.
        Returns {'lora_name': name}.
        """
        # Query for the model version, ensuring the current user is the creator.
        query = db.session.query(Model_Version).filter(Model_Version.id == model_version_id)
        query = query.filter(Model_Version.created_by_fk == g.user.id)
        mv = query.first()
        if mv is None:
            raise BizError(CommonErrorCode.NOT_FOUND, '模型版本不存在')
        # Check if the model version supports LoRA.
        if mv.lora_adapter == 0:
            raise BizError(CommonErrorCode.NOT_FOUND, '模型版本不支持lora adapter')
        # Check if the adapter is already loaded.
        if mv.adapter_status == 1:
            # Fixed invalid call `logging.info(0, msg)`: the first argument must
            # be the message itself.
            logging.info('模型版本已挂载lora adapter')
            return {'lora_name': json.loads(mv.adapter_info).get('lora_name')}

        # Get the deployment configurations and resource type.
        task_type = json.loads(mv.deploy_cfgs).get('task_type')
        resource_type = mv.device_type
        # Look up the preset model configuration for LoRA adapters.
        preset_model = PRETRAIN_CONFIG.get(task_type, {}).get(resource_type, {}).get('lora', {})
        if not preset_model.get('lora_adapter'):
            raise BizError(CommonErrorCode.NOT_FOUND, '该模型不支持lora adapter')
        preset_model = preset_model.get('resources', {}).get(g.region.key, {})
        # Get the inference service URL and construct the LoRA adapter name and path.
        infer_url = preset_model.get('infer_url')
        lora_name = f'lora_adapter_service_{mv.id}'
        lora_path = json.loads(mv.path).get('model_path')
        # Call the external API to load the adapter into the inference service.
        load_lora_adapter(infer_url, lora_name, lora_path)
        infer_id = preset_model.get('infer_id')
        logging.info(f'load_lora_adapter[{lora_name}] for infer_id:{infer_id}')
        # Store the adapter information in the database.
        adapter_info = {'infer_id': infer_id, 'infer_url': infer_url, 'lora_name': lora_name}
        mv.adapter_status = 1
        mv.adapter_info = json.dumps(adapter_info, ensure_ascii=False)
        db.session.commit()
        # Return the name of the loaded adapter.
        return {'lora_name': lora_name}


# Register the Model Version API endpoints with the Flask-AppBuilder instance
# so they are served under /model_version_modelview/api.
appbuilder.add_api(Model_Version_ModelView_Api)
