# Import the json module for handling JSON data.
import json

# Import the base Model class from Flask-AppBuilder.
from flask_appbuilder import Model
# Import column types and relationship-defining constructs from SQLAlchemy.
from sqlalchemy import Column, ForeignKey, Integer, String, Text
# Import the relationship function to define relationships between models.
from sqlalchemy.orm import relationship

# Import specific configurations related to training.
from myapp.configs.training import PRETRAIN_CONFIG
# Import custom base models and helper functions.
from myapp.models.base import MyappModelBase
from myapp.models.helpers import AuditMixinNullable, parse_model_path
# Import related database models.
from myapp.models.model_job import Workflow
from myapp.models.model_team import Project
from myapp.models.model_train_model import Training_Model as Model_Base


# Get the metadata from the base Flask-AppBuilder Model.
# This is a common pattern for SQLAlchemy model definitions in Flask-AppBuilder.
metadata = Model.metadata


# Define the Model_Version class, which maps to the 'model_version' table.
# This table stores versions of a parent training model.
class Model_Version(Model, AuditMixinNullable, MyappModelBase):
    """A concrete version of a parent Training_Model.

    Each row records where the versioned model artifact lives (``path``),
    how it is served (image, command, port, serving framework, device type),
    and optional compression / LoRA-adapter state consumed by the deployment
    pipeline.
    """

    # Specify the table name in the database.
    __tablename__ = 'model_version'
    # Define the primary key column.
    id = Column(Integer, primary_key=True)

    # Foreign key to the 'project' table.
    project_id = Column(Integer, ForeignKey('project.id'))
    # Establish the ORM relationship to the Project model.
    project = relationship(Project, foreign_keys=[project_id])

    # Foreign key to the 'model' table (the parent Training_Model).
    model_id = Column(Integer, ForeignKey('model.id'))
    # Establish the ORM relationship to the parent Model_Base (Training_Model).
    model = relationship(Model_Base, foreign_keys=[model_id])

    # The version name (e.g., "1.0.0").
    name = Column(String(100), nullable=False)
    # The status of the model version (e.g., creating, success, failed), as an integer enum.
    status = Column(Integer, nullable=True, default=0)
    # A text field to store any error messages if the status is 'failed'.
    failed_msg = Column(Text(65536), nullable=True, default='')
    # The size of the model version files (e.g., "1.2GB").
    size = Column(String(100), nullable=True, default='0')
    # The storage path of the model version files. Can be a JSON string.
    path = Column(String(200), nullable=True, default='')
    # The ID of the container image used for this model version.
    image_id = Column(Integer, nullable=True)
    # The full name of the container image (e.g., "repository/image:tag").
    image = Column(String(200), nullable=True, default='')
    # The volume mount configuration for the model.
    volume_mount = Column(String(1000), nullable=True, default='')
    # The source of the model version (e.g., trained, uploaded), as an integer enum.
    source = Column(Integer, nullable=True, default=0)
    # The ID related to the source (e.g., the training job ID).
    source_id = Column(Integer, nullable=True)
    # A description of the model version.
    describe = Column(String(1000), nullable=True, default='')
    # The serving framework used (e.g., "tensorflow", "pytorch").
    serving_framework = Column(String(100), nullable=True, default='other')
    # The CPU architecture required (e.g., "x86_64", "arm64").
    cpu_type = Column(String(100), nullable=True, default='x86_64')
    # The type of device required for inference (e.g., "cpu", "gpu", "npu").
    device_type = Column(String(100), nullable=True, default='cpu')
    # The network port the model server listens on.
    port = Column(String(100), nullable=True, default='8080')
    # A JSON string defining the APIs exposed by the model server.
    apis = Column(Text(65536), nullable=True, default='{}')
    # The ID of the corresponding model version in Huawei's ModelArts platform.
    ma_id = Column(String(100), default='', comment='modelArts对应的模型版本id')
    # The working directory inside the container.
    working_dir = Column(String(100), default='', comment='工作目录')
    # The command used to start the model server.
    command = Column(String(1000), default='', comment='启动命令')
    # An ID for any associated license.
    license_id = Column(String(100), default='', comment='授权id')
    # A JSON string containing specific configurations for deployment.
    deploy_cfgs = Column(Text(65536), nullable=True, default='{}', comment='部署配置')
    # A flag indicating if the model is compressed (0/1).
    compress = Column(Integer, nullable=False, server_default='0', default=0, comment='是否压缩')
    # A flag indicating if the model supports LoRA (Low-Rank Adaptation).
    lora_adapter = Column(
        Integer, nullable=False, server_default='0', default=0, comment='是否支持lora_adapter'
    )
    # The status of the LoRA adapter (e.g., not loaded, loaded).
    adapter_status = Column(
        Integer, nullable=False, server_default='0', default=0, comment='lora_adapter状态'
    )
    # Information about the loaded LoRA adapter, stored as a JSON string.
    adapter_info = Column(Text(65536), nullable=True, default='{}', comment='lora_adapter信息')
    # The storage path for the merged model (base model + LoRA adapter).
    merge_path = Column(String(200), nullable=True, default='')
    # The geographical or logical region where the model version is stored/managed.
    region = Column(String(100), nullable=False, default='default', server_default='default', comment='地区')

    def __repr__(self):
        """Return the row id as a string.

        Bug fix: the original returned the raw ``int`` id; ``__repr__`` must
        return ``str``, otherwise ``repr(instance)`` raises
        ``TypeError: __repr__ returned non-string``.
        """
        return str(self.id)

    def get_model_path(self):
        """Parse and return the model path from the 'path' field."""
        return parse_model_path(self.path)

    def _deploy_cfg(self):
        """Parse ``deploy_cfgs`` as JSON, tolerating NULL/empty values.

        The column is nullable, so guard against ``json.loads(None)``.
        """
        return json.loads(self.deploy_cfgs or '{}')

    def _preset_cfg(self):
        """Look up the PRETRAIN_CONFIG entry for this version's task/device.

        Keys on the ``task_type`` from the deployment configuration and this
        version's ``device_type``; returns ``{}`` when no preset matches.
        """
        task_type = self._deploy_cfg().get('task_type')
        return PRETRAIN_CONFIG.get(task_type, {}).get(self.device_type, {})

    @property
    def can_compress(self):
        """Whether this model version can still be compressed.

        False when already compressed, when the source training workflow
        cannot be found, or when that workflow used LoRA (LoRA-trained models
        are not compressible here).
        """
        # Lazily import the db session to avoid circular dependencies.
        from myapp.app import db

        # Already compressed — cannot compress again.
        if self.compress == 1:
            return False
        # Find the source workflow (training job).
        workflow = db.session.query(Workflow).filter_by(id=self.source_id).first()
        if not workflow:
            return False
        # Guard against a NULL/empty algorithm payload before JSON-decoding.
        algorithm = json.loads(workflow.algorithm or '{}')
        if algorithm.get('resource', {}).get('lora', False):
            return False
        return True

    @property
    def can_lora_adapter(self):
        """Whether this version supports loading a LoRA adapter.

        Requires both the ``lora_adapter`` flag on the row and a
        ``lora_adapter`` entry in the matching preset configuration.
        """
        if self.lora_adapter != 1:
            return False
        return bool(self._preset_cfg().get('lora', {}).get('lora_adapter'))

    @property
    def base_model_code(self):
        """The preset base-model code for this version's task/device ('' if none)."""
        return self._preset_cfg().get('base_model_code', '')

    @property
    def model_code(self):
        """The full, unique model code used for inference.

        For LoRA-capable versions this is a composite of the base model code,
        the source training job id, the preset inference instance id, and a
        conventionally-named LoRA adapter service. Otherwise a user-facing
        placeholder message is returned.
        """
        if self.can_lora_adapter:
            preset = self._preset_cfg()
            base_model_code = preset.get('base_model_code', '')
            infer_id = preset.get('lora', {}).get('infer_id')
            # The LoRA adapter name follows a fixed convention keyed on the row id.
            lora_name = f'lora_adapter_service_{self.id}'
            return f'{base_model_code}:train-{self.source_id}:instance-{infer_id}:{lora_name}'
        # Non-LoRA versions are only callable after deployment (user-facing text).
        return '仅部署后可调用'
