# Import the base Model class from Flask-AppBuilder.
from flask_appbuilder import Model
# Import column types from SQLAlchemy to define the database schema.
from sqlalchemy import Column, Integer, String, Text

# Import a custom audit mixin for tracking creation/modification times and users.
from myapp.models.helpers import AuditMixinNullable


# Get the metadata object from the base Flask-AppBuilder Model.
# This metadata is shared across all models and holds the schema information.
metadata = Model.metadata


# Define the DatasetAug class, which maps to the 'dataset_aug' table.
# This model represents a dataset augmentation task.
# ORM model mapping to the 'dataset_aug' table: one row per dataset
# augmentation task, tracking its lifecycle, lineage, and configuration.
class DatasetAug(Model, AuditMixinNullable):
    """ORM model for a dataset-augmentation task (table ``dataset_aug``).

    Each row records one augmentation run: the parent dataset and source
    version it reads from, the destination version it produces, the task
    configuration, and the task's current status/progress. Audit columns
    (creation/modification time and user) are inherited from
    ``AuditMixinNullable``.

    NOTE(review): column defaults are inconsistent — some columns set only
    the Python-side ``default``, ``err_msg`` sets only the DDL-side
    ``server_default``, and ``dataset_path``/``region`` set both. Rows
    inserted outside the ORM will therefore get empty-string defaults only
    where ``server_default`` exists — confirm this is intended.
    """

    # Physical table name in the database.
    __tablename__ = 'dataset_aug'
    # Auto-incrementing surrogate primary key.
    id = Column(Integer, primary_key=True, autoincrement=True)
    # Human-readable name of the augmentation task (DB comment: "name").
    name = Column(String(200), nullable=False, default='', comment='名称')

    # Error message recorded when the task fails; empty string otherwise.
    # Only this column uses server_default (DDL default) without a
    # Python-side default — see the class-level NOTE.
    err_msg = Column(String(200), nullable=False, server_default='', comment='错误信息')
    # Storage path of the resulting augmented dataset.
    dataset_path = Column(
        String(200), nullable=False, default='', server_default='', comment='数据集路径'
    )
    # Task state machine value; the DB comment enumerates the expected
    # values: uploading, processing, succeed, failed.
    status = Column(
        String(200),
        nullable=False,
        default='',
        comment='数据集状态，uploading、processing、succeed、failed',
    )
    # Processing progress; presumably a 0-100 percentage — not enforced here.
    progress = Column(Integer, nullable=False, default=0, comment='处理进度')
    # ID of the parent dataset this augmentation belongs to. Stored as a
    # plain Integer with no ForeignKey constraint — referential integrity
    # is not enforced at the DB level.
    dataset_id = Column(Integer, nullable=False, default=0, comment='数据集id')

    # ID of the source dataset version being augmented (no FK constraint).
    src_dataset_version_id = Column(Integer, nullable=False, default=0, comment='源数据集版本id')
    # Denormalized name of the source dataset version.
    src_dataset_version_name = Column(
        String(200), nullable=False, default='', comment='源数据集版本名称'
    )
    # ID of the destination dataset version produced by this task (no FK).
    dst_dataset_version_id = Column(Integer, nullable=False, default=0, comment='目标数据集版本id')
    # Denormalized name of the destination dataset version.
    dst_dataset_version_name = Column(
        String(200), nullable=False, default='', comment='目标数据集版本名称'
    )
    # Task configuration blob; presumably a JSON string — serialization
    # format is not enforced by this model.
    configs = Column(Text, nullable=False, default='', comment='配置信息')
    # Count of data entries processed so far.
    processed_num = Column(Integer, nullable=False, default=0, comment='已处理的数据量')
    # Region where the augmentation runs; defaults to 'default' on both the
    # Python side and in the DDL.
    region = Column(String(100), nullable=False, default='default', server_default='default', comment='地区')
