# Import standard Python libraries for JSON operations, OS-level interactions, and time functions.
import json
import os
import time

# Import Markup for rendering safe HTML, and 'g' for Flask's application context globals.
from flask import Markup, g
# Import the base Model class from Flask-AppBuilder.
from flask_appbuilder import Model
# Import SQLAlchemy components for defining the model schema and listening to ORM events.
from sqlalchemy import Column, Integer, String, Text, event

# Import custom helper classes and utility functions from the application.
from myapp.models.helpers import AuditMixinNullable
from myapp.utils.storage import bucketName, cube_bucket


# Expose the shared SQLAlchemy MetaData object from the Flask-AppBuilder
# declarative base Model; every table declared in this module registers
# itself on this single schema registry.
metadata = Model.metadata


# Define the Dataset class, which maps to the 'dataset' table.
# This model represents a single dataset and its comprehensive metadata.
class Dataset(Model, AuditMixinNullable):
    """A single dataset and its full metadata.

    Covers descriptive fields (name, industry, usage), storage locations
    (origin/unpack/training paths, object-storage bucket resolution),
    annotation linkage, and publication/progress state.
    """

    __tablename__ = 'dataset'
    # Surrogate primary key.
    id = Column(Integer, primary_key=True)
    # Dataset display name.
    name = Column(String(200), nullable=True)
    # Labels / tags associated with the dataset.
    label = Column(String(200), nullable=True)
    # Detailed free-text description.
    describe = Column(String(2000), nullable=True)

    # Where the dataset came from: open-source, internal asset, or purchased.
    source_type = Column(String(200), nullable=True)
    # Industry the dataset is relevant to.
    industry = Column(String(200), nullable=True)
    # URL of an icon image representing the dataset.
    icon = Column(String(2000), nullable=True)
    # Data domain, e.g. vision, audio, text.
    field = Column(String(200), nullable=True)
    # Intended usage of the dataset.
    usage = Column(String(200), nullable=True)
    # Related research direction.
    research = Column(String(200), nullable=True)

    # Storage class, e.g. whether the data is compressed.
    storage_class = Column(String(200), nullable=True, default='')
    # Type of files in the dataset (e.g. png images, wav audio).
    file_type = Column(String(200), nullable=True, default='')

    # Year the dataset was created or is relevant to.
    years = Column(String(200), nullable=True)
    # Related URL(s), newline-separated (see url_html).
    url = Column(String(1000), nullable=True)
    # Persistent local storage path.
    path = Column(String(400), nullable=True)
    # Public download URL(s), newline-separated (see download_url_html).
    download_url = Column(String(1000), nullable=True)
    # Number of records/entries in the dataset.
    entries_num = Column(String(200), nullable=True, default='')
    # Total duration (relevant for audio/video datasets).
    duration = Column(String(200), nullable=True, default='')
    # Price or cost of the dataset.
    price = Column(String(200), nullable=True, default='0')
    # Deployment region.
    region = Column(
        String(100), nullable=False, default='default', server_default='default', comment='地区'
    )
    # ID of this dataset in the external annotation system.
    annotation_id = Column(
        String(100),
        nullable=False,
        default='default',
        server_default='default',
        comment='标注数据集id',
    )
    # ID of the annotation task associated with this dataset.
    annotation_task_id = Column(
        String(100),
        nullable=False,
        default='default',
        server_default='default',
        comment='标注任务数据集id',
    )
    # How the dataset was sourced (upload, label, data_process, internal, flowback).
    source = Column(
        String(100),
        nullable=True,
        default='',
        comment='数据集来源，upload，label，data_process，internal, flowback',
    )
    # Current processing status of the dataset.
    status = Column(
        String(200),
        nullable=True,
        default='uploading',
        comment='数据集状态，uploading、processing、succeed、failed',
    )
    # Storage size of the dataset files.
    storage_size = Column(String(200), nullable=True, default='0', comment='文件大小')
    # Extra information stored as a JSON string (see fail_msg).
    expand = Column(Text(65536), nullable=True, default='{}', comment='扩展信息')
    # Original filename when uploaded as a single file.
    filename = Column(String(200), nullable=True, default='', comment='文件名')
    # Link to the original dataset when this row is a derived version;
    # 0 means this row itself is the original.
    origin_id = Column(
        Integer, nullable=True, default=0, comment='原始数据集id，0表示本身为原始数据集'
    )
    # General type of the data (image/audio/txt/multiple/other).
    data_type = Column(
        String(200),
        nullable=True,
        default='other',
        comment='数据类型，image/audio/txt/multiple/other',
    )
    # Type of labeling task performed on the data.
    label_type = Column(
        String(200),
        nullable=True,
        default='',
        comment='标注模式，mage_caption，image_classification，object_detection，image_division',
    )
    # Labeling status flag: 0 = unlabeled, 1 = labeled.
    # NOTE: this is an Integer column, so the default is the int 0
    # (the previous string '0' was a type mismatch).
    labeled = Column(Integer, nullable=True, default=0, comment='标注状态，0未标注，1已标注')
    # Format of the labels, if any.
    labeled_format = Column(String(30), nullable=True, default='default', comment='已标注格式')
    # Path to the dataset in a training-ready (standard) format.
    dataset_path = Column(
        String(200), nullable=True, default='/', comment='可用于训练的数据集地址（标准格式）'
    )
    # Path where the dataset archive was unpacked.
    unpack_path = Column(String(200), nullable=True, default='/', comment='解压后的地址')
    # Path of the original uploaded file(s).
    origin_path = Column(String(200), nullable=True, default='/', comment='原始文件地址')
    # Upload method: package (archive), files (multiple files), or directory.
    upload_type = Column(
        String(100),
        nullable=True,
        default='/',
        comment='上传方式，压缩包-package，多文件-files，目录-directory',
    )

    # Owner (username) of the dataset.
    owner = Column(String(200), nullable=True)
    # Parent dataset entity this version belongs to.
    dataset_id = Column(Integer, nullable=True, comment='关联的数据集ID')
    # Publication status: unpublished / publishing / published.
    pub_status = Column(
        String(200),
        nullable=False,
        default='',
        server_default='',
        comment='发布状态: unpublished(未发布)，publishing（发布中）published（发布完成）',
    )
    # Last error message, if any.
    err_msg = Column(String(200), nullable=False, server_default='', comment='错误信息')
    # Progress (percentage) of an import or processing job.
    progress = Column(Integer, nullable=False, default=0, comment='导入进度')

    def __repr__(self):
        """Use the dataset name as its string representation."""
        return self.name

    @property
    def fail_msg(self):
        """Failure message stored under 'fail_msg' in the 'expand' JSON field.

        Returns '' when 'expand' is null, is not valid JSON, is not a JSON
        object, or carries no (truthy) 'fail_msg' entry.  A malformed
        'expand' payload must not break list/detail views, so JSON errors
        are swallowed here instead of propagating.
        """
        if self.expand is None:
            return ''
        try:
            cur_expand = json.loads(self.expand)
        except (ValueError, TypeError):
            return ''
        if not isinstance(cur_expand, dict):
            return ''
        return cur_expand.get('fail_msg') or ''

    @property
    def is_directory(self):
        """True unless the dataset was uploaded as a compressed package."""
        return self.upload_type != 'package'

    def _links_html(self, raw):
        """Render a newline-separated URL string as clickable HTML links.

        Returns '' for an empty value; otherwise a Markup-wrapped <div>
        with one anchor per non-blank line.  Shared by url_html and
        download_url_html, which previously duplicated this logic.
        """
        if not raw:
            return ''
        html = ''
        for url in raw.split('\n'):
            url = url.strip()
            if url:
                html += f'<a target=_blank href="{url}">{url}</a><br>'
        return Markup(f'<div>{html}</div>')

    @property
    def url_html(self):
        """The 'url' field rendered as clickable HTML links."""
        return self._links_html(self.url)

    @property
    def icon_html(self):
        """HTML <img> tag for the dataset's icon, with a default image fallback."""
        img_url = self.icon if self.icon else '/static/assets/images/dataset.png'
        url = (
            f'<a target=_blank href="{img_url}">'
            f'<img height="50px" width="50px" src="{img_url}">'
            '</a>'
        )
        return Markup(url)

    @property
    def download_url_html(self):
        """The 'download_url' field rendered as clickable HTML links."""
        return self._links_html(self.download_url)

    @property
    def username(self):
        """Alias for 'owner'."""
        return self.owner

    @property
    def is_alluxio_storage(self):
        """Whether the STORAGE_MEDIA env var names a MinIO-compatible backend."""
        return os.getenv('STORAGE_MEDIA') in ['MINIO', 'minio', 'arm', 'ARM']

    def gen_unpack_path(self):
        """Generate a unique '<owner>/unpack/<timestamp>/' storage path.

        Raises:
            Exception: if the dataset has no owner.
        """
        if not self.owner:
            raise Exception('用户名不能为空')
        return f'{self.owner}/unpack/{int(time.time())}/'

    def gen_origin_path(self):
        """Generate a unique storage path for the original upload.

        Directory uploads (or uploads without a filename) get a trailing
        slash; single-file uploads keep the file's lowercased extension.

        Raises:
            Exception: if the dataset has no owner.
        """
        if not self.owner:
            raise Exception('用户名不能为空')

        filename_is_null = self.filename in [None, '', 0, '0']
        if self.is_directory or filename_is_null:
            return f'{self.owner}/origin/{int(time.time())}/'

        ext = os.path.splitext(self.filename)[-1].lower()
        return f'{self.owner}/origin/{int(time.time())}{ext}'

    def get_obs_path(self):
        """Construct the object-storage path for the dataset.

        A path carrying an explicit 'obs://' scheme is returned with the
        scheme stripped; all other paths are prefixed with the appropriate
        bucket.
        """
        # Match the full scheme, not just the letters 'obs': a relative
        # path that merely begins with 'obs' (e.g. 'obs_data/x') must still
        # receive a bucket prefix.
        if self.dataset_path.startswith('obs://'):
            return self.dataset_path.replace('obs://', '')
        # Legacy datasets 13-15 live in the default bucket on non-MinIO storage.
        if self.id in [13, 14, 15] and not self.is_alluxio_storage:
            return f'{bucketName}/{self.dataset_path}'
        # Default case: the standard cube bucket.
        return f'{cube_bucket}/{self.dataset_path}'


# Define the DatasetV2 class, a newer, simplified version of the Dataset model.
class DatasetV2(Model, AuditMixinNullable):
    """Second-generation dataset record: a slimmed-down subset of Dataset."""

    __tablename__ = 'dataset_v2'

    # Auto-incrementing surrogate primary key.
    id = Column(Integer, primary_key=True, autoincrement=True)
    # Dataset display name.
    name = Column(String(200), nullable=False, default='')
    # General type of the data (image/audio/txt/multiple/other).
    data_type = Column(String(200), nullable=False, default='', comment='数据类型，image/audio/txt/multiple/other')
    # Type of labeling task performed on the data.
    label_type = Column(String(200), nullable=False, default='', comment='标注模式，mage_caption，image_classification，object_detection，image_division')
    # How the dataset was sourced (upload, internal, flowback).
    source = Column(String(100), nullable=False, default='', comment='数据集来源，upload，internal, flowback')
    # Detailed free-text description.
    describe = Column(String(2000), nullable=False, default='')
    # Deployment region (auto-filled by the before_insert listener).
    region = Column(String(100), nullable=False, default='default', server_default='default', comment='地区')


# This function is a SQLAlchemy event listener.
# It's triggered just before a new DatasetV2 record is inserted into the database.
@event.listens_for(DatasetV2, 'before_insert')
def before_insert(mapper, connection, target):
    """Stamp a new DatasetV2 row with the current request's region.

    SQLAlchemy fires this just before INSERT.  The region is read from
    Flask's application-context global 'g'; when no context exists (e.g.
    rows created from scripts or background jobs) or 'g.region' is unset,
    the row falls back to the 'default' region.
    """
    try:
        target.region = g.region.key
    except Exception:
        # Accessing 'g' outside an app context raises, and 'g.region' may
        # be missing even inside one — both cases get the default region.
        # (A leftover debug print was removed from the success path.)
        target.region = 'default'
