# Import necessary modules
import os
import shutil

# Import flask global object
from flask import g
# Import constants for shared mount directory
from myapp.const.base import SharedMntDir
# Import dataset related constants
from myapp.const.dataset import (
    EnumDatasetDataType,
    EnumDatasetPublishStatus,
    EnumDatasetSource,
    EnumDatasetStatus,
    MsgKeyDatasetImporting,
    OriginPathPrefix,
)
# Import common error codes
from myapp.const.error import CommonErrorCode
# Import business error class
from myapp.const.response import BizError
# Import dataset models
from myapp.models.model_dataset import Dataset, DatasetV2
# Import user model
from myapp.models.model_user import MyUser
# Import message queue producer
from myapp.third.mq.producer import send_msg
# Import annotation utility
from myapp.utils.annotation import download_annotation_file


# Define a function to add a new dataset
def add_dataset(req, username, session):
    """
    Create a new dataset and its initial version ('v1'), then notify the
    import pipeline through the message queue.

    Args:
        req: Request payload dict with keys 'name', 'data_type',
            'label_type', 'files' and optional 'source'.
        username: Name of the user creating the dataset.
        session: Database session.

    Returns:
        dict: {'id': <id of the newly created dataset>}.

    Raises:
        BizError: If 'name' is missing, no files are given, the data type is
            unsupported, or any file path is malformed / missing on disk.
    """
    name = req.get('name')
    data_type = req.get('data_type')
    label_type = req.get('label_type')
    files = req.get('files')
    # Default to "upload" when the caller does not specify a source.
    source = req.get('source', EnumDatasetSource.upload.value)

    if not name:
        raise BizError(CommonErrorCode.INVALID_INPUT_PARAM, 'name 必填')

    if not files:
        raise BizError(CommonErrorCode.INVALID_INPUT_PARAM, '请选择至少1个文件')

    if not EnumDatasetDataType.exist_key(data_type):
        raise BizError(CommonErrorCode.INVALID_INPUT_PARAM, f'不支持的数据类型: {data_type}')

    origin_path = []
    for item in files:
        # Uploaded files must live under the expected upload prefix.
        if not item.startswith(OriginPathPrefix):
            raise BizError(CommonErrorCode.INVALID_INPUT_PARAM, f'文件上传路径错误: {item}')
        # The path must point at an existing file on the shared mount.
        if not os.path.isfile(os.path.join(SharedMntDir, item)):
            raise BizError(CommonErrorCode.INVALID_INPUT_PARAM, f'文件不存在: {item}')
        # Only the base name is persisted; the prefix is implied.
        origin_path.append(os.path.basename(item))

    dataset = DatasetV2()
    dataset.name = name
    dataset.data_type = data_type
    dataset.label_type = label_type
    dataset.source = source
    dataset.region = g.region.key

    session.add(dataset)
    # Flush so dataset.id is populated for the version row below.
    session.flush()

    dataset_version = Dataset()
    dataset_version.region = g.region.key
    dataset_version.dataset_id = dataset.id
    dataset_version.origin_path = ','.join(origin_path)
    dataset_version.data_type = data_type
    dataset_version.label_type = label_type
    dataset_version.source = source
    dataset_version.name = 'v1'
    dataset_version.describe = ''
    # New versions start out unpublished and in the processing state.
    dataset_version.status = EnumDatasetStatus.processing.value
    dataset_version.pub_status = EnumDatasetPublishStatus.unpublished.value
    session.add(dataset_version)
    session.flush()

    # Publish the import event; committing inside the context keeps the DB
    # row and the MQ message consistent with each other.
    with send_msg(
        MsgKeyDatasetImporting,
        {'id': dataset_version.id, 'username': username, 'x_request_id': g.x_request_id},
    ):
        session.commit()

    return {'id': dataset.id}


# Define a function to add a dataset with base annotation
def add_dataset_base_annotation(req, username, session):
    """
    Create a new dataset (and its 'v1' version) from an existing annotated
    dataset version, copying the annotation file into the new version's path.

    Args:
        req: Request payload dict with keys 'name' and 'version_id'.
        username: Name of the user creating the dataset.
        session: Database session.

    Returns:
        dict: {'id': <id of the newly created dataset>}.

    Raises:
        BizError: If 'name' is missing, the source dataset version does not
            exist, or downloading the annotation file fails.
    """
    name = req.get('name')
    version_id = req.get('version_id')

    if not name:
        raise BizError(CommonErrorCode.INVALID_INPUT_PARAM, 'name 必填')

    ds = session.query(Dataset).filter(Dataset.id == version_id).first()
    # Guard against an unknown/missing version_id; without this check the
    # attribute accesses below would raise AttributeError on None.
    if ds is None:
        raise BizError(CommonErrorCode.NOT_FOUND, '数据集版本不存在')

    if ds.status == EnumDatasetStatus.processing.value:
        # The base annotation dataset is still in progress: try to pull the
        # annotation file now.
        res = download_annotation_file(str(version_id), ds.annotation_task_id, ds.dataset_path)
        if res == 1:
            # Download succeeded: mark the source version as finished.
            session.query(Dataset).filter(Dataset.id == ds.id).update(
                {'status': EnumDatasetStatus.succeed.value, 'progress': 100, 'labeled': 1}
            )
            session.commit()
        else:
            # Download failed: record the failure and abort.
            session.query(Dataset).filter(Dataset.id == ds.id).update(
                {'status': EnumDatasetStatus.failed.value}
            )
            session.commit()
            raise BizError(CommonErrorCode.INTERNAL_ERROR, '下载数据标注文件失败')

    dataset = DatasetV2()
    dataset.name = name
    dataset.data_type = ds.data_type
    dataset.label_type = ds.label_type
    dataset.source = 'annotation'
    dataset.region = g.region.key

    session.add(dataset)
    # Flush so dataset.id is populated for the version row below.
    session.flush()

    dataset_version = Dataset()
    dataset_version.region = g.region.key
    dataset_version.dataset_id = dataset.id
    dataset_version.origin_path = ds.origin_path
    dataset_version.data_type = ds.data_type
    dataset_version.label_type = ds.label_type
    dataset_version.source = 'annotation'
    dataset_version.name = 'v1'
    dataset_version.describe = ''
    dataset_version.entries_num = ds.entries_num
    dataset_version.status = ds.status
    dataset_version.pub_status = EnumDatasetPublishStatus.unpublished.value
    dataset_version.progress = ds.progress
    dataset_version.labeled = ds.labeled
    session.add(dataset_version)
    # Flush to obtain the new version id, needed to build its storage path.
    session.flush()
    dataset_version.dataset_path = username + '/dataset/' + str(dataset_version.id) + '/'
    session.commit()

    source_file_list = os.listdir(SharedMntDir + ds.dataset_path)
    # Nothing to copy for an empty source directory.
    if len(source_file_list) == 0:
        return {'id': dataset.id}

    # Destination directory for the copied annotation file.
    file_path = SharedMntDir + dataset_version.dataset_path
    os.makedirs(file_path, exist_ok=True)
    # os.listdir already returns base names; only the first file is copied,
    # matching the original behavior.
    first_file = source_file_list[0]
    shutil.copy(
        SharedMntDir + ds.dataset_path + first_file,
        file_path + first_file,
    )
    return {'id': dataset.id}


# Define a function to get version information
def get_version_info(vid, db_session, user):
    """
    Fetch details for a single dataset version, enforcing visibility rules.

    Args:
        vid: The dataset version id to look up.
        db_session: Database session.
        user: Current user; admins see everything, tenant admins see their
            tenant's versions, regular users see only their own.

    Returns:
        dict: Version fields plus the parent dataset's name (None when no
            parent dataset row is joined).

    Raises:
        BizError: If no matching dataset version is visible to the user.
    """
    q = (
        db_session.query(Dataset, DatasetV2)
        .outerjoin(DatasetV2, DatasetV2.id == Dataset.dataset_id)
        .filter(Dataset.id == vid)
    )

    # Restrict visibility according to the caller's role; full admins are
    # not filtered at all.
    if not user.is_admin:
        if user.is_tenant_admin:
            q = q.outerjoin(MyUser, MyUser.id == Dataset.created_by_fk).filter(
                MyUser.tenant_id == user.tenant_id
            )
        else:
            q = q.filter(Dataset.created_by_fk == user.id)

    row = q.first()
    if row is None:
        raise BizError(CommonErrorCode.NOT_FOUND, '数据集版本不存在')

    version, parent = row
    time_fmt = '%Y-%m-%d %H:%M:%S'
    return {
        'id': version.id,
        'name': version.name,
        'dataset_name': parent.name if parent else None,
        'source': version.source,
        'dataset_id': version.dataset_id,
        'pub_status': version.pub_status,
        'data_type': version.data_type,
        'label_type': version.label_type,
        'dataset_path': version.dataset_path,
        'status': version.status,
        'entries_num': version.entries_num,
        'progress': version.progress,
        'err_msg': version.err_msg,
        'created_on': version.created_on.strftime(time_fmt),
        'changed_on': version.changed_on.strftime(time_fmt),
    }