# Import necessary modules
import logging
import os
import time

# Import the importer factory
from myapp.biz.dataset.importer import factory
# Import the shared mount directory constant
from myapp.const.base import SharedMntDir
# Import dataset status and merge path prefix constants
from myapp.const.dataset import EnumDatasetStatus, MergePathPrefix
# Import the Dataset model
from myapp.models.model_dataset import Dataset


# Define a function to merge datasets by their IDs
def merge_dataset_ids(dataset_ids, username, session):
    """Merge the files of several successfully-imported datasets into one.

    All selected datasets must share the same ``data_type`` and
    ``label_type``; the first usable dataset fixes the expected types.

    Args:
        dataset_ids: iterable of ``Dataset`` primary keys to merge.
        username: owner name; used to build the unique output directory.
        session: SQLAlchemy session used to query the ``Dataset`` table.

    Returns:
        The merged dataset's path relative to ``SharedMntDir``, or ``''``
        when nothing can be merged (no eligible datasets, mixed types, or
        no files found).
    """
    # Lazy %s args avoid formatting the message when the level is disabled.
    logging.info('[merge_dataset_ids] 开始融合数据dataset_ids: %s', dataset_ids)
    # Only datasets that finished importing successfully are eligible.
    datasets = (
        session.query(Dataset)
        .filter(Dataset.id.in_(dataset_ids), Dataset.status == EnumDatasetStatus.succeed.value)
        .all()
    )
    files_path = []
    data_type = ''
    label_type = ''
    for dataset in datasets:
        # Skip datasets with a missing or root-level path.
        if not dataset.dataset_path or dataset.dataset_path == '/':
            continue
        if not data_type and not label_type:
            # First usable dataset establishes the expected types.
            data_type = dataset.data_type
            label_type = dataset.label_type
        elif data_type != dataset.data_type or label_type != dataset.label_type:
            # Mixed data/label types cannot be merged; this aborts the
            # merge, so log it at warning level (was incorrectly info).
            logging.warning('[merge_dataset_ids] 数据格式不一致')
            return ''
        if data_type == 'txt':
            # A txt dataset is represented by the first file found
            # anywhere under its directory (os.walk order is arbitrary —
            # assumes each txt dataset holds a single file; TODO confirm).
            directory = os.path.join(SharedMntDir, dataset.dataset_path)
            for root, _, files in os.walk(directory):
                if files:
                    files_path.append(os.path.join(root, files[0]))
                    break
        elif data_type == 'multiple':
            # A 'multiple' dataset contributes its whole directory.
            files_path.append(os.path.join(SharedMntDir, dataset.dataset_path))
    # Nothing collected — nothing to merge.
    if not files_path:
        return ''
    logging.info('[merge_dataset_ids] 要融合的数据集: %s', files_path)
    importer = factory.build_importer(data_type, label_type)
    # Unix timestamp keeps the destination directory unique per user.
    dataset_path = f'{username}_dataset_{int(time.time())}/'

    # Path returned to the caller, relative to the shared mount.
    output_path = os.path.join(username, MergePathPrefix, dataset_path)
    # Absolute destination on the shared mount.
    dest_path = os.path.join(SharedMntDir, output_path)
    importer.import_to(files_path, dest_path)
    logging.info('[merge_dataset_ids] dest_path: %s', dest_path)
    return output_path