# 导入json模块，用于处理JSON数据
# Import the json module for handling JSON data
import json
# 导入logging模块，用于记录日志
# Import the logging module for logging
import logging
# 导入os模块，用于与操作系统交互
# Import the os module for interacting with the operating system
import os
# 从concurrent.futures模块导入ThreadPoolExecutor，用于创建线程池
# Import ThreadPoolExecutor from the concurrent.futures module to create a thread pool
from concurrent.futures import ThreadPoolExecutor

# 从myapp.biz.dataset.aug导入gen_augmented_data函数，用于生成增强数据
# Import the gen_augmented_data function from myapp.biz.dataset.aug for generating augmented data
from myapp.biz.dataset.aug import gen_augmented_data
# 从myapp.configs.dataset_aug导入DATASET_AUG_CONFIGS，获取数据集增强配置
# Import DATASET_AUG_CONFIGS from myapp.configs.dataset_aug to get dataset augmentation configurations
from myapp.configs.dataset_aug import DATASET_AUG_CONFIGS
# 从myapp.const.base导入SharedMntDir，表示共享挂载目录
# Import SharedMntDir from myapp.const.base, representing the shared mount directory
from myapp.const.base import SharedMntDir
# 从myapp.const.dataset导入EnumDatasetStatus，表示数据集状态的枚举
# Import EnumDatasetStatus from myapp.const.dataset, representing the enumeration of dataset statuses
from myapp.const.dataset import EnumDatasetStatus
# 从myapp.const.dataset_aug导入数据集增强相关的常量和枚举
# Import dataset augmentation related constants and enumerations from myapp.const.dataset_aug
from myapp.const.dataset_aug import MsgKeyDatasetAugTaskCreated, MsgKeyDatasetAugTaskInfer, \
    MsgKeyDatasetAugTaskInferDone, EnumDatasetAugStatus
# 从myapp.consumer.app导入app和MsgContext
# Import app and MsgContext from myapp.consumer.app
from myapp.consumer.app import app, MsgContext
# 从myapp.models.model_dataset导入Dataset模型
# Import the Dataset model from myapp.models.model_dataset
from myapp.models.model_dataset import Dataset
# 从myapp.models.model_dataset_aug导入DatasetAug模型
# Import the DatasetAug model from myapp.models.model_dataset_aug
from myapp.models.model_dataset_aug import DatasetAug
# 从myapp.third.mq.producer导入transaction，用于事务性消息发送
# Import transaction from myapp.third.mq.producer for transactional message sending
from myapp.third.mq.producer import transaction
# 从myapp.utils.exception导入log_exception，用于记录异常日志
# Import log_exception from myapp.utils.exception for logging exceptions
from myapp.utils.exception import log_exception
# 从myapp.utils.sess导入session_scope，用于管理数据库会话
# Import session_scope from myapp.utils.sess to manage database sessions
from myapp.utils.sess import session_scope

# Module-level logger named after this module, per the standard
# logging.getLogger(__name__) convention.
log = logging.getLogger(__name__)


# Consumer task registered under the MsgKeyDatasetAugTaskCreated message key.
@app.task(MsgKeyDatasetAugTaskCreated)
def dataset_aug_created(ctx: MsgContext, message):
    """Handle a newly created dataset augmentation task.

    Seeds the destination dataset file with the source samples, splits the
    requested number of generated samples into batched inference messages and
    fans them out through a transactional producer, then moves the task from
    "pending" to "processing".

    Args:
        ctx: message context supplied by the consumer framework (unused here).
        message: JSON string describing the task; see the field reads below.
    """
    # Decode the task description from the message payload.
    data = json.loads(message)

    # Task identity, source/destination locations and generation quota.
    dataset_aug_id = data.get('id')
    dataset_id = data.get('dataset_id')
    username = data.get('username')
    src_dataset_version_id = data.get('src_dataset_version_id')
    src_dataset_path = data.get('src_dataset_path')
    # Number of samples in the source dataset (used to split the quota).
    src_dataset_num = data.get('src_dataset_num')
    dst_dataset_path = data.get('dst_dataset_path')
    dst_dataset_version_id = data.get('dst_dataset_version_id')
    # Total number of augmented samples to generate for this task.
    gen_sample_num = data.get('gen_sample_num')
    created_by_fk = data.get('created_by_fk')

    # Upper bound on samples requested by a single inference work item.
    max_batch_size = DATASET_AUG_CONFIGS.get('max_batch_size', 5)
    # Verify the task still exists and is pending before doing any work.
    with session_scope() as session:
        dataset_aug = session.query(DatasetAug).filter(DatasetAug.id == dataset_aug_id,
                                                       DatasetAug.status == EnumDatasetAugStatus.pending.value).first()

        # Unknown (or already started) task: nothing to do.
        if dataset_aug is None:
            log.error(f"数据增强任务{dataset_aug_id}不存在")

            return

    # The source dataset directory must contain at least one file.
    all_files = os.listdir(os.path.join(SharedMntDir, src_dataset_path))
    if all_files is None or len(all_files) == 0:
        log.error(f"数据增强任务{dataset_aug_id}源数据集文件不存在")
        # Empty source directory: mark both the destination dataset version
        # and the augmentation task as failed, then stop.
        with session_scope() as session:
            session.query(Dataset).filter(Dataset.id == dst_dataset_version_id,
                                          Dataset.status == EnumDatasetStatus.processing.value).update(
                {"status": EnumDatasetStatus.failed.value, "changed_by_fk": created_by_fk})

            session.query(DatasetAug).filter(DatasetAug.id == dataset_aug_id,
                                             DatasetAug.status == EnumDatasetAugStatus.pending.value).update(
                {"err_msg": f"源数据集目录{src_dataset_path}为空", 'status': EnumDatasetAugStatus.failed.value,
                 "changed_by_fk": created_by_fk})

        return

    # Use the first file in the source directory as the dataset file.
    # NOTE(review): assumes the directory holds exactly one data file — confirm.
    if all_files:
        data_json = all_files[0]
        data_json = os.path.join(SharedMntDir, src_dataset_path, data_json)
    # Destination file keeps the source file's name.
    file_name = os.path.basename(data_json)
    # Ensure the destination directory exists.
    os.makedirs(os.path.join(SharedMntDir, dst_dataset_path), exist_ok=True)
    dst_dataset_file = os.path.join(SharedMntDir, dst_dataset_path, file_name)
    # Seed the destination file with the source samples (blank lines dropped);
    # generated samples are appended to this file later by the infer consumer.
    with open(dst_dataset_file, 'w', encoding='utf-8') as dst_file:
        with open(data_json, 'r', encoding='utf-8') as file:
            for line in file:
                line = line.strip()
                if line is None or len(line) == 0:
                    continue

                dst_file.write(line + '\n')

    # Hash key keeps all messages of one task on the same partition.
    key = str(dataset_aug_id)
    # Average number of samples to generate per source sample; may be
    # fractional, clamped to at least 1 below.
    gen_data_num_per = gen_sample_num / src_dataset_num
    cnt = 0  # source samples consumed so far
    num = 0  # generated samples scheduled so far
    if gen_data_num_per < 1:
        gen_data_num_per = 1

    # All inference messages are sent inside one producer transaction so the
    # fan-out is all-or-nothing.
    with transaction() as producer:

        # Buffered per-sample work items for the next inference message.
        msgs = []

        # Send the buffered work items as a single inference message.
        def flush_msgs():
            if len(msgs) > 0:
                producer.send_msg(MsgKeyDatasetAugTaskInfer,
                                  {"dataset_aug_id": dataset_aug_id, "dst_dataset_file": dst_dataset_file,
                                   "dst_dataset_version_id": dst_dataset_version_id,
                                   "samples": msgs, "created_by_fk": created_by_fk},
                                  hash_key=key)
                msgs.clear()

        # Buffer one work item; flush once the batch reaches the configured
        # concurrency level.
        def batch_send_infer_msg(msg_data):
            if msg_data is None:
                return

            msgs.append(
                {"sample": msg_data['sample'], "gen_num": msg_data['gen_num'], "progress": msg_data['progress']})
            if len(msgs) >= DATASET_AUG_CONFIGS.get('concurrent_num', 1):
                flush_msgs()

        # Walk the source samples and schedule generation work until the
        # requested total is reached.
        with open(data_json, 'r', encoding='utf-8') as file:
            for line in file:
                if num >= gen_sample_num:
                    break

                line = line.strip()
                if line is None or len(line) == 0:
                    continue

                # Cumulative-quota split: after cnt+1 samples the schedule
                # should total gen_data_num_per * (cnt + 1); this sample gets
                # the remainder, so fractional quotas even out over time.
                gen_num = int(gen_data_num_per * (cnt + 1) - num)
                # Chunk this sample's quota into pieces of max_batch_size.
                n = gen_num // max_batch_size
                m = gen_num % max_batch_size
                for i in range(n):
                    batch_send_infer_msg(
                        {"sample": line, "gen_num": max_batch_size, "progress": int(num * 100 / gen_sample_num)})
                    num = num + max_batch_size
                if m > 0:
                    batch_send_infer_msg(
                        {"sample": line, "gen_num": m, "progress": int(num * 100 / gen_sample_num)})
                    num = num + m

                cnt = cnt + 1

        # Send any partially filled batch.
        flush_msgs()
        # Signal that all inference messages for this task have been queued.
        producer.send_msg(MsgKeyDatasetAugTaskInferDone, {
            "dataset_aug_id": dataset_aug_id,
            "dst_dataset_file": dst_dataset_file,
            "src_dataset_num": src_dataset_num,
            "dst_dataset_version_id": dst_dataset_version_id,
            "created_by_fk": created_by_fk
        }, hash_key=key)

        # Make sure everything is on the wire before committing the DB change.
        producer.flush()
        # Move the task from pending to processing with progress 0.
        with session_scope() as session:
            session.query(DatasetAug).filter(DatasetAug.id == dataset_aug_id,
                                             DatasetAug.status == EnumDatasetAugStatus.pending.value).update(
                {"status": EnumDatasetAugStatus.processing.value, "progress": 0, "changed_by_fk": created_by_fk})
            session.commit()


@app.task(MsgKeyDatasetAugTaskInfer)
def dataset_aug_infer(ctx: MsgContext, message):
    """Consume one inference batch of a dataset augmentation task.

    Generates the requested number of augmented variants for each source
    sample in the batch (concurrently, one worker thread per sample),
    appends the results to the destination dataset file, and updates the
    task/dataset progress.

    Args:
        ctx: message context supplied by the consumer framework (unused here).
        message: JSON string with keys dataset_aug_id, dst_dataset_file,
            dst_dataset_version_id, samples (list of dicts with sample,
            gen_num, progress) and created_by_fk.
    """
    # Decode the batch description from the message payload.
    data = json.loads(message)
    dataset_aug_id = data["dataset_aug_id"]
    dst_dataset_file = data["dst_dataset_file"]
    samples = data["samples"]
    created_by_fk = data["created_by_fk"]
    dst_dataset_version_id = data["dst_dataset_version_id"]

    # Nothing to do for an empty batch. This also guards the executor below:
    # ThreadPoolExecutor raises ValueError for max_workers=0.
    if not samples:
        return

    # Total number of samples this batch must contribute, and the overall
    # task progress carried by the last item of the batch.
    total_gen_num = sum(item.get('gen_num') for item in samples)
    progress = samples[-1].get('progress')

    # Skip the batch when the augmentation task no longer exists or has
    # already left the "processing" state.
    with session_scope() as session:
        dataset_aug = session.query(DatasetAug.id) \
            .filter(DatasetAug.id == dataset_aug_id,
                    DatasetAug.status.in_([EnumDatasetAugStatus.processing.value])).first()

        if dataset_aug is None:
            log.error(f"数据增强任务{dataset_aug_id}不存在")
            return

    def get_augmented_data(sample, gen_num):
        """Generate gen_num augmented variants of sample.

        Calls gen_augmented_data repeatedly until enough variants are
        collected; stops early (returning what was collected so far) if a
        call yields nothing, to avoid spinning forever on a failing sample.
        """
        total = []
        while len(total) < gen_num:
            lst = gen_augmented_data(sample)
            if len(lst) == 0:
                log.error(f"数据增强任务{dataset_aug_id}生成样本失败,样本: {sample}")
                break
            total.extend(lst)
        # gen_augmented_data may return more than needed per call; cap at
        # gen_num so one sample cannot crowd out later samples when the
        # combined result is truncated below.
        return total[:gen_num]

    # Generate augmented data for all samples concurrently, one worker per
    # sample. Exceptions from individual futures are logged via log_exception
    # and that sample contributes nothing.
    all_samples = []
    with ThreadPoolExecutor(max_workers=len(samples)) as executor:
        futures = [executor.submit(get_augmented_data, item.get('sample'), item.get('gen_num'))
                   for item in samples]
        for future in futures:
            ret = []
            with log_exception:
                ret = future.result()
            all_samples.extend(ret)

    # Safety net: never write more than the batch's total quota.
    all_samples = all_samples[:total_gen_num]
    # Append the generated samples to the destination file. Explicit UTF-8:
    # the file was created with encoding='utf-8', so appending must match
    # rather than depend on the locale default.
    with open(dst_dataset_file, "a", encoding='utf-8') as f:
        for item in all_samples:
            f.write(item + '\n')

    # Propagate the batch's progress to both the destination dataset version
    # and the augmentation task record.
    with session_scope() as session:
        session.query(Dataset).filter(Dataset.id == dst_dataset_version_id,
                                      Dataset.status == EnumDatasetStatus.processing.value).update(
            {"progress": progress, "changed_by_fk": created_by_fk})

        session.query(DatasetAug).filter(DatasetAug.id == dataset_aug_id,
                                         DatasetAug.status == EnumDatasetAugStatus.processing.value).update(
            {"progress": progress, "changed_by_fk": created_by_fk})

        session.commit()


def _count_nonempty_lines(path):
    """Count the non-blank lines in the UTF-8 text file at *path*."""
    with open(path, 'r', encoding='utf-8') as file:
        return sum(1 for line in file if line.strip())


# Consumer task registered under the MsgKeyDatasetAugTaskInferDone message key.
@app.task(MsgKeyDatasetAugTaskInferDone)
def dataset_aug_infer_done(ctx: MsgContext, message):
    """Finalize a dataset augmentation task.

    Counts the samples that ended up in the destination dataset file and
    marks both the destination dataset version and the augmentation task as
    succeeded, recording how many samples were actually generated.

    Args:
        ctx: message context supplied by the consumer framework (unused here).
        message: JSON string with keys dataset_aug_id, dst_dataset_version_id,
            dst_dataset_file, created_by_fk and src_dataset_num.
    """
    # Decode the completion notice from the message payload.
    data = json.loads(message)

    dataset_aug_id = data["dataset_aug_id"]
    dst_dataset_version_id = data["dst_dataset_version_id"]
    dst_dataset_file = data["dst_dataset_file"]
    created_by_fk = data["created_by_fk"]
    src_dataset_num = data["src_dataset_num"]

    # The destination file holds the copied source samples plus everything
    # the infer consumers appended; blank lines are not samples.
    cnt = _count_nonempty_lines(dst_dataset_file)

    # Mark the dataset version and the augmentation task as succeeded.
    # processed_num excludes the src_dataset_num source samples that were
    # merely copied over, leaving only the generated count.
    with session_scope() as session:
        session.query(Dataset).filter(Dataset.id == dst_dataset_version_id,
                                      Dataset.status == EnumDatasetStatus.processing.value).update(
            {"entries_num": str(cnt), "status": EnumDatasetStatus.succeed.value, "changed_by_fk": created_by_fk})

        session.query(DatasetAug).filter(DatasetAug.id == dataset_aug_id,
                                         DatasetAug.status == EnumDatasetAugStatus.processing.value).update(
            {"err_msg": "", "progress": 100, 'status': EnumDatasetAugStatus.succeed.value,
             "processed_num": cnt - src_dataset_num,
             "changed_by_fk": created_by_fk})
        session.commit()