# 导入base64模块，用于进行Base64编码和解码
# Import the base64 module for Base64 encoding and decoding
import base64
# 导入concurrent.futures模块，用于异步执行任务
# Import the concurrent.futures module for asynchronous task execution
import concurrent.futures
# 导入glob模块，用于查找符合特定规则的文件路径名
# Import the glob module to find file paths matching a specified pattern
import glob
# 导入json模块，用于处理JSON数据
# Import the json module for handling JSON data
import json
# 导入logging模块，用于记录日志
# Import the logging module for logging
import logging
# 导入os模块，用于与操作系统交互
# Import the os module for interacting with the operating system
import os

# 从myapp.biz.evaluation导入评估相关的业务逻辑函数
# Import business logic functions related to evaluation from myapp.biz.evaluation
from myapp.biz.evaluation import calc_result_when_auto_mode, calculate_evaluation_task_result
# 从myapp.const.evaluation导入评估相关的常量和枚举
# Import evaluation-related constants and enumerations from myapp.const.evaluation
from myapp.const.evaluation import (
    EnumEvaluationDatasetStatus,
    EnumEvaluationTaskMode,
    EnumEvaluationTaskStatus,
    MsgKeyTaskCreated,
    MsgKeyTaskInfer,
    MsgKeyTaskInferDone,
    MsgKeyTaskRetry,
    MsgKeyTaskSubmit,
)
# 从myapp.consumer.app导入MsgContext和app实例
# Import MsgContext and the app instance from myapp.consumer.app
from myapp.consumer.app import MsgContext, app
# 从myapp.models.model_evaluation_task导入评估相关的数据库模型
# Import evaluation-related database models from myapp.models.model_evaluation_task
from myapp.models.model_evaluation_task import EvaluationDataset, EvaluationTask
# 导入maas_api模块，用于调用模型即服务（MaaS）的API
# Import the maas_api module to call the Model as a Service (MaaS) API
import myapp.third.maas.api as maas_api
# 从myapp.third.mq.producer导入消息发送函数
# Import the message sending function from myapp.third.mq.producer
from myapp.third.mq.producer import send_msg_and_flush
# 从myapp.utils.sess导入数据库会话管理相关的工具
# Import database session management related tools from myapp.utils.sess
from myapp.utils.sess import scan_table, session_scope


# 获取一个名为__name__的logger实例
# Get a logger instance named __name__
log = logging.getLogger(__name__)
# 从环境变量中获取评估的最大问答对数量，默认为100
# Get the maximum number of QA pairs for evaluation from environment variables, default is 100
EVALUATION_MAX_QA_NUM = int(os.environ.get('EVALUATION_MAX_QA_NUM', 100))
# 从环境变量中获取评估的最大并发数，默认为5
# Get the maximum number of concurrent evaluations from environment variables, default is 5
EVALUATION_MAX_CONCURRENT_NUM = int(os.environ.get('EVALUATION_MAX_CONCURRENT_NUM', 5))


# 定义一个函数，用于将图片文件编码为Base64字符串
# Define a function to encode an image file into a Base64 string
def encode_image(image_path):
    """Read the file at *image_path* and return its contents as a Base64 string.

    The file is opened in binary mode; the Base64 bytes are decoded to a
    utf-8 ``str`` suitable for embedding in a ``data:`` URL.
    """
    with open(image_path, 'rb') as fh:
        raw = fh.read()
    return base64.b64encode(raw).decode('utf-8')


# 定义处理任务创建事件的函数
# Define a function to handle task creation events
def handle_task_created(ctx: MsgContext, message):
    """Fan a newly created evaluation task's dataset out as inference messages.

    Parses the task-created message, validates the task and its dataset
    directory, deduplicates QA pairs by hash, and sends them in batches of
    EVALUATION_MAX_CONCURRENT_NUM as MsgKeyTaskInfer messages, followed by one
    MsgKeyTaskInferDone message. Finally flips the task from pending to
    running and records the total QA count.

    Raises:
        Exception: when the dataset directory is missing or contains no
            ``*.jsonl`` file, so the caller can mark the task as failed.
    """
    # Log the incoming context and raw message for traceability.
    log.info(f'ctx:{ctx}, data: {message}')

    # Parse the JSON-formatted message body.
    data = json.loads(message)
    dataset_path = data.get('dataset_path')
    mode = data.get('mode')
    task_id = data.get('id')
    # Without a task id there is nothing to update; drop the message.
    if task_id is None:
        log.error('tak_id 为None')
        return

    # Model configurations to evaluate against, and the creator's FK.
    models = data.get('models')
    created_by_fk = data.get('created_by_fk')

    # Validate the task row before doing any expensive work.
    with session_scope() as session:
        res = session.query(EvaluationTask).filter(EvaluationTask.id == task_id).first()
        # Task was deleted (or never existed): nothing to do.
        if res is None:
            log.error(f'任务不存在，task_id:{task_id}')
            return

        # Task is already running — avoid dispatching the dataset twice.
        if res.status == EnumEvaluationTaskStatus.running.value:
            log.error(f'任务已运行，task_id:{task_id}')
            return

        # A missing dataset directory is a hard failure: raise so the caller
        # marks the task as failed.
        if dataset_path is None:
            log.error(f'缺少数据集目录，task_id:{task_id}')
            raise Exception(f'缺少数据集目录，task_id:{task_id}')
    # Collect all .jsonl files in the dataset directory; only the first one
    # found is actually read below.
    json_files = glob.glob(os.path.join(dataset_path, '*.jsonl'))
    if len(json_files) == 0:
        log.error(f'数据集目录下没有json文件，task_id:{task_id}')
        raise Exception(f'数据集目录下没有json文件，task_id:{task_id}')
    # Branch on model type: plain-text LLM datasets vs image-text datasets.
    if data.get('model_type') == 'llm':
        # --- Text-only (LLM) dataset: one JSON object per line (JSON Lines). ---
        qa_hash = {}
        data_json = json_files[0]
        items = []
        with open(data_json, encoding='utf-8') as file:
            # Stream the file line by line to avoid loading it all at once.
            for line in file:
                # Cap the number of distinct QA pairs per task.
                if len(qa_hash) >= EVALUATION_MAX_QA_NUM:
                    log.info(f'最多支持前{EVALUATION_MAX_QA_NUM}条')
                    break

                line = line.strip()
                qa = json.loads(line)
                conversations = qa.get('conversations', [])
                question = ''
                answer = ''
                # Extract the question/answer pair; two schemas are supported:
                # {'from': 'question'/'answer', 'value': ...} and
                # {'role': 'user'/'assistant', 'content': ...}. The loop stops
                # at the first answer, so only the first turn is used.
                for item in conversations:
                    if item.get('from') == 'question':
                        question = item.get('value', '')
                    if item.get('from') == 'answer':
                        answer = item.get('value', '')
                        break
                    if item.get('role') == 'user':
                        question = item.get('content', '')
                    if item.get('role') == 'assistant':
                        answer = item.get('content', '')
                        break
                # Deduplicate QA pairs by their content hash.
                data_hash = EvaluationDataset.calc_hash(question, answer)
                exist = qa_hash.get(data_hash)
                if exist:
                    log.info('发现重复的QA对，已跳过')
                    continue

                qa_hash[data_hash] = True
                items.append({'input': question, 'output': answer, 'detail': models})
                # Flush a full batch as one inference message; hash_key pins
                # all messages of a task to the same partition.
                if len(items) >= EVALUATION_MAX_CONCURRENT_NUM:
                    send_msg_and_flush(
                        MsgKeyTaskInfer,
                        {
                            'task_id': task_id,
                            'mode': mode,
                            'created_by_fk': created_by_fk,
                            'items': items,
                            'model_type': data.get('model_type'),
                        },
                        hash_key=str(task_id),
                    )
                    items = []
        # Flush any remaining partial batch.
        if len(items) > 0:
            send_msg_and_flush(
                MsgKeyTaskInfer,
                {
                    'task_id': task_id,
                    'mode': mode,
                    'created_by_fk': created_by_fk,
                    'items': items,
                    'model_type': data.get('model_type'),
                },
                hash_key=str(task_id),
            )
    else:
        # --- Image-text dataset: despite the .jsonl extension, the file is
        # parsed as a single JSON array. ---
        qa_hash = {}
        data_json = json_files[0]
        items = []
        with open(data_json, encoding='utf-8') as file:
            all_content = file.read()
            json_obj = json.loads(all_content)
            for line in json_obj:
                if len(qa_hash) >= EVALUATION_MAX_QA_NUM:
                    log.info(f'最多支持前{EVALUATION_MAX_QA_NUM}条')
                    break
                # Two record schemas: entries with an 'id' carry
                # conversations[0].question/answer + 'image'; otherwise the
                # messages[] form with an '<image>' placeholder is used.
                if line.get('id') is not None:
                    question = line.get('conversations')[0].get('question')
                    answer = line.get('conversations')[0].get('answer')
                    pic_path = line.get('image')
                else:
                    question = line.get('messages')[0].get('content').replace('<image>', '')
                    answer = line.get('messages')[1].get('content')
                    pic_path = line.get('images')[0]
                # Dedup by (image path, answer) rather than (question, answer).
                data_hash = EvaluationDataset.calc_hash(pic_path, answer)
                exist = qa_hash.get(data_hash)
                if exist:
                    log.info('发现重复的QA对，已跳过')
                    continue
                qa_hash[data_hash] = True
                items.append(
                    {
                        'input': question,
                        'output': answer,
                        'detail': models,
                        # NOTE(review): plain string concatenation — assumes
                        # dataset_path ends with a separator (or pic_path is
                        # already relative with one); confirm, else os.path.join
                        # would be safer.
                        'pic_path': dataset_path + pic_path,
                    }
                )
                if len(items) >= EVALUATION_MAX_CONCURRENT_NUM:
                    send_msg_and_flush(
                        MsgKeyTaskInfer,
                        {
                            'task_id': task_id,
                            'mode': mode,
                            'created_by_fk': created_by_fk,
                            'items': items,
                            'model_type': data.get('model_type'),
                        },
                        hash_key=str(task_id),
                    )
                    items = []
        # Flush any remaining partial batch.
        if len(items) > 0:
            send_msg_and_flush(
                MsgKeyTaskInfer,
                {
                    'task_id': task_id,
                    'mode': mode,
                    'created_by_fk': created_by_fk,
                    'items': items,
                    'model_type': data.get('model_type'),
                },
                hash_key=str(task_id),
            )
    # Signal downstream that all inference messages for this task are out.
    send_msg_and_flush(
        MsgKeyTaskInferDone,
        {
            'task_id': task_id,
            'mode': mode,
            'changed_by_fk': created_by_fk,
            'model_type': data.get('model_type'),
        },
        hash_key=str(task_id),
    )
    # Flip the task from pending to running and record how many distinct QA
    # pairs were dispatched (len(qa_hash) from whichever branch ran).
    with session_scope() as session:
        session.query(EvaluationTask).filter(
            EvaluationTask.id == task_id,
            EvaluationTask.status == EnumEvaluationTaskStatus.pending.value,
        ).update(
            {
                'status': EnumEvaluationTaskStatus.running.value,
                'total_cnt': len(qa_hash),
                'changed_by_fk': created_by_fk,
            }
        )
        session.commit()


# 注册评估任务创建的消费者
# Register a consumer for evaluation task creation
@app.task(MsgKeyTaskCreated)
def evaluation_task_created(ctx: MsgContext, message):
    """Consumer for task-created messages.

    Delegates the actual work to handle_task_created; if that raises, the task
    row is marked as failed with the exception text stored in err_msg.
    """
    log.info(f'ctx:{ctx}, data: {message}')

    payload = json.loads(message)
    task_id = payload.get('id')
    if task_id is None:
        log.error('tak_id 为None')
        return

    try:
        handle_task_created(ctx, message)
    except Exception as e:
        log.error('处理消息出错', exc_info=True)
        # Persist the failure so the UI can surface the error to the user.
        failure = {'status': EnumEvaluationTaskStatus.failed.value, 'err_msg': str(e)}
        with session_scope() as db:
            db.query(EvaluationTask).filter(
                EvaluationTask.id == task_id,
            ).update(failure)
            db.commit()


# 注册评估任务提交的消费者
# Register a consumer for evaluation task submission
@app.task(MsgKeyTaskSubmit)
def evaluation_task_submit(ctx: MsgContext, message):
    """Consumer for task-submit messages: aggregates a task's final result.

    Only tasks currently in the ``submit`` state are aggregated; any other
    state (or a missing task/id) is logged and skipped. Aggregation errors are
    logged but deliberately not re-raised, so the message is not retried.

    Fix: use the module-level ``log`` logger (``logging.getLogger(__name__)``)
    instead of the root logger via ``logging.error``, consistent with every
    other consumer in this module.
    """
    msg = json.loads(message)
    if not msg:
        log.error('msg为空')
        return
    task_id = msg.get('task_id')
    if not task_id:
        log.error('task_id为空')
        return
    with session_scope() as dbsession:
        task = dbsession.query(EvaluationTask).filter_by(id=task_id).first()
        if not task:
            log.error('评测任务不存在')
            return
        # Only aggregate when the task has actually been submitted for scoring.
        if task.status != EnumEvaluationTaskStatus.submit.value:
            log.error('评测任务状态不是submit,不进行结果汇总')
            return
        try:
            # Compute and persist the evaluation result for this task.
            calculate_evaluation_task_result(task, dbsession)
        except Exception:
            log.error('汇总结果报错', exc_info=True)


# 注册评估任务重试的消费者
# Register a consumer for evaluation task retry
@app.task(MsgKeyTaskRetry)
def evaluation_task_retry(ctx: MsgContext, message):
    """Consumer for task-retry messages.

    Moves the task from pending to running, re-dispatches every failed dataset
    row of the task as MsgKeyTaskInfer batches marked ``is_retry``, and closes
    with a MsgKeyTaskInferDone message.
    """
    payload = json.loads(message)
    task_id = payload.get('id')
    mode = payload.get('mode')
    created_by_fk = payload.get('created_by_fk')

    # Flip the task back to running before re-dispatching its failed rows.
    with session_scope() as db:
        db.query(EvaluationTask).filter(
            EvaluationTask.id == task_id,
            EvaluationTask.status == EnumEvaluationTaskStatus.pending.value,
        ).update(
            {'status': EnumEvaluationTaskStatus.running.value, 'changed_by_fk': created_by_fk}
        )
        db.commit()

    def _dispatch(rows, owner_task_id, owner_fk):
        # Send one retry inference message; hash_key keeps all of a task's
        # messages on the same partition.
        send_msg_and_flush(
            MsgKeyTaskInfer,
            {
                'task_id': owner_task_id,
                'created_by_fk': owner_fk,
                'items': rows,
                'is_retry': True,
            },
            hash_key=str(task_id),
        )

    # Scan all failed dataset rows of this task and batch them for retry.
    batch = []
    for page, _ in scan_table(
        EvaluationDataset,
        filters=[
            EvaluationDataset.task_id == task_id,
            EvaluationDataset.status == EnumEvaluationDatasetStatus.failed.value,
        ],
    ):
        for row in page:
            batch.append(
                {
                    'input': row.input,
                    'output': row.output,
                    'detail': json.loads(row.detail),
                }
            )

            if len(batch) >= EVALUATION_MAX_CONCURRENT_NUM:
                # Full batches attribute the message to the dataset row's owner.
                _dispatch(batch, row.task_id, row.created_by_fk)
                batch = []

    if batch:
        # The trailing partial batch is attributed to the message's own fields.
        _dispatch(batch, task_id, created_by_fk)

    # Signal downstream that all retry inference messages are out.
    send_msg_and_flush(
        MsgKeyTaskInferDone,
        {
            'task_id': task_id,
            'mode': mode,
            'changed_by_fk': created_by_fk,
        },
        hash_key=str(task_id),
    )


# 注册评估任务推理的消费者
# Register a consumer for evaluation task inference
@app.task(MsgKeyTaskInfer)
def evaluation_task_infer(ctx: MsgContext, message):
    """Consumer for inference messages: run each item through every configured
    model concurrently and persist the results.

    For first-time items (``is_retry`` false) results are inserted as new
    EvaluationDataset rows (items already present for the task are skipped);
    for retries, existing failed rows are updated in place. Per-model outputs
    are written into each item's ``detail``; an item missing any model output
    is marked failed.
    """
    msg = json.loads(message)
    task_id = msg.get('task_id')
    is_retry = msg.get('is_retry', False)
    created_by_fk = msg.get('created_by_fk')
    model_type = msg.get('model_type')
    items = msg.get('items', [])
    # Without a task id there is nothing to attach results to.
    if task_id is None:
        log.error('tak_id 为None')
        return

    # Bail out early if the task was deleted while the message was in flight.
    with session_scope() as session:
        res = session.query(EvaluationTask.id).filter(EvaluationTask.id == task_id).first()
        if res is None:
            log.error('推理任务已被删除')
            return

    # Normalize incoming items, skipping (on non-retry) any already persisted
    # for this task (matched by content hash).
    data = []
    for item in items:
        input_val = item.get('input')
        output_val = item.get('output')
        hash_val = EvaluationDataset.calc_hash(input_val, output_val)
        detail = item.get('detail')
        pic_path = item.get('pic_path')

        if not is_retry:
            with session_scope() as session:
                res = (
                    session.query(EvaluationDataset.id)
                    .filter(
                        EvaluationDataset.task_id == task_id, EvaluationDataset.hash == hash_val
                    )
                    .first()
                )
                # Duplicate delivery / re-processed message: skip this item.
                if res:
                    log.error('推理数据已存在')
                    continue

        data.append(
            {
                'input': input_val,
                'output': output_val,
                'hash': hash_val,
                'detail': detail,
                'status': EnumEvaluationDatasetStatus.un_evaluated.value,
                'pic_path': pic_path,
            }
        )

    if len(data) == 0:
        log.error('推理数据为空')
        return

    # Fan out one inference call per (item, model) pair. Models that already
    # carry an output (e.g. from a partial earlier run) are skipped.
    # NOTE(review): max_workers = models_num * len(data) assumes detail is
    # non-empty; an empty model list would make ThreadPoolExecutor raise —
    # confirm upstream guarantees at least one model.
    models_num = len(data[0].get('detail'))
    err_msg = None
    with concurrent.futures.ThreadPoolExecutor(max_workers=models_num * len(data)) as executor:
        all_futures = []
        for item in data:
            detail = item.get('detail')
            for model in detail:
                o = model.get('output')
                if o and len(o) > 0:
                    continue
                if model_type == 'llm':
                    # Text-only inference: pass the raw question string.
                    all_futures.append(
                        {'future': executor.submit(infer, item['input'], model), 'data': model}
                    )
                else:
                    # Image-text inference: build an OpenAI-style multimodal
                    # message with the image inlined as a base64 data URL.
                    message_content = [
                        {'type': 'text', 'text': item['input']},
                        {
                            'type': 'image_url',
                            'image_url': {
                                'url': f"data:image/jpeg;base64,{encode_image(item['pic_path'])}"
                            },
                        },
                    ]
                    all_futures.append(
                        {'future': executor.submit(infer, message_content, model), 'data': model}
                    )
        # Collect results; each model dict gets its 'output' filled in. Any
        # failure is logged and remembered (last error message wins).
        for item in all_futures:
            d = item.get('data')
            future = item.get('future')
            try:
                out = future.result()
                d['output'] = out
            except Exception as e:
                log.error('调用推理服务出错', exc_info=True)
                err_msg = str(e)
    # Surface the (last) inference error on the task row without failing it.
    if err_msg is not None:
        with session_scope() as session:
            session.query(EvaluationTask).filter(
                EvaluationTask.id == task_id,
            ).update({'err_msg': err_msg})
            session.commit()

    # An item missing any model output is marked failed (eligible for retry).
    for item in data:
        detail = item.get('detail')
        for model in detail:
            if model.get('output') is None:
                item['status'] = EnumEvaluationDatasetStatus.failed.value
    # Retry path: update existing failed rows in place, matched by hash.
    if is_retry:
        with session_scope() as session:
            for item in data:
                hash_val = item.get('hash')
                detail = item.get('detail')
                status = item.get('status')
                session.query(EvaluationDataset).filter(
                    EvaluationDataset.task_id == task_id,
                    EvaluationDataset.hash == hash_val,
                    EvaluationDataset.status == EnumEvaluationDatasetStatus.failed.value,
                ).update(
                    {
                        'detail': json.dumps(detail, ensure_ascii=False),
                        'status': status,
                        'changed_by_fk': created_by_fk,
                    }
                )
                session.commit()
            return

    # First-run path: insert each item as a new dataset row.
    # NOTE(review): pic_path is carried in `data` but never persisted here —
    # confirm whether EvaluationDataset is meant to store it.
    with session_scope() as session:
        for item in data:
            dataset = EvaluationDataset()
            dataset.status = item.get('status')
            dataset.task_id = task_id
            dataset.input = item.get('input')
            dataset.output = item.get('output')
            dataset.hash = item.get('hash')
            dataset.detail = json.dumps(item.get('detail'), ensure_ascii=False)
            dataset.changed_by_fk = created_by_fk
            dataset.created_by_fk = created_by_fk

            session.add(dataset)
            session.commit()


# 注册评估任务推理完成的消费者
# Register a consumer for evaluation task inference completion
@app.task(MsgKeyTaskInferDone)
def evaluation_task_infer_done(ctx: MsgContext, message):
    """Consumer for inference-completion messages: finalize the task's status
    and counts once all inference batches have been processed.

    Computes the number of successfully inferred (un_evaluated) rows, derives
    the next task status, and updates the task row — but only if the task is
    still in the running state.
    """
    data = json.loads(message)
    task_id = data.get('task_id')
    changed_by_fk = data.get('changed_by_fk')
    mode = data.get('mode')
    if task_id is None:
        log.error('tak_id 为None')
        return

    # In auto mode the overall result is computed immediately.
    # NOTE(review): `mode` comes from JSON, so this compares a raw value
    # against the enum member itself (not .value) — confirm
    # EnumEvaluationTaskMode supports that comparison (e.g. str/int enum).
    result = {}
    if mode == EnumEvaluationTaskMode.auto:
        result = {'overall_evaluation': calc_result_when_auto_mode(task_id)}

    with session_scope() as session:
        # Fetch only total_cnt; `task` is a one-element row tuple.
        task = session.query(EvaluationTask.total_cnt).filter(EvaluationTask.id == task_id).first()
        if task is None:
            log.error(f'评测任务{task_id}已被删除')
            return

        # Count rows that were inferred successfully and await evaluation.
        cnt = (
            session.query(EvaluationDataset)
            .filter(
                EvaluationDataset.task_id == task_id,
                EvaluationDataset.status == EnumEvaluationDatasetStatus.un_evaluated.value,
            )
            .count()
        )
        # NOTE(review): every row un_evaluated (total == cnt) -> evaluating;
        # any shortfall (some rows failed) -> done. Confirm this is the
        # intended mapping rather than an inverted condition.
        status = (
            EnumEvaluationTaskStatus.evaluating.value
            if task[0] == cnt
            else EnumEvaluationTaskStatus.done.value
        )
        # Auto mode skips human evaluation: evaluating becomes resulted.
        if mode == EnumEvaluationTaskMode.auto:
            status = (
                EnumEvaluationTaskStatus.resulted.value
                if status == EnumEvaluationTaskStatus.evaluating.value
                else status
            )
        # No successful inference at all means the task failed outright.
        if cnt == 0:
            status = EnumEvaluationTaskStatus.failed.value

        # Guarded update: only transition tasks still in the running state.
        session.query(EvaluationTask).filter(
            EvaluationTask.id == task_id,
            EvaluationTask.status == EnumEvaluationTaskStatus.running.value,
        ).update(
            {
                'valid_cnt': cnt,
                'status': status,
                'changed_by_fk': changed_by_fk,
                'result': json.dumps(result, ensure_ascii=False),
            }
        )
        session.commit()


# 定义推理函数
# Define the inference function
def infer(question=None, config=None):
    """Run one chat-completion inference against the MaaS API.

    ``question`` is either a plain prompt string or a multimodal content list;
    ``config`` supplies 'model_code', optional 'infer_url' and optional
    'parameters' (forwarded as extra keyword arguments). Retries are disabled.
    """
    parameters = config.get('parameters', {})
    return maas_api.chat_completions(
        question,
        retry_times=0,
        model=config.get('model_code'),
        infer_url=config.get('infer_url', ''),
        **parameters,
    )