# +----------------------------------------------------------------------
# | ChatWork智能聊天办公系统
# +----------------------------------------------------------------------
# | 软件声明: 本系统并非自由软件,未经授权任何形式的商业使用均属非法。
# | 版权保护: 任何企业和个人不允许对程序代码以任何形式任何目的复制/分发。
# | 授权要求: 如有商业使用需求,请务必先与版权所有者取得联系并获得正式授权。
# +----------------------------------------------------------------------
# | Author: ChatWork Team <2474369941@qq.com>
# +----------------------------------------------------------------------
import math
import time
from datetime import datetime
from events import scheduler
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from apscheduler.triggers.date import DateTrigger
from tortoise.expressions import Q
from common.enums.apply import DocsEnum
from common.enums.models import ModelsEnum
from common.enums.public import CrontabEnum
from common.models.dev import DevModelsModel
from common.models.sys import SysCrontabModel
from common.models.docs import DocsFilesModel
from common.postgres.public import PgDocumentsModel
from common.chain.vector_server import VectorService

# Dedicated in-process scheduler used to run handle_task jobs in the background
sche = AsyncIOScheduler()


async def execute(**kwargs):
    """
    畅阅训练任务 (document-training crontab entry point).

    Expected kwargs:
        w_ix (int): worker index; only index 1 runs (single concurrency).
        w_id (int): crontab record id, used for run-time bookkeeping.
        w_job (str): scheduler job id of this crontab entry.
        process (int, optional): max files processed concurrently,
            clamped to [1, 50]; defaults to 10.
    """
    start_time = time.time()
    crontab_ix: int = int(kwargs["w_ix"])
    crontab_id: int = int(kwargs["w_id"])
    crontab_job: str = str(kwargs["w_job"])
    max_process: int = int(kwargs.get("process") or "10")
    # Clamp concurrency to a sane range [1, 50]
    max_process = max(min(max_process, 50), 1)

    # BUGFIX: the original called scheduler.pause_job() exactly when the job
    # was MISSING, which always raises JobLookupError. If the job has been
    # removed from the scheduler there is nothing to run - just bail out.
    if not scheduler.get_job(crontab_job):
        return

    # 当仅支持单并发 (only worker index 1 may run this task)
    if crontab_ix != 1:
        return

    try:
        await dispatch(max_process)
        await SysCrontabModel.compute(crontab_id, start_time)
    except Exception as e:
        print(str(e))
        await SysCrontabModel.compute(crontab_id, start_time, status=CrontabEnum.CRON_ERROR, error=str(e))


async def dispatch(max_process: int = 5):
    """
    调拨分配文档训练任务 (allocate free slots to pending document files).

    Args:
        max_process (int): 表示最多可同时处理多少个文件

    Author:
        zero
    """
    # Compute how many processing slots are currently free
    running_count: int = await DocsFilesModel.filter(status=DocsEnum.FILE_ING, is_delete=0).count()
    surplus_count: int = max_process - running_count
    # BUGFIX: a negative surplus was truthy in the original and produced a
    # negative limit(); bail out unless at least one slot is free.
    if surplus_count <= 0:
        return

    # 2分钟更新时间没变化可能进程挂了 (重新处理)
    # Prefer stalled "in progress" files: if update_time has not moved for
    # 2 minutes the worker that owned them probably died, so retry them.
    stalled_before = int(time.time()) - 120
    files = await (DocsFilesModel
                   .filter(is_delete=0)
                   .filter(status=DocsEnum.FILE_ING)
                   .filter(update_time__lte=stalled_before)
                   .limit(surplus_count)
                   .order_by("update_time", "-status")
                   .all())

    # No stalled files: pick up freshly waiting files instead
    if not files:
        files = await (DocsFilesModel
                       .filter(is_delete=0)
                       .filter(status=DocsEnum.FILE_WAIT)
                       .limit(surplus_count)
                       .order_by("id")
                       .all())

    if not files:
        return

    # Mark the whole batch "in progress" before scheduling so a concurrent
    # dispatch() cannot pick the same files up again.
    wait_lists = [item.id for item in files]
    await (DocsFilesModel
           .filter(is_delete=0)
           .filter(id__in=wait_lists)
           .update(
                status=DocsEnum.FILE_ING,
                # int timestamp for consistency with the rest of this module
                update_time=int(time.time())
           ))

    for item in files:
        print("文档任务: " + str(item.id))
        params: dict = {"file_id": item.id}
        sche.add_job(handle_task, DateTrigger(run_date=datetime.now()), kwargs=params)
    # BUGFIX: sche.start() raises SchedulerAlreadyRunningError when called
    # on a running scheduler; only start it the first time.
    if not sche.running:
        sche.start()


async def handle_task(**kwargs):
    """
    这是一个后台任务 (不会阻塞请求进程)
    该方法主要是拿到要处理的文件对里面的数据向量化

    Expected kwargs:
        file_id (int): id of the DocsFilesModel row to vectorise.
    """
    # 接收参数
    file_id: int = int(kwargs.get("file_id") or "0")

    try:
        # 查询文件 — silently succeed if the row no longer exists
        file = await DocsFilesModel.filter(id=file_id).first()
        if file is None:
            return True

        # 验证文件 — file was deleted after being queued: fail its chunks
        if file.is_delete:
            # BUGFIX: PgDocumentsModel chunk rows use the TRAIN_* statuses
            # everywhere else in this module (_train_chunks, _verify_status,
            # _update_train_fail); the original wrote the file-level
            # FILE_FAIL status here.
            return await PgDocumentsModel.filter(file_id=file_id).update(
                error="文档已被删除",
                status=DocsEnum.TRAIN_FAIL
            )

        # 向量模型 — validate the configured embedding model up front
        try:
            await DevModelsModel.check_models(models=file.vector_models, scene=ModelsEnum.TYPE_VECTOR)
        except Exception as e:
            # BUGFIX: DocsFilesModel file rows use the FILE_* statuses
            # everywhere else in this module; the original wrote the
            # chunk-level TRAIN_FAIL status here.
            return await DocsFilesModel.filter(id=file_id).update(
                error=str(e),
                status=DocsEnum.FILE_FAIL
            )

        # 循环向量化处理 — batches of 10; "+ 5" gives slack for retried chunks
        total_count: int = await PgDocumentsModel.filter(file_id=file.id, status=0).count()
        loops_count: int = math.ceil(total_count / 10) + 5
        for _ in range(loops_count):
            status = await _train_chunks(file.id, file.vector_models)
            if status or await _verify_status(file.id):
                break
            # Keep update_time fresh so dispatch() does not treat this file
            # as stalled while it is still being worked on (int timestamp
            # for consistency with the rest of this module).
            await DocsFilesModel.filter(id=file_id).update(update_time=int(time.time()))
    except Exception as e:
        await _update_train_fail(file_id=file_id, uuids=[], error=str(e))


async def _train_chunks(file_id: int, vector_models: str) -> bool | None:
    """
    使用向量模型训练 (embed one batch of up to 10 pending chunks).

    Args:
         file_id (int): 文件ID
         vector_models (str): 向量模型,如: zhipu:text-embedding-3

    Returns:
        bool | None: True=解析结束(出错了),None=继续

    Author:
        zero
    """
    order = ["-create_time", "chunk_index"]
    where = [Q(file_id=file_id), Q(status__in=[DocsEnum.TRAIN_WAIT, DocsEnum.TRAIN_ING])]
    lists = await (PgDocumentsModel.filter(*where).order_by(*order).limit(10).all())

    # BUGFIX: with no pending chunks the original still called the embedding
    # API with empty texts and then wrongly marked the file as failed.
    # Return None and let the caller's _verify_status() finish the file.
    if not lists:
        return None

    uuids = [item.uuid for item in lists]
    texts = [item.chunk_texts for item in lists]

    # 调整状态 — mark the batch in-progress so it is not picked up twice
    await PgDocumentsModel.filter(uuid__in=uuids).update(status=DocsEnum.TRAIN_ING)

    # 发起请求 — call the embedding service
    try:
        vector_service = VectorService()
        embedding_arr = await vector_service.to_embed(vector_models, texts)
    except Exception as e:
        # BUGFIX: e.args[1] raised IndexError for exceptions with fewer
        # than two args; fall back to str(e) in that case.
        error: str = e.args[1] if len(e.args) > 1 else str(e)
        return await _update_train_fail(file_id=file_id, uuids=uuids, error=error)

    # 更新结果
    if not embedding_arr:
        return await _update_train_fail(file_id=file_id, uuids=uuids, error="Emb Loop parsing failed")

    # Persist each chunk's embedding as a pgvector-style "[v1,v2,...]" literal
    for index, item in enumerate(lists):
        emb_arr = embedding_arr[index]
        item.status = DocsEnum.TRAIN_YES
        item.embedding = "[" + ",".join(str(s) for s in emb_arr) + "]"
        item.dimension = len(emb_arr)
        item.vector_model = vector_models
        item.update_time = int(time.time())
        await item.save()
    return None


async def _verify_status(file_id: int) -> bool:
    """
    验证是否完成 (check whether every chunk of the file has been trained).

    Args:
         file_id (int): 文件ID

    Returns:
        bool: True=已完成, False=未完成

    Author:
        zero
    """
    pending_states = [DocsEnum.TRAIN_WAIT, DocsEnum.TRAIN_ING]
    pending = await PgDocumentsModel.filter(file_id=file_id, status__in=pending_states).count()
    if pending > 0:
        return False

    # No chunk left to train: flag the whole file as finished
    await DocsFilesModel.filter(id=file_id).update(
        status=DocsEnum.FILE_YES,
        update_time=int(time.time())
    )
    return True


async def _update_train_fail(file_id: int, uuids: list, error: str) -> bool:
    """
    标记训练失败 (flag the file as failed and reset its chunks).

    Args:
         file_id (int): 文件ID
         uuids (List[str]): 向量库数据ID
         error (str): 失败原因

    Returns:
        bool: True=表示处理完成,不要让程序再往下执行了。

    Author:
        zero
    """
    now_ts = int(time.time())

    # Record the failure reason on the file row itself
    await DocsFilesModel.filter(id=file_id).update(
        error=error,
        status=DocsEnum.FILE_FAIL,
        update_time=now_ts
    )

    # Put the affected chunks back to "waiting" so a later run may retry them
    if uuids:
        await PgDocumentsModel.filter(uuid__in=uuids).update(
            error=error,
            status=DocsEnum.TRAIN_WAIT,
            update_time=now_ts
        )
    return True
