# +----------------------------------------------------------------------
# | ChatWork智能聊天办公系统
# +----------------------------------------------------------------------
# | 软件声明: 本系统并非自由软件,未经授权任何形式的商业使用均属非法。
# | 版权保护: 任何企业和个人不允许对程序代码以任何形式任何目的复制/分发。
# | 授权要求: 如有商业使用需求,请务必先与版权所有者取得联系并获得正式授权。
# +----------------------------------------------------------------------
# | Author: ChatWork Team <2474369941@qq.com>
# +----------------------------------------------------------------------
import json
import time
from events import scheduler
from common.enums.apply import MusicEnum
from common.enums.apply import VideoEnum
from common.enums.apply import DrawsEnum
from common.enums.agent import AgentEnum
from common.enums.wallet import WalletEnum
from common.enums.public import CrontabEnum
from common.cache.queue_cache import QueueCache
from common.models.sys import SysCrontabModel
from common.models.users import UserModel
from common.models.users import UserWalletModel
from common.models.music import MusicRecordsModel
from common.models.video import VideoRecordsModel
from common.models.draws import DrawsRecordsModel
from common.models.agents import AgentKnowModel
from common.models.agents import AgentKnowArchiveModel
from common.models.agents import AgentKnowSplittingModel
from common.postgres.public import PgKnowledgeModel


async def execute(**kwargs):
    """
    Task-scheduler entry point: dispatch pending work into the job queues.

    Expected kwargs:
        w_ix: worker index; only index 1 is allowed to run (single concurrency).
        w_id: crontab record id, used to report run statistics.
        w_job: scheduler job identifier.
        em/qa/draws/video/music: optional per-queue batch sizes (strings),
            each clamped to the range [1, 100].
    """
    start_time = time.time()
    crontab_ix: int = int(kwargs["w_ix"])
    crontab_id: int = int(kwargs["w_id"])
    crontab_job: str = str(kwargs["w_job"])
    # NOTE(review): this pauses the job when get_job() returns nothing —
    # pausing a job the scheduler does not know about looks inverted and
    # may raise inside the scheduler; confirm the intended condition.
    if not scheduler.get_job(crontab_job):
        scheduler.pause_job(crontab_job)

    # Only a single concurrent worker (index 1) may run the distribution.
    if crontab_ix != 1:
        return

    try:
        # Clamp each queue's batch size to [1, 100] before dispatching.
        await _distribute_em(max(min(int(kwargs.get("em", "50")), 100), 1))
        await _distribute_qa(max(min(int(kwargs.get("qa", "20")), 100), 1))
        await _distribute_draws(max(min(int(kwargs.get("draws", "10")), 100), 1))
        await _distribute_video(max(min(int(kwargs.get("video", "10")), 100), 1))
        await _distribute_music(max(min(int(kwargs.get("music", "10")), 100), 1))

        # Record a successful run with its elapsed time.
        await SysCrontabModel.compute(crontab_id, start_time)
    except Exception as e:
        # Record the failed run along with the error message.
        await SysCrontabModel.compute(crontab_id, start_time, status=CrontabEnum.CRON_ERROR, error=str(e))


async def _distribute_em(max_process: int):
    """
    Dispatch knowledge-base embedding tasks into the worker queue.

    Args:
        max_process (int): maximum number of records to dispatch at once.

    Author:
        zero
    """
    # Skip this cycle when the queue already has pending items.
    if await QueueCache.get_queue_length(QueueCache.EM_JOB):
        return True

    # Re-dispatch records stuck in the queued/in-progress states.
    stalled = await (PgKnowledgeModel
                     .filter(status__in=[AgentEnum.EMB_LINE, AgentEnum.EMB_ING])
                     .filter(is_delete=0)
                     .limit(max_process)
                     .order_by("create_time")
                     .values("uuid"))
    uuids = [str(row["uuid"]) for row in stalled]

    # Fill any remaining capacity with waiting records.
    remaining = max_process - len(uuids)
    if remaining > 0:
        waiting = await (PgKnowledgeModel
                         .filter(status=AgentEnum.EMB_WAIT)
                         .filter(is_delete=0)
                         .limit(remaining)
                         .order_by("create_time")
                         .values("uuid"))
        uuids += [str(row["uuid"]) for row in waiting]

    if not uuids:
        return None

    # Mark the selected records as queued before pushing them.
    await (PgKnowledgeModel
           .filter(uuid__in=uuids)
           .filter(is_delete=0)
           .update(status=AgentEnum.EMB_LINE, update_time=time.time()))

    # Push each record onto the embedding queue.
    for record_uuid in uuids:
        await QueueCache.push_em(record_uuid)
    return None


async def _distribute_qa(max_queue_num: int):
    """
    Load document QA-splitting tasks into the worker queue.

    Args:
        max_queue_num (int): maximum number of tasks allowed in the queue.
    """
    # Stop when the queue is already at capacity.
    if await QueueCache.get_queue_length(QueueCache.QA_JOB) >= max_queue_num:
        return True

    # Finalize archives whose splitting tasks may have all completed.
    pending_archives = await AgentKnowArchiveModel.filter(qa_status=AgentEnum.EMB_ING).limit(5).values("id")
    for archive_row in pending_archives:
        await _check_qa_complete(archive_row["id"])

    # Re-dispatch tasks that have been stuck for over 15 minutes.
    stalled = await (AgentKnowSplittingModel
                     .filter(status__in=[AgentEnum.EMB_LINE, AgentEnum.EMB_ING])
                     .filter(update_time__lt=int(time.time()) - (60 * 15))
                     .limit(max_queue_num)
                     .order_by("id")
                     .values("id"))
    task_ids = [int(row["id"]) for row in stalled]

    # Fill any remaining capacity with waiting tasks.
    remaining = max_queue_num - len(task_ids)
    if remaining > 0:
        waiting = await (AgentKnowSplittingModel
                         .filter(status=AgentEnum.EMB_WAIT)
                         .limit(remaining)
                         .order_by("id")
                         .values("id"))
        task_ids += [int(row["id"]) for row in waiting]

    if not task_ids:
        return None

    # Mark the selected tasks as queued before pushing them.
    await (AgentKnowSplittingModel
           .filter(id__in=task_ids)
           .update(status=AgentEnum.EMB_LINE, update_time=time.time()))

    # Push each task id onto the QA queue.
    for task_id in task_ids:
        await QueueCache.push_qa(str(task_id))
    return None


async def _distribute_draws(max_process: int):
    """
    Load drawing tasks into the worker queue.

    Fails tasks that have run past a 10-minute deadline (refunding the
    points they consumed), then fills the queue with waiting tasks.

    Args:
        max_process (int): maximum number of records to dispatch at once.
    """
    # Skip this cycle when the queue already has pending items.
    if await QueueCache.get_queue_length(QueueCache.DRAW_JOB):
        return True

    # Find tasks still running past the 10-minute deadline.
    timeout_secs: int = int(10 * 60)
    now: int = int(time.time())
    expired = await (DrawsRecordsModel
                     .filter(is_delete=0)
                     .filter(status=DrawsEnum.STATUS_ING)
                     .filter(start_time__lte=(now - timeout_secs))
                     .filter(engine__in=["mj", "doubao", "dalle"])
                     .limit(max_process)
                     .order_by("id")
                     .values("id", "user_id", "points", "create_time"))

    for row in expired:
        # Mark the task as failed due to timeout.
        await DrawsRecordsModel.filter(id=row["id"]).update(
            fail_reason="任务响应超时",
            status=DrawsEnum.STATUS_FAIL,
            update_time=int(time.time())
        )
        # Refund the points spent on the failed task.
        if row["points"] > 0:
            await UserWalletModel.inc(
                scene=WalletEnum.POINTS,
                user_id=row["user_id"],
                change_type=WalletEnum.POINTS_INC_DRAWS_FAIL,
                change_amount=row["points"],
                project="AI绘画"
            )

    # Fill any remaining capacity with waiting tasks.
    pending_ids = []
    remaining = max_process - len(expired)
    if remaining > 0:
        waiting = await (DrawsRecordsModel
                         .filter(is_delete=0)
                         .filter(engine__in=["doubao", "dalle"])
                         .filter(status=DrawsEnum.STATUS_WAIT)
                         .limit(remaining)
                         .order_by("id")
                         .values("id"))
        pending_ids = [row["id"] for row in waiting]

    if not pending_ids:
        return None

    # Mark the waiting tasks as running before pushing them.
    await (DrawsRecordsModel
           .filter(is_delete=0)
           .filter(id__in=pending_ids)
           .update(
                status=DrawsEnum.STATUS_ING,
                start_time=int(time.time()),
                update_time=int(time.time())
           ))

    # Push each task id onto the drawing queue.
    for record_id in pending_ids:
        await QueueCache.push_draw(record_id)
    return None


async def _distribute_video(max_process: int):
    """
    Load video generation tasks into the worker queue.

    Fails tasks that have run past a 12-minute deadline (refunding the
    points they consumed), then fills the queue with waiting tasks.

    Args:
        max_process (int): maximum number of records to dispatch at once.
    """
    # Skip this cycle when the queue already has pending items.
    queue_length = await QueueCache.get_queue_length(QueueCache.VIDEO_JOB)
    if queue_length:
        return True

    # Query tasks still running past the deadline (12 minutes).
    expire_time: int = int(12 * 60)
    current_time: int = int(time.time())
    err_records = await (VideoRecordsModel
                         .filter(is_delete=0)
                         .filter(status=VideoEnum.STATUS_ING)
                         .filter(start_time__lte=(current_time - expire_time))
                         .limit(max_process)
                         .order_by("id")
                         .values("id", "user_id", "use_points", "create_time"))

    # Fail the timed-out tasks and refund their points.
    for record in err_records:
        # Mark the task as failed due to timeout.
        await VideoRecordsModel.filter(id=record["id"]).update(
            fail_reason="任务响应超时",
            status=VideoEnum.STATUS_FAIL,
            update_time=int(time.time())
        )
        # Refund the points spent on the failed task.
        if record["use_points"] > 0:
            await UserWalletModel.inc(
                scene=WalletEnum.POINTS,
                user_id=record["user_id"],
                change_type=WalletEnum.POINTS_INC_VIDEO_FAIL,
                # BUGFIX: the query selects "use_points" — record["points"]
                # raised KeyError whenever a refund was due.
                change_amount=record["use_points"],
                project="AI视频"
            )

    # Fill any remaining capacity with waiting tasks.
    wait_lists = []
    if len(err_records) < max_process:
        surplus_queue_length = max_process - len(err_records)
        wait_records = await (VideoRecordsModel
                              .filter(is_delete=0)
                              .filter(status=VideoEnum.STATUS_WAIT)
                              .limit(surplus_queue_length)
                              .order_by("id")
                              .values("id"))

        wait_lists = [item["id"] for item in wait_records]

    # Mark the waiting tasks as running, then push them onto the queue.
    if wait_lists:
        await (VideoRecordsModel
               .filter(is_delete=0)
               .filter(id__in=wait_lists)
               .update(
                    status=VideoEnum.STATUS_ING,
                    start_time=int(time.time()),
                    update_time=int(time.time())
               ))

        # Push each task id onto the video queue.
        for val in wait_lists:
            await QueueCache.push_video(val)
    return None


async def _distribute_music(max_process: int):
    """
    Load music generation tasks into the worker queue.

    Fails tasks that have run past a 12-minute deadline (refunding the
    points they consumed), then fills the queue with waiting tasks.

    Args:
        max_process (int): maximum number of records to dispatch at once.
    """
    # Skip this cycle when the queue already has pending items.
    queue_length = await QueueCache.get_queue_length(QueueCache.MUSIC_JOB)
    if queue_length:
        return True

    # Query tasks still running past the deadline (12 minutes).
    expire_time: int = int(12 * 60)
    current_time: int = int(time.time())
    err_records = await (MusicRecordsModel
                         .filter(status=MusicEnum.STATUS_ING)
                         .filter(start_time__lte=(current_time - expire_time))
                         .filter(is_delete=0)
                         .limit(max_process)
                         .order_by("id")
                         .values("id", "user_id", "use_points", "create_time"))

    # Fail the timed-out tasks and refund their points.
    for record in err_records:
        # Mark the task as failed due to timeout.
        await MusicRecordsModel.filter(id=record["id"]).update(
            fail_reason="任务响应超时",
            status=MusicEnum.STATUS_FAIL,
            update_time=int(time.time())
        )
        # Refund the points spent on the failed task.
        if record["use_points"] > 0:
            await UserWalletModel.inc(
                scene=WalletEnum.POINTS,
                user_id=record["user_id"],
                change_type=WalletEnum.POINTS_INC_MUSIC_FAIL,
                # BUGFIX: the query selects "use_points" — record["points"]
                # raised KeyError whenever a refund was due.
                change_amount=record["use_points"],
                project="AI音乐"
            )

    # Fill any remaining capacity with waiting tasks.
    wait_lists = []
    if len(err_records) < max_process:
        surplus_queue_length = max_process - len(err_records)
        wait_records = await (MusicRecordsModel
                              .filter(is_delete=0)
                              .filter(status=MusicEnum.STATUS_WAIT)
                              .limit(surplus_queue_length)
                              .order_by("id")
                              .values("id"))

        wait_lists = [item["id"] for item in wait_records]

    # Mark the waiting tasks as running, then push them onto the queue.
    if wait_lists:
        await (MusicRecordsModel
               .filter(is_delete=0)
               .filter(id__in=wait_lists)
               .update(
                    status=MusicEnum.STATUS_ING,
                    start_time=int(time.time()),
                    update_time=int(time.time())
               ))

        # Push each task id onto the music queue.
        for val in wait_lists:
            await QueueCache.push_music(val)
    return None


async def _check_qa_complete(fid: int):
    """
    Finalize a QA-splitting archive once all of its splitting tasks settle.

    Aggregates the QA pairs produced by the completed splitting tasks,
    marks the archive as succeeded or failed, charges the importing user
    the consumed points, logs the wallet deduction, and inserts each
    unique QA pair into PgKnowledgeModel for vectorization.

    Args:
        fid (int): archive id (AgentKnowArchiveModel primary key).
    """
    # Count splitting tasks for this archive that have not finished yet.
    surplus = await (AgentKnowSplittingModel
                     .filter(archive_id=fid)
                     .filter(status__in=[
                            AgentEnum.EMB_WAIT,
                            AgentEnum.EMB_LINE,
                            AgentEnum.EMB_ING
                     ]).count())

    # Bail out while any splitting task is still pending or running.
    if surplus > 0:
        return None

    # Accumulators for the aggregated QA pairs and token/point usage.
    qa_lists = []
    usage_points: int = 0
    usage_tokens: dict = {
        "name": "QA拆分",
        "scene": "qa",
        "model": "",
        "alias": "",
        "task_time": "",
        "use_points": 0,
        "total_tokens": 0,
        "prompt_tokens": 0,
        "completion_tokens": 0
    }

    # Collect QA pairs from the successfully finished splitting tasks,
    # de-duplicating by question text ("queue" tracks seen questions).
    # NOTE(review): model/alias/task_time keep only the LAST item's values —
    # presumably all tasks of one archive share the same model; confirm.
    queue = []
    lists = await AgentKnowSplittingModel.filter(archive_id=fid, status=AgentEnum.EMB_YES).all()
    for item in lists:
        usage_tokens["model"] = item.model_name
        usage_tokens["alias"] = item.model_alias
        usage_tokens["task_time"] = str(item.task_time)
        # item.results is expected to be a JSON object of {question: answer}.
        data = json.loads(item.results)
        if data:
            usages = json.loads(item.usage_tokens)
            usage_points += item.usage_points
            usage_tokens["use_points"] += item.usage_points
            usage_tokens["total_tokens"] += usages["total_tokens"]
            usage_tokens["prompt_tokens"] += usages["prompt_tokens"]
            usage_tokens["completion_tokens"] += usages["completion_tokens"]
            for q, a in data.items():
                if q not in queue:
                    qa_lists.append({
                        "Q": q,
                        "A": a,
                        "file_name": item.file_name,
                        "file_size": item.file_size,
                        "file_path": item.file_path
                    })
                    queue.append(q)

    # No QA pairs produced at all: mark the archive as failed.
    if not qa_lists:
        return await AgentKnowArchiveModel.filter(id=fid).update(
            fail_reason="无法成功拆分数据",
            qa_status=AgentEnum.EMB_FAIL,
            update_time=int(time.time()),
        )

    # Mark the archive as successfully split.
    await AgentKnowArchiveModel.filter(id=fid).update(
        fail_reason="",
        qa_status=AgentEnum.EMB_YES,
        update_time=int(time.time()),
    )

    # Deduct the consumed points from the importing user (floored at 0).
    # NOTE(review): assumes the user row exists — `user` would be None
    # otherwise and the attribute access would raise; confirm upstream.
    user = await UserModel.filter(id=lists[0].import_uid).first()
    user.points = max(user.points - usage_points, 0)
    await user.save()

    # Look up the knowledge base for the wallet-log metadata.
    # NOTE(review): `know` is None if the knowledge-base row is missing;
    # the .get() calls below would then raise — confirm it always exists.
    know = await AgentKnowModel.filter(id=lists[0].know_id).first().values("id", "code", "name")

    # Write the wallet deduction log entry.
    await UserWalletModel.dec(
        scene=WalletEnum.POINTS,
        user_id=lists[0].import_uid,
        change_type=WalletEnum.POINTS_DEC_AGENT_QA,
        change_amount=usage_points,
        project=know.get("name"),
        source_sn=know.get("code"),
        source_id=know.get("id"),
        additional=json.dumps([usage_tokens], ensure_ascii=False),
        remarks="知识拆分消耗积分")

    # Insert every QA pair for vectorization, ordering via chunk_index.
    index = 0
    for qa in qa_lists:
        await PgKnowledgeModel.create(
            know_id=lists[0].know_id,
            archive_id=lists[0].archive_id,
            import_uid=lists[0].import_uid,
            question=qa.get("Q", ""),
            answer=qa.get("A", ""),
            chunk_index=index,
            create_time=int(time.time()),
            update_time=int(time.time()),
            metadata=json.dumps({
                "file_name": qa.get("file_name", "-"),
                "file_size": qa.get("file_size", 0),
                "file_path": qa.get("file_path", "")
            }, ensure_ascii=False)
        )
        index += 1

    return None
