# +----------------------------------------------------------------------
# | ChatWork智能聊天办公系统
# +----------------------------------------------------------------------
# | 软件声明: 本系统并非自由软件,未经授权任何形式的商业使用均属非法。
# | 版权保护: 任何企业和个人不允许对程序代码以任何形式任何目的复制/分发。
# | 授权要求: 如有商业使用需求,请务必先与版权所有者取得联系并获得正式授权。
# +----------------------------------------------------------------------
# | Author: ChatWork Team <2474369941@qq.com>
# +----------------------------------------------------------------------
import re
import time
import json
import logging
from typing import Dict
from tortoise.transactions import in_transaction
from exception import AppException
from common.core.cron import CronBase
from common.enums.agent import AgentEnum
from common.enums.wallet import WalletEnum
from common.enums.models import ModelsEnum
from common.cache.queue_cache import QueueCache
from common.cache.pool_cache import KeyPoolCache
from common.models.users import UserModel, UserWalletModel
from common.models.dev import DevModelsModel
from common.models.agents import AgentKnowModel
from common.models.agents import AgentKnowArchiveModel
from common.models.agents import AgentKnowSplittingModel
from common.postgres.public import PgKnowledgeModel
from common.chain.chain_server import ChatUtils, AIChannelFactory, FlowsSchema

# Module-level logger, namespaced by module path per stdlib convention.
logger = logging.getLogger(__name__)


class Command(CronBase):
    """Cron worker for knowledge-base QA splitting.

    Pops one archive id off the QA job queue, validates the archive / knowledge
    base / model / user, asks an LLM to split each pending text chunk into
    question-answer pairs, persists the pairs into the Postgres knowledge table,
    then finalizes the archive and bills the owning user for the tokens used.
    """

    @classmethod
    async def run(cls, **kwargs):
        """Process a single queued archive end to end.

        All exceptions are caught and logged so one bad job cannot kill the
        cron loop; business-level failures are recorded via handle_abnormal.
        """
        start_time = time.time()
        try:
            # Pop the next job (an archive id) from the QA queue; nothing to do
            # when the queue is empty.
            archive_id = await QueueCache.queue_pop(QueueCache.QA_JOB)
            if not archive_id:
                return None

            # Load the archive record.
            archive = await AgentKnowArchiveModel.filter(id=int(archive_id)).first()
            if not archive:
                return None
            if archive.is_delete:
                return await cls.handle_abnormal(aid=int(archive_id), error="文档已被删除")

            # Load the owning knowledge base; it must exist and be enabled.
            aid = int(archive.id)
            kid = int(archive.know_id)
            know = await AgentKnowModel.filter(id=kid, is_delete=0).first()
            if not know:
                return await cls.handle_abnormal(aid=aid, error="知识库已被删除")
            if know.is_disable:
                return await cls.handle_abnormal(aid=aid, error="知识库已被禁用")

            # Resolve the chat model configured for splitting.
            try:
                models = await DevModelsModel.check_models(know.splits_model, ModelsEnum.TYPE_CHAT)
            except Exception as e:
                return await cls.handle_abnormal(aid=aid, error=str(e))

            # Validate the owning user: must exist, be active and — when the
            # model is not free — have a positive points balance.
            user = await UserModel.filter(id=know.user_id, is_delete=0).first()
            if not user:
                return await cls.handle_abnormal(aid=aid, error="账号已注销")
            if user.is_disable:
                return await cls.handle_abnormal(aid=aid, error="账号已冻结")
            if user.points <= 0 < int(models["price"]):
                return await cls.handle_abnormal(aid=aid, error="账号余额不足")

            # Mark the archive as in progress before starting the long work.
            archive.qa_progress = 0.00
            archive.qa_status = AgentEnum.EMB_ING
            archive.update_time = int(time.time())
            await archive.save()

            # Query for chunks still waiting to be split.
            where = [AgentEnum.EMB_WAIT, AgentEnum.EMB_LINE, AgentEnum.EMB_ING]
            pending = AgentKnowSplittingModel.filter(know_id=kid, archive_id=aid).filter(status__in=where)

            # Process every pending chunk, one row per pass (send_qa flips the
            # row's status to EMB_YES, removing it from the query above).
            # BUGFIX: the iteration cap used to be `count / 10 + 5`, which made
            # save_qa finalize the archive — and delete the remaining rows —
            # after only a fraction of the chunks were split. Allow one pass
            # per row plus a small slack; the loop still exits early as soon
            # as no pending row remains.
            processed: int = 0
            count: int = await pending.count()
            for _ in range(count + 5):
                data = await pending.order_by("id").first()
                if not data:
                    break

                try:
                    check_key = models["config"].get("check_key")
                    apikey = await cls.get_apikey(models["channel"], check_key)
                except Exception as e:
                    # Record the failure on the archive, then abort the job;
                    # the re-raise is swallowed by the outer handler and logged.
                    await cls.handle_abnormal(aid=aid, error=str(e))
                    raise AppException(str(e))

                processed += 1
                await cls.send_qa(apikey, models, data)
                await cls.update_progress(processed, count, archive)

            # Persist the Q/A pairs, mark the archive done and bill the user.
            await cls.save_qa(code=archive.code, aid=aid, models=models, start_time=start_time)
        except Exception as e:
            logger.error("Error queue_splitting: " + str(e))

    @classmethod
    async def update_progress(cls, processed: int, count: int, archive: AgentKnowArchiveModel):
        """Write the completion percentage (2 decimals) onto the archive row."""
        try:
            progress = (processed / count) * 100
            progress = float(f"{progress:.2f}")
        except ZeroDivisionError:
            # No chunks at all counts as fully done.
            progress = 100.00
        archive.qa_progress = progress
        await archive.save()

    @classmethod
    async def send_qa(cls, apikey: dict, models: dict, data: AgentKnowSplittingModel):
        """Split one text chunk into Q/A pairs via the LLM and store the result.

        Streams the completion, extracts "Qn:/An:" pairs from the reply, and
        saves both the pairs and the token usage on the splitting row, marking
        it EMB_YES so the caller's pending query skips it.
        """
        # Per-chunk timing, recorded on the row for diagnostics.
        start_time = time.time()

        # Stream the completion and concatenate the answer text.
        answer = ""
        messages = cls.get_messages(data.content)
        aiServer = AIChannelFactory.create(models["channel"], models["config"], apikey)
        stream = await aiServer.llm(messages)
        async for chunk in stream:
            # BUGFIX: final/role-only stream chunks carry delta.content=None
            # (and some providers send empty choices); concatenating None
            # raised TypeError and failed the whole chunk.
            if chunk.choices and chunk.choices[0].delta.content:
                answer += chunk.choices[0].delta.content

        # Token usage for later billing.
        usages: dict = ChatUtils.usage(messages, answer)

        # Parse the Q/A pairs out of the model reply.
        qaDict: Dict[str, str] = cls.extract_qa_pairs(answer)

        # Persist results on the splitting row and mark it done.
        data.usage_tokens = json.dumps(usages, ensure_ascii=False)
        data.results = json.dumps(qaDict, ensure_ascii=False)
        data.status = AgentEnum.EMB_YES
        data.task_time = time.time() - start_time
        data.update_time = int(time.time())
        await data.save()
        return None

    @classmethod
    async def save_qa(cls, code: str, aid: int, models: dict, start_time: float):
        """Move finished Q/A pairs into the knowledge store and settle billing.

        Inserts one PgKnowledgeModel row per pair, then — inside one MySQL
        transaction — marks the archive complete, deletes the consumed
        splitting rows, deducts the user's points and writes a wallet log.
        """
        # Billing/flow record for this archive; token counters start at zero
        # and are accumulated across all chunks below.
        usages = FlowsSchema(
            name="QA拆分",
            scene="qa",
            alias=models["alias"],
            model=models["model"],
            task_time=f"{(time.time() - start_time):.2f}",
            use_points=0,
            total_tokens=0,
            prompt_tokens=0,
            completion_tokens=0
        )

        # All successfully split rows for this archive.
        lists = await AgentKnowSplittingModel.filter(archive_id=aid, status=AgentEnum.EMB_YES).all()
        head = lists[0] if lists else None
        for item in lists:
            data = json.loads(item.results or "{}")
            if not data:
                continue

            # BUGFIX: these counters were assigned (=) instead of accumulated
            # (+=), so only the LAST chunk's token usage was ever billed.
            us = json.loads(item.usage_tokens)
            usages.total_tokens += us["total_tokens"]
            usages.prompt_tokens += us["prompt_tokens"]
            usages.completion_tokens += us["completion_tokens"]

            # One knowledge row per Q/A pair; chunk_index is 1-based within
            # the chunk. head's ids are constant across the archive.
            index = 0
            qa_lists = []
            for q, a in data.items():
                index += 1
                qa_lists.append(PgKnowledgeModel(
                    know_id=head.know_id,
                    user_id=head.import_uid,
                    last_uid=head.import_uid,
                    archive_id=head.archive_id,
                    question=q,
                    answer=a,
                    chunk_index=index,
                    create_time=int(time.time()),
                    update_time=int(time.time()),
                    metadata=json.dumps({
                        "file_name": item.file_name,
                        "file_size": item.file_size,
                        "file_path": item.file_path
                    }, ensure_ascii=False)
                ))

            if qa_lists:
                await PgKnowledgeModel.bulk_create(qa_lists)

        # Convert total tokens into points to charge.
        usage_points: int = ChatUtils.compute_price(usages.total_tokens, models["price"])
        usages.use_points = usage_points

        async with in_transaction("mysql"):
            # Mark the archive as successfully QA-split.
            await AgentKnowArchiveModel.filter(id=aid).update(
                fail_reason="",
                qa_progress=100.00,
                qa_status=AgentEnum.EMB_YES,
                status=AgentEnum.EMB_WAIT,
                update_time=int(time.time())
            )
            # Remove the consumed splitting rows.
            await AgentKnowSplittingModel.filter(archive_id=aid).delete()
            # Deduct the user's points (never below zero).
            if usage_points > 0 and head:
                user = await UserModel.filter(id=head.import_uid).first()
                # BUGFIX: the user may have been deleted mid-run; the previous
                # code dereferenced a possible None.
                if user:
                    user.points = max(user.points - usage_points, 0)
                    await user.save()
            # Wallet audit log for the deduction.
            if head:
                await UserWalletModel.dec(
                    scene=WalletEnum.POINTS,
                    user_id=head.import_uid,
                    change_type=WalletEnum.POINTS_DEC_KB_QA,
                    change_amount=usage_points,
                    project=head.file_name,
                    source_sn=code,
                    source_id=aid,
                    additional=json.dumps([usages.model_dump()], ensure_ascii=False),
                    remarks="QA拆分消耗积分")

    @classmethod
    async def get_apikey(cls, channel: str, check_key: bool = True):
        """Fetch an API key for *channel* from the key pool.

        Returns an empty dict when key checking is disabled; raises
        AppException when checking is enabled but no key is configured.
        """
        apikey = {}
        if check_key:
            apikey = await KeyPoolCache(scene="chat", channel=channel).get_key() or {}
            if not apikey:
                raise AppException("当前模型尚未配置密钥,请与管理员联系")
        return apikey

    @classmethod
    async def handle_abnormal(cls, aid: int, error: str):
        """Record a job failure on the archive row.

        The exact Chinese error strings are part of the contract here: they
        select which update is applied (plain reason, soft-delete, or failed
        status).
        """
        logger.error(f"Error queue_splitting({str(aid)}): " + error)
        if error == "文档已被删除":
            # Archive already gone: just note the reason.
            await AgentKnowArchiveModel.filter(id=aid).update(fail_reason=error)
        elif error in ["知识库已被删除", "账号已注销"]:
            # Parent entities gone: soft-delete the archive too.
            await AgentKnowArchiveModel.filter(id=aid, is_delete=0).update(
                status=AgentEnum.EMB_FAIL,
                fail_reason=error,
                is_delete=1,
                delete_time=int(time.time())
            )
        else:
            # Recoverable failure: mark failed, keep the row.
            await AgentKnowArchiveModel.filter(id=aid, is_delete=0).update(
                status=AgentEnum.EMB_FAIL,
                fail_reason=error,
                update_time=int(time.time())
            )

    @classmethod
    def get_messages(cls, content: str):
        """Build the chat messages for the QA-splitting prompt template,
        substituting the chunk text into the user prompt."""
        prompts = AgentEnum.get_prompt_tpl("qa")
        user = prompts["user"].replace("{{text}}", content)
        return [
            {"role": "system", "content": prompts["system"]},
            {"role": "user", "content": user}
        ]

    @classmethod
    def extract_qa_pairs(cls, text: str) -> Dict[str, str]:
        """Extract question/answer pairs from the model reply.

        Expects lines of the form ``Qn: ...`` followed by ``An: ...``; each
        answer runs until the next ``Qn:`` or end of text (DOTALL, so answers
        may span multiple lines).

        Args:
            text (str): Raw model output containing the Q/A pairs.

        Returns:
            dict: Mapping of stripped question text to stripped answer text.
                  Duplicate questions keep the last answer seen.
        """
        pattern = r'Q\d+:\s*(.*?)\nA\d+:\s*(.*?)(?=\nQ\d+:|$)'
        matches = re.findall(pattern, text, re.DOTALL)
        return {question.strip(): answer.strip() for question, answer in matches}
