# +----------------------------------------------------------------------
# | ChatWork
# +----------------------------------------------------------------------
# | 这不是一个自由软件,您只能在不用于商业目的的前提下对程序代码进行修改和使用。
# | 任何企业和个人不允许对程序代码以任何形式任何目的再发布,商业使用请获取授权。
# +----------------------------------------------------------------------
# | Author: ChatWork Team <2474369941@qq.com>
# +----------------------------------------------------------------------
import json
import time
import aiofiles.os
from typing import List, Dict
from tortoise.queryset import Q
from tortoise.functions import Count
from tortoise.transactions import in_transaction
from config import get_settings
from hypertext import PagingResult
from exception import AppException
from common.enums.agent import AgentEnum
from common.utils.urls import UrlUtil
from common.utils.times import TimeUtil
from common.utils.tools import ToolsUtil
from common.utils.files import FilesUtil
from common.models.agents import AgentKnowModel
from common.models.agents import AgentKnowArchiveModel
from common.models.agents import AgentKnowSplittingModel
from common.postgres.public import PgKnowledgeModel
from common.service.clean_logic import CleanLogic
from apps.api.schemas.agent.know import archive_schema as schema
from apps.api.service.agent.know.teamed_service import KnowTeamedService
from langchain_text_splitters import RecursiveCharacterTextSplitter
from llama_index.readers.web import BeautifulSoupWebReader


class KnowArchiveService:
    """ Knowledge base document (archive) service. """

    @staticmethod
    def _assert_know_usable(know):
        """
        Raise when a knowledge base is missing, soft-deleted or disabled.

        Args:
            know: AgentKnowModel instance or None (result of ``.first()``).

        Raises:
            AppException: If the knowledge base cannot be used.
        """
        if not know:
            raise AppException("知识库不存在了")
        if know.is_delete:
            raise AppException("知识库已被删除")
        if know.is_disable:
            raise AppException("知识库已被禁用")

    @classmethod
    async def _count_chunks(cls, archive_ids: List[int]):
        """
        Aggregate knowledge-chunk statistics per archive.

        Args:
            archive_ids (List[int]): Archive primary keys.

        Returns:
            tuple: ``(total_data, waits_data)`` — dicts keyed by archive id
            holding the total chunk count and the count of chunks still
            waiting for (or undergoing) embedding.
        """
        if not archive_ids:
            return {}, {}

        # Total number of non-deleted chunks per archive
        total_query = await (PgKnowledgeModel
                             .filter(archive_id__in=archive_ids)
                             .filter(is_delete=0)
                             .annotate(count=Count("archive_id"))
                             .group_by("archive_id")
                             .values_list("archive_id", "count"))

        # Chunks still queued or currently being embedded
        waits_query = await (PgKnowledgeModel
                             .filter(archive_id__in=archive_ids)
                             .filter(status__in=[AgentEnum.EMB_WAIT, AgentEnum.EMB_ING])
                             .filter(is_delete=0)
                             .annotate(count=Count("archive_id"))
                             .group_by("archive_id")
                             .values_list("archive_id", "count"))

        return dict(total_query), dict(waits_query)

    @classmethod
    async def whole(cls, user_id: int, params: schema.KnowArchiveWholeIn):
        """
        List every document (code + name) of a knowledge base.

        Args:
            user_id (int): User ID.
            params (schema.KnowArchiveWholeIn): Query parameters (kb code).

        Returns:
            List[schema.KnowArchiveWholeVo]: Document list.

        Author:
            zero
        """
        # Validate the knowledge base
        know = await AgentKnowModel.filter(code=params.kb).first()
        cls._assert_know_usable(know)

        # Verify permission: the owner is exempt from the team check
        if user_id != int(know.user_id):
            await KnowTeamedService.has_permission(know.code, user_id, AgentEnum.POWER_VIEW)

        # Query documents (exclude soft-deleted archives, matching lists())
        lists = await (AgentKnowArchiveModel
                       .filter(know_id=know.id, is_delete=0)
                       .all()
                       .values("code", "name"))

        # Build response VOs
        return [
            schema.KnowArchiveWholeVo(code=item["code"], name=item["name"])
            for item in lists
        ]

    @classmethod
    async def lists(cls, user_id: int, params: schema.KnowArchiveSearchIn) -> PagingResult[schema.KnowArchiveListVo]:
        """
        Paginated document list.

        Args:
            user_id (int): User ID.
            params (schema.KnowArchiveSearchIn): Document search parameters.

        Returns:
            PagingResult[schema.KnowArchiveListVo]: Paged document VOs.

        Author:
            zero
        """
        # Validate the knowledge base
        know = await AgentKnowModel.filter(code=params.kb).first()
        cls._assert_know_usable(know)

        # Verify permission: the owner is exempt from the team check
        if know.user_id != user_id:
            await KnowTeamedService.has_permission(know.code, user_id, AgentEnum.POWER_VIEW)

        # Build query conditions
        where = [Q(know_id=know.id), Q(is_delete=0)]
        if params.keyword:
            where.append(Q(name__icontains=params.keyword))

        # Query a page of archives
        _model = AgentKnowArchiveModel.filter(*where).order_by("-update_time")
        _pager = await AgentKnowArchiveModel.paginate(
            model=_model,
            page_no=params.page_no,
            page_size=params.page_size
        )

        # Chunk statistics per archive on this page
        ids = [item["id"] for item in _pager.lists if item["id"]]
        total_data, waits_data = await cls._count_chunks(ids)

        # Assemble VOs
        _pager.lists = [
            schema.KnowArchiveListVo(
                code=item["code"],
                name=item["name"],
                scene=item["scene"],
                qa_status=item["qa_status"],
                train_mode=item["train_mode"],
                train_total=total_data.get(item["id"], 0),
                train_waits=waits_data.get(item["id"], 0),
                is_disable=item["is_disable"],
                fail_reason=item["fail_reason"] or "",
                create_time=item["create_time"],
                update_time=item["update_time"]
            )
            for item in _pager.lists
        ]
        return _pager

    @classmethod
    async def inspect(cls, user_id: int, ids: List[str]):
        """
        Inspect training status for a batch of documents.

        Args:
            user_id (int): User ID.
            ids (List[str]): Document codes to inspect.

        Returns:
            List[schema.KnowArchiveListVo]: Document status VOs.

        Author:
            zero
        """
        lists = await AgentKnowArchiveModel.filter(user_id=user_id, code__in=ids).all()

        # Use a distinct name for DB primary keys — don't shadow the
        # ``ids`` parameter (which holds string codes).
        archive_ids = [item.id for item in lists]

        # Chunk statistics per archive
        total_data, waits_data = await cls._count_chunks(archive_ids)

        # Assemble VOs
        return [
            schema.KnowArchiveListVo(
                code=item.code,
                name=item.name,
                scene=item.scene,
                qa_status=item.qa_status,
                train_mode=item.train_mode,
                train_total=total_data.get(item.id, 0),
                train_waits=waits_data.get(item.id, 0),
                is_disable=item.is_disable,
                fail_reason=item.fail_reason or "",
                create_time=TimeUtil.timestamp_to_date(item.create_time),
                update_time=TimeUtil.timestamp_to_date(item.update_time)
            )
            for item in lists
        ]

    @classmethod
    async def detail(cls, user_id: int, fid: str) -> schema.KnowArchiveDetailVo:
        """
        Document detail.

        Args:
            user_id (int): User ID.
            fid (str): Document code.

        Returns:
            schema.KnowArchiveDetailVo: Document detail VO.

        Author:
            zero
        """
        # Query the document (``.get()`` raises if it does not exist)
        detail = await AgentKnowArchiveModel.filter(code=fid).get()

        # Validate the knowledge base
        know = await AgentKnowModel.filter(id=detail.know_id).first()
        cls._assert_know_usable(know)

        # Verify permission: the owner is exempt from the team check
        if know.user_id != user_id:
            await KnowTeamedService.has_permission(know.code, user_id, AgentEnum.POWER_VIEW)

        # Build response VO
        return schema.KnowArchiveDetailVo(
            code=detail.code,
            name=detail.name,
            size=detail.size,
            train_mode=detail.train_mode,
            is_disable=detail.is_disable,
            create_time=TimeUtil.timestamp_to_date(detail.create_time),
            update_time=TimeUtil.timestamp_to_date(detail.update_time)
        )

    @classmethod
    async def rename(cls, user_id: int, post: schema.KnowArchiveRenameIn):
        """
        Rename a document.

        Args:
            user_id (int): User ID.
            post (schema.KnowArchiveRenameIn): Rename parameters.

        Author:
            zero
        """
        # Validate the document
        archive = await AgentKnowArchiveModel.filter(code=post.fid).first()
        if not archive:
            raise AppException("文档不存在哦")
        if archive.is_delete:
            raise AppException("文档已被删除")

        # Validate the knowledge base.
        # Fix: the original checked ``is_disable`` twice — the "deleted"
        # branch now correctly tests ``is_delete``.
        know = await AgentKnowModel.filter(id=archive.know_id).first()
        cls._assert_know_usable(know)

        # Verify permission: the owner is exempt from the team check
        if user_id != know.user_id:
            await KnowTeamedService.has_permission(know.code, int(user_id), AgentEnum.POWER_EDIT)

        # Persist the new name
        await AgentKnowArchiveModel.filter(id=archive.id).update(
            name=post.name,
            update_time=int(time.time())
        )

    @classmethod
    async def merge(cls, user_id: int, post: schema.KnowArchiveMergeIn):
        """
        Merge documents: move every chunk of the source document into the
        target document, then soft-delete the source document.

        Args:
            user_id (int): User ID.
            post (schema.KnowArchiveMergeIn): Merge parameters.

        Author:
            zero
        """
        source_fid = post.source_fid  # source file code
        target_fid = post.target_fid  # target file code

        source = await AgentKnowArchiveModel.filter(code=source_fid).first()
        if not source:
            raise AppException("当前文件丢失,请刷新列表")

        target = await AgentKnowArchiveModel.filter(code=target_fid).first()
        if not target:
            raise AppException("目标文件丢失,请刷新列表")

        # Merging across knowledge bases is not allowed
        if source.know_id != target.know_id:
            raise AppException("不允许跨知识库合并")

        # Validate the knowledge base
        know = await AgentKnowModel.filter(id=source.know_id).first()
        cls._assert_know_usable(know)

        # Verify permission: the owner is exempt from the team check
        if user_id != know.user_id:
            await KnowTeamedService.has_permission(know.code, user_id, AgentEnum.POWER_EDIT)

        # Re-point the source's chunks at the target document
        await PgKnowledgeModel.filter(archive_id=source.id).update(
            archive_id=target.id,
            update_time=int(time.time())
        )

        # Soft-delete the source document
        await AgentKnowArchiveModel.filter(id=source.id).update(
            is_delete=1,
            delete_time=int(time.time())
        )

    @classmethod
    async def delete(cls, user_id: int, fid: str):
        """
        Delete a document (soft delete) and clean up related tables.

        Args:
            user_id (int): User ID.
            fid (str): Document code.

        Author:
            zero
        """
        # Validate the document
        archive = await AgentKnowArchiveModel.filter(code=fid).first()
        if not archive:
            raise AppException("文档不存在哦")
        if archive.is_delete:
            raise AppException("文档已被删除")

        # Validate the knowledge base.
        # Fix: the original checked ``is_disable`` twice — the "deleted"
        # branch now correctly tests ``is_delete``.
        know = await AgentKnowModel.filter(id=int(archive.know_id)).first()
        cls._assert_know_usable(know)

        # Verify permission: the owner is exempt from the team check
        if user_id != know.user_id:
            await KnowTeamedService.has_permission(know.code, user_id, AgentEnum.POWER_EDIT)

        # Soft-delete the document
        await AgentKnowArchiveModel.filter(id=archive.id).update(
            is_delete=1,
            delete_time=int(time.time())
        )

        # Clean up related tables
        await CleanLogic.clean_know_archive(know.id)

    @classmethod
    async def status(cls, user_id: int, fid: str):
        """
        Toggle a document's enabled/disabled status.

        Args:
            user_id (int): User ID.
            fid (str): Document code.

        Author:
            zero
        """
        # Validate the document
        archive = await AgentKnowArchiveModel.filter(code=fid).first()
        if not archive:
            raise AppException("文档不存在哦")
        if archive.is_delete:
            raise AppException("文档已被删除")

        # Validate the knowledge base.
        # Fix: the original checked ``is_disable`` twice — the "deleted"
        # branch now correctly tests ``is_delete``.
        know = await AgentKnowModel.filter(id=int(archive.know_id)).first()
        cls._assert_know_usable(know)

        # Verify permission: the owner is exempt from the team check
        if user_id != know.user_id:
            await KnowTeamedService.has_permission(know.code, user_id, AgentEnum.POWER_EDIT)

        # Flip the disable flag
        await AgentKnowArchiveModel.filter(id=archive.id).update(
            is_disable=0 if archive.is_disable else 1,
            update_time=int(time.time())
        )

    @classmethod
    async def imports(cls, post: schema.KnowArchiveImportIn, user_id: int):
        """
        Import documents into a knowledge base.

        Each file is parsed and split into chunks; the resulting chunks are
        written either directly into the knowledge table or into the
        splitting queue, depending on the training mode.

        Args:
            post (schema.KnowArchiveImportIn): Import parameters.
            user_id (int): User ID.

        Author:
            zero
        """
        # Validate the knowledge base
        know = await AgentKnowModel.filter(code=str(post.kb)).first()
        cls._assert_know_usable(know)

        # Verify permission: the owner is exempt from the team check
        if user_id != know.user_id:
            await KnowTeamedService.has_permission(know.code, user_id, AgentEnum.POWER_EDIT)

        # Process all files inside one MySQL transaction
        async with in_transaction("mysql"):
            for file in post.files:
                # Parse the file (del_cache=True consumes the parse cache)
                results = await cls.parse(schema.KnowArchiveParseIn(
                    kb=post.kb,
                    scene=post.scene,
                    chunk_size=post.chunk_size,
                    chunk_overlap=post.chunk_overlap,
                    separators=post.separators,
                    url=file["url"],
                ), del_cache=True)

                # Create the archive record with a unique code
                salt = ToolsUtil.make_rand_char(20) + str(file["url"])
                code = ToolsUtil.make_md5_str(str(time.time()) + str(user_id) + salt)
                archive = await AgentKnowArchiveModel.create(
                    know_id=know.id,
                    user_id=know.user_id,
                    last_uid=user_id,
                    scene=post.scene,
                    code=code,
                    name=file["name"],
                    size=file.get("size", 0),
                    path=UrlUtil.to_relative_url(file["url"]),
                    train_mode=post.train_mode,
                    separators=post.separators,
                    chunk_size=post.chunk_size,
                    chunk_overlap=post.chunk_overlap,
                    create_time=int(time.time()),
                    update_time=int(time.time())
                )

                # Persist the parsed chunks.
                # NOTE(review): non-"qa" modes write chunks straight into the
                # knowledge table, while "qa" mode queues them in the
                # splitting table — presumably for asynchronous Q/A
                # generation; confirm against the training worker.
                file_path = UrlUtil.to_relative_url(file["url"])
                if post.train_mode != "qa":
                    uuids = []
                    index = 0
                    for item in results.lists:
                        # Dedupe signature over the combined Q+A text
                        text: str = item.get("question", "").strip() + item.get("answer", "").strip()
                        signed: str = ToolsUtil.make_md5_str(text)
                        pg = await PgKnowledgeModel.create(
                            know_id=know.id,
                            user_id=know.user_id,
                            last_uid=know.user_id,
                            archive_id=archive.id,
                            import_uid=user_id,
                            signed=signed,
                            chunk_index=index,
                            question=item.get("question", ""),
                            answer=item.get("answer", ""),
                            create_time=int(time.time()),
                            update_time=int(time.time()),
                            metadata=json.dumps({
                                "file_name": file["name"],
                                "file_size": file.get("size", 0),
                                "file_path": file_path
                            }, ensure_ascii=False)
                        )
                        index += 1
                        uuids.append(str(pg.uuid))
                    # Refresh the full-text search vectors for the new rows
                    await PgKnowledgeModel.update_ts_vectory(uuids)
                else:
                    for item in results.lists:
                        await AgentKnowSplittingModel.create(
                            know_id=know.id,
                            archive_id=archive.id,
                            import_uid=user_id,
                            file_name=file["name"],
                            file_size=file.get("size", 0),
                            file_path=file_path,
                            content=item.get("question", ""),
                            create_time=int(time.time()),
                            update_time=int(time.time())
                        )

    @classmethod
    async def parse(cls, post: schema.KnowArchiveParseIn, del_cache: bool = False) -> schema.KnowArchiveParseVo:
        """
        Parse a document into chunks.

        Results are cached on disk keyed by (chunk_size, chunk_overlap,
        separators, file path). Preview calls (``del_cache=False``) return at
        most 20 chunks and write the cache; import calls (``del_cache=True``)
        return all chunks and consume the cache entry.

        Args:
            post (schema.KnowArchiveParseIn): Parse parameters.
            del_cache (bool): Delete the cache entry after reading it.

        Raises:
            AppException: If the source cannot be fetched or parsed.

        Author:
            zero
        """
        scene: str = post.scene
        chunk_overlap: int = post.chunk_overlap
        chunk_size: int = post.chunk_size
        separators: list = ["\n\n", "\n", " ", ""]
        source_url: str = UrlUtil.to_root_path(post.url)
        file_path: str = UrlUtil.to_relative_url(post.url)
        sign = ToolsUtil.make_md5_str(str(chunk_size) + str(chunk_overlap) + post.separators + file_path)
        cache_path = f"{get_settings().APP_PATH}/runtime/parsing/know/{sign}.md"
        if post.separators:
            separators = [post.separators]

        # Best-effort cache read: any failure falls through to re-parsing
        try:
            if await aiofiles.os.path.exists(cache_path):
                content = await ToolsUtil.read_file(cache_path)
                if content:
                    if del_cache:
                        await aiofiles.os.remove(cache_path)
                    results = json.loads(content)
                    lists = results.get("lists", [])
                    return schema.KnowArchiveParseVo(
                        split_count=results.get("count"),
                        lists=lists if del_cache else lists[:20]
                    )
        except Exception as e:
            print(str(e))

        # Parse the source according to its scene
        try:
            if scene == "link":
                # Web page: fetch and split the page text
                results = cls.capture([post.url])
                chunks = RecursiveCharacterTextSplitter(
                    chunk_size=chunk_size,
                    chunk_overlap=chunk_overlap,
                    separators=separators
                ).split_text(results[0]['content'])
                count = len(chunks)
                lists = [{"question": chunk} for chunk in chunks]
            elif scene == "text":
                # Raw text: ``post.url`` carries the text itself
                chunks = RecursiveCharacterTextSplitter(
                    chunk_size=chunk_size,
                    chunk_overlap=chunk_overlap,
                    separators=separators
                ).split_text(post.url)
                count = len(chunks)
                lists = [{"question": chunk} for chunk in chunks]
            elif scene == "csv":
                # CSV: already structured as Q/A pairs
                lists = await FilesUtil.parse_qa(source_url)
                count = len(lists)
            else:
                # Any other file type: extract raw text, then split
                content = await FilesUtil.parse_raw(source_url)
                chunks = RecursiveCharacterTextSplitter(
                    chunk_size=chunk_size,
                    chunk_overlap=chunk_overlap,
                    separators=separators
                ).split_text(content)
                count = len(chunks)
                lists = [{"question": chunk} for chunk in chunks]
        except Exception as e:
            raise AppException(str(e))

        # Cache the full result for the follow-up import call
        if not del_cache:
            structure = {"count": count, "lists": lists}
            json_text_str = json.dumps(structure, indent=4, ensure_ascii=False)
            await ToolsUtil.write_file(cache_path, json_text_str)

        # Return the parse result (previews are capped at 20 chunks)
        return schema.KnowArchiveParseVo(
            split_count=count,
            lists=lists if del_cache else lists[:20]
        )

    @classmethod
    def capture(cls, urls: List[str]) -> List[Dict[str, str]]:
        """
        Fetch web page contents.

        Args:
            urls (List[str]): URL list.

        Returns:
            List[Dict[str, str]]: One ``{"url", "content"}`` dict per page.

        Raises:
            AppException: If fetching or parsing fails.

        Author:
            zero
        """
        try:
            documents = BeautifulSoupWebReader().load_data(urls)
            return [
                {"url": document.id_, "content": document.text}
                for document in documents
            ]
        except Exception as e:
            raise AppException(str(e))