'''
* This is the project for Brtc LlmOps Platform
* @Author Leon-liao <liaosiliang@alltman.com>
* @Description //TODO 
* @File: indexing_service.py
* @Time: 2025/11/13
* @All Rights Reserved by Brtc
'''
import logging
import re
import uuid
from dataclasses import dataclass
from datetime import datetime
from typing import Any
from uuid import UUID
from injector import inject
from redis import Redis
from sqlalchemy.sql.functions import func
from langchain_core.documents import Document as LCDocument
from pkg.sqlalchemy import SQLAlchemy
from internal.core.file_extractor.file_extractor import FileExtractor
from internal.entity.document_entity import DocumentStatus, SegmentStatus
from internal.model import Document, Segment
from .keyword_table_service import KeywordTableService
from .process_rule_service import ProcessRuleService
from .jieba_service import JiebaService
from .base_service import BaseService
from .embeddings_service import EmbeddingsService
from .vector_database_service import VectorDataBaseService
from internal.lib.helper import generate_text_hash
from ..entity.cache_entity import LOCK_DOCUMENT_UPDATE_ENABLED
from ..exception.exception import NotFoundException


@inject
@dataclass
class IndexingService(BaseService):
    """Knowledge-base document indexing pipeline.

    Drives the full build flow for uploaded documents:
    parsing -> splitting -> keyword indexing -> vector storage,
    updating document/segment status at every stage. All collaborators
    below are injected (``@inject`` + ``@dataclass``).
    """
    db:SQLAlchemy  # relational database session/transaction helper
    redis_client:Redis  # used to release the document-update lock
    file_extractor:FileExtractor  # loads upload files into LangChain documents
    embedding_service:EmbeddingsService  # token counting for splitting/accounting
    process_rule_service:ProcessRuleService  # text cleaning + splitter construction
    jieba_service:JiebaService  # per-segment keyword extraction
    vector_database_service:VectorDataBaseService  # weaviate storage for segments
    keyword_table_service:KeywordTableService  # dataset-level keyword table maintenance


    def _completed(self, document: Document, lc_segments: list[LCDocument]) -> None:
        """Store the document's segments in the vector database and finalize statuses.

        Flags every segment's metadata as enabled, writes the segments to the
        vector store in batches of 10 (marking the matching Segment rows
        COMPLETED as each batch succeeds), then flips the document to COMPLETED.

        :param document: the document being built.
        :param lc_segments: LangChain documents carrying ``node_id`` metadata.
        """
        # 1. Flag every segment as enabled before it reaches the vector store.
        for lc_segment in lc_segments:
            lc_segment.metadata["document_enabled"] = True
            lc_segment.metadata["segment_enabled"] = True

        # 2. Persist in batches of 10 to avoid pushing too much data at once.
        batch_size = 10
        # Bug fix: pre-bind `ids` -- in the original it could be unbound in the
        # `except` path (e.g. empty segment list or failure building the first batch).
        ids: list[Any] = []
        try:
            for start in range(0, len(lc_segments), batch_size):
                chunks = lc_segments[start:start + batch_size]
                ids = [chunk.metadata["node_id"] for chunk in chunks]
                self.vector_database_service.add_documents(chunks, ids=ids)
                with self.db.auto_commit():
                    self.db.session.query(Segment).filter(
                        Segment.node_id.in_(ids)
                    ).update({
                        "status": SegmentStatus.COMPLETED,
                        "completed_at": datetime.now(),
                        "enabled": True
                    })
        except Exception as e:
            logging.exception(f"构建文档 _completed错误， 错误信息:{e}")
            # Mark the batch that failed (last value of `ids`) as errored.
            if ids:
                with self.db.auto_commit():
                    self.db.session.query(Segment).filter(
                        Segment.node_id.in_(ids)
                    ).update({
                        "status": SegmentStatus.ERROR,
                        "completed_at": datetime.now(),
                        "stopped_at": datetime.now(),
                        "enabled": False,
                        "error": str(e)
                    })
            # Bug fix: the original fell through and marked the document
            # COMPLETED even though storage failed; record ERROR instead.
            self.update(
                document,
                status=DocumentStatus.ERROR,
                error=str(e),
                stopped_at=datetime.now()
            )
            return

        # 3. Everything stored successfully -- mark the document completed.
        self.update(
            document,
            status=DocumentStatus.COMPLETED,
            completed_at=datetime.now(),
            enabled=True
        )



    def _indexing(self, document: Document, lc_segments: list[LCDocument]) -> None:
        """Build the keyword index for the given segments.

        Extracts up to 10 keywords per segment, stores them on the Segment row,
        merges them into the dataset's keyword table, and finally stamps the
        document's indexing completion time.

        :param document: the document being built.
        :param lc_segments: segments carrying ``segment_id`` metadata.
        """
        # 1. Load the dataset's keyword table ONCE. (Performance fix: the
        #    original re-fetched and rewrote the whole table for every segment.)
        keyword_table_record = self.keyword_table_service.get_keyword_table_from_dataset_id(document.dataset_id)
        keyword_table = {
            field: set(value) for field, value in keyword_table_record.keyword_table.items()
        }

        for lc_segment in lc_segments:
            # 2. Extract at most 10 keywords for this segment.
            keywords = self.jieba_service.extract_keywords(lc_segment.page_content, 10)
            # 3. Persist the keywords on the segment row (wrapped in
            #    auto_commit for consistency with _completed).
            with self.db.auto_commit():
                self.db.session.query(Segment).filter(
                    Segment.id == lc_segment.metadata["segment_id"]
                ).update({
                    "keywords": keywords,
                    "status": SegmentStatus.INDEXING,
                    "indexing_completed_at": datetime.now(),
                })
            # 4. Merge the segment's keywords into the in-memory table.
            for keyword in keywords:
                keyword_table.setdefault(keyword, set()).add(lc_segment.metadata["segment_id"])

        # 5. Write the merged keyword table back in a single update.
        self.update(
            keyword_table_record,
            keyword_table={field: list(value) for field, value in keyword_table.items()}
        )
        # 6. Stamp the document's indexing completion time.
        self.update(
            document,
            indexing_completed_at=datetime.now(),
        )


    def _splitting(self, document: Document, lc_documents: list[LCDocument]) -> list[LCDocument]:
        """Split the parsed documents into suitably sized segments.

        Cleans each document's text per the document's process rule, splits it,
        persists one Segment row per chunk (position, hash, token counts),
        attaches retrieval metadata to each chunk, and moves the document into
        the INDEXING stage.

        :param document: the document being built (provides the process rule).
        :param lc_documents: the parsed LangChain documents.
        :return: the split segments with metadata attached.
        :raises Exception: re-raised after logging so the caller marks the
            document as errored instead of silently continuing with no segments.
        """
        try:
            # 1. Build the text splitter from the document's process rule.
            process_rule = document.process_rule
            text_splitter = self.process_rule_service.get_text_splitter_by_process_rule(
                process_rule,
                self.embedding_service.calculate_token_count
            )
            # 2. Clean redundant characters according to the process rule.
            for lc_document in lc_documents:
                lc_document.page_content = self.process_rule_service.clean_text_by_process_rule(
                    lc_document.page_content,
                    process_rule
                )
            # 3. Split the documents into segments.
            lc_segments = text_splitter.split_documents(lc_documents)
            # 4. Continue numbering after the document's current max segment position.
            position = self.db.session.query(func.coalesce(func.max(Segment.position), 0)).filter(
                Segment.document_id == document.id
            ).scalar()
            # 5. Persist each chunk as a Segment row and attach its metadata.
            segments = []
            for lc_segment in lc_segments:
                position += 1
                content = lc_segment.page_content
                segment = self.create(
                    Segment,
                    account_id=document.account_id,
                    dataset_id=document.dataset_id,
                    document_id=document.id,
                    node_id=uuid.uuid4(),
                    position=position,
                    content=content,
                    character_count=len(content),
                    token_count=self.embedding_service.calculate_token_count(content),
                    hash=generate_text_hash(content),
                    status=SegmentStatus.WAITING
                )
                lc_segment.metadata = {
                    "account_id": str(document.account_id),
                    "dataset_id": str(document.dataset_id),
                    "document_id": str(document.id),
                    "segment_id": str(segment.id),
                    "node_id": str(segment.node_id),
                    "document_enabled": False,
                    "segment_enabled": False
                }
                segments.append(segment)
            # 6. Move the document into the indexing stage.
            self.update(
                document,
                token_count=sum(segment.token_count for segment in segments),
                # Bug fix: this is a *document* status; the original used
                # SegmentStatus.INDEXING here.
                status=DocumentStatus.INDEXING,
                splitting_completed_at=datetime.now(),
            )
            return lc_segments
        except Exception as e:
            # logging.exception keeps the traceback (original used logging.error).
            logging.exception(f"_splitting 出现异常:{e}")
            # Bug fix: propagate instead of returning [] -- the silent empty
            # list previously let the pipeline mark an empty document COMPLETED.
            raise


    def _parsing(self, document: Document) -> list[LCDocument]:
        """Parse the document's uploaded file into a list of LangChain documents.

        Loads the upload file, strips extraneous whitespace/control characters
        from every page, records the character count and advances the document
        to the SPLITTING stage.
        """
        # 1. Load the upload file as LangChain documents.
        upload_file = document.upload_file
        lc_documents = self.file_extractor.load(upload_file, False, True)
        # 2. Clean each page's text in place.
        total_characters = 0
        for lc_document in lc_documents:
            cleaned = self._clean_extra_text(lc_document.page_content)
            lc_document.page_content = cleaned
            total_characters += len(cleaned)
        # 3. Record the character count and move the document to SPLITTING.
        self.update(
            document,
            character_count=total_characters,
            status=DocumentStatus.SPLITTING,
            parsing_completed_at=datetime.now()
        )
        return lc_documents


    def build_documents(self, document_ids: list[UUID]) -> None:
        """Build the knowledge-base documents for the given ids.

        Runs the full pipeline for each document -- load, split, index, store --
        advancing the document's status at every stage. A failure on one
        document is logged and recorded on that document (status ERROR) without
        aborting the remaining documents.

        :param document_ids: ids of the documents to build.
        """
        # 1. Fetch every document referenced by the id list.
        documents = self.db.session.query(Document).filter(
            Document.id.in_(document_ids)
        ).all()

        # 2. Run the pipeline for each document independently.
        for document in documents:
            try:
                # 3. Mark the document as PARSING and record the start time.
                self.update(
                    document,
                    status=DocumentStatus.PARSING,
                    processing_started_at=datetime.now()
                )
                # 4. Load the document contents.
                lc_documents = self._parsing(document)
                # 5. Split into segments.
                lc_segments = self._splitting(document, lc_documents)
                # 6. Build the keyword index.
                self._indexing(document, lc_segments)
                # 7. Store in the vector database and finalize.
                self._completed(document, lc_segments)
            except Exception as e:
                # Bug fix: the original passed the SET {"error", e} where the
                # %-style mapping needs a dict, breaking the log formatting.
                logging.exception("构建文档发生错误, 错误信息如下:%(error)s", {"error": e})
                self.update(
                    document,
                    status=DocumentStatus.ERROR,
                    error=str(e),
                    stopped_at=datetime.now()
                )


    def update_document_enabled(self, document_id: UUID) -> None:
        """Sync a document's ``enabled`` flag to the vector store and keyword table.

        Propagates the flag to every completed segment's weaviate record, then
        adds or removes the segments' keywords from the dataset keyword table.
        On failure the document's flag is rolled back; the update lock is
        always released.

        :param document_id: id of the document whose ``enabled`` flag changed.
        :raises NotFoundException: when the document does not exist.
        """
        # 1. Cache key of the update lock (acquired by the caller; released in finally).
        cache_key = LOCK_DOCUMENT_UPDATE_ENABLED.format(document_id=document_id)
        # 2. Load the document record.
        document = self.get(Document, document_id)
        if document is None:
            logging.exception(f"当前文档不存在文档id:{document_id}")
            raise NotFoundException("当前文档不存在！！")
        # 3. Collect all COMPLETED segments belonging to the document.
        segments = self.db.session.query(Segment).with_entities(Segment.id, Segment.node_id, Segment.enabled).filter(
            Segment.document_id == document_id,
            Segment.status == SegmentStatus.COMPLETED,
        ).all()
        segment_ids = [segment_id for segment_id, _, _ in segments]
        node_ids = [node_id for _, node_id, _ in segments]
        try:
            # 4. Propagate the flag to every vector record.
            collection = self.vector_database_service.collection
            for node_id in node_ids:
                try:
                    collection.data.update(
                        uuid=node_id,
                        properties={
                            "document_enabled": document.enabled,
                        }
                    )
                except Exception as e:
                    # Bug fix: original used ``self.db.sesion`` (AttributeError)
                    # and filtered ``Segment.id == node_id`` with a *node* id.
                    with self.db.auto_commit():
                        self.db.session.query(Segment).filter(
                            Segment.node_id == node_id
                        ).update({
                            "error": str(e),
                            "status": SegmentStatus.ERROR,
                            "enabled": False,
                            "disabled_at": datetime.now(),
                            "stopped_at": datetime.now(),
                        })
            # 5. Keep the keyword table in sync with the new flag.
            if document.enabled is True:
                # 6. Disabled -> enabled: add keywords for the still-enabled segments.
                enabled_segment_ids = [segment_id for segment_id, _, enabled in segments if enabled is True]
                self.keyword_table_service.add_keyword_table_from_ids(document.dataset_id, enabled_segment_ids)
            else:
                self.keyword_table_service.delete_keyword_table_from_ids(document.dataset_id, segment_ids)
        except Exception as e:
            # 7. Log and roll the enabled flag back to its previous value.
            logging.exception(f"修改文档启用状态失败{str(e)}")
            origin_enable = not document.enabled
            self.update(
                document,
                enabled=origin_enable,
                disabled_at=None if origin_enable else datetime.now(),
            )
        finally:
            # Always release the update lock.
            self.redis_client.delete(cache_key)



    @classmethod
    def _clean_extra_text(cls, text: str) -> str:
        """Strip extraneous control/marker characters from extracted text.

        ``<|`` / ``|>`` delimiters are reduced to plain ``<`` / ``>``; ASCII
        control characters (except \\t, \\n, \\r) and DEL are removed; the
        noncharacter U+FFFE and the zero-width no-break space U+FEFF (BOM)
        are dropped.
        """
        text = re.sub(r'<\|', '<', text)
        text = re.sub(r'\|>', '>', text)
        # Bug fix: the original class also contained \xEF\xBF\xBE -- the UTF-8
        # *bytes* of U+FFFE written as three separate characters -- which
        # deleted the legitimate characters 'ï', '¿' and '¾' from content.
        text = re.sub(r'[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]', '', text)
        # Remove the U+FFFE noncharacter and the U+FEFF BOM/zero-width no-break space.
        text = re.sub(r'[\uFFFE\uFEFF]', '', text)
        return text

















