import asyncio
import shutil
import tempfile
from datetime import datetime
from pathlib import Path
from typing import List, Optional

from llama_index.core import Document
from llama_index.core.schema import BaseNode
from loguru import logger

from ai_platform.config.resource import get_minio_service, get_vector_store, get_storage_context, get_vector_index, \
    get_property_graph_index, get_knowledge_service, get_tree_index
from ai_platform.models.wiki_document import WikiDocument
from .loader import DataLoader
from .transformer import DataTransformer


class FileDataPipeline:
    """Ingestion pipeline: loads files (from MinIO) and wiki pages, transforms
    them into nodes, and writes the nodes into the configured vector /
    property-graph / tree index.
    """

    def __init__(self, collection_name: str = "default_collection"):
        """Initialize the pipeline and resolve all shared services.

        Args:
            collection_name: logical collection this pipeline writes into.
        """
        self.collection_name = collection_name
        self.loader = DataLoader()
        self.minio_service = get_minio_service()
        self.knowledge_service = get_knowledge_service()
        self.vector_db = get_vector_store()
        self.storage_context = get_storage_context()
        self.index = get_vector_index()
        self.property_graph_index = get_property_graph_index()
        self.tree_index = get_tree_index()
        # Strong references to fire-and-forget insert tasks; the event loop
        # only keeps weak references, so an unreferenced task can be
        # garbage-collected before it finishes (see asyncio.create_task docs).
        self._background_tasks: set = set()

    async def process_minio_file(self,
        knowledge_base_id: str,
        object_name: str,
        bucket_name: Optional[str] = None):
        """Process a single MinIO object end to end: download, load,
        transform, and index its content in the background.

        Args:
            knowledge_base_id: knowledge base the resulting nodes belong to.
            object_name: object key inside the MinIO bucket.
            bucket_name: source bucket; service default is used when None.

        Errors are logged (and the file marked failed) rather than re-raised.
        """
        start_time = datetime.now()
        temp_dir = None

        try:
            logger.info(f"开始处理MinIO文件: {object_name}")

            # TODO(review): file-status tracking is currently disabled; the
            # "status updated" log lines below do not reflect a real DB write
            # until update_knowledge_base_file_status is re-enabled.
            # await self.knowledge_service.update_knowledge_base_file_status(
            #     kb_id=knowledge_base_id,
            #     object_name=object_name,
            #     status="processing"
            # )
            logger.info(f"文件状态已更新为processing: {object_name}")

            logger.info("从MinIO下载文件")
            temp_dir = await self._download_from_minio(bucket_name, object_name)

            logger.info("从临时文件夹加载文档内容")
            documents = await self.loader.load_from_dir(Path(temp_dir))
            logger.info(f"加载完成，文档数量: {len(documents)}")

            if not documents:
                raise ValueError("未能从文件中提取任何文档内容")

            base_setting = await self.knowledge_service.get_knowledge_base_settings(knowledge_base_id)
            transformer = DataTransformer(base_setting)
            pipeline = transformer.create_pipeline(documents)
            nodes = await pipeline.arun(documents)

            # Tag every node with the owning knowledge base.
            for node in nodes:
                node.metadata["knowledge_base_id"] = knowledge_base_id

            # Bug fix: schedule ONE insert for the whole batch. This call
            # previously sat inside the loop above, scheduling an insert of
            # the FULL node list once per node — duplicating every document
            # len(nodes) times. process_wiki_data already used the correct
            # tag-then-insert-once pattern.
            self.insert_nodes_background(nodes, base_setting.index_type)

            # TODO(review): re-enable together with the "processing" update above.
            # status_updated = await self.knowledge_service.update_knowledge_base_file_status(
            #     kb_id=knowledge_base_id,
            #     object_name=object_name,
            #     status="completed"
            # )
            # if status_updated:
            #     logger.info(f"文件状态已更新为completed: {object_name}")
            # else:
            #     logger.warning(f"文件状态更新失败: {object_name}")

            processing_time = (datetime.now() - start_time).total_seconds()
            logger.info(f"MinIO文件处理完成: {object_name}, 耗时: {processing_time:.2f}秒")
        except Exception as e:
            logger.exception(f"MinIO文件处理失败: {e}")
            # On failure, mark the file as failed (currently log-only).
            try:
                # await self.knowledge_service.update_knowledge_base_file_status(
                #     kb_id=knowledge_base_id,
                #     object_name=object_name,
                #     status="failed"
                # )
                logger.info(f"文件状态已更新为failed: {object_name}")
            except Exception as status_error:
                logger.exception(f"更新失败状态时出错: {status_error}")
        finally:
            # Always remove the download directory, even on failure.
            if temp_dir and Path(temp_dir).exists():
                shutil.rmtree(temp_dir, ignore_errors=True)

    async def _download_from_minio(self, bucket_name: str, object_name: str) -> str:
        """Download one MinIO object into a fresh temporary directory.

        Args:
            bucket_name: source bucket.
            object_name: object key; its base name is kept so loaders can
                detect the format from the file extension.

        Returns:
            Path of the temporary directory containing the downloaded file.
            The caller owns the directory and is responsible for cleanup.

        Raises:
            ValueError: when the object cannot be read from MinIO.
        """
        try:
            temp_dir = tempfile.mkdtemp(prefix="minio_pipeline_")

            file_data = await self.minio_service.read_file(bucket_name, object_name)
            if not file_data:
                raise ValueError(f"无法从MinIO读取文件: {object_name}")

            # Preserve the original file name inside the temp directory.
            file_name = Path(object_name).name
            temp_file_path = Path(temp_dir) / file_name
            with open(temp_file_path, 'wb') as f:
                f.write(file_data)

            logger.info(f"文件下载到临时目录: {temp_file_path}")
            return str(temp_dir)

        except Exception as e:
            logger.exception(f"从MinIO下载文件失败: {e}")
            raise

    async def _load_documents(self, temp_dir: str) -> List[Document]:
        """Load all documents found under *temp_dir* via the configured loader.

        Raises:
            Exception: re-raises whatever the loader raises, after logging.
        """
        try:
            return await self.loader.load_from_dir(Path(temp_dir))
        except Exception as e:
            logger.exception(f"加载文档失败: {e}")
            raise

    async def _store_to_vector_db(self, nodes: List[BaseNode], index_type: str):
        """Insert *nodes* into the index selected by *index_type*.

        Mapping: "knowledge_graph" -> property graph index,
        "long_document" -> tree index, anything else -> plain vector index.
        Empty input is a no-op (logged as a warning).
        """
        try:
            logger.info(f"开始写入文档，文档数据：{len(nodes)}")
            if not nodes:
                logger.warning("没有文档需要存储")
                return
            if index_type == "knowledge_graph":
                await self.property_graph_index.ainsert_nodes(nodes)
            elif index_type == "long_document":
                await self.tree_index.ainsert_nodes(nodes)
            else:
                # Regular vector index is the default target.
                await self.index.ainsert_nodes(nodes)
            logger.info(f"向现有索引添加 {len(nodes)} 个文档，索引类型: {index_type}")
        except Exception as e:
            logger.exception(f"存储到向量数据库失败: {e}")
            raise

    def _schedule_insert(self, nodes: List[BaseNode], index_type: str):
        """Schedule a background insert, keeping a strong task reference.

        Without the reference the event loop may garbage-collect the task
        before it completes; the done-callback drops it once finished.
        """
        # Lazily create the set so instances built without __init__ still work.
        tasks = getattr(self, "_background_tasks", None)
        if tasks is None:
            tasks = self._background_tasks = set()
        task = asyncio.create_task(self._store_to_vector_db(nodes, index_type))
        tasks.add(task)
        task.add_done_callback(tasks.discard)

    def insert_nodes_background(self, nodes: List[BaseNode], index_type: str):
        """Insert file nodes asynchronously without blocking the caller."""
        self._schedule_insert(nodes, index_type)

    def insert_wiki_data_background(self, nodes: List[BaseNode], index_type: str):
        """Insert wiki nodes asynchronously without blocking the caller."""
        self._schedule_insert(nodes, index_type)

    async def _filter_updated_wiki_documents(self, documents: List[Document], knowledge_base_id: str) -> List[Document]:
        """Return the subset of *documents* that is new or has changed.

        A document is kept when the vector store has no row for its wiki page
        id, or when the stored ``updated_at`` differs from the document's.
        Documents missing a page id or timestamp are always kept (cannot dedupe).

        Args:
            documents: all loaded wiki documents.
            knowledge_base_id: knowledge base to check against.

        Returns:
            Documents that must be (re-)indexed. On any error ALL documents
            are returned (fail open) so updates are never silently dropped.
        """
        try:
            from sqlalchemy import text
            from sqlalchemy.ext.asyncio import AsyncSession

            engine = await self.knowledge_service._get_engine()
            documents_to_update = []

            async with AsyncSession(engine) as session:
                for doc in documents:
                    # Wiki page id and last-modified timestamp from metadata.
                    wiki_page_id = doc.metadata.get('doc_id') or doc.metadata.get('page_id')
                    doc_updated_at = doc.metadata.get('updated_at')

                    if not wiki_page_id or not doc_updated_at:
                        # No id/timestamp: cannot compare, so re-index.
                        documents_to_update.append(doc)
                        continue

                    # Latest stored timestamp for this page within this KB.
                    query_sql = text("""
                        SELECT metadata_->>'updated_at' as stored_updated_at
                        FROM data_data_vector_store 
                        WHERE metadata_->>'knowledge_base_id' = :kb_id 
                        AND (metadata_->>'doc_id' = :page_id OR metadata_->>'page_id' = :page_id)
                        ORDER BY id DESC
                        LIMIT 1
                    """)

                    result = await session.execute(query_sql, {
                        'kb_id': knowledge_base_id,
                        'page_id': wiki_page_id
                    })
                    stored_row = result.fetchone()

                    if not stored_row:
                        # Page not present yet: needs to be added.
                        logger.info(f"新wiki页面需要添加: {wiki_page_id}")
                        documents_to_update.append(doc)
                    else:
                        stored_updated_at = stored_row[0]
                        # String comparison, since the two timestamps may be
                        # stored/carried in different types.
                        if str(doc_updated_at) != str(stored_updated_at):
                            logger.info(f"wiki页面需要更新: {wiki_page_id}, 存储时间: {stored_updated_at}, 文档时间: {doc_updated_at}")
                            documents_to_update.append(doc)
                        else:
                            logger.debug(f"wiki页面无需更新: {wiki_page_id}")

            return documents_to_update

        except Exception as e:
            logger.exception(f"过滤wiki文档时出错: {e}")
            # Fail open: resync everything rather than lose updates.
            return documents

    async def _remove_old_wiki_nodes(self, documents: List[Document], knowledge_base_id: str):
        """Delete existing vector-store rows for the given wiki documents.

        Deletion failures are logged only (not re-raised) so that the
        subsequent fresh insert can still proceed.

        Args:
            documents: documents about to be re-indexed.
            knowledge_base_id: knowledge base whose rows are affected.
        """
        try:
            from sqlalchemy import text
            from sqlalchemy.ext.asyncio import AsyncSession

            engine = await self.knowledge_service._get_engine()

            async with AsyncSession(engine) as session:
                for doc in documents:
                    wiki_page_id = doc.metadata.get('doc_id') or doc.metadata.get('page_id')
                    if not wiki_page_id:
                        continue

                    # Remove every stored node belonging to this wiki page.
                    delete_sql = text("""
                        DELETE FROM data_data_vector_store 
                        WHERE metadata_->>'knowledge_base_id' = :kb_id 
                        AND (metadata_->>'doc_id' = :page_id OR metadata_->>'page_id' = :page_id)
                    """)

                    result = await session.execute(delete_sql, {
                        'kb_id': knowledge_base_id,
                        'page_id': wiki_page_id
                    })

                    deleted_count = result.rowcount
                    if deleted_count > 0:
                        logger.info(f"删除wiki页面旧数据: {wiki_page_id}, 删除节点数: {deleted_count}")

                await session.commit()

        except Exception as e:
            logger.exception(f"删除旧wiki节点时出错: {e}")
            # Deliberately swallowed: see docstring.

    async def process_wiki_data(self,
        knowledge_base_id: str,
        wiki_dir_path: str):
        """Synchronize a directory of wiki pages into the vector store.

        Only pages that are new or changed (per
        :meth:`_filter_updated_wiki_documents`) are transformed and
        re-inserted; their stale rows are removed first.

        Args:
            knowledge_base_id: target knowledge base.
            wiki_dir_path: directory containing exported wiki page data.

        Raises:
            ValueError: when the directory is missing or yields no documents.
        """
        start_time = datetime.now()

        try:
            logger.info(f"开始处理wiki数据目录: {wiki_dir_path}")

            wiki_path = Path(wiki_dir_path)
            if not wiki_path.exists():
                raise ValueError(f"wiki目录不存在: {wiki_dir_path}")

            logger.info("从wiki目录加载文档内容")
            documents = await self.loader.load_from_wiki_page_info(wiki_path)
            logger.info(f"加载完成，文档数量: {len(documents)}")

            if not documents:
                raise ValueError("未能从wiki目录中提取任何文档内容")

            # Keep only pages that are new or whose timestamp changed.
            documents_to_update = await self._filter_updated_wiki_documents(documents, knowledge_base_id)

            if not documents_to_update:
                logger.info("没有需要更新的wiki文档")
                processing_time = (datetime.now() - start_time).total_seconds()
                logger.info(f"wiki数据检查完成，无需更新，耗时: {processing_time:.2f}秒")
                return

            logger.info(f"需要更新的文档数量: {len(documents_to_update)}")

            base_setting = await self.knowledge_service.get_knowledge_base_settings(knowledge_base_id)
            transformer = DataTransformer(base_setting)
            pipeline = transformer.create_pipeline(documents_to_update)
            nodes = await pipeline.arun(documents_to_update)

            # Tag every node with the owning knowledge base.
            for node in nodes:
                node.metadata["knowledge_base_id"] = knowledge_base_id

            # Remove stale rows before inserting the fresh ones.
            await self._remove_old_wiki_nodes(documents_to_update, knowledge_base_id)

            self.insert_wiki_data_background(nodes, base_setting.index_type)

            processing_time = (datetime.now() - start_time).total_seconds()
            logger.info(f"wiki数据处理完成: {wiki_dir_path}, 更新文档数: {len(documents_to_update)}, 耗时: {processing_time:.2f}秒")

        except Exception as e:
            logger.exception(f"wiki数据处理失败: {e}")
            raise

    async def process_wiki_document(self, wiki_doc: WikiDocument, knowledge_base_id: str):
        """Index a single WikiDocument, replacing any previously stored version.

        Args:
            wiki_doc: document to index; skipped when its content is empty.
            knowledge_base_id: target knowledge base.

        Raises:
            Exception: re-raises processing errors after logging them.
        """
        start_time = datetime.now()

        try:
            logger.info(f"开始处理WikiDocument: {wiki_doc.doc_id}")

            if not wiki_doc.content:
                logger.warning(f"WikiDocument内容为空，跳过处理: {wiki_doc.doc_id}")
                return

            # Wrap the wiki payload as a llama_index Document.
            document = Document(
                text=wiki_doc.content,
                metadata={
                    "doc_id": wiki_doc.doc_id,
                    "title": wiki_doc.title,
                    "doc_type": wiki_doc.doc_type,
                    "url": wiki_doc.url,
                    "updated_at": wiki_doc.updated_at,
                    "author": wiki_doc.author,
                    "knowledge_base_id": knowledge_base_id,
                    **wiki_doc.metadata  # merge any extra metadata
                }
            )

            # Drop the previously stored version of this page, if any.
            await self._remove_old_wiki_nodes([document], knowledge_base_id)

            base_setting = await self.knowledge_service.get_knowledge_base_settings(knowledge_base_id)
            transformer = DataTransformer(base_setting)
            pipeline = transformer.create_pipeline([document])
            nodes = await pipeline.arun([document])

            # Tag every node with the owning knowledge base.
            for node in nodes:
                node.metadata["knowledge_base_id"] = knowledge_base_id

            self.insert_wiki_data_background(nodes, base_setting.index_type)

            processing_time = (datetime.now() - start_time).total_seconds()
            logger.info(f"WikiDocument处理完成: {wiki_doc.doc_id}, 节点数: {len(nodes)}, 耗时: {processing_time:.2f}秒")

        except Exception as e:
            logger.exception(f"WikiDocument处理失败: {e}")
            raise
