# @Author: Tim Liu
# @Date: 2024-05-29
# @Last Modified by: Cursor
# @Last Modified time: 2025-01-04

# @Description: api endpoints for Document Ingestion

from fastapi import APIRouter, Depends, Security, BackgroundTasks
from langchain_milvus import Zilliz
from sqlalchemy.ext.asyncio import AsyncSession
from datetime import datetime
import uuid

from utils.response import SuccessResponse

from core.database import db_getter
from core.exception import CustomException

from crewplus.helper.verify import get_api_key
from crewplus.apps.apikey.models.account_api_key import AccountApiKey
from crewplus.apps.rag.cruds.knowledge_base import KnowledgeBaseDal
from crewplus.apps.rag.models.knowledge_base import KnowledgeBaseDB
from crewplus.apps.rag.models.task import TaskDB, TaskStatus
from crewplus.apps.rag.models.document import DocumentDB

from crewplus.apps.rag.cruds.task import TaskDal
from crewplus.apps.rag.schemas.task import TaskSimpleOut

from crewplus.apps.rag.schemas.ingest_request import IngestRequest
from crewplus.apps.rag.schemas.source_type import SourceType
from crewplus.apps.rag.graph.knowledge_graph_builder import KnowledgeGraphBuilder

from crewplus.services.vdb_service import VDBService
from crewplus.services.graph_engines.neo4j_engine import Neo4jEngine

from langchain_community.vectorstores import Milvus

from .dispatcher_registry import dispatcher_registry, default_dispatcher

from config.settings import *
import logging

from . import cruds
from .processors.sharepoint_processor import SharepointDocumentProcessor

from .schemas.ingest_status import IngestStatus
from .cruds.ontology import OntologyDal
from .graph.schema_manager import SchemaManager

app = APIRouter()

async def setup_ingestion(message: IngestRequest, apikey: AccountApiKey, db: AsyncSession):
    """Resolve the target knowledge base and return a dispatcher bound to its vector store.

    The knowledge base is looked up by ``message.kbase_id`` when given,
    otherwise by ``message.kbase_name``. On success both ``message.kbase_id``
    and ``message.kbase_name`` are filled in, so later steps (e.g. the graph
    builder) can rely on either field being present.

    Args:
        message: The ingestion request carrying the knowledge-base reference.
        apikey: The authenticated account; used to select a per-user dispatcher.
        db: Async database session.

    Returns:
        The document-processor dispatcher with the vector store and embeddings bound.

    Raises:
        CustomException: If the knowledge base cannot be resolved or no
            dispatcher is available.
    """
    logging.info("setup_ingestion----start")
    logging.info("message:%s", message)

    if message.kbase_id:
        kbase: KnowledgeBaseDB = await KnowledgeBaseDal(db).get_data(id=message.kbase_id)
        if kbase is None:
            raise CustomException("Invalid knowledge base id", code=IngestStatus.ERROR.value)
    elif message.kbase_name:
        kbase = await KnowledgeBaseDal(db).get_data(name=message.kbase_name)
        if kbase is None:
            raise CustomException("Knowledge base name does not exist", code=IngestStatus.ERROR.value)
        message.kbase_id = kbase.id
    else:
        raise CustomException("Empty knowledge base name, please give a proper name for knowledge base", code=IngestStatus.ERROR.value)

    collection_name = kbase.name
    # Back-fill the resolved name so downstream code that reads
    # message.kbase_name (e.g. the graph-building step) sees the real
    # collection name even when the caller only supplied kbase_id.
    message.kbase_name = kbase.name

    # Get embeddings model and vector store for the resolved collection.
    logging.info("setup_ingestion----get_vector_store start")
    vdbsrv = VDBService()
    embeddings = vdbsrv.get_embeddings()
    milvus_store: Zilliz = vdbsrv.get_vector_store(collection_name, embeddings)
    logging.info("setup_ingestion----get_vector_store end")

    # Prefer a user-specific dispatcher, falling back to the shared default.
    dispatcher = dispatcher_registry.get(apikey.user_id, default_dispatcher)
    if not dispatcher:
        raise CustomException("Document processor dispatcher not found", code=IngestStatus.ERROR.value)

    # Bind the vector store and embeddings so processors can write chunks.
    dispatcher.bind_vstore(milvus_store, embeddings)

    return dispatcher

async def upsert_graph_and_update_status(document: DocumentDB, message: IngestRequest, apikey: AccountApiKey, db: AsyncSession) -> DocumentDB:
    """Push *document* into the knowledge graph and mark it GRAPH_BUILT.

    Looks up the user's ontology, upserts the document into the Neo4j-backed
    knowledge graph, then persists the new ingestion status on the document.

    Args:
        document: The already-persisted document to add to the graph.
        message: The original ingestion request (provides the collection name).
        apikey: The authenticated account; the graph is scoped per user.
        db: Async database session.

    Returns:
        The document with ``ingestion_status`` set to ``GRAPH_BUILT``.
    """
    # The user's ontology (if any) drives the graph schema.
    ontology = await OntologyDal(db).get_data(create_user_id=apikey.user_id)

    # Resolve the vector-store collection name. Fall back to a kbase_id
    # lookup: callers that supplied only kbase_id may leave kbase_name empty,
    # and passing None to get_vector_store would target the wrong collection.
    collection_name = message.kbase_name
    if not collection_name and message.kbase_id:
        kbase: KnowledgeBaseDB = await KnowledgeBaseDal(db).get_data(id=message.kbase_id)
        if kbase is not None:
            collection_name = kbase.name

    # Graph engine for this deployment's Neo4j instance.
    neo4j_engine = Neo4jEngine(
        url=NEO4J_GRAPH_ENDPOINT,
        user=NEO4J_USER,
        password=NEO4J_PASSWORD
    )

    # Create KnowledgeGraphBuilder instance and register the engine per user.
    graph_builder = KnowledgeGraphBuilder(db=db)
    graph_builder.register_graph_instance(user_id=apikey.user_id, graph_instance=neo4j_engine)

    # Bind embedder and vector store so graph nodes can be vector-indexed.
    vdbsrv = VDBService()
    embeddings = vdbsrv.get_embeddings()
    graph_builder.bind_embedder(embeddings)
    milvus_store: Zilliz = vdbsrv.get_vector_store(collection_name=collection_name, embeddings=embeddings)
    graph_builder.bind_vstore(user_id=apikey.user_id, vector_store=milvus_store)

    logging.info("upsert_graph start for document id=%s", getattr(document, "id", None))
    # Upsert the graph with the new document.
    await graph_builder.upsert_graph(
        user_id=apikey.user_id,
        message=message,
        document=document,
        ontology_name=ontology.name if ontology else None
        # schema_content=SchemaManager.HAZARD_SUBSTANCE  # pass an explicit schema if needed
    )
    logging.info("upsert_graph end for document id=%s", getattr(document, "id", None))

    # Persist the status change so clients polling the document see GRAPH_BUILT.
    document.ingestion_status = IngestStatus.GRAPH_BUILT.value
    await cruds.DocumentDal(db).put_data(document.id, document)
    return document

@app.post("/ingest_document",
          summary="Gathering knowledge from your data sources. The ingest_document endpoint is designed to convert raw data to vectors.",
          description="The function synchronously returns the document ingestion result with ingestion status")
async def ingest_document(message: IngestRequest, apikey: AccountApiKey = Security(get_api_key), db: AsyncSession = Depends(db_getter)):
    """Gathering knowledge from your data sources. The ingest_document endpoint is designed to convert raw data to vectors.

    Args:
        message (IngestRequest): the document to be ingested, for example,
        {
            "type": "html",
            "url": "https://docs.smith.langchain.com/overview",
            "kbase_id": "124"
            "api-key": "YOUR_API_KEY"
        }
        apikey (optional): api-key shall be filled, or x-api-key shall be filled in header.

    Returns:
        SuccessResponse wrapping the ingested document (status INGESTED, or
        GRAPH_BUILT when the graph step ran).

    Raises:
        CustomException: invalid api key, or any failure during ingestion.
    """
    logging.info("ingesting document ---- %s", str(message))

    # Reject unauthenticated calls early.
    if apikey is None:
        raise CustomException("invalid api key", code=IngestStatus.ERROR.value)

    document = None  # set once dispatch succeeds; inspected by the error handler

    try:
        dispatcher = await setup_ingestion(message, apikey, db)

        # Process the raw source into a DocumentDB instance.
        document = await dispatcher.dispatch(message)
        document.create_user_id = apikey.user_id
        document.ingestion_status = IngestStatus.INGESTED.value
        logging.info("original document:%s", document)

        # Save the processed document.
        saved_document = await cruds.DocumentDal(db).create_data(data=document)
        logging.info("saved document:%s", saved_document)

        # Build the knowledge graph unless the caller asked for vector-only
        # ingestion.
        if not message.vector_only:
            updated_document = await upsert_graph_and_update_status(saved_document, message, apikey, db)
        else:
            updated_document = saved_document

        return SuccessResponse(updated_document)
    except Exception as e:
        logging.error("Error during ingestion: %s", e)

        # Mark the document as failed, but only if it was already persisted —
        # put_data on an unsaved document (id is None) would itself raise and
        # mask the original error.
        if document is not None and getattr(document, "id", None) is not None:
            document.ingestion_status = IngestStatus.ERROR.value
            await cruds.DocumentDal(db).put_data(document.id, document)

        raise CustomException("An error occurred during ingestion", code=IngestStatus.ERROR.value) from e

# TODO: design safe custom processor registration process. e.g. cloud functions, sandbox, resource ACL validated
# @app.post("/register_processor",
#           summary="Register a custom document processor for an account.",
#           description="Allows an account to register a custom document processor.")
# async def register_processor(file_type: str, source_type: str, processor_class: str, apikey: AccountApiKey = Security(get_api_key)):
#     """Register a custom document processor for the account associated with the API key."""
#     if apikey.user_id not in dispatcher_registry:
#         dispatcher_registry[apikey.user_id] = DocumentProcessorDispatcher()
#         # Initialize default processors for the new account dispatcher
#         initialize_default_processors(dispatcher_registry[apikey.user_id])

#     dispatcher = dispatcher_registry[apikey.user_id]

#     # Dynamically create an instance of the custom processor class
#     processor_instance = globals()[processor_class]()  # Assuming processor_class is the class name as a string
#     dispatcher.register(file_type, source_type, processor_instance)
#     return {"message": "Custom document processor registered successfully."}

@app.post("/aingest_document",
          summary="Asynchronously gather knowledge from your data sources. The aingest_document endpoint is designed to convert raw data to vectors.",
          description="The function returns a task ID upon an ingestion started. You can use this task_id to check for the ingestion status on the ingestion_status endpoint.")
async def aingest_document(message: IngestRequest, background_tasks: BackgroundTasks, apikey: AccountApiKey = Security(get_api_key), db: AsyncSession = Depends(db_getter)):
    """Asynchronously gather knowledge from your data sources. The aingest_document endpoint is designed to convert raw data to vectors."""

    logging.info("Starting async ingestion for document ---- %s", str(message))

    # Reject unauthenticated calls before creating any task state.
    if apikey is None:
        raise CustomException("invalid api key", code=IngestStatus.ERROR.value)

    # Record a PENDING task row that the client can poll via /task_status.
    task_id = str(uuid.uuid4())
    db.add(TaskDB(id=task_id, status=TaskStatus.PENDING, create_user_id=apikey.user_id))
    await db.commit()

    # Hand the actual ingestion work off to a background task.
    background_tasks.add_task(process_ingestion, message, apikey, db, task_id)

    return {"task_id": task_id}

async def process_ingestion(message: IngestRequest, apikey: AccountApiKey, db: AsyncSession, task_id: str):
    """Run one document ingestion in the background, tracked on a TaskDB row.

    Flow: mark the task IN_PROGRESS -> dispatch the document to the processor
    -> persist it (linked to the task) -> optionally build the knowledge graph
    -> mark the task COMPLETED. Any failure marks the document (when already
    persisted) and the task as failed.

    Args:
        message: The ingestion request to process.
        apikey: The authenticated account the work runs under.
        db: Async database session dedicated to this background task.
        task_id: ID of the pre-created TaskDB row to update.
    """
    logging.info("process_ingestion start, task_id=%s", task_id)
    task = await db.get(TaskDB, task_id)
    if task is None:
        # No task row to track the run against; bail out instead of crashing
        # with an AttributeError further down (both success and error paths
        # write to `task`).
        logging.error("process_ingestion: task %s not found", task_id)
        return

    document = None  # set once dispatch succeeds; inspected by the error handler
    try:
        # Mark the task as started.
        task.status = TaskStatus.IN_PROGRESS
        task.start_time = datetime.now()
        await db.commit()

        # TODO: refactor to pure dispatcher design pattern
        dispatcher = await setup_ingestion(message, apikey, db)

        # Process the raw source into a DocumentDB instance.
        document = await dispatcher.dispatch(message, message.kbase_name)
        document.create_user_id = apikey.user_id
        document.ingestion_status = IngestStatus.INGESTED.value
        # Link the document to this task BEFORE saving, so /task_documents can
        # find it even on the vector_only path (which never re-saves the doc).
        document.task_id = task_id

        # DocumentDal.create_data returns a plain dict rather than a
        # DocumentDB, so rebuild a model instance for the graph step.
        saved_doc = await cruds.DocumentDal(db).create_data(data=document)
        await db.commit()
        saved_document = DocumentDB()
        for key, value in saved_doc.items():
            setattr(saved_document, key, value)
        saved_document.task_id = task_id
        logging.info("saved_document: %s", saved_document)

        # Build the knowledge graph unless the caller asked for vector-only
        # ingestion.
        if not message.vector_only:
            await upsert_graph_and_update_status(saved_document, message, apikey, db)

        # Mark the task as finished.
        task.status = TaskStatus.COMPLETED
        task.end_time = datetime.now()
        await db.commit()
    except Exception as e:
        logging.error("Error during async ingestion: %s", e)
        # Mark the document as failed, but only if it was already persisted —
        # put_data on an unsaved document (id is None) would itself raise and
        # mask the original error.
        if document is not None and getattr(document, "id", None) is not None:
            document.ingestion_status = IngestStatus.ERROR.value
            await cruds.DocumentDal(db).put_data(document.id, document)

        # Record the failure on the task so clients polling task_status see it.
        task.status = TaskStatus.FAILED
        task.end_time = datetime.now()
        await db.commit()
        raise CustomException("An error occurred during async ingestion", code=IngestStatus.ERROR.value) from e

@app.get("/task_status/{task_id}",
         summary="Check the status of a task.",
         description="Returns the status of the task with the given task ID.")
async def task_status(task_id: str, apikey: AccountApiKey = Security(get_api_key), db: AsyncSession = Depends(db_getter)):
    """Look up the current status of a background ingestion task.

    Args:
        task_id (str): The ID of the task to check.
        apikey (AccountApiKey): The API key for authentication.

    Returns:
        dict: The task record serialized through ``TaskSimpleOut``.
    """
    # Authentication guard.
    if apikey is None:
        raise CustomException("Invalid API key", code=IngestStatus.ERROR.value)

    record = await TaskDal(db).get_data(task_id, v_schema=TaskSimpleOut)
    if record:
        return SuccessResponse(record)
    raise CustomException("Task not found", code=IngestStatus.ERROR.value)

@app.get("/task_documents/{task_id}",
         summary="Get documents and their statuses for a task.",
         description="Returns all documents and their ingestion statuses for the given task ID.")
async def task_documents(task_id: str, apikey: AccountApiKey = Security(get_api_key), db: AsyncSession = Depends(db_getter)):
    """Return every document (and its ingestion status) linked to a task."""
    # Authentication guard.
    if apikey is None:
        raise CustomException("Invalid API key", code=IngestStatus.ERROR.value)

    matching_docs = await cruds.DocumentDal(db).get_datas(v_where=[DocumentDB.task_id == task_id])
    return SuccessResponse(matching_docs)

@app.post("/aingest_library",
         summary="Asynchronously ingest every document from a SharePoint library.",
         description="Enumerates all documents in the SharePoint library configured in the request and starts one background ingestion task per document. Returns the task IDs.")
async def aingest_library(message: IngestRequest,
                                       background_tasks: BackgroundTasks,
                                       apikey: AccountApiKey = Security(get_api_key)):
    """Fan out one background ingestion task per document in a SharePoint library.

    SharePoint credentials (client_id / client_secret / tenant_id) are read
    from ``message.config`` — they must never be hard-coded here.

    Args:
        message: Template request; its kbase/vectoring options are copied to
            every per-document request.
        background_tasks: FastAPI background task queue.
        apikey: The API key for authentication.

    Returns:
        SuccessResponse with the document count, knowledge-base reference and
        the list of task IDs for polling /task_status.
    """
    if apikey is None:
        raise CustomException("Invalid API key", code=IngestStatus.ERROR.value)

    logging.info("sharepoint_library_documents start")
    documents = SharepointDocumentProcessor(client_id=message.config.get("client_id"),
                                            client_secret=message.config.get("client_secret"),
                                            tenant_id=message.config.get("tenant_id")).get_documents()
    logging.info("library message: %s", message)

    task_ids = []  # task_id of every scheduled ingestion
    total_documents = len(documents)
    for document in documents:
        # Build a distinct per-document request instead of rebinding `message`:
        # rebinding made every later iteration read kbase_id/vector_only/... from
        # the PREVIOUS iteration's request rather than the original one.
        doc_request = IngestRequest(
            url=document.source_url,
            download_url=document.download_url,
            kbase_id=message.kbase_id,
            kbase_name=message.kbase_name,
            file_type=document.file_type,
            source_type=document.source_type,
            title=document.title,
            content=document.content,
            vector_only=message.vector_only,
            chunk_size=message.chunk_size,
            parser=message.parser if message.parser is not None else "pdf",  # default to the PDF parser
            config={}  # add any additional metadata if needed
        )
        task_id = str(uuid.uuid4())
        task_ids.append(task_id)
        # Each background task gets its own DB session, so concurrent tasks
        # never share one AsyncSession.
        async for new_db in db_getter():
            new_task = TaskDB(id=task_id, name=document.content, status=TaskStatus.PENDING, create_user_id=apikey.user_id)
            new_db.add(new_task)
            await new_db.commit()
            background_tasks.add_task(process_ingestion, doc_request, apikey, new_db, task_id)
    return SuccessResponse(data={
        "total_documents": total_documents,
        "kbase_id": message.kbase_id,
        "kbase_name": message.kbase_name,
        "task_ids": task_ids
    })

# async def setup_ingestion_sharepoint(message: IngestRequest, apikey: AccountApiKey, db: AsyncSession):
#     """Setup the knowledge base, vector store, and dispatcher for document ingestion."""
#     collection_name = message.kbase_name
#     # Get embeddings model and vector store
#     vdbsrv = VDBService()
#     embeddings = vdbsrv.get_embeddings()
#     milvus_store: Zilliz = vdbsrv.get_vector_store(collection_name, embeddings)
#     # Attempt to get a custom dispatcher for the user
#     dispatcher = dispatcher_registry.get(apikey.user_id, default_dispatcher)
#     if not dispatcher:
#         raise CustomException("Document processor dispatcher not found", code=IngestStatus.ERROR.value)
#
#     # Bind the vector store and embeddings to the dispatcher
#     dispatcher.bind_vstore(milvus_store, embeddings)
#
#     return dispatcher