# -*- coding: utf-8 -*-
# @Author: Cursor
# @Date: 2025-01-04
# @Last Modified by: Tim Liu
# @Last Modified time: 2025-01-04

# @Description: document processor for image ingestion

import logging
from datetime import datetime, timezone
from typing import Optional

from langchain_core.documents import Document as LangchainDocument
from langchain_core.embeddings import Embeddings
from langchain_core.vectorstores.base import VectorStore
from langchain_milvus import Zilliz

from config.settings import *
from core.exception import CustomException
from crewplus.apps.rag.loaders.image_document_loader import ImageDocumentLoader
from crewplus.apps.rag.processors.base_processor import DocumentProcessor
from crewplus.apps.rag.schemas import Document
from crewplus.apps.rag.schemas.ingest_request import IngestRequest
from crewplus.apps.rag.schemas.ingest_status import IngestStatus
from crewplus.apps.rag.utils.document_util import unify_documents_meta
from crewplus.services.vdb_service import VDBService

class ImageDocumentProcessor(DocumentProcessor):
    """Ingest an image source: load/parse its content and index it into a
    Milvus/Zilliz vector-store collection, returning a status Document."""

    async def process(
        self,
        message: IngestRequest,
        collection_name: str,
        vector_store: Optional[VectorStore] = None,
        embeddings: Optional[Embeddings] = None,
    ) -> Document:
        """Process one image ingestion request.

        Args:
            message: Ingestion request carrying url, kbase_id, file_type,
                source_type, title, parser and (optionally) pre-extracted content.
            collection_name: Target vector-store collection name.
            vector_store: Accepted for interface compatibility; the store
                actually used is obtained from VDBService.
            embeddings: Accepted for interface compatibility; rebound from
                VDBService before use.

        Returns:
            A Document recording source metadata, content, start/end
            timestamps (UTC) and the final IngestStatus.

        Raises:
            CustomException: wraps any failure during load or indexing,
                with code ``IngestStatus.NOT_INGESTED``.
        """
        logging.info("images processor")

        # Build the result Document and relate it to the knowledge base.
        document = Document()
        document.source_url = message.url
        document.kbase_id = message.kbase_id
        document.file_type = message.file_type
        document.source_type = message.source_type
        document.title = message.title
        document.content = message.content

        # Start timestamp: timezone-aware UTC (the original used naive
        # datetime.now() despite the stated UTC contract).
        document.ingestion_start_time = str(datetime.now(timezone.utc))

        try:
            vdbsrv = VDBService()
            embeddings = vdbsrv.get_embeddings()
            milvus_store: Zilliz = vdbsrv.get_vector_store(collection_name, embeddings)

            # Drop any stale vectors previously indexed for this source URL.
            vdbsrv.delete_old_indexes(message.url, milvus_store)

            # Load: parse the image only when no content was supplied upstream.
            if not document.content:
                loader = ImageDocumentLoader(file_path=message.url, parser=message.parser)
                docs = loader.load()
                # Fill content from the parsed image.
                document.content = docs[0].page_content
            else:
                # Do NOT parse the image file if content is already provided.
                docs = [
                    LangchainDocument(
                        page_content=message.content,
                        metadata={"file_type": "image", "source": message.url},
                    )
                ]

            # Split: do NOT split image content, keep integrity.
            logging.info("ingesting image -- %s", message.url)

            udocs = unify_documents_meta(docs, file_type=message.file_type, title=message.title)
            # TODO: use async afrom_documents for production (async), async shall be used with apscheduler
            milvus_store.from_documents(
                udocs,
                embedding=embeddings,
                collection_name=collection_name,
                connection_args=MILVUS_CONNECTION_ARGS,
            )

            # End timestamp (UTC) and success status.
            document.ingestion_end_time = str(datetime.now(timezone.utc))
            document.ingestion_status = IngestStatus.INGESTED
        except Exception as e:
            # Log the full traceback, mark the document, and re-raise so the
            # caller can react to the failed ingestion.
            logging.exception("image ingestion failed for %s", message.url)
            document.ingestion_status = IngestStatus.NOT_INGESTED
            raise CustomException(str(e), code=IngestStatus.NOT_INGESTED)

        return document