# -*- coding: utf-8 -*-
# @Author: Cursor
# @Date: 2025-01-04
# @Last Modified by: Tim Liu
# @Last Modified time: 2024-01-04

# @Description: document processor for html ingestion

from datetime import datetime, timezone
import logging

from langchain_milvus import Milvus

from core.exception import CustomException
from config.settings import *
from typing import Optional

from crewplus.apps.rag.schemas.ingest_status import IngestStatus

from crewplus.apps.rag.processors.base_processor import DocumentProcessor
from crewplus.apps.rag.schemas import Document  
from crewplus.apps.rag.schemas.ingest_request import IngestRequest

from crewplus.services.vdb_service import VDBService

#from langchain import hub
from langchain_community.document_loaders import WebBaseLoader

from langchain_core.vectorstores.base import VectorStore
from langchain_core.embeddings import Embeddings

from langchain_text_splitters import RecursiveCharacterTextSplitter

from crewplus.apps.rag.utils.document_util import unify_documents_meta, get_document_content_from_file

class HtmlDocumentProcessor(DocumentProcessor):
    """Processor that ingests an HTML page (by URL) into a Milvus collection.

    Loads the page with ``WebBaseLoader``, splits it into chunks, indexes the
    chunks in the vector store, and returns a ``Document`` record describing
    the ingestion (content, title/summary, timings, status).
    """

    async def process(self, message: IngestRequest, collection_name: str, vector_store: Optional[VectorStore] = None, embeddings: Optional[Embeddings] = None) -> Document:
        """Ingest the HTML page referenced by ``message.url``.

        Args:
            message: Ingestion request carrying ``url``, ``kbase_id``,
                ``type`` and ``chunk_size``.
            collection_name: Target Milvus collection name.
            vector_store: Unused here; the store is resolved via ``VDBService``.
            embeddings: Unused as an input; rebound from ``VDBService`` below.

        Returns:
            A ``Document`` describing the ingestion result.

        Raises:
            CustomException: Wraps any failure during ingestion; the returned
                document's status is set to NOT_INGESTED before raising.
        """
        document = Document()  # Record describing this ingestion run

        # Relate this vectorized Document to the knowledge base.
        document.source_url = message.url
        document.kbase_id = message.kbase_id
        document.source_type = message.type

        # Record the start time as timezone-aware UTC (the original naive
        # datetime.now() returned local time despite the "UTC" intent).
        document.ingestion_start_time = str(datetime.now(timezone.utc))

        try:
            vdbsrv = VDBService()
            embeddings = vdbsrv.get_embeddings()
            milvus_store: Milvus = vdbsrv.get_vector_store(collection_name, embeddings)

            # Drop stale index entries for this URL before re-ingesting.
            vdbsrv.delete_old_indexes(message.url, milvus_store)

            # Load: fetch the page content from the URL.
            loader = WebBaseLoader(
                web_paths=(message.url,),
            )
            docs = loader.load()

            document.content = get_document_content_from_file(docs)

            # Split: break the document into overlapping chunks for embedding.
            text_splitter = RecursiveCharacterTextSplitter(
                chunk_size=message.chunk_size,
                chunk_overlap=DEFAULT_RAG_TEXT_CHUNK_OVERLAP,
            )
            splits = text_splitter.split_documents(docs)

            # Use the first chunk's metadata for the document's title/summary.
            if splits:
                document.title = splits[0].metadata.get('title')
                document.summary = splits[0].metadata.get('description')

            # Lazy %-style args avoid building the message unless emitted.
            logging.info("ingesting html -- file loaded and split done -- %s", message.url)

            udocs = unify_documents_meta(splits, source_type=message.type)

            # from_documents is a classmethod: invoke it on the class to make
            # explicit that it builds a fresh store from connection_args and
            # does NOT reuse milvus_store's connection (same runtime behavior
            # as the original instance-call, just no longer misleading).
            # TODO: use async afrom_documents for production (async shall be
            # used with apscheduler).
            Milvus.from_documents(
                udocs,
                embedding=embeddings,
                collection_name=collection_name,
                connection_args=MILVUS_CONNECTION_ARGS
            )

            # Record the end time as timezone-aware UTC.
            document.ingestion_end_time = str(datetime.now(timezone.utc))

            document.ingestion_status = IngestStatus.INGESTED
        except Exception as e:
            document.ingestion_status = IngestStatus.NOT_INGESTED

            # Chain the cause so the original traceback is preserved.
            raise CustomException(str(e), code=IngestStatus.NOT_INGESTED) from e

        return document
