# -*- coding: utf-8 -*-
# @Author: Tim Liu
# @Date: 2024-07-11
# @Last Modified by: Tim Liu
# @Last Modified time: 2024-07-11

# @Description: Image Document Loader for RAG

from typing import Iterator, Optional

from langchain_core.document_loaders import BaseLoader
from langchain_core.documents import Document
from langchain_core.messages import HumanMessage

from crewplus.services.chat_openai_service import ChatOpenAIService
from crewplus.services.docintel_service import DocintelService

from config.settings import *

class ImageDocumentLoader(BaseLoader):
    """Load a single image file into a LangChain ``Document``.

    The image is turned into text either by Azure Document Intelligence
    (``parser='docintel'``) or, by default, by asking a GPT-4o vision
    deployment to describe it.
    """

    def __init__(self, file_path: str, parser: Optional[str] = None) -> None:
        """Initialize the loader.

        Args:
            file_path: Path (or URL) of the image file to load. It is passed
                verbatim both to the parser backend and into the resulting
                document's ``source`` metadata.
            parser: Parsing backend. ``'docintel'`` routes the image through
                ``DocintelService``; any other value (including the default
                ``None``) falls back to a GPT-4o vision description.
        """
        self.file_path = file_path
        self.parser = parser

    def lazy_load(self) -> Iterator[Document]:
        """Lazily yield exactly one ``Document`` describing the image.

        Yields:
            Document: ``page_content`` holds the image description/analysis
            text; ``metadata`` records ``file_type='image'`` and the source
            path.
        """
        if self.parser == 'docintel':
            # Azure Document Intelligence analysis of the image file.
            # NOTE(review): assumes analyze_document returns an object with
            # a `.content` attribute, mirroring the LLM message — confirm.
            docintel_srv = DocintelService()
            result = docintel_srv.analyze_document(self.file_path)
        else:
            # Default path: a deterministic (temperature=0.0) GPT-4o vision
            # call that describes the image.
            llm = ChatOpenAIService(callbacks=[]).get_azure_llm_deployment(
                deployment_id='GPT4o', temperature=0.0
            )

            image_prompt = "Please describe this image within 300 tokens"

            # Multimodal message: the text prompt plus the image reference.
            human_message = HumanMessage(content=[
                {"type": "text", "text": image_prompt},
                {"type": "image_url", "image_url": {"url": self.file_path}},
            ])

            result = llm.invoke([human_message])

        yield Document(
            page_content=result.content,
            metadata={"file_type": "image", "source": self.file_path},
        )