import asyncio
import logging
from typing import List, Optional

from langchain.text_splitter import TextSplitter
from langchain_community.document_loaders import TextLoader
from langchain_community.document_loaders.helpers import detect_file_encodings
from langchain_core.documents import Document

logger = logging.getLogger(__name__)


class AsyncTextLoader(TextLoader):
    """Asynchronously load a text file into a single ``Document``.

    The builtin ``open`` is not an async context manager, so the blocking
    read is delegated to the event loop's default executor instead of the
    (invalid) ``async with open(...)`` construct.

    Args:
        file_path: Path to the file to load.

        encoding: File encoding to use. If `None`, the file will be loaded
        with the default system encoding.

        autodetect_encoding: Whether to try to autodetect the file encoding
            if the specified encoding fails.
    """

    def __init__(
        self,
        file_path: str,
        encoding: Optional[str] = None,
        autodetect_encoding: bool = False,
    ):
        """Initialize with file path."""
        self.file_path = file_path
        self.encoding = encoding
        self.autodetect_encoding = autodetect_encoding

    @staticmethod
    def _read_text(file_path: str, encoding: Optional[str]) -> str:
        """Blocking helper: read *file_path* as text.

        Meant to be run in an executor so ``async_load`` never blocks the
        event loop.
        """
        with open(file_path, encoding=encoding) as f:
            return f.read()

    async def async_load(self) -> List[Document]:
        """Load from file path.

        Returns:
            A one-element list containing the file content as a Document
            with ``{"source": file_path}`` metadata.

        Raises:
            RuntimeError: If the file cannot be read, or — when
                ``autodetect_encoding`` is enabled — none of the detected
                encodings can decode it.
        """
        loop = asyncio.get_running_loop()
        try:
            # Offload the blocking file read to the default executor.
            text = await loop.run_in_executor(
                None, self._read_text, self.file_path, self.encoding
            )
        except UnicodeDecodeError as e:
            if not self.autodetect_encoding:
                raise RuntimeError(f"Error loading {self.file_path}") from e
            for detected in detect_file_encodings(self.file_path):
                logger.debug(f"Trying encoding: {detected.encoding}")
                try:
                    text = await loop.run_in_executor(
                        None, self._read_text, self.file_path, detected.encoding
                    )
                    break
                except UnicodeDecodeError:
                    continue
            else:
                # Every detected encoding failed to decode the file.
                # The original silently returned an empty document here;
                # raise instead, matching the non-autodetect path.
                raise RuntimeError(f"Error loading {self.file_path}") from e
        except Exception as e:
            raise RuntimeError(f"Error loading {self.file_path}") from e

        metadata = {"source": self.file_path}
        return [Document(page_content=text, metadata=metadata)]

    async def load_and_split(
        self, text_splitter: Optional[TextSplitter] = None
    ) -> List[Document]:
        """Load Documents and split into chunks. Chunks are returned as Documents.

        Args:
            text_splitter: TextSplitter instance to use for splitting documents.
              Defaults to RecursiveCharacterTextSplitter.

        Returns:
            List of Documents.
        """
        from langchain.text_splitter import RecursiveCharacterTextSplitter

        if text_splitter is None:
            _text_splitter: TextSplitter = RecursiveCharacterTextSplitter()
        else:
            _text_splitter = text_splitter
        docs = await self.async_load()
        return _text_splitter.split_documents(docs)
